code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import data
from experiment import Experiment
import faulthandler
faulthandler.enable()
"""
nohup python run.py --lr 1e-3 --num_workers 4 --batch_size 4 --epochs 60 --cuda --ngpu 1 --refs 2 --patch_size 35 --patch_stride 30 --test_patch 75 --pretrained encoder.pth --save_dir out --train_dir data/train --val_dir data/val --test_dir data/val &> out.log &
"""
# Parameters required for running the model.
parser = argparse.ArgumentParser(description='Acquire some parameters for fusion restore')
parser.add_argument('--lr', type=float, default=1e-3,
                    help='the initial learning rate')
parser.add_argument('--batch_size', type=int, default=32,
                    help='input batch size for training')
parser.add_argument('--epochs', type=int, default=30,
                    help='number of epochs to train')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--num_workers', type=int, default=0, help='number of threads to load data')
parser.add_argument('--save_dir', type=Path, default=Path('.'),
                    help='the output directory')
parser.add_argument('--pretrained', type=Path, help='the path of the pretained encoder')
# Parameters used when preprocessing the input data.
parser.add_argument('--refs', type=int, help='the reference data counts for fusion')
parser.add_argument('--train_dir', type=Path, default=(data.data_dir / 'train'),
                    help='the training data directory')
parser.add_argument('--val_dir', type=Path, default=(data.data_dir / 'val'),
                    help='the validation data directory')
parser.add_argument('--test_dir', type=Path, default=(data.data_dir / 'val'),
                    help='the test data directory')
parser.add_argument('--image_size', type=int, nargs='+', default=[300, 300],
                    help='the size of the coarse image (width, height)')
# NOTE(review): for the three options below, nargs='+' yields a list when
# given on the command line but the default is a bare int -- confirm
# downstream code handles both forms.
parser.add_argument('--patch_size', type=int, nargs='+', default=10,
                    help='the coarse image patch size for training restore')
parser.add_argument('--patch_stride', type=int, nargs='+', default=5,
                    help='the coarse patch stride for image division')
parser.add_argument('--test_patch', type=int, nargs='+', default=50,
                    help='the coarse image patch size for fusion test')
opt = parser.parse_args()
# Fix the RNG seed for reproducibility across runs.
torch.manual_seed(2019)
# Force-disable CUDA when no GPU is available, regardless of the flag.
if not torch.cuda.is_available():
    opt.cuda = False
if opt.cuda:
    torch.cuda.manual_seed_all(2019)
    cudnn.benchmark = True
    # NOTE(review): benchmark=True lets cuDNN auto-tune algorithms, which is
    # generally non-deterministic; combined with deterministic=True the
    # intent is ambiguous -- confirm whether strict determinism is required.
    cudnn.deterministic = True
if __name__ == '__main__':
    experiment = Experiment(opt)
    # Train only when a positive epoch count was requested; epochs=0 allows
    # running the test phase (below, outside this block) on its own.
    if opt.epochs > 0:
        experiment.train(opt.train_dir, opt.val_dir,
                         opt.patch_size, opt.patch_stride, opt.batch_size,
                         opt.refs, num_workers=opt.num_workers, epochs=opt.epochs)
experiment.test(opt.test_dir, opt.test_patch, opt.refs,
num_workers=opt.num_workers) | run.py | import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import data
from experiment import Experiment
import faulthandler
faulthandler.enable()
"""
nohup python run.py --lr 1e-3 --num_workers 4 --batch_size 4 --epochs 60 --cuda --ngpu 1 --refs 2 --patch_size 35 --patch_stride 30 --test_patch 75 --pretrained encoder.pth --save_dir out --train_dir data/train --val_dir data/val --test_dir data/val &> out.log &
"""
# 获取模型运行时必须的一些参数
parser = argparse.ArgumentParser(description='Acquire some parameters for fusion restore')
parser.add_argument('--lr', type=float, default=1e-3,
help='the initial learning rate')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training')
parser.add_argument('--epochs', type=int, default=30,
help='number of epochs to train')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--num_workers', type=int, default=0, help='number of threads to load data')
parser.add_argument('--save_dir', type=Path, default=Path('.'),
help='the output directory')
parser.add_argument('--pretrained', type=Path, help='the path of the pretained encoder')
# 获取对输入数据进行预处理时的一些参数
parser.add_argument('--refs', type=int, help='the reference data counts for fusion')
parser.add_argument('--train_dir', type=Path, default=(data.data_dir / 'train'),
help='the training data directory')
parser.add_argument('--val_dir', type=Path, default=(data.data_dir / 'val'),
help='the validation data directory')
parser.add_argument('--test_dir', type=Path, default=(data.data_dir / 'val'),
help='the test data directory')
parser.add_argument('--image_size', type=int, nargs='+', default=[300, 300],
help='the size of the coarse image (width, height)')
parser.add_argument('--patch_size', type=int, nargs='+', default=10,
help='the coarse image patch size for training restore')
parser.add_argument('--patch_stride', type=int, nargs='+', default=5,
help='the coarse patch stride for image division')
parser.add_argument('--test_patch', type=int, nargs='+', default=50,
help='the coarse image patch size for fusion test')
opt = parser.parse_args()
torch.manual_seed(2019)
if not torch.cuda.is_available():
opt.cuda = False
if opt.cuda:
torch.cuda.manual_seed_all(2019)
cudnn.benchmark = True
cudnn.deterministic = True
if __name__ == '__main__':
experiment = Experiment(opt)
if opt.epochs > 0:
experiment.train(opt.train_dir, opt.val_dir,
opt.patch_size, opt.patch_stride, opt.batch_size,
opt.refs, num_workers=opt.num_workers, epochs=opt.epochs)
experiment.test(opt.test_dir, opt.test_patch, opt.refs,
num_workers=opt.num_workers) | 0.539226 | 0.123075 |
import json
from flask import request
from config.constants import Constants
from config.appconfig import app
from config.message_cn import MessageConstants_CN
from services.department_service import DepartmentService
@app.route('/query_department/<shopId>/<parentId>',methods=['GET'])
def query_department(shopId,parentId):
    """Query department tree nodes for a shop.

    The optional ``node`` query argument selects the parent node to expand;
    when it is absent or not numeric, the root level (parentId=-1) is
    queried instead. Returns whatever DepartmentService.query returns, or a
    JSON error string when the path parameters are not numeric.
    """
    node = request.args.get('node')
    service = DepartmentService()
    if not (shopId.isdigit() and parentId.isdigit()):
        return json.dumps({
            'success': False,
            'msg': MessageConstants_CN.MSG_INVALID_ARGS,
        })
    parent = node if (node and node.isdigit()) else -1
    return service.query(shopId=shopId, parentId=parent)
@app.route('/add_department',methods=['POST'])
def add_department():
    """Create a department named ``name`` under parent ``pid`` for ``shopId``.

    All three values come from the POSTed form; a missing key is caught by
    the broad handler below and reported as an internal error. Returns a
    JSON string describing the outcome.
    """
    print(request.form)
    service = DepartmentService()
    try:
        name = request.form['name']
        shopId = request.form['shopId']
        parentId = request.form['pid']
        if not (name and shopId and parentId):
            return json.dumps({
                'success': False,
                'msg': MessageConstants_CN.MSG_INVALID_ARGS,
            })
        _, payload = service.add(shopId=shopId, parentId=parentId, name=name)
        print(payload)
        return json.dumps(payload)
    except Exception as ex:
        print(ex)
        return json.dumps({
            'success': False,
            'msg': MessageConstants_CN.MSG_INTER_ERROR,
        })
@app.route('/remove_department',methods=['POST'])
def remove_department():
    """Remove the department node ``id`` belonging to ``shopId``.

    Both values come from the POSTed form. Returns a JSON string with
    ``success`` and, on failure, a ``msg``.
    """
    print(request.form)
    ds = DepartmentService()
    try:
        shopId = request.form['shopId']
        nodeId = request.form['id']
        if not shopId or not nodeId:
            res = {}
            res['success'] = False
            res['msg'] = MessageConstants_CN.MSG_INVALID_ARGS
            return json.dumps(res)
        _,result = ds.remove(shopId=shopId,nodeId=nodeId)
        return json.dumps(result)
    except Exception as ex:
        # Bug fix: the original handler set success=True here, reporting an
        # internal error as a successful removal. Log and report failure,
        # consistent with the other handlers in this module.
        print(ex)
        result = {}
        result['success'] = False
        result['msg'] = MessageConstants_CN.MSG_INTER_ERROR
        return json.dumps(result)
@app.route('/update_department',methods=['POST'])
def update_department():
print(request.form)
ds = DepartmentService()
try:
nodeId = request.form['id']
shopId = request.form['shopId']
name = request.form['name']
newdata = {}
newdata['id'] = nodeId
newdata['shopId'] = shopId
newdata['name'] = name
_,result = ds.update(newdata)
print(result)
return json.dumps(result)
except Exception as ex:
print(ex)
res = {}
res['success'] = False
res['msg'] = MessageConstants_CN.MSG_INTER_ERROR
return json.dumps(res) | controller/department_controller.py |
import json
from flask import request
from config.constants import Constants
from config.appconfig import app
from config.message_cn import MessageConstants_CN
from services.department_service import DepartmentService
@app.route('/query_department/<shopId>/<parentId>',methods=['GET'])
def query_department(shopId,parentId):
node = request.args.get('node')
ds = DepartmentService()
if not shopId.isdigit() or not parentId.isdigit():
res = {}
res['success'] = False
res['msg'] = MessageConstants_CN.MSG_INVALID_ARGS
return json.dumps(res)
if not node or not node.isdigit():
res = ds.query(shopId=shopId,parentId=-1)
return res
else:
res = ds.query(shopId=shopId,parentId=node)
return res
@app.route('/add_department',methods=['POST'])
def add_department():
print(request.form)
ds = DepartmentService()
try:
name = request.form['name']
shopId = request.form['shopId']
parentId = request.form['pid']
if not name or not shopId or not parentId:
result = {}
result['success'] = False
result['msg'] = MessageConstants_CN.MSG_INVALID_ARGS
return json.dumps(result)
_,res = ds.add(shopId=shopId,parentId=parentId,name=name)
print(res)
return json.dumps(res)
except Exception as ex:
print(ex)
res = {}
res['success'] = False
res['msg'] = MessageConstants_CN.MSG_INTER_ERROR
return json.dumps(res)
@app.route('/remove_department',methods=['POST'])
def remove_department():
    """Remove the department node ``id`` belonging to ``shopId``.

    Both values come from the POSTed form. Returns a JSON string with
    ``success`` and, on failure, a ``msg``.
    """
    print(request.form)
    ds = DepartmentService()
    try:
        shopId = request.form['shopId']
        nodeId = request.form['id']
        if not shopId or not nodeId:
            res = {}
            res['success'] = False
            res['msg'] = MessageConstants_CN.MSG_INVALID_ARGS
            return json.dumps(res)
        _,result = ds.remove(shopId=shopId,nodeId=nodeId)
        return json.dumps(result)
    except Exception as ex:
        # Bug fix: the original handler set success=True here, reporting an
        # internal error as a successful removal. Log and report failure,
        # consistent with the other handlers in this module.
        print(ex)
        result = {}
        result['success'] = False
        result['msg'] = MessageConstants_CN.MSG_INTER_ERROR
        return json.dumps(result)
@app.route('/update_department',methods=['POST'])
def update_department():
print(request.form)
ds = DepartmentService()
try:
nodeId = request.form['id']
shopId = request.form['shopId']
name = request.form['name']
newdata = {}
newdata['id'] = nodeId
newdata['shopId'] = shopId
newdata['name'] = name
_,result = ds.update(newdata)
print(result)
return json.dumps(result)
except Exception as ex:
print(ex)
res = {}
res['success'] = False
res['msg'] = MessageConstants_CN.MSG_INTER_ERROR
return json.dumps(res) | 0.152347 | 0.04197 |
import copy
import functools
import re
import cerberus
import cerberus.errors
from molecule import interpolation
from molecule import util
def coerce_env(env, keep_string, v):
    """Interpolate environment-variable references in *v*.

    Builds a molecule Interpolator over *env* using TemplateWithDefaults and
    returns the interpolated value. *keep_string* is forwarded to
    ``Interpolator.interpolate`` -- presumably a marker for substrings to
    leave un-interpolated; see molecule.interpolation for its semantics.
    Used as a cerberus ``coerce`` callable in pre_validate_base_schema.
    """
    i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
    return i.interpolate(v, keep_string)
def pre_validate_base_schema(env, keep_string):
return {
'dependency': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'galaxy',
'gilt',
'shell',
],
},
}
},
'driver': {
'type': 'dict',
'schema': {
'name': {
'type':
'string',
'molecule_env_var':
True,
'allowed': [
'azure',
'delegated',
'docker',
'ec2',
'gce',
'lxc',
'lxd',
'openstack',
'vagrant',
],
# NOTE(retr0h): Some users use an environment variable to
# change the driver name. May add this coercion to rest of
# config using allowed validation.
'coerce': (str,
functools.partial(coerce_env, env, keep_string))
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'yamllint',
],
},
}
},
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'registry': {
'type': 'dict',
'schema': {
'credentials': {
'type': 'dict',
'schema': {
'password': {
'type': 'string',
'regex': '^[{$]+[a-z0-9A-Z]+[}]*$',
},
}
},
}
},
}
}
},
'provisioner': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'ansible',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'ansible-lint',
],
},
}
},
}
},
'scenario': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
},
}
},
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'testinfra',
'inspec',
'goss',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'flake8',
'rubocop',
'yamllint',
],
},
}
},
}
},
}
base_schema = {
'dependency': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
'command': {
'type': 'string',
'nullable': True,
},
}
},
'driver': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'provider': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'nullable': True,
},
}
},
'options': {
'type': 'dict',
'schema': {
'managed': {
'type': 'boolean',
},
}
},
'ssh_connection_options': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'safe_files': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
'platforms': {},
'provisioner': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'config_options': {
'type': 'dict',
'schema': {
'defaults': {
'type': 'dict',
'schema': {
'roles_path': {
'type': 'string',
'disallowed': True,
},
'library': {
'type': 'string',
'disallowed': True,
},
'filter_plugins': {
'type': 'string',
'disallowed': True,
},
}
},
'privilege_escalation': {
'type': 'dict',
'disallowed': True,
},
}
},
'connection_options': {
'type': 'dict',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
'valueschema': {
'nullable': False,
},
'schema': {
'ANSIBLE_BECOME': {
'type': 'string',
'disallowed': True,
},
'ANSIBLE_BECOME_METHOD': {
'type': 'string',
'disallowed': True,
},
'ANSIBLE_BECOME_USER': {
'type': 'string',
'disallowed': True,
},
}
},
'inventory': {
'type': 'dict',
'schema': {
'host_vars': {
'type': 'dict',
},
'group_vars': {
'type': 'dict',
},
'links': {
'type': 'dict',
},
}
},
'children': {
'type': 'dict',
},
'playbooks': {
'type': 'dict',
'schema': {
'create': {
'type': 'string',
},
'converge': {
'type': 'string',
},
'destroy': {
'type': 'string',
},
'prepare': {
'type': 'string',
},
'side_effect': {
'type': 'string',
},
'verify': {
'type': 'string',
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
}
},
'scenario': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'check_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'converge_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'create_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'destroy_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'test_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
},
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
'directory': {
'type': 'string',
},
'additional_files_or_dirs': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
}
},
}
driver_vagrant_provider_section_schema = {
'driver': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'provider': {
'type': 'dict',
'schema': {
'name': {
'type':
'string',
'nullable':
False,
'allowed': [
'virtualbox',
'vmware_fusion',
'vmware_workstation',
'vmware_desktop',
'parallels',
'libvirt',
],
},
}
},
}
},
}
platforms_base_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'required': True,
},
'groups': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'children': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
}
},
}
platforms_vagrant_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'interfaces': {
'type': 'list',
'schema': {
'type': 'dict',
}
},
'instance_raw_config_args': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'config_options': {
'type': 'dict',
},
'box': {
'type': 'string',
},
'box_version': {
'type': 'string',
},
'box_url': {
'type': 'string',
},
'memory': {
'type': 'integer',
},
'cpus': {
'type': 'integer',
},
'provider_options': {
'type': 'dict',
},
'provider_raw_config_args': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'provision': {
'type': 'boolean',
},
}
}
},
}
platforms_docker_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'hostname': {
'type': 'string',
},
'image': {
'type': 'string',
},
'registry': {
'type': 'dict',
'schema': {
'url': {
'type': 'string',
},
'credentials': {
'type': 'dict',
'schema': {
'username': {
'type': 'string',
},
'password': {
'type': 'string',
},
'email': {
'type': 'string',
},
}
},
}
},
'command': {
'type': 'string',
},
'privileged': {
'type': 'boolean',
},
'security_opts': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'volumes': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'tmpfs': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'capabilities': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'exposed_ports': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'published_ports': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'ulimits': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'dns_servers': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'networks': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
}
}
},
}
}
},
}
dependency_command_nullable_schema = {
'dependency': {
'type': 'dict',
'schema': {
'command': {
'type': 'string',
'nullable': False,
},
}
},
}
verifier_options_readonly_schema = {
'verifier': {
'type': 'dict',
'schema': {
'options': {
'keyschema': {
'readonly': True,
},
},
}
},
}
verifier_goss_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'goss',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'yamllint',
],
},
}
},
}
},
}
verifier_inspec_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'inspec',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'rubocop',
],
},
}
},
}
},
}
verifier_testinfra_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'testinfra',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'flake8',
],
},
}
},
}
},
}
class Validator(cerberus.Validator):
    """Cerberus validator extended with molecule-specific custom rules."""

    def __init__(self, *args, **kwargs):
        super(Validator, self).__init__(*args, **kwargs)

    def _validate_disallowed(self, disallowed, field, value):
        """ Readonly but with a custom error.
        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        # Any user-provided value for a field marked 'disallowed' is an error.
        if disallowed:
            msg = 'disallowed user provided config option'
            self._error(field, msg)

    def _validate_molecule_env_var(self, molecule_env_var, field, value):
        """ Readonly but with a custom error.
        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        # TODO(retr0h): This needs to be better handled.
        # Rejects values that consist entirely of a $MOLECULE_* style
        # reference, e.g. '$MOLECULE_FOO' or '${MOLECULE_FOO}'.
        pattern = r'^[{$]+MOLECULE[_a-z0-9A-Z]+[}]*$'
        if molecule_env_var:
            if re.match(pattern, value):
                msg = ('cannot reference $MOLECULE special variables '
                       'in this section')
                self._error(field, msg)
def pre_validate(stream, env, keep_string):
    """Validate the raw config *stream* against the pre-validation schema.

    *stream* is loaded with util.safe_load; *env* and *keep_string* are
    forwarded to pre_validate_base_schema for env-var coercion. Unknown keys
    are allowed. Returns the cerberus error mapping (empty when valid).
    """
    data = util.safe_load(stream)
    v = Validator(allow_unknown=True)
    v.validate(data, pre_validate_base_schema(env, keep_string))
    return v.errors
def validate(c):
schema = copy.deepcopy(base_schema)
# Dependency
if c['dependency']['name'] == 'shell':
util.merge_dicts(schema, dependency_command_nullable_schema)
# Driver
util.merge_dicts(schema, platforms_base_schema)
if c['driver']['name'] == 'docker':
util.merge_dicts(schema, platforms_docker_schema)
elif c['driver']['name'] == 'vagrant':
util.merge_dicts(schema, driver_vagrant_provider_section_schema)
util.merge_dicts(schema, platforms_vagrant_schema)
else:
util.merge_dicts(schema, platforms_base_schema)
# Verifier
if c['verifier']['name'] == 'goss':
util.merge_dicts(schema, verifier_options_readonly_schema)
util.merge_dicts(schema, verifier_goss_mutually_exclusive_schema)
elif c['verifier']['name'] == 'inspec':
util.merge_dicts(schema, verifier_options_readonly_schema)
util.merge_dicts(schema, verifier_inspec_mutually_exclusive_schema)
elif c['verifier']['name'] == 'testinfra':
util.merge_dicts(schema, verifier_testinfra_mutually_exclusive_schema)
v = Validator(allow_unknown=True)
v.validate(c, schema)
return v.errors | molecule/model/schema_v2.py |
import copy
import functools
import re
import cerberus
import cerberus.errors
from molecule import interpolation
from molecule import util
def coerce_env(env, keep_string, v):
i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)
return i.interpolate(v, keep_string)
def pre_validate_base_schema(env, keep_string):
return {
'dependency': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'galaxy',
'gilt',
'shell',
],
},
}
},
'driver': {
'type': 'dict',
'schema': {
'name': {
'type':
'string',
'molecule_env_var':
True,
'allowed': [
'azure',
'delegated',
'docker',
'ec2',
'gce',
'lxc',
'lxd',
'openstack',
'vagrant',
],
# NOTE(retr0h): Some users use an environment variable to
# change the driver name. May add this coercion to rest of
# config using allowed validation.
'coerce': (str,
functools.partial(coerce_env, env, keep_string))
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'yamllint',
],
},
}
},
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'registry': {
'type': 'dict',
'schema': {
'credentials': {
'type': 'dict',
'schema': {
'password': {
'type': 'string',
'regex': '^[{$]+[a-z0-9A-Z]+[}]*$',
},
}
},
}
},
}
}
},
'provisioner': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'ansible',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'ansible-lint',
],
},
}
},
}
},
'scenario': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
},
}
},
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'testinfra',
'inspec',
'goss',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'molecule_env_var': True,
'allowed': [
'flake8',
'rubocop',
'yamllint',
],
},
}
},
}
},
}
base_schema = {
'dependency': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
'command': {
'type': 'string',
'nullable': True,
},
}
},
'driver': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'provider': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'nullable': True,
},
}
},
'options': {
'type': 'dict',
'schema': {
'managed': {
'type': 'boolean',
},
}
},
'ssh_connection_options': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'safe_files': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
'platforms': {},
'provisioner': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'config_options': {
'type': 'dict',
'schema': {
'defaults': {
'type': 'dict',
'schema': {
'roles_path': {
'type': 'string',
'disallowed': True,
},
'library': {
'type': 'string',
'disallowed': True,
},
'filter_plugins': {
'type': 'string',
'disallowed': True,
},
}
},
'privilege_escalation': {
'type': 'dict',
'disallowed': True,
},
}
},
'connection_options': {
'type': 'dict',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
'valueschema': {
'nullable': False,
},
'schema': {
'ANSIBLE_BECOME': {
'type': 'string',
'disallowed': True,
},
'ANSIBLE_BECOME_METHOD': {
'type': 'string',
'disallowed': True,
},
'ANSIBLE_BECOME_USER': {
'type': 'string',
'disallowed': True,
},
}
},
'inventory': {
'type': 'dict',
'schema': {
'host_vars': {
'type': 'dict',
},
'group_vars': {
'type': 'dict',
},
'links': {
'type': 'dict',
},
}
},
'children': {
'type': 'dict',
},
'playbooks': {
'type': 'dict',
'schema': {
'create': {
'type': 'string',
},
'converge': {
'type': 'string',
},
'destroy': {
'type': 'string',
},
'prepare': {
'type': 'string',
},
'side_effect': {
'type': 'string',
},
'verify': {
'type': 'string',
},
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
}
},
'scenario': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'check_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'converge_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'create_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'destroy_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'test_sequence': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
},
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
'directory': {
'type': 'string',
},
'additional_files_or_dirs': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'enabled': {
'type': 'boolean',
},
'options': {
'type': 'dict',
},
'env': {
'type': 'dict',
'keyschema': {
'type': 'string',
'regex': '^[A-Z0-9_-]+$',
},
},
}
},
}
},
}
driver_vagrant_provider_section_schema = {
'driver': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'provider': {
'type': 'dict',
'schema': {
'name': {
'type':
'string',
'nullable':
False,
'allowed': [
'virtualbox',
'vmware_fusion',
'vmware_workstation',
'vmware_desktop',
'parallels',
'libvirt',
],
},
}
},
}
},
}
platforms_base_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'required': True,
},
'groups': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'children': {
'type': 'list',
'schema': {
'type': 'string',
}
},
}
}
},
}
platforms_vagrant_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'interfaces': {
'type': 'list',
'schema': {
'type': 'dict',
}
},
'instance_raw_config_args': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'config_options': {
'type': 'dict',
},
'box': {
'type': 'string',
},
'box_version': {
'type': 'string',
},
'box_url': {
'type': 'string',
},
'memory': {
'type': 'integer',
},
'cpus': {
'type': 'integer',
},
'provider_options': {
'type': 'dict',
},
'provider_raw_config_args': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'provision': {
'type': 'boolean',
},
}
}
},
}
platforms_docker_schema = {
'platforms': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
'hostname': {
'type': 'string',
},
'image': {
'type': 'string',
},
'registry': {
'type': 'dict',
'schema': {
'url': {
'type': 'string',
},
'credentials': {
'type': 'dict',
'schema': {
'username': {
'type': 'string',
},
'password': {
'type': 'string',
},
'email': {
'type': 'string',
},
}
},
}
},
'command': {
'type': 'string',
},
'privileged': {
'type': 'boolean',
},
'security_opts': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'volumes': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'tmpfs': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'capabilities': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'exposed_ports': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'published_ports': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'ulimits': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'dns_servers': {
'type': 'list',
'schema': {
'type': 'string',
}
},
'networks': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
},
}
}
},
}
}
},
}
dependency_command_nullable_schema = {
'dependency': {
'type': 'dict',
'schema': {
'command': {
'type': 'string',
'nullable': False,
},
}
},
}
verifier_options_readonly_schema = {
'verifier': {
'type': 'dict',
'schema': {
'options': {
'keyschema': {
'readonly': True,
},
},
}
},
}
verifier_goss_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'goss',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'yamllint',
],
},
}
},
}
},
}
verifier_inspec_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'inspec',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'rubocop',
],
},
}
},
}
},
}
verifier_testinfra_mutually_exclusive_schema = {
'verifier': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'testinfra',
],
},
'lint': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'allowed': [
'flake8',
],
},
}
},
}
},
}
class Validator(cerberus.Validator):
def __init__(self, *args, **kwargs):
super(Validator, self).__init__(*args, **kwargs)
def _validate_disallowed(self, disallowed, field, value):
""" Readonly but with a custom error.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if disallowed:
msg = 'disallowed user provided config option'
self._error(field, msg)
def _validate_molecule_env_var(self, molecule_env_var, field, value):
""" Readonly but with a custom error.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
# TODO(retr0h): This needs to be better handled.
pattern = r'^[{$]+MOLECULE[_a-z0-9A-Z]+[}]*$'
if molecule_env_var:
if re.match(pattern, value):
msg = ('cannot reference $MOLECULE special variables '
'in this section')
self._error(field, msg)
def pre_validate(stream, env, keep_string):
data = util.safe_load(stream)
v = Validator(allow_unknown=True)
v.validate(data, pre_validate_base_schema(env, keep_string))
return v.errors
def validate(c):
schema = copy.deepcopy(base_schema)
# Dependency
if c['dependency']['name'] == 'shell':
util.merge_dicts(schema, dependency_command_nullable_schema)
# Driver
util.merge_dicts(schema, platforms_base_schema)
if c['driver']['name'] == 'docker':
util.merge_dicts(schema, platforms_docker_schema)
elif c['driver']['name'] == 'vagrant':
util.merge_dicts(schema, driver_vagrant_provider_section_schema)
util.merge_dicts(schema, platforms_vagrant_schema)
else:
util.merge_dicts(schema, platforms_base_schema)
# Verifier
if c['verifier']['name'] == 'goss':
util.merge_dicts(schema, verifier_options_readonly_schema)
util.merge_dicts(schema, verifier_goss_mutually_exclusive_schema)
elif c['verifier']['name'] == 'inspec':
util.merge_dicts(schema, verifier_options_readonly_schema)
util.merge_dicts(schema, verifier_inspec_mutually_exclusive_schema)
elif c['verifier']['name'] == 'testinfra':
util.merge_dicts(schema, verifier_testinfra_mutually_exclusive_schema)
v = Validator(allow_unknown=True)
v.validate(c, schema)
return v.errors | 0.423816 | 0.180179 |
from gurobipy import *
import sys

"""
Input Format:
# X - Num of defender actions
# ---
# X * X matrix -- (i, j) represents cost to switch from configuration i to j
# ---
# Num Attackers
# Prob of each attacker
# Attack names of each attacker seperated by |
# ---
# X * Attack actions -- Utility matrix showcasing (Reward for defender, Reward for attacker)
# ---
"""

# Create a new Gurobi model for the mixed-integer quadratic program (MIQP)
# solving a Bayesian Stackelberg game with configuration-switching costs.
m = Model("MIQP")

# The game-description input file is the first CLI argument.
f = open(str(sys.argv[1]), "r")

# Add defender strategies to the model: one continuous variable x_i in
# [0, 1] per configuration (the defender's mixed strategy).
X = int(f.readline())
x = []
for i in range(X):
    n = "x-" + str(i)
    x.append(m.addVar(lb=0, ub=1, vtype=GRB.CONTINUOUS, name=n))
m.update()

# Read the defender's switching-cost matrix: cost[i][j] is the cost of
# switching from configuration i to configuration j.
cost = []
for i in range(X):
    cost.append([int(j) for j in f.readline().split()])

# Defender strategy constraint: the mixed strategy sums to 1.
con = LinExpr()
for i in range(X):
    con.add(x[i])
m.addConstr(con == 1)
m.update()

# Add transition variables w_ij approximating the bilinear product
# x_i * x_j (mass of being in configuration i, then switching to j).
w = []
to_config_constr = [LinExpr() for i in range(X)]
for i in range(X):
    _w = []
    from_config_constr = LinExpr()
    for j in range(X):
        n = "w-" + str(i) + str(j)
        temp = m.addVar(vtype=GRB.CONTINUOUS, name=n)
        # Use McCormick envelopes to find upper and lower bounds for the
        # non-convex function x_i * x_j.
        if i == j:
            # NOTE(review): w_ii is pinned to 0, so "staying" in the same
            # configuration carries no transition mass -- confirm intended.
            m.addConstr(temp == 0)
        else:
            m.addConstr(temp >= 0)
            m.addConstr(temp >= x[i] + x[j] - 1)
            m.addConstr(temp <= x[i])
            m.addConstr(temp <= x[j])
        _w.append(temp)
        from_config_constr.add(temp)
        to_config_constr[j].add(temp)
    # Mass leaving configuration i must equal x_i.
    m.addConstr(from_config_constr == x[i])
    w.append(_w)
# Mass entering configuration j must equal x_j.
for i in range(X):
    m.addConstr(to_config_constr[i] == x[i])
m.update()

# Subtract switching costs from the objective function.
"""
# Actual computation
obj = QuadExpr()
alpha = 0.85
for i in range(X):
    for j in range(X):
        obj.add( alpha * cost[i][j] * ( x[i] + x[j] ), -1)
"""
# McCormick envelope approximation; alpha (second CLI argument) weights the
# switching-cost term against the game payoff.
obj = QuadExpr()
alpha = float(sys.argv[2])
two_step_configs = LinExpr()
for i in range(X):
    for j in range(X):
        obj.add(alpha * cost[i][j] * w[i][j], -1)
        two_step_configs.add(w[i][j])
m.addConstr(two_step_configs == 1)

""" Start processing for attacker types """
# L attacker types follow; M is a big-M constant for the best-response
# constraints below.
L = int(f.readline())
M = 100000000
for l in range(L):
    # Probability of the l-th attacker type occurring.
    v = f.readline().strip()
    p = float(v)

    # Attack-choice variables q for this attacker (integer in {0, 1},
    # i.e. effectively binary), one per named attack (CVE).
    Q = int(f.readline())
    q = []
    cve_names = f.readline().strip().split("|")
    for i in range(Q):
        n = str(l) + "-" + cve_names[i]
        q.append(m.addVar(lb=0, ub=1, vtype=GRB.INTEGER, name=n))
    # a is this attacker's best-response value (free continuous variable).
    a = m.addVar(
        lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="a-" + str(l)
    )
    m.update()

    # Read the payoff matrix: R[i][j] is the defender's reward and C[i][j]
    # the attacker's reward when defender plays i and attacker plays j.
    R = []
    C = []
    for i in range(X):
        rewards = f.readline().split()
        r = []
        c = []
        for j in range(Q):
            r_and_c = rewards[j].split(",")
            r.append(r_and_c[0])
            c.append(r_and_c[1])
        R.append(r)
        C.append(c)

    # Add this attacker's expected defender payoff to the objective,
    # weighted by the type probability p (bilinear x_i * q_j terms).
    for i in range(X):
        for j in range(Q):
            r = p * float(R[i][j])
            obj.add(r * x[i] * q[j])

    # Constrain the attacker to a pure strategy (exactly one attack).
    con = LinExpr()
    for j in range(Q):
        con.add(q[j])
    m.addConstr(con == 1)

    # Big-M constraints forcing the attacker onto the dominant pure
    # strategy: a is at least every attack's expected payoff, and equals
    # it for the chosen attack (q_j == 1).
    for j in range(Q):
        val = LinExpr()
        val.add(a)
        for i in range(X):
            val.add(float(C[i][j]) * x[i], -1.0)
        m.addConstr(val >= 0, q[j].getAttr("VarName") + "lb")
        m.addConstr(val <= (1 - q[j]) * M, q[j].getAttr("VarName") + "ub")

# Set objective function as all attackers have now been considered.
m.setObjective(obj, GRB.MAXIMIZE)

# Solve the MIQP.
m.optimize()

# Print out values
def printSeperator():
    """Print a horizontal rule used to delimit report sections."""
    print("---------------")
# Report results: every decision variable's optimal value, then the
# objective.  (Fixed: trailing dataset residue removed from the last line.)
printSeperator()
for v in m.getVars():
    print("%s -> %g" % (v.varName, v.x))
printSeperator()
print("Obj -> %g" % m.objVal)
printSeperator()
# NOTE(review): everything from here down to the final report section is a
# verbatim duplicate of the MIQP script above -- keep one copy and delete
# the other.
from gurobipy import *
import sys

"""
Input Format:
# X - Num of defender actions
# ---
# X * X matrix -- (i, j) represents cost to switch from configuration i to j
# ---
# Num Attackers
# Prob of each attacker
# Attack names of each attacker seperated by |
# ---
# X * Attack actions -- Utility matrix showcasing (Reward for defender, Reward for attacker)
# ---
"""

# Create a new model
m = Model("MIQP")
f = open(str(sys.argv[1]), "r")

# Add defender strategies to the model (one [0, 1] variable per config)
X = int(f.readline())
x = []
for i in range(X):
    n = "x-" + str(i)
    x.append(m.addVar(lb=0, ub=1, vtype=GRB.CONTINUOUS, name=n))
m.update()

# Add defender's switching cost matrix (cost[i][j]: switch i -> j)
cost = []
for i in range(X):
    cost.append([int(j) for j in f.readline().split()])

# Add defender strategy constraints (mixed strategy sums to 1)
con = LinExpr()
for i in range(X):
    con.add(x[i])
m.addConstr(con == 1)
m.update()

# Add transition cost variables w_ij approximating x_i * x_j
w = []
to_config_constr = [LinExpr() for i in range(X)]
for i in range(X):
    _w = []
    from_config_constr = LinExpr()
    for j in range(X):
        n = "w-" + str(i) + str(j)
        temp = m.addVar(vtype=GRB.CONTINUOUS, name=n)
        # Use McCormick envelopes to find upper and lower bounds for the
        # non-convex function x_i * x_j
        if i == j:
            m.addConstr(temp == 0)
        else:
            m.addConstr(temp >= 0)
            m.addConstr(temp >= x[i] + x[j] - 1)
            m.addConstr(temp <= x[i])
            m.addConstr(temp <= x[j])
        _w.append(temp)
        from_config_constr.add(temp)
        to_config_constr[j].add(temp)
    m.addConstr(from_config_constr == x[i])
    w.append(_w)
for i in range(X):
    m.addConstr(to_config_constr[i] == x[i])
m.update()

# Subtract costs from the objective function
"""
# Actual computation
obj = QuadExpr()
alpha = 0.85
for i in range(X):
    for j in range(X):
        obj.add( alpha * cost[i][j] * ( x[i] + x[j] ), -1)
"""
# McCormick envelope approximation (alpha = second CLI argument)
obj = QuadExpr()
alpha = float(sys.argv[2])
two_step_configs = LinExpr()
for i in range(X):
    for j in range(X):
        obj.add(alpha * cost[i][j] * w[i][j], -1)
        two_step_configs.add(w[i][j])
m.addConstr(two_step_configs == 1)

""" Start processing for attacker types """
L = int(f.readline())
M = 100000000
for l in range(L):
    # Probability of l-th attacker
    v = f.readline().strip()
    p = float(v)
    # Add l-th attacker info to the model (binary attack-choice vars q)
    Q = int(f.readline())
    q = []
    cve_names = f.readline().strip().split("|")
    for i in range(Q):
        n = str(l) + "-" + cve_names[i]
        q.append(m.addVar(lb=0, ub=1, vtype=GRB.INTEGER, name=n))
    a = m.addVar(
        lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="a-" + str(l)
    )
    m.update()
    # Get reward for attacker (C) and defender (R)
    R = []
    C = []
    for i in range(X):
        rewards = f.readline().split()
        r = []
        c = []
        for j in range(Q):
            r_and_c = rewards[j].split(",")
            r.append(r_and_c[0])
            c.append(r_and_c[1])
        R.append(r)
        C.append(c)
    # Update objective function with this attacker's expected payoff
    for i in range(X):
        for j in range(Q):
            r = p * float(R[i][j])
            obj.add(r * x[i] * q[j])
    # Add constraints to make attacker have a pure strategy
    con = LinExpr()
    for j in range(Q):
        con.add(q[j])
    m.addConstr(con == 1)
    # Add constraints (big-M) to make attacker select the dominant pure
    # strategy
    for j in range(Q):
        val = LinExpr()
        val.add(a)
        for i in range(X):
            val.add(float(C[i][j]) * x[i], -1.0)
        m.addConstr(val >= 0, q[j].getAttr("VarName") + "lb")
        m.addConstr(val <= (1 - q[j]) * M, q[j].getAttr("VarName") + "ub")

# Set objective function as all attackers have now been considered
m.setObjective(obj, GRB.MAXIMIZE)
# Solve MIQP
m.optimize()
# Print out values
def printSeperator():
    """Print a horizontal rule used to delimit report sections."""
    print("---------------")
# Report results: every decision variable's optimal value, then the
# objective.  (Fixed: trailing dataset residue removed from the last line.)
printSeperator()
for v in m.getVars():
    print("%s -> %g" % (v.varName, v.x))
printSeperator()
print("Obj -> %g" % m.objVal)
printSeperator()
import sys
import os

# Make the repository root importable when this file is run as a script.
this_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(this_path, os.pardir, os.pardir))
sys.path.append(root_path)
print(root_path)

from model.probability.generator import DataGenerator
from keras.callbacks import EarlyStopping, TensorBoard
from dataset.dataset_manager import DatasetManager
from model import encoderdecoder
from model import config
from model.probability import output_generator

# NOTE(review): rebinding 'config' shadows the imported module with the
# selected configuration object -- intentional, but easy to misread.
config = config.probabilistic_cfg

# Model save name
# NOTE(review): the trailing space inside '.h5 ' ends up in the saved
# filename -- looks accidental; confirm before fixing.
model_name = 'n2t_tfidf50k_embedding_' + str(config.glove_embedding_len) + '_latent_' + str(
    config.latent_dim) + '_patience_5.h5 '

# Overfitting avoidance: stop once val_loss stops improving for 5 epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, min_delta=0)

# Model checkpoint
# checkpoint = ModelCheckpoint(filepath=model_name+'_earlystopped_.h5', monitor='val_loss', save_best_only=True)

# Tensorboard
# histogram_freq=1, write_graph=True
tensorboard = TensorBoard(log_dir=config.tensorboard_log_dir, write_graph=True)

# Callbacks
callbacks = [tensorboard, early_stopping]

if config.preprocess_data:
    # -------------------------------------------------------------------------------------
    # --------------------------------- DATA PROCESSING -----------------------------------
    # -------------------------------------------------------------------------------------
    # Tokenize the corpus, build the TF-IDF-restricted embedding matrix and
    # write the embedded documents to disk (only when preprocessing is on).
    mgr = DatasetManager(max_headline_len=config.max_headline_len, max_article_len=config.max_article_len,
                         min_headline_len=config.min_headline_len, min_article_len=config.min_article_len, verbose=True,
                         get_in_out=output_generator.get_inputs_outputs)
    mgr.tokenize(size=100, only_tfidf=False, folder=config.preprocess_folder)
    mgr.generate_embeddings_from_tfidf(glove_embedding_len=config.glove_embedding_len, fname='TF-IDF_50000.pkl',
                                       embedding_dir=config.embedding_matrix_location)
    mgr.generate_emebedded_documents(tokenized_dir=config.preprocess_folder,
                                     embedding_dir=config.embedding_matrix_location)

# Load the (pre)computed embedding matrix and vocabulary index.
embeddings = DatasetManager.load_embeddings(embedding_dir=config.embedding_matrix_location)
word2index = DatasetManager.load_word2index(embedding_dir=config.embedding_matrix_location)

# ----------------------------------------------------------------------------------------
# ----------------------------------------- TRAIN ----------------------------------------
# ----------------------------------------------------------------------------------------
print('\nBuilding model')
model = encoderdecoder.encoder_decoder(latent_dim=config.latent_dim, max_encoder_seq_len=config.max_article_len,
                                       max_decoder_seq_len=config.max_headline_len,
                                       num_encoder_tokens=embeddings.shape[0], num_decoder_tokens=embeddings.shape[0],
                                       glove_embedding_len=config.glove_embedding_len, embeddings=embeddings,
                                       optimizer=config.optimizer, dense_activation=config.dense_activation,
                                       loss=config.loss,
                                       output_dimension=embeddings.shape[0])
model.summary()

print('-' * 100)
print('We will train for a total of', config.tot_epochs, 'epochs')
print('-' * 100)

# Batches are streamed from generators so the corpus never has to fit in
# memory at once.
data_generator = DataGenerator(max_decoder_seq_len=config.max_headline_len, decoder_tokens=embeddings.shape[0],
                               test_size=config.test_ratio)
model.fit_generator(generator=data_generator.generate_train(), validation_data=data_generator.generate_test(),
                    validation_steps=data_generator.get_steps_validation(), epochs=config.tot_epochs, max_queue_size=2,
                    use_multiprocessing=False, verbose=2, steps_per_epoch=data_generator.get_steps_per_epoch(),
                    callbacks=callbacks)

# Save model
print('Saving model...')
# Persist the trained model (architecture + weights) to disk.
# (Fixed: dataset residue removed from this line.)
model.save(model_name)

# NOTE(review): this 'import sys' belongs to the duplicated copy of the
# training script that follows below; kept so that copy keeps working.
import sys
import os

# NOTE(review): from here down is a verbatim duplicate of the training
# script above (its 'import sys' sits fused onto the previous line) --
# keep one copy and delete the other.

# Make the repository root importable when this file is run as a script.
this_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(this_path, os.pardir, os.pardir))
sys.path.append(root_path)
print(root_path)

from model.probability.generator import DataGenerator
from keras.callbacks import EarlyStopping, TensorBoard
from dataset.dataset_manager import DatasetManager
from model import encoderdecoder
from model import config
from model.probability import output_generator

# Selected configuration object (shadows the imported module).
config = config.probabilistic_cfg

# Model save name
# NOTE(review): trailing space inside '.h5 ' ends up in the filename.
model_name = 'n2t_tfidf50k_embedding_' + str(config.glove_embedding_len) + '_latent_' + str(
    config.latent_dim) + '_patience_5.h5 '

# Overfitting avoidance
early_stopping = EarlyStopping(monitor='val_loss', patience=5, min_delta=0)

# Model checkpoint
# checkpoint = ModelCheckpoint(filepath=model_name+'_earlystopped_.h5', monitor='val_loss', save_best_only=True)

# Tensorboard
# histogram_freq=1, write_graph=True
tensorboard = TensorBoard(log_dir=config.tensorboard_log_dir, write_graph=True)

# Callbacks
callbacks = [tensorboard, early_stopping]

if config.preprocess_data:
    # -------------------------------------------------------------------------------------
    # --------------------------------- DATA PROCESSING -----------------------------------
    # -------------------------------------------------------------------------------------
    mgr = DatasetManager(max_headline_len=config.max_headline_len, max_article_len=config.max_article_len,
                         min_headline_len=config.min_headline_len, min_article_len=config.min_article_len, verbose=True,
                         get_in_out=output_generator.get_inputs_outputs)
    mgr.tokenize(size=100, only_tfidf=False, folder=config.preprocess_folder)
    mgr.generate_embeddings_from_tfidf(glove_embedding_len=config.glove_embedding_len, fname='TF-IDF_50000.pkl',
                                       embedding_dir=config.embedding_matrix_location)
    mgr.generate_emebedded_documents(tokenized_dir=config.preprocess_folder,
                                     embedding_dir=config.embedding_matrix_location)

# Load the (pre)computed embedding matrix and vocabulary index.
embeddings = DatasetManager.load_embeddings(embedding_dir=config.embedding_matrix_location)
word2index = DatasetManager.load_word2index(embedding_dir=config.embedding_matrix_location)

# ----------------------------------------------------------------------------------------
# ----------------------------------------- TRAIN ----------------------------------------
# ----------------------------------------------------------------------------------------
print('\nBuilding model')
model = encoderdecoder.encoder_decoder(latent_dim=config.latent_dim, max_encoder_seq_len=config.max_article_len,
                                       max_decoder_seq_len=config.max_headline_len,
                                       num_encoder_tokens=embeddings.shape[0], num_decoder_tokens=embeddings.shape[0],
                                       glove_embedding_len=config.glove_embedding_len, embeddings=embeddings,
                                       optimizer=config.optimizer, dense_activation=config.dense_activation,
                                       loss=config.loss,
                                       output_dimension=embeddings.shape[0])
model.summary()

print('-' * 100)
print('We will train for a total of', config.tot_epochs, 'epochs')
print('-' * 100)

data_generator = DataGenerator(max_decoder_seq_len=config.max_headline_len, decoder_tokens=embeddings.shape[0],
                               test_size=config.test_ratio)
model.fit_generator(generator=data_generator.generate_train(), validation_data=data_generator.generate_test(),
                    validation_steps=data_generator.get_steps_validation(), epochs=config.tot_epochs, max_queue_size=2,
                    use_multiprocessing=False, verbose=2, steps_per_epoch=data_generator.get_steps_per_epoch(),
                    callbacks=callbacks)

# Save model
print('Saving model...')
# Persist the trained model to disk.
# (Fixed: dataset residue removed from this line.)
model.save(model_name)
## \package pts.evolve.genomebase This module have the class which every representation extends,
# if you are planning to create a new representation, you must
# take a inside look into this module.
# -----------------------------------------------------------------
# Import standard modules
import inspect
# Import other evolve modules
from functionslot import FunctionSlot
import utils
# Import the relevant PTS classes and modules
from ..core.tools.random import prng
# -----------------------------------------------------------------
class GenomeBase(object):

    """
    GenomeBase Class - The base of all chromosome representation
    """

    __slots__ = ["evaluator", "initializator", "mutator", "crossover", "internalParams", "score", "fitness"]

    def __init__(self):
        """
        Genome Constructor
        """

        # Pluggable genetic-operator slots.
        self.evaluator = FunctionSlot("Evaluator")
        self.initializator = FunctionSlot("Initializator")
        self.mutator = FunctionSlot("Mutator")
        self.crossover = FunctionSlot("Crossover")

        # Population-shared parameter dict plus raw/scaled scores.
        self.internalParams = {}
        self.score = 0.0
        self.fitness = 0.0

    # -----------------------------------------------------------------

    def getRawScore(self):
        """ Get the Raw Score of the genome

        :rtype: genome raw score
        """
        return self.score

    # -----------------------------------------------------------------

    def getFitnessScore(self):
        """ Get the Fitness Score of the genome

        :rtype: genome fitness score
        """
        return self.fitness

    # -----------------------------------------------------------------

    def __repr__(self):
        """String representation of Genome"""
        pieces = ["- GenomeBase\n",
                  "\tScore:\t\t\t %.6f\n" % (self.score,),
                  "\tFitness:\t\t %.6f\n\n" % (self.fitness,),
                  "\tParams:\t\t %s\n\n" % (self.internalParams,)]
        for slot in (self.evaluator, self.initializator,
                     self.mutator, self.crossover):
            pieces.append("\t" + slot.__repr__())
            pieces.append("\n")
        return "".join(pieces)

    # -----------------------------------------------------------------

    def setParams(self, **args):
        """ Set the internal params

        Example:
           >>> genome.setParams(rangemin=0, rangemax=100, gauss_mu=0, gauss_sigma=1)

        .. note:: All the individuals of the population share these parameters and use
                  the same instance of this dict.

        :param args: these params will be saved in every chromosome for genetic op. use
        """
        self.internalParams.update(args)

    # -----------------------------------------------------------------

    def getParam(self, key, nvl=None):
        """ Gets an internal parameter

        Example:
           >>> genome.getParam("rangemax")
           100

        .. note:: All the individuals of the population share these parameters and use
                  the same instance of this dict.

        :param key: the key of param
        :param nvl: if the key doesn't exist, the nvl will be returned
        """
        return self.internalParams.get(key, nvl)

    # -----------------------------------------------------------------

    def resetStats(self):
        """ Clear score and fitness of genome """
        self.score = 0.0
        self.fitness = 0.0

    # -----------------------------------------------------------------

    def evaluate(self, **args):
        """ Called to evaluate genome

        :param args: these parameters will be passed to the evaluator
        """
        self.resetStats()
        # Accumulate the contribution of every registered evaluator.
        for partial_score in self.evaluator.applyFunctions(self, **args):
            self.score += partial_score

    # -----------------------------------------------------------------

    def initialize(self, **args):
        """ Called to initialize genome

        :param args: these parameters will be passed to the initializator
        """
        # Initializators work by side effect; just drain the generator.
        for _ in self.initializator.applyFunctions(self, **args):
            pass

    # -----------------------------------------------------------------

    def mutate(self, **args):
        """ Called to mutate the genome

        :param args: these parameters will be passed to the mutator
        :rtype: the number of mutations returned by mutation operator
        """
        total_mutations = 0
        for count in self.mutator.applyFunctions(self, **args):
            total_mutations += count
        return total_mutations

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy the current GenomeBase to 'g'

        :param g: the destination genome

        .. note:: If you are planning to create a new chromosome representation, you
                  **must** implement this method on your class.
        """
        g.score = self.score
        g.fitness = self.fitness
        # Operator slots and the params dict are shared, not duplicated.
        for attr in ("evaluator", "initializator", "mutator",
                     "crossover", "internalParams"):
            setattr(g, attr, getattr(self, attr))

    # -----------------------------------------------------------------

    def clone(self):
        """ Clone this GenomeBase

        :rtype: the clone genome

        .. note:: If you are planning to create a new chromosome representation, you
                  **must** implement this method on your class.
        """
        fresh = GenomeBase()
        self.copy(fresh)
        return fresh

# -----------------------------------------------------------------
class G1DBase(GenomeBase):

    """ G1DBase Class - The base class for 1D chromosomes

    This chromosome class extends the :class:`GenomeBase` classes.

    :param size: the 1D list size

    .. versionadded:: 0.6
       Added the *G1DBase* class
    """

    __slots__ = ["genomeSize", "genomeList"]

    # -----------------------------------------------------------------

    def __init__(self, size):
        """Create an empty 1D genome whose declared length is *size*."""
        super(G1DBase, self).__init__()
        self.genomeSize = size   # declared (target) length
        self.genomeList = []     # actual gene storage

    # -----------------------------------------------------------------

    def __iadd__(self, item):
        """ To add more items using the += operator """
        self.genomeList.append(item)
        return self

    # -----------------------------------------------------------------

    def __eq__(self, other):
        """ Compares one chromosome with another """
        # Fixed idiom: the previous ``True if cond1 and cond2 else False``
        # redundantly wrapped an already-boolean expression.
        return (self.genomeList == other.genomeList and
                self.genomeSize == other.genomeSize)

    # -----------------------------------------------------------------

    def __contains__(self, value):
        """ Used on: *value in genome* """
        return value in self.genomeList

    # -----------------------------------------------------------------

    def __getslice__(self, a, b):
        """ Return the sliced part of chromosome (Python 2 slicing hook) """
        return self.genomeList[a:b]

    # -----------------------------------------------------------------

    def __setslice__(self, a, b, val):
        """ Sets the slice part of chromosome (Python 2 slicing hook) """
        self.genomeList[a:b] = val

    # -----------------------------------------------------------------

    def __getitem__(self, key):
        """ Return the specified gene of List """
        return self.genomeList[key]

    # -----------------------------------------------------------------

    def __setitem__(self, key, value):
        """ Set the specified value for a gene of List """
        self.genomeList[key] = value

    # -----------------------------------------------------------------

    def __iter__(self):
        """ Iterator support to the list """
        return iter(self.genomeList)

    # -----------------------------------------------------------------

    def __len__(self):
        """ Return the current number of genes (may differ from genomeSize) """
        return len(self.genomeList)

    # -----------------------------------------------------------------

    def getListSize(self):
        """ Returns the list supposed size

        .. warning:: this is different from what the len(obj) returns
        """
        return self.genomeSize

    # -----------------------------------------------------------------

    def resumeString(self):
        """ Returns a resumed string representation of the Genome """
        return str(self.genomeList)

    # -----------------------------------------------------------------

    def append(self, value):
        """ Appends an item to the end of the list

        Example:
           >>> genome.append(44)

        :param value: value to be added
        """
        self.genomeList.append(value)

    # -----------------------------------------------------------------

    def remove(self, value):
        """ Removes an item from the list

        Example:
           >>> genome.remove(44)

        :param value: value to be removed
        """
        self.genomeList.remove(value)

    # -----------------------------------------------------------------

    def clearList(self):
        """ Remove all genes from Genome """
        del self.genomeList[:]

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy genome to 'g'

        Example:
           >>> genome_origin.copy(genome_destination)

        :param g: the destination instance
        """
        # NOTE(review): this override does not chain to GenomeBase.copy, so
        # score/fitness/slots are not copied here -- confirm callers invoke
        # both, as elsewhere in this library.
        # The gene list is shallow-copied so the destination owns its list.
        g.genomeSize = self.genomeSize
        g.genomeList = self.genomeList[:]

    # -----------------------------------------------------------------

    def getInternalList(self):
        """ Returns the internal list of the genome

        .. note:: this method was created to solve performance issues

        :rtype: the internal list
        """
        return self.genomeList

    # -----------------------------------------------------------------

    def setInternalList(self, lst):
        """ Assigns a list to the internal list of the chromosome

        :param lst: the list to assign the internal list of the chromosome
        """
        self.genomeList = lst

# -----------------------------------------------------------------
class GTreeNodeBase(object):

    """ GTreeNodeBase Class - The base class for the node tree genomes

    :param parent: the parent node of the node
    :param childs: the childs of the node, must be a list of nodes

    .. versionadded:: 0.6
       Added the *GTreeNodeBase* class
    """

    __slots__ = ["parent", "childs"]

    def __init__(self, parent, childs=None):
        """Create a node with *parent* and an optional list of children.

        :param parent: the parent node, or ``None`` for a root node
        :param childs: optional list of :class:`GTreeNodeBase` children
        """
        self.parent = parent
        self.childs = []

        if childs is not None:
            if type(childs) != list:
                utils.raiseException("Childs must be a list of nodes", TypeError)
            # Fixed: materialize the filter result so len() below works on
            # Python 3 too (filter() returns a lazy iterator there); the
            # behavior on Python 2 is unchanged.
            typecheck_list = list(filter(lambda x: not isinstance(x, GTreeNodeBase), childs))
            if len(typecheck_list) > 0:
                utils.raiseException("Childs must be a list of nodes", TypeError)
            self.childs += childs

    # -----------------------------------------------------------------

    def isLeaf(self):
        """ Return True if the node is a leaf

        :rtype: True or False
        """
        return len(self.childs) == 0

    # -----------------------------------------------------------------

    def getChild(self, index):
        """ Returns the index-child of the node

        :rtype: child node
        """
        return self.childs[index]

    # -----------------------------------------------------------------

    def getChilds(self):
        """ Return the childs of the node

        .. warning :: use .getChilds()[:] if you'll change the list itself, like using childs.reverse(),
                      otherwise the original genome child order will be changed.

        :rtype: a list of nodes
        """
        return self.childs

    # -----------------------------------------------------------------

    def addChild(self, child):
        """ Adds a child to the node; a list argument extends the children

        :param child: the node (or list of nodes) to be added
        """
        if type(child) == list:
            self.childs.extend(child)
        else:
            if not isinstance(child, GTreeNodeBase):
                utils.raiseException("The child must be a node", TypeError)
            self.childs.append(child)

    # -----------------------------------------------------------------

    def replaceChild(self, older, newer):
        """ Replaces a child of the node

        :param older: the child to be replaced
        :param newer: the new child which replaces the older
        """
        index = self.childs.index(older)
        self.childs[index] = newer

    # -----------------------------------------------------------------

    def setParent(self, parent):
        """ Sets the parent of the node

        :param parent: the parent node
        """
        self.parent = parent

    # -----------------------------------------------------------------

    def getParent(self):
        """ Get the parent node of the node

        :rtype: the parent node
        """
        return self.parent

    # -----------------------------------------------------------------

    def __repr__(self):
        """Short representation showing only the child count."""
        str_repr = "GTreeNodeBase [Childs=%d]" % len(self)
        return str_repr

    # -----------------------------------------------------------------

    def __len__(self):
        """Number of direct children."""
        return len(self.childs)

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy the current contents GTreeNodeBase to 'g'

        :param g: the destination node

        .. note:: If you are planning to create a new chromosome representation, you
                  **must** implement this method on your class.
        """
        # Parent reference is shared; the child list is shallow-copied.
        g.parent = self.parent
        g.childs = self.childs[:]

    # -----------------------------------------------------------------

    def clone(self):
        """ Clone this GenomeBase

        :rtype: the clone genome

        .. note:: If you are planning to create a new chromosome representation, you
                  **must** implement this method on your class.
        """
        newcopy = GTreeNodeBase(None)
        self.copy(newcopy)
        return newcopy

# -----------------------------------------------------------------
class GTreeBase(GenomeBase):
""" GTreeBase Class - The base class for the tree genomes
This chromosome class extends the :class:`GenomeBase` classes.
:param root_node: the root node of the tree
.. versionadded:: 0.6
Added the *GTreeBase* class
"""
__slots__ = ["root_node", "tree_height", "nodes_list", "nodes_leaf", "nodes_branch"]
def __init__(self, root_node):
super(GTreeBase, self).__init__()
self.root_node = root_node
self.tree_height = None
self.nodes_list = None
# -----------------------------------------------------------------
def processNodes(self, cloning=False):
""" Creates a *cache* on the tree, this method must be called
every time you change the shape of the tree. It updates the
internal nodes list and the internal nodes properties such as
depth and height.
"""
if self.root_node is None:
return
self.nodes_list = self.getAllNodes()
self.nodes_leaf = filter(lambda n: n.isLeaf(), self.nodes_list)
self.nodes_branch = filter(lambda n: n.isLeaf() is False, self.nodes_list)
if not cloning: self.tree_height = self.getNodeHeight(self.getRoot())
# -----------------------------------------------------------------
def getRoot(self):
""" Return the tree root node
:rtype: the tree root node
"""
return self.root_node
# -----------------------------------------------------------------
def setRoot(self, root):
""" Sets the root of the tree
:param root: the tree root node
"""
if not isinstance(root, GTreeNodeBase):
utils.raiseException("The root must be a node", TypeError)
self.root_node = root
# -----------------------------------------------------------------
def getNodeDepth(self, node):
""" Returns the depth of a node
:rtype: the depth of the node, the depth of root node is 0
"""
if node == self.getRoot():
return 0
else:
return 1 + self.getNodeDepth(node.getParent())
# -----------------------------------------------------------------
def getNodeHeight(self, node):
""" Returns the height of a node
.. note:: If the node has no childs, the height will be 0.
:rtype: the height of the node
"""
height = 0
if len(node) <= 0:
return 0
for child in node.getChilds():
h_inner = self.getNodeHeight(child) + 1
if h_inner > height:
height = h_inner
return height
# -----------------------------------------------------------------
def getHeight(self):
""" Return the tree height
:rtype: the tree height
"""
return self.tree_height
# -----------------------------------------------------------------
def getNodesCount(self, start_node=None):
""" Return the number of the nodes on the tree
starting at the *start_node*, if *start_node* is None,
then the method will count all the tree nodes.
:rtype: the number of nodes
"""
count = 1
if start_node is None:
start_node = self.getRoot()
for i in start_node.getChilds():
count += self.getNodesCount(i)
return count
# -----------------------------------------------------------------
def getTraversalString(self, start_node=None, spc=0):
""" Returns a tree-formated string of the tree. This
method is used by the __repr__ method of the tree
:rtype: a string representing the tree
"""
str_buff = ""
if start_node is None:
start_node = self.getRoot()
str_buff += "%s\n" % start_node
spaces = spc + 2
for child_node in start_node.getChilds():
str_buff += "%s%s\n" % (" " * spaces, child_node)
str_buff += self.getTraversalString(child_node, spaces)
return str_buff
# -----------------------------------------------------------------
def traversal(self, callback, start_node=None):
""" Traversal the tree, this method will call the
user-defined callback function for each node on the tree
:param callback: a function
:param start_node: the start node to begin the traversal
"""
if not inspect.isfunction(callback):
utils.raiseException("The callback for the tree traversal must be a function", TypeError)
if start_node is None:
start_node = self.getRoot()
callback(start_node)
for child_node in start_node.getChilds():
callback(child_node)
self.traversal(callback, child_node)
# -----------------------------------------------------------------
def getRandomNode(self, node_type=0):
""" Returns a random node from the Tree
:param node_type: 0 = Any, 1 = Leaf, 2 = Branch
:rtype: random node
"""
lists = (self.nodes_list, self.nodes_leaf, self.nodes_branch)
cho = lists[node_type]
if len(cho) <= 0:
return None
return prng.choice(cho)
# -----------------------------------------------------------------
def getAllNodes(self):
""" Return a new list with all nodes
:rtype: the list with all nodes
"""
node_stack = []
all_nodes = []
tmp = None
node_stack.append(self.getRoot())
while len(node_stack) > 0:
tmp = node_stack.pop()
all_nodes.append(tmp)
childs = tmp.getChilds()
node_stack.extend(childs)
return all_nodes
# -----------------------------------------------------------------
def __repr__(self):
str_buff = "- GTree\n"
str_buff += "\tHeight:\t\t\t%d\n" % self.getHeight()
str_buff += "\tNodes:\t\t\t%d\n" % self.getNodesCount()
str_buff += "\n" + self.getTraversalString()
return str_buff
# -----------------------------------------------------------------
def __len__(self):
return len(self.nodes_list)
# -----------------------------------------------------------------
def __getitem__(self, index):
return self.nodes_list[index]
# -----------------------------------------------------------------
def __iter__(self):
return iter(self.nodes_list)
# -----------------------------------------------------------------
def copy(self, g, node=None, node_parent=None):
""" Copy the current contents GTreeBase to 'g'
:param g: the destination GTreeBase tree
.. note:: If you are planning to create a new chromosome representation, you
**must** implement this method on your class.
"""
if node is None:
g.tree_height = self.tree_height
node = self.root_node
if node is None:
return None
newnode = node.clone()
if node_parent is None:
g.setRoot(newnode)
else:
newnode.setParent(node_parent)
node_parent.replaceChild(node, newnode)
for ci in xrange(len(newnode)):
GTreeBase.copy(self, g, newnode.getChild(ci), newnode)
return newnode
# -----------------------------------------------------------------
def clone(self):
    """ Clone this GenomeBase
    Builds an empty tree, copies this tree's contents into it and
    rebuilds the clone's internal node cache.
    :rtype: the clone genome
    .. note:: If you are planning to create a new chromosome representation, you
    **must** implement this method on your class.
    """
    duplicate = GTreeBase(None)
    self.copy(duplicate)
    duplicate.processNodes()
    return duplicate
# -----------------------------------------------------------------
## \package pts.evolve.genomebase This module have the class which every representation extends,
# if you are planning to create a new representation, you must
# take a inside look into this module.
# -----------------------------------------------------------------
# Import standard modules
import inspect
# Import other evolve modules
from functionslot import FunctionSlot
import utils
# Import the relevant PTS classes and modules
from ..core.tools.random import prng
# -----------------------------------------------------------------
class GenomeBase(object):
    """
    GenomeBase Class - The base of all chromosome representation
    """

    __slots__ = ["evaluator", "initializator", "mutator", "crossover", "internalParams", "score", "fitness"]

    def __init__(self):
        """ Genome constructor: empty operator slots, zeroed score/fitness """
        self.evaluator = FunctionSlot("Evaluator")
        self.initializator = FunctionSlot("Initializator")
        self.mutator = FunctionSlot("Mutator")
        self.crossover = FunctionSlot("Crossover")
        self.internalParams = {}
        self.score = 0.0
        self.fitness = 0.0

    # -----------------------------------------------------------------

    def getRawScore(self):
        """ Get the Raw Score of the genome
        :rtype: genome raw score
        """
        return self.score

    # -----------------------------------------------------------------

    def getFitnessScore(self):
        """ Get the Fitness Score of the genome
        :rtype: genome fitness score
        """
        return self.fitness

    # -----------------------------------------------------------------

    def __repr__(self):
        """ String representation of Genome """
        lines = ["- GenomeBase\n",
                 "\tScore:\t\t\t %.6f\n" % (self.score,),
                 "\tFitness:\t\t %.6f\n\n" % (self.fitness,),
                 "\tParams:\t\t %s\n\n" % (self.internalParams,)]
        for slot in (self.evaluator, self.initializator, self.mutator, self.crossover):
            lines.append("\t" + repr(slot))
            lines.append("\n")
        return "".join(lines)

    # -----------------------------------------------------------------

    def setParams(self, **args):
        """ Set the internal params
        Example:
        >>> genome.setParams(rangemin=0, rangemax=100, gauss_mu=0, gauss_sigma=1)
        .. note:: All the individuals of the population share these parameters and
        use the same instance of this dict.
        :param args: params saved in every chromosome for genetic operator use
        """
        self.internalParams.update(args)

    # -----------------------------------------------------------------

    def getParam(self, key, nvl=None):
        """ Gets an internal parameter
        Example:
        >>> genome.getParam("rangemax")
        100
        .. note:: All the individuals of the population share these parameters and
        use the same instance of this dict.
        :param key: the key of param
        :param nvl: returned when the key does not exist
        """
        return self.internalParams.get(key, nvl)

    # -----------------------------------------------------------------

    def resetStats(self):
        """ Clear score and fitness of genome """
        self.score = 0.0
        self.fitness = 0.0

    # -----------------------------------------------------------------

    def evaluate(self, **args):
        """ Called to evaluate genome
        :param args: these parameters are passed to every evaluator function
        """
        self.resetStats()
        for partial_score in self.evaluator.applyFunctions(self, **args):
            self.score += partial_score

    # -----------------------------------------------------------------

    def initialize(self, **args):
        """ Called to initialize genome
        :param args: these parameters are passed to the initializator
        """
        # applyFunctions is a generator; exhaust it so every slot runs.
        for _ in self.initializator.applyFunctions(self, **args):
            pass

    # -----------------------------------------------------------------

    def mutate(self, **args):
        """
        Called to mutate the genome
        :param args: these parameters are passed to the mutator
        :rtype: the number of mutations returned by mutation operator
        """
        return sum(self.mutator.applyFunctions(self, **args))

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy the current GenomeBase to 'g'
        :param g: the destination genome
        .. note:: operator slots and the params dict are shared by reference,
           not duplicated. If you are planning to create a new chromosome
           representation, you **must** implement this method on your class.
        """
        g.score = self.score
        g.fitness = self.fitness
        g.evaluator = self.evaluator
        g.initializator = self.initializator
        g.mutator = self.mutator
        g.crossover = self.crossover
        g.internalParams = self.internalParams

    # -----------------------------------------------------------------

    def clone(self):
        """ Clone this GenomeBase
        :rtype: the clone genome
        .. note:: If you are planning to create a new chromosome representation, you
        **must** implement this method on your class.
        """
        fresh = GenomeBase()
        self.copy(fresh)
        return fresh
# -----------------------------------------------------------------
class G1DBase(GenomeBase):
    """ G1DBase Class - The base class for 1D chromosomes
    This chromosome class extends the :class:`GenomeBase` classes.
    :param size: the 1D list size
    .. versionadded:: 0.6
       Added the *G1DBase* class
    """

    __slots__ = ["genomeSize", "genomeList"]

    # -----------------------------------------------------------------

    def __init__(self, size):
        super(G1DBase, self).__init__()
        self.genomeSize = size
        self.genomeList = []

    # -----------------------------------------------------------------

    def __iadd__(self, item):
        """ Support for adding genes with the += operator """
        self.genomeList.append(item)
        return self

    # -----------------------------------------------------------------

    def __eq__(self, other):
        """ Two chromosomes are equal when both contents and declared size match """
        same_genes = (self.genomeList == other.genomeList)
        same_size = (self.genomeSize == other.genomeSize)
        return same_genes and same_size

    # -----------------------------------------------------------------

    def __contains__(self, value):
        """ Used on: *value in genome* """
        return value in self.genomeList

    # -----------------------------------------------------------------

    def __getslice__(self, a, b):
        """ Return the sliced part of chromosome (Python 2 slice protocol) """
        return self.genomeList[a:b]

    # -----------------------------------------------------------------

    def __setslice__(self, a, b, val):
        """ Sets the slice part of chromosome (Python 2 slice protocol) """
        self.genomeList[a:b] = val

    # -----------------------------------------------------------------

    def __getitem__(self, key):
        """ Return the specified gene of the list """
        return self.genomeList[key]

    # -----------------------------------------------------------------

    def __setitem__(self, key, value):
        """ Set the specified value for a gene of the list """
        self.genomeList[key] = value

    # -----------------------------------------------------------------

    def __iter__(self):
        """ Iterator support for the list """
        return iter(self.genomeList)

    # -----------------------------------------------------------------

    def __len__(self):
        """ Return the current number of genes """
        return len(self.genomeList)

    # -----------------------------------------------------------------

    def getListSize(self):
        """ Returns the list's declared size
        .. warning:: this is different from what len(obj) returns
        """
        return self.genomeSize

    # -----------------------------------------------------------------

    def resumeString(self):
        """ Returns a resumed string representation of the Genome """
        return str(self.genomeList)

    # -----------------------------------------------------------------

    def append(self, value):
        """ Appends an item to the end of the list
        Example:
        >>> genome.append(44)
        :param value: value to be added
        """
        self.genomeList.append(value)

    # -----------------------------------------------------------------

    def remove(self, value):
        """ Removes the first occurrence of an item from the list
        Example:
        >>> genome.remove(44)
        :param value: value to be removed
        """
        self.genomeList.remove(value)

    # -----------------------------------------------------------------

    def clearList(self):
        """ Remove all genes from Genome """
        del self.genomeList[:]

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy genome to 'g'
        Example:
        >>> genome_origin.copy(genome_destination)
        :param g: the destination instance
        """
        g.genomeSize = self.genomeSize
        g.genomeList = list(self.genomeList)

    # -----------------------------------------------------------------

    def getInternalList(self):
        """ Returns the internal list of the genome
        .. note:: this method was created to solve performance issues
        :rtype: the internal list
        """
        return self.genomeList

    # -----------------------------------------------------------------

    def setInternalList(self, lst):
        """ Assigns a list to the internal list of the chromosome
        :param lst: the list to assign to the internal list of the chromosome
        """
        self.genomeList = lst
# -----------------------------------------------------------------
class GTreeNodeBase(object):
    """ GTreeNodeBase Class - The base class for the node tree genomes
    :param parent: the parent node of the node
    :param childs: the childs of the node, must be a list of nodes
    .. versionadded:: 0.6
       Added the *GTreeNodeBase* class
    """

    __slots__ = ["parent", "childs"]

    def __init__(self, parent, childs=None):
        """ Create a node attached to *parent*, optionally adopting *childs* """
        self.parent = parent
        self.childs = []
        if childs is not None:
            if type(childs) != list:
                utils.raiseException("Childs must be a list of nodes", TypeError)
            wrong_typed = [c for c in childs if not isinstance(c, GTreeNodeBase)]
            if len(wrong_typed) > 0:
                utils.raiseException("Childs must be a list of nodes", TypeError)
            self.childs.extend(childs)

    # -----------------------------------------------------------------

    def isLeaf(self):
        """ Return True if the node is a leaf (has no children)
        :rtype: True or False
        """
        return not self.childs

    # -----------------------------------------------------------------

    def getChild(self, index):
        """ Returns the index-child of the node
        :rtype: child node
        """
        return self.childs[index]

    # -----------------------------------------------------------------

    def getChilds(self):
        """ Return the childs of the node
        .. warning :: use .getChilds()[:] if you'll change the list itself, like using childs.reverse(),
           otherwise the original genome child order will be changed.
        :rtype: a list of nodes
        """
        return self.childs

    # -----------------------------------------------------------------

    def addChild(self, child):
        """ Adds a child (or a whole list of children) to the node
        :param child: the node, or list of nodes, to be added
        """
        if type(child) == list:
            self.childs.extend(child)
            return
        if not isinstance(child, GTreeNodeBase):
            utils.raiseException("The child must be a node", TypeError)
        self.childs.append(child)

    # -----------------------------------------------------------------

    def replaceChild(self, older, newer):
        """ Replaces a child of the node
        :param older: the child to be replaced
        :param newer: the new child which replaces the older
        """
        position = self.childs.index(older)
        self.childs[position] = newer

    # -----------------------------------------------------------------

    def setParent(self, parent):
        """ Sets the parent of the node
        :param parent: the parent node
        """
        self.parent = parent

    # -----------------------------------------------------------------

    def getParent(self):
        """ Get the parent node of the node
        :rtype: the parent node
        """
        return self.parent

    # -----------------------------------------------------------------

    def __repr__(self):
        return "GTreeNodeBase [Childs=%d]" % len(self)

    # -----------------------------------------------------------------

    def __len__(self):
        return len(self.childs)

    # -----------------------------------------------------------------

    def copy(self, g):
        """ Copy the current contents GTreeNodeBase to 'g'
        :param g: the destination node
        .. note:: the parent reference is shared and the child list is
           shallow-copied. If you are planning to create a new chromosome
           representation, you **must** implement this method on your class.
        """
        g.parent = self.parent
        g.childs = self.childs[:]

    # -----------------------------------------------------------------

    def clone(self):
        """ Clone this node
        :rtype: the cloned node
        .. note:: If you are planning to create a new chromosome representation, you
        **must** implement this method on your class.
        """
        duplicate = GTreeNodeBase(None)
        self.copy(duplicate)
        return duplicate
# -----------------------------------------------------------------
class GTreeBase(GenomeBase):
    """ GTreeBase Class - The base class for the tree genomes
    This chromosome class extends the :class:`GenomeBase` classes.
    :param root_node: the root node of the tree
    .. versionadded:: 0.6
       Added the *GTreeBase* class
    """

    __slots__ = ["root_node", "tree_height", "nodes_list", "nodes_leaf", "nodes_branch"]

    def __init__(self, root_node):
        super(GTreeBase, self).__init__()
        self.root_node = root_node
        self.tree_height = None
        self.nodes_list = None

    # -----------------------------------------------------------------

    def processNodes(self, cloning=False):
        """ Rebuild the tree's internal *cache*; must be called every time
        the shape of the tree changes. Refreshes the flat node list, the
        leaf/branch partitions and (unless cloning) the cached tree height.
        """
        if self.root_node is None:
            return
        self.nodes_list = self.getAllNodes()
        self.nodes_leaf = [n for n in self.nodes_list if n.isLeaf()]
        self.nodes_branch = [n for n in self.nodes_list if not n.isLeaf()]
        if not cloning:
            self.tree_height = self.getNodeHeight(self.getRoot())

    # -----------------------------------------------------------------

    def getRoot(self):
        """ Return the tree root node
        :rtype: the tree root node
        """
        return self.root_node

    # -----------------------------------------------------------------

    def setRoot(self, root):
        """ Sets the root of the tree
        :param root: the tree root node
        """
        if not isinstance(root, GTreeNodeBase):
            utils.raiseException("The root must be a node", TypeError)
        self.root_node = root

    # -----------------------------------------------------------------

    def getNodeDepth(self, node):
        """ Returns the depth of a node
        :rtype: the depth of the node, the depth of root node is 0
        """
        if node == self.getRoot():
            return 0
        return self.getNodeDepth(node.getParent()) + 1

    # -----------------------------------------------------------------

    def getNodeHeight(self, node):
        """ Returns the height of a node
        .. note:: If the node has no childs, the height will be 0.
        :rtype: the height of the node
        """
        if len(node) <= 0:
            return 0
        return max(self.getNodeHeight(child) + 1 for child in node.getChilds())

    # -----------------------------------------------------------------

    def getHeight(self):
        """ Return the cached tree height
        :rtype: the tree height
        """
        return self.tree_height

    # -----------------------------------------------------------------

    def getNodesCount(self, start_node=None):
        """ Return the number of the nodes on the tree
        starting at the *start_node*; if *start_node* is None
        the whole tree is counted.
        :rtype: the number of nodes
        """
        if start_node is None:
            start_node = self.getRoot()
        return 1 + sum(self.getNodesCount(child) for child in start_node.getChilds())

    # -----------------------------------------------------------------

    def getTraversalString(self, start_node=None, spc=0):
        """ Returns a tree-formatted string of the tree. This
        method is used by the __repr__ method of the tree.
        :rtype: a string representing the tree
        """
        pieces = []
        if start_node is None:
            start_node = self.getRoot()
            pieces.append("%s\n" % start_node)
        indent = spc + 2
        for child_node in start_node.getChilds():
            pieces.append("%s%s\n" % (" " * indent, child_node))
            pieces.append(self.getTraversalString(child_node, indent))
        return "".join(pieces)

    # -----------------------------------------------------------------

    def traversal(self, callback, start_node=None):
        """ Traverse the tree, calling the user-defined callback
        for each node.
        :param callback: a function
        :param start_node: the start node to begin the traversal
        """
        if not inspect.isfunction(callback):
            utils.raiseException("The callback for the tree traversal must be a function", TypeError)
        if start_node is None:
            start_node = self.getRoot()
            callback(start_node)
        for child_node in start_node.getChilds():
            callback(child_node)
            self.traversal(callback, child_node)

    # -----------------------------------------------------------------

    def getRandomNode(self, node_type=0):
        """ Returns a random node from the Tree
        :param node_type: 0 = Any, 1 = Leaf, 2 = Branch
        :rtype: random node
        """
        candidates = (self.nodes_list, self.nodes_leaf, self.nodes_branch)[node_type]
        if len(candidates) <= 0:
            return None
        return prng.choice(candidates)

    # -----------------------------------------------------------------

    def getAllNodes(self):
        """ Return a new list with all nodes (iterative depth-first walk)
        :rtype: the list with all nodes
        """
        pending = [self.getRoot()]
        collected = []
        while pending:
            current = pending.pop()
            collected.append(current)
            pending.extend(current.getChilds())
        return collected

    # -----------------------------------------------------------------

    def __repr__(self):
        parts = ["- GTree\n",
                 "\tHeight:\t\t\t%d\n" % self.getHeight(),
                 "\tNodes:\t\t\t%d\n" % self.getNodesCount(),
                 "\n" + self.getTraversalString()]
        return "".join(parts)

    # -----------------------------------------------------------------

    def __len__(self):
        """ Number of nodes in the cached node list """
        return len(self.nodes_list)

    # -----------------------------------------------------------------

    def __getitem__(self, index):
        """ Node at *index* of the cached node list """
        return self.nodes_list[index]

    # -----------------------------------------------------------------

    def __iter__(self):
        """ Iterate over the cached node list """
        return iter(self.nodes_list)

    # -----------------------------------------------------------------

    def copy(self, g, node=None, node_parent=None):
        """ Copy the current contents GTreeBase to 'g'
        :param g: the destination GTreeBase tree
        :param node: node being copied on this call; None starts at the root
        :param node_parent: already-copied parent to attach the new node to
        .. note:: If you are planning to create a new chromosome representation, you
        **must** implement this method on your class.
        """
        if node is None:
            g.tree_height = self.tree_height
            node = self.root_node
        if node is None:
            return None
        fresh_node = node.clone()
        if node_parent is None:
            g.setRoot(fresh_node)
        else:
            fresh_node.setParent(node_parent)
            node_parent.replaceChild(node, fresh_node)
        # Recursively rebuild every child subtree under the copied node.
        # NOTE(review): xrange is Python-2-only; this module is Python-2 code.
        for child_index in xrange(len(fresh_node)):
            GTreeBase.copy(self, g, fresh_node.getChild(child_index), fresh_node)
        return fresh_node

    # -----------------------------------------------------------------

    def clone(self):
        """ Clone this tree genome
        :rtype: the clone genome
        .. note:: If you are planning to create a new chromosome representation, you
        **must** implement this method on your class.
        """
        duplicate = GTreeBase(None)
        self.copy(duplicate)
        duplicate.processNodes()
        return duplicate
# -----------------------------------------------------------------
from copy import (
deepcopy,
)
import json
import os
from typing import (
Optional,
Tuple,
Union,
cast,
)
import attr
from azul import (
CatalogName,
IndexName,
config,
)
from azul.indexer import (
Bundle,
BundleFQID,
SourcedBundleFQID,
)
from azul.indexer.index_service import (
IndexService,
IndexWriter,
Tallies,
)
from azul.plugins.repository.dss import (
DSSBundle,
DSSSourceRef,
)
from azul.types import (
AnyJSON,
JSON,
JSONs,
MutableJSON,
MutableJSONs,
)
from azul_test_case import (
AzulUnitTestCase,
)
from es_test_case import (
ElasticsearchTestCase,
)
class ForcedRefreshIndexService(IndexService):
    """An IndexService whose writers force an index refresh on every request."""

    def _create_writer(self, catalog: Optional[CatalogName]) -> IndexWriter:
        index_writer = super()._create_writer(catalog)
        # With a single client thread, refresh=True is faster than
        # refresh="wait_for": the latter would cap the request rate at
        # 1/refresh_interval, i.e. one request per second with the
        # default 1s refresh_interval.
        index_writer.refresh = True
        return index_writer
class CannedBundleTestCase(AzulUnitTestCase):
    """
    Mix-in giving tests access to canned bundle fixtures stored as JSON
    files in the `data` directory next to this module.
    """

    @classmethod
    def _load_canned_file(cls,
                          bundle: BundleFQID,
                          extension: str
                          ) -> Union[MutableJSONs, MutableJSON]:
        # Prefer the version-specific can; fall back to the unversioned one.
        try:
            return cls._load_canned_file_version(uuid=bundle.uuid,
                                                 version=bundle.version,
                                                 extension=extension)
        except FileNotFoundError:
            return cls._load_canned_file_version(uuid=bundle.uuid,
                                                 version=None,
                                                 extension=extension)

    @classmethod
    def _load_canned_file_version(cls,
                                  *,
                                  uuid: str,
                                  version: Optional[str],
                                  extension: str
                                  ) -> Union[MutableJSONs, MutableJSON]:
        # Canned files live in `<module dir>/data` and are named
        # `<uuid>[.<version>].<extension>.json`.
        data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
        version_part = '' if version is None else '.' + version
        canned_path = os.path.join(data_dir, f'{uuid}{version_part}.{extension}.json')
        with open(canned_path, 'r') as canned_file:
            return json.load(canned_file)

    @classmethod
    def _load_canned_bundle(cls, bundle: SourcedBundleFQID) -> Bundle:
        manifest = cast(MutableJSONs, cls._load_canned_file(bundle, 'manifest'))
        metadata_files = cls._load_canned_file(bundle, 'metadata')
        assert isinstance(manifest, list)
        return DSSBundle(fqid=bundle,
                         manifest=manifest,
                         metadata_files=metadata_files)
# Stand-in DSS endpoint URL; only used to derive the test repository source.
mock_dss_endpoint = 'https://test'
class IndexerTestCase(ElasticsearchTestCase, CannedBundleTestCase):
    """
    Base class for indexer tests. Wires up a forced-refresh index service and
    a mock repository source, and provides helpers for indexing canned
    bundles and verifying the resulting index documents.
    """
    # FIX: the last line of this class was fused with dataset metadata
    # ("| test/indexer/__init__.py | from copy import ("), which made the
    # module syntactically invalid; the junk has been removed.

    index_service: IndexService
    # Mock repository source; populated in setUpClass.
    source = None

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.index_service = ForcedRefreshIndexService()
        cls.source = DSSSourceRef.for_dss_endpoint(mock_dss_endpoint)

    @classmethod
    def bundle_fqid(cls, *, uuid, version):
        """Return a SourcedBundleFQID bound to this class's mock source."""
        return SourcedBundleFQID(source=cls.source,
                                 uuid=uuid,
                                 version=version)

    def _load_canned_result(self, bundle_fqid: BundleFQID) -> MutableJSONs:
        """
        Load the canned index documents for the given canned bundle and fix the
        '_index' entry in each to match the index name in the current deployment
        """
        expected_hits = self._load_canned_file(bundle_fqid, 'results')
        assert isinstance(expected_hits, list)
        for hit in expected_hits:
            index_name = IndexName.parse(hit['_index'])
            hit['_index'] = config.es_index_name(catalog=self.catalog,
                                                 entity_type=index_name.entity_type,
                                                 aggregate=index_name.aggregate)
        return expected_hits

    @classmethod
    def _index_canned_bundle(cls, bundle_fqid: SourcedBundleFQID, delete=False):
        """Load the canned bundle and index it (or delete its documents)."""
        bundle = cls._load_canned_bundle(bundle_fqid)
        cls._index_bundle(bundle, delete=delete)

    @classmethod
    def _index_bundle(cls, bundle: Bundle, delete: bool = False):
        """Index the given bundle, or remove it from the index if `delete`."""
        if delete:
            cls.index_service.delete(cls.catalog, bundle)
        else:
            cls.index_service.index(cls.catalog, bundle)

    @classmethod
    def _write_contributions(cls, bundle: Bundle) -> Tallies:
        """
        Transform the bundle and write only the resulting contributions,
        returning the entity tallies. The bundle's manifest and metadata are
        deep-copied first so the caller's bundle is not mutated — presumably
        the transform modifies them in place; confirm if that changes.
        """
        bundle = attr.evolve(bundle,
                             manifest=deepcopy(bundle.manifest),
                             metadata_files=deepcopy(bundle.metadata_files))
        contributions = cls.index_service.transform(cls.catalog, bundle, delete=False)
        return cls.index_service.contribute(cls.catalog, contributions)

    def _verify_sorted_lists(self, data: AnyJSON):
        """
        Traverse through an index document or service response to verify all
        lists of primitives are sorted. Fails if no lists to check are found.
        """
        def verify_sorted_lists(data_: AnyJSON, path: Tuple[str, ...] = ()) -> int:
            # Returns the number of primitive lists checked under data_.
            if isinstance(data_, dict):
                return sum(verify_sorted_lists(val, (*path, key))
                           for key, val in cast(JSON, data_).items())
            elif isinstance(data_, list):
                if data_:
                    if isinstance(data_[0], dict):
                        return sum(verify_sorted_lists(v, (*path, k))
                                   for val in cast(JSONs, data_)
                                   for k, v in val.items())
                    elif isinstance(data_[0], (type(None), bool, int, float, str)):
                        # The key makes None sort before any other value.
                        self.assertEqual(data_,
                                         sorted(data_, key=lambda x: (x is None, x)),
                                         msg=f'Value at {path} is not sorted: {data_}')
                        return 1
                    else:
                        assert False, str(type(data_[0]))
                else:
                    return 0
            elif isinstance(data_, (type(None), bool, int, float, str)):
                return 0
            else:
                assert False, str(type(data_))

        num_lists_counted = verify_sorted_lists(data)
        self.assertGreater(num_lists_counted, 0)
deepcopy,
)
import json
import os
from typing import (
Optional,
Tuple,
Union,
cast,
)
import attr
from azul import (
CatalogName,
IndexName,
config,
)
from azul.indexer import (
Bundle,
BundleFQID,
SourcedBundleFQID,
)
from azul.indexer.index_service import (
IndexService,
IndexWriter,
Tallies,
)
from azul.plugins.repository.dss import (
DSSBundle,
DSSSourceRef,
)
from azul.types import (
AnyJSON,
JSON,
JSONs,
MutableJSON,
MutableJSONs,
)
from azul_test_case import (
AzulUnitTestCase,
)
from es_test_case import (
ElasticsearchTestCase,
)
class ForcedRefreshIndexService(IndexService):
    """An IndexService whose writers force an index refresh on every request."""

    def _create_writer(self, catalog: Optional[CatalogName]) -> IndexWriter:
        index_writer = super()._create_writer(catalog)
        # With a single client thread, refresh=True is faster than
        # refresh="wait_for": the latter would cap the request rate at
        # 1/refresh_interval, i.e. one request per second with the
        # default 1s refresh_interval.
        index_writer.refresh = True
        return index_writer
class CannedBundleTestCase(AzulUnitTestCase):
    """
    Mix-in giving tests access to canned bundle fixtures stored as JSON
    files in the `data` directory next to this module.
    """

    @classmethod
    def _load_canned_file(cls,
                          bundle: BundleFQID,
                          extension: str
                          ) -> Union[MutableJSONs, MutableJSON]:
        # Prefer the version-specific can; fall back to the unversioned one.
        try:
            return cls._load_canned_file_version(uuid=bundle.uuid,
                                                 version=bundle.version,
                                                 extension=extension)
        except FileNotFoundError:
            return cls._load_canned_file_version(uuid=bundle.uuid,
                                                 version=None,
                                                 extension=extension)

    @classmethod
    def _load_canned_file_version(cls,
                                  *,
                                  uuid: str,
                                  version: Optional[str],
                                  extension: str
                                  ) -> Union[MutableJSONs, MutableJSON]:
        # Canned files live in `<module dir>/data` and are named
        # `<uuid>[.<version>].<extension>.json`.
        data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
        version_part = '' if version is None else '.' + version
        canned_path = os.path.join(data_dir, f'{uuid}{version_part}.{extension}.json')
        with open(canned_path, 'r') as canned_file:
            return json.load(canned_file)

    @classmethod
    def _load_canned_bundle(cls, bundle: SourcedBundleFQID) -> Bundle:
        manifest = cast(MutableJSONs, cls._load_canned_file(bundle, 'manifest'))
        metadata_files = cls._load_canned_file(bundle, 'metadata')
        assert isinstance(manifest, list)
        return DSSBundle(fqid=bundle,
                         manifest=manifest,
                         metadata_files=metadata_files)
# Stand-in DSS endpoint URL; only used to derive the test repository source.
mock_dss_endpoint = 'https://test'
class IndexerTestCase(ElasticsearchTestCase, CannedBundleTestCase):
    """
    Base class for indexer tests: forced-refresh index service, mock
    repository source, and helpers for indexing canned bundles and
    checking the resulting documents.
    """

    index_service: IndexService
    source = None

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.index_service = ForcedRefreshIndexService()
        cls.source = DSSSourceRef.for_dss_endpoint(mock_dss_endpoint)

    @classmethod
    def bundle_fqid(cls, *, uuid, version):
        """Return a SourcedBundleFQID bound to this class's mock source."""
        return SourcedBundleFQID(source=cls.source,
                                 uuid=uuid,
                                 version=version)

    def _load_canned_result(self, bundle_fqid: BundleFQID) -> MutableJSONs:
        """
        Load the canned index documents for the given canned bundle, rewriting
        each document's '_index' entry to the index name used by the current
        deployment.
        """
        expected_hits = self._load_canned_file(bundle_fqid, 'results')
        assert isinstance(expected_hits, list)
        for hit in expected_hits:
            parsed_name = IndexName.parse(hit['_index'])
            hit['_index'] = config.es_index_name(catalog=self.catalog,
                                                 entity_type=parsed_name.entity_type,
                                                 aggregate=parsed_name.aggregate)
        return expected_hits

    @classmethod
    def _index_canned_bundle(cls, bundle_fqid: SourcedBundleFQID, delete=False):
        """Load the canned bundle and index it (or delete its documents)."""
        canned_bundle = cls._load_canned_bundle(bundle_fqid)
        cls._index_bundle(canned_bundle, delete=delete)

    @classmethod
    def _index_bundle(cls, bundle: Bundle, delete: bool = False):
        """Index the given bundle, or remove it from the index if `delete`."""
        if delete:
            cls.index_service.delete(cls.catalog, bundle)
        else:
            cls.index_service.index(cls.catalog, bundle)

    @classmethod
    def _write_contributions(cls, bundle: Bundle) -> Tallies:
        """
        Transform the bundle and write only the resulting contributions,
        returning the entity tallies. Manifest and metadata are deep-copied
        so the caller's bundle stays untouched.
        """
        bundle = attr.evolve(bundle,
                             manifest=deepcopy(bundle.manifest),
                             metadata_files=deepcopy(bundle.metadata_files))
        contributions = cls.index_service.transform(cls.catalog, bundle, delete=False)
        return cls.index_service.contribute(cls.catalog, contributions)

    def _verify_sorted_lists(self, data: AnyJSON):
        """
        Traverse through an index document or service response to verify all
        lists of primitives are sorted. Fails if no lists to check are found.
        """
        primitives = (type(None), bool, int, float, str)

        def count_sorted(data_: AnyJSON, path: Tuple[str, ...] = ()) -> int:
            # Returns the number of primitive lists checked under data_.
            if isinstance(data_, dict):
                return sum(count_sorted(v, (*path, k))
                           for k, v in cast(JSON, data_).items())
            if isinstance(data_, list):
                if not data_:
                    return 0
                if isinstance(data_[0], dict):
                    return sum(count_sorted(v, (*path, k))
                               for entry in cast(JSONs, data_)
                               for k, v in entry.items())
                if isinstance(data_[0], primitives):
                    # The key makes None sort before any other value.
                    self.assertEqual(data_,
                                     sorted(data_, key=lambda x: (x is None, x)),
                                     msg=f'Value at {path} is not sorted: {data_}')
                    return 1
                assert False, str(type(data_[0]))
            if isinstance(data_, primitives):
                return 0
            assert False, str(type(data_))

        self.assertGreater(count_sorted(data), 0)
from collections import OrderedDict
import json
import string
import torch
import pyprob
from pyprob import Model
import pyprob.distributions as dists
def format_cc(cc, cc_format) -> str:
    """Render country code *cc* in one of the supported layouts.

    Format 0 omits the code entirely; other formats vary the '+' prefix
    and the trailing separator ('-' or space). Unknown formats fall back
    to '+<cc> '.
    """
    if cc_format == 0:
        return ""
    layouts = {1: "+{}", 2: "+{}", 3: "{}-", 4: "+{}-", 5: "{} "}
    return layouts.get(cc_format, "+{} ").format(cc)
def format_ac(ac, ac_format) -> str:
    """Render area code *ac* in one of the supported layouts.

    Layouts vary parentheses and the trailing separator ('-' or space).
    Unknown formats fall back to '(<ac>) '.
    """
    layouts = {0: "{}", 1: "({})", 2: "{}-", 3: "({})-", 4: "{} "}
    return layouts.get(ac_format, "({}) ").format(ac)
def format_line_number(line_number_blocks, line_format) -> str:
    """Join the digit blocks of a line number.

    Format 0 concatenates the blocks, 1 joins them with '-', anything
    else joins them with a space.
    """
    separators = {0: "", 1: "-"}
    return separators.get(line_format, " ").join(line_number_blocks)
def letter_to_index(letter: str) -> int:
    """Map *letter* to its position in the model's output alphabet.

    Raises if the character is not part of ALL_LETTERS.
    """
    position = ALL_LETTERS.find(letter)
    if position < 0:
        raise Exception(f"letter {letter} is not permitted.")
    return position
def pad_string(original: str, desired_len: int, pad_character: str = ' ') -> str:
    """Right-pad *original* with *pad_character* up to *desired_len*.

    Strings already at or beyond the desired length are returned unchanged.
    """
    deficit = desired_len - len(original)
    return original + pad_character * deficit
def load_json(jsonpath: str) -> dict:
    """Parse the JSON file at *jsonpath*, preserving key order."""
    with open(jsonpath) as json_file:
        return json.load(json_file, object_pairs_hook=OrderedDict)
"""
Supports Countries In
- https://en.wikipedia.org/wiki/National_conventions_for_writing_telephone_numbers#International_Telecommunication_Union
"""
COUNTRY_INFO = load_json("./data/limited_cc.json")
MAX_STRING_LEN = 30
ALL_DIGITS = string.digits
ALL_LETTERS = string.digits+' .,:()+-'
N_DIGIT = len(ALL_DIGITS)
N_LETTER = len(ALL_LETTERS)
class OneHot2DCategorical(dists.Categorical):
    # Categorical distribution whose samples are returned as a 2-D one-hot
    # matrix (one row per position) instead of a vector of class indices.
    def sample(self):
        # Draw one class index per row, then scatter into a one-hot matrix.
        # `self._probs * 0` yields zeros with the same shape/dtype/device as
        # the probability table.
        s = self._torch_dist.sample()
        one_hot = self._probs * 0
        for i, val in enumerate(s):
            one_hot[i, int(val.item())] = 1
        return one_hot
    def log_prob(self, x, *args, **kwargs):
        # vector of one hot vectors
        # Recover the class index of each one-hot row before delegating to the
        # base class. NOTE(review): row.nonzero() assumes exactly one nonzero
        # entry per row — confirm callers guarantee strict one-hot input.
        non_one_hot = torch.tensor([row.nonzero() for row in x])
        return super().log_prob(non_one_hot, *args, **kwargs)
class PhoneParser(Model):
    """
    Generative model of phone-number strings with unknown formatting.

    forward() samples a country, a country-code layout, an area code and
    its layout, and the line-number digit blocks, then observes the padded
    rendered string one letter at a time via OneHot2DCategorical.
    """
    # FIX: the final return line of get_observes was fused with dataset
    # metadata ("| primitives_ubc/phoneParser/infcomp.py | ..."), which made
    # the module syntactically invalid; the junk has been removed. Also uses
    # MAX_STRING_LEN instead of a hard-coded 30 in the length check, and a
    # plain string literal for the placeholder-free trace name.

    def __init__(self):
        super().__init__(name="Phone number with Unknown Format")

    def forward(self):
        # Pick a country uniformly at random.
        country_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(COUNTRY_INFO)]*len(COUNTRY_INFO)))).item())
        country_info = COUNTRY_INFO[country_index]
        # Obtain formatted country code: layout 0 (omitted) has weight 1/3,
        # the six remaining layouts share the rest (1/9 each).
        country_code = country_info['cc']
        cc_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3] + [1/9]*6))).item())
        full_cc = format_cc(country_code, cc_format)
        # Pick one of the country's digit-group structures uniformly.
        structure_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(country_info['structure'])]*len(country_info['structure'])))).item())
        number_structure = country_info['structure'][structure_index]
        # Obtain formatted area code: first structure entry is its length;
        # each digit is drawn uniformly.
        area_code_len = number_structure[0]
        area_code = ""
        for _ in range(area_code_len):
            curr_digit = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
            area_code += str(curr_digit)
        ac_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/6]*6))).item())
        full_ac = format_ac(area_code, ac_format)
        # Obtain formatted line number: remaining structure entries give the
        # length of each digit block.
        line_number_structure = number_structure[1:]
        line_number_block_len = len(line_number_structure)
        line_number_blocks = []
        for i in range(line_number_block_len):
            number_block_len = line_number_structure[i]
            number_block_digits = ""
            for _ in range(number_block_len):
                number = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
                number_block_digits += str(number)
            line_number_blocks.append(number_block_digits)
        line_number = " ".join(line_number_blocks)
        line_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3]*3))).item())
        full_line = format_line_number(line_number_blocks, line_format)
        # Observe each letter of the padded string independently: near-one
        # probability on the generated letter, 0.001 elsewhere.
        output = pad_string(original=full_cc+full_ac+full_line, desired_len=MAX_STRING_LEN)
        probs = torch.ones(MAX_STRING_LEN, N_LETTER)*0.001
        for i, letter in enumerate(output):
            probs[i, letter_to_index(letter)] = 1.
        pyprob.observe(OneHot2DCategorical(probs), name="phone_string")
        return output, {'country': country_info['country'],'country code': country_code, 'area code': area_code, 'line number': line_number}

    def get_observes(self, phone_string):
        """One-hot encode *phone_string* (padded to MAX_STRING_LEN) for use
        as the observed value of the 'phone_string' trace address."""
        if len(phone_string) > MAX_STRING_LEN:
            raise Exception("Phone number string length cannot exceed 30.")
        one_hot = torch.zeros(MAX_STRING_LEN, N_LETTER)
        phone_string = pad_string(original=phone_string, desired_len=MAX_STRING_LEN)
        for i, letter in enumerate(phone_string):
            one_hot[i, letter_to_index(letter)] = 1.
        return {'phone_string': one_hot}
import json
import string
import torch
import pyprob
from pyprob import Model
import pyprob.distributions as dists
def format_cc(cc, cc_format) -> str:
if cc_format == 0: return ""
elif cc_format == 1: return "+" + cc
elif cc_format == 2: return "+" + cc
elif cc_format == 3: return cc + "-"
elif cc_format == 4: return "+" + cc + "-"
elif cc_format == 5: return cc + " "
else: return "+" + cc + " "
def format_ac(ac, ac_format) -> str:
if ac_format == 0: return ac
elif ac_format == 1: return "(" + ac + ")"
elif ac_format == 2: return ac + "-"
elif ac_format == 3: return "(" + ac + ")-"
elif ac_format == 4: return ac + " "
else: return "(" + ac + ") "
def format_line_number(line_number_blocks, line_format) -> str:
if line_format == 0: return "".join(line_number_blocks)
elif line_format == 1: return "-".join(line_number_blocks)
else: return " ".join(line_number_blocks)
def letter_to_index(letter: str) -> int:
index = ALL_LETTERS.find(letter)
if index == -1: raise Exception(f"letter {letter} is not permitted.")
return index
def pad_string(original: str, desired_len: int, pad_character: str = ' ') -> str:
# Returns the padded version of the original string to length: desired_len
return original + (pad_character * (desired_len - len(original)))
def load_json(jsonpath: str) -> dict:
with open(jsonpath) as jsonfile:
return json.load(jsonfile, object_pairs_hook=OrderedDict)
"""
Supports Countries In
- https://en.wikipedia.org/wiki/National_conventions_for_writing_telephone_numbers#International_Telecommunication_Union
"""
COUNTRY_INFO = load_json("./data/limited_cc.json")
MAX_STRING_LEN = 30
ALL_DIGITS = string.digits
ALL_LETTERS = string.digits+' .,:()+-'
N_DIGIT = len(ALL_DIGITS)
N_LETTER = len(ALL_LETTERS)
class OneHot2DCategorical(dists.Categorical):
def sample(self):
s = self._torch_dist.sample()
one_hot = self._probs * 0
for i, val in enumerate(s):
one_hot[i, int(val.item())] = 1
return one_hot
def log_prob(self, x, *args, **kwargs):
# vector of one hot vectors
non_one_hot = torch.tensor([row.nonzero() for row in x])
return super().log_prob(non_one_hot, *args, **kwargs)
class PhoneParser(Model):
def __init__(self):
super().__init__(name="Phone number with Unknown Format")
def forward(self):
country_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(COUNTRY_INFO)]*len(COUNTRY_INFO)))).item())
country_info = COUNTRY_INFO[country_index]
# Obtain formatted country code
country_code = country_info['cc']
cc_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3] + [1/9]*6))).item())
full_cc = format_cc(country_code, cc_format)
structure_index = int(pyprob.sample(dists.Categorical(torch.tensor([1/len(country_info['structure'])]*len(country_info['structure'])))).item())
number_structure = country_info['structure'][structure_index]
# Obtain formatted area code
area_code_len = number_structure[0]
area_code = ""
for _ in range(area_code_len):
curr_digit = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
area_code += str(curr_digit)
ac_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/6]*6))).item())
full_ac = format_ac(area_code, ac_format)
# Obtain formatted line number
line_number_structure = number_structure[1:]
line_number_block_len = len(line_number_structure)
line_number_blocks = []
for i in range(line_number_block_len):
number_block_len = line_number_structure[i]
number_block_digits = ""
for _ in range(number_block_len):
number = int(pyprob.sample(dists.Categorical(torch.tensor([1/N_DIGIT]*N_DIGIT))).item())
number_block_digits += str(number)
line_number_blocks.append(number_block_digits)
line_number = " ".join(line_number_blocks)
line_format = int(pyprob.sample(dists.Categorical(torch.tensor([1/3]*3))).item())
full_line = format_line_number(line_number_blocks, line_format)
# make a categorical distribution that observes each letter independently (like 30 independent categoricals)
output = pad_string(original=full_cc+full_ac+full_line, desired_len=MAX_STRING_LEN)
probs = torch.ones(MAX_STRING_LEN, N_LETTER)*0.001
for i, letter in enumerate(output):
probs[i, letter_to_index(letter)] = 1.
pyprob.observe(OneHot2DCategorical(probs), name=f"phone_string")
return output, {'country': country_info['country'],'country code': country_code, 'area code': area_code, 'line number': line_number}
def get_observes(self, phone_string):
if len(phone_string) > 30: raise Exception("Phone number string length cannot exceed 30.")
one_hot = torch.zeros(MAX_STRING_LEN, N_LETTER)
phone_string = pad_string(original=phone_string, desired_len=MAX_STRING_LEN)
for i, letter in enumerate(phone_string):
one_hot[i, letter_to_index(letter)] = 1.
return {'phone_string': one_hot} | 0.718792 | 0.31338 |
from typing import Union
import numpy as np
class ContextualBandit(object):
"""
Base Class for a Multi-armed Bandit
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:type bandits: int
:type arms: int
"""
def __init__(self, bandits: int = 1, arms: int = 1):
self._nbandits = bandits
self._narms = arms
self.reset()
@property
def arms(self) -> int:
"""
Get the number of arms in each bandit
:returns: Number of arms in each bandit
:rtype: int
"""
return self._narms
@property
def bandits(self) -> int:
"""
Get the number of bandits
:returns: Number of bandits
:rtype: int
"""
return self._nbandits
def reset(self):
"""
Resets the current bandit randomly
:returns: The current bandit as observation
:rtype: int
"""
self.curr_bandit = np.random.randint(self.bandits)
return self.curr_bandit
def step(self, action: int) -> Union[int, float]:
"""
Takes an action in the bandit and returns the sampled reward
This method needs to be implemented in the specific bandit.
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int, float ...
"""
raise NotImplementedError
class BernoulliCB(ContextualBandit):
"""
Contextual Bandit with categorial context and bernoulli reward distribution
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param reward_probs: Probabilities of getting rewards
:type bandits: int
:type arms: int
:type reward_probs: numpy.ndarray
"""
def __init__(
self, bandits: int = 1, arms: int = 1, reward_probs: np.ndarray = None
):
super(BernoulliCB, self).__init__(bandits, arms)
if reward_probs is not None:
self.reward_probs = reward_probs
else:
self.reward_probs = np.random.random(size=(bandits, arms))
def step(self, action: int) -> int:
"""
Takes an action in the bandit and returns the sampled reward
The reward is sampled from a bernoulli distribution
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int
"""
reward_prob = self.reward_probs[self.curr_bandit, action]
reward = int(np.random.random() > reward_prob)
self.reset()
return self.curr_bandit, reward
class GaussianCB(ContextualBandit):
"""
Contextual Bandit with categorial context and gaussian reward distribution
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param reward_means: Mean of gaussian distribution for each reward
:type bandits: int
:type arms: int
:type reward_means: numpy.ndarray
"""
def __init__(
self, bandits: int = 1, arms: int = 1, reward_means: np.ndarray = None
):
super(GaussianCB, self).__init__(bandits, arms)
if reward_means is not None:
self.reward_means = reward_means
else:
self.reward_means = np.random.random(size=(bandits, arms))
def step(self, action: int) -> float:
"""
Takes an action in the bandit and returns the sampled reward
The reward is sampled from a gaussian distribution
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int
"""
reward_mean = self.reward_means[self.curr_bandit, action]
reward = np.random.normal(reward_mean)
self.reset()
return self.curr_bandit, reward | genrl/classical/bandit/contextual_bandits.py | from typing import Union
import numpy as np
class ContextualBandit(object):
"""
Base Class for a Multi-armed Bandit
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:type bandits: int
:type arms: int
"""
def __init__(self, bandits: int = 1, arms: int = 1):
self._nbandits = bandits
self._narms = arms
self.reset()
@property
def arms(self) -> int:
"""
Get the number of arms in each bandit
:returns: Number of arms in each bandit
:rtype: int
"""
return self._narms
@property
def bandits(self) -> int:
"""
Get the number of bandits
:returns: Number of bandits
:rtype: int
"""
return self._nbandits
def reset(self):
"""
Resets the current bandit randomly
:returns: The current bandit as observation
:rtype: int
"""
self.curr_bandit = np.random.randint(self.bandits)
return self.curr_bandit
def step(self, action: int) -> Union[int, float]:
"""
Takes an action in the bandit and returns the sampled reward
This method needs to be implemented in the specific bandit.
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int, float ...
"""
raise NotImplementedError
class BernoulliCB(ContextualBandit):
"""
Contextual Bandit with categorial context and bernoulli reward distribution
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param reward_probs: Probabilities of getting rewards
:type bandits: int
:type arms: int
:type reward_probs: numpy.ndarray
"""
def __init__(
self, bandits: int = 1, arms: int = 1, reward_probs: np.ndarray = None
):
super(BernoulliCB, self).__init__(bandits, arms)
if reward_probs is not None:
self.reward_probs = reward_probs
else:
self.reward_probs = np.random.random(size=(bandits, arms))
def step(self, action: int) -> int:
"""
Takes an action in the bandit and returns the sampled reward
The reward is sampled from a bernoulli distribution
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int
"""
reward_prob = self.reward_probs[self.curr_bandit, action]
reward = int(np.random.random() > reward_prob)
self.reset()
return self.curr_bandit, reward
class GaussianCB(ContextualBandit):
"""
Contextual Bandit with categorial context and gaussian reward distribution
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param reward_means: Mean of gaussian distribution for each reward
:type bandits: int
:type arms: int
:type reward_means: numpy.ndarray
"""
def __init__(
self, bandits: int = 1, arms: int = 1, reward_means: np.ndarray = None
):
super(GaussianCB, self).__init__(bandits, arms)
if reward_means is not None:
self.reward_means = reward_means
else:
self.reward_means = np.random.random(size=(bandits, arms))
def step(self, action: int) -> float:
"""
Takes an action in the bandit and returns the sampled reward
The reward is sampled from a gaussian distribution
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int
"""
reward_mean = self.reward_means[self.curr_bandit, action]
reward = np.random.normal(reward_mean)
self.reset()
return self.curr_bandit, reward | 0.966797 | 0.827689 |
import itertools
import numpy as np
from novelpy.utils.run_indicator_tools import Dataset
import pymongo
import tqdm
from sklearn.metrics.pairwise import cosine_similarity
import json
import os
def cosine_similarity_dist(n,doc_mat):
"""
Description
-----------
Compute a list of cosine similarity for all articles
Parameters
----------
n : int
number of articles.
doc_mat : np.array
array of articles representation.
Returns
-------
dist_list : list
list of distances.
"""
# Compute similarity
cos_sim = cosine_similarity(doc_mat)
dist_list = []
for i in range(n):
for j in range(i+1,n):
dist_list.append(1 - cos_sim[i][j])
return dist_list
def get_percentiles(dist_list):
"""
Description
-----------
Return percentiles of the novelty distribution
Parameters
----------
dist_list : list
list of distances.
Returns
-------
nov_list : dict
dict of novelty percentiles.
"""
nov_list = dict()
for q in [100, 99, 95, 90, 80, 50, 20, 10, 5, 1, 0]:
nov_list.update({str(q)+'%': np.percentile(dist_list, q)})
return nov_list
class Shibayama2021(Dataset):
def __init__(self,
id_variable,
year_variable,
ref_variable,
entity,
focal_year,
embedding_dim = 200,
client_name = None,
db_name = None,
collection_name = None):
"""
Description
-----------
Compute Shibayama et al (2021) novelty indicator and our alternative with author poximity
Parameters
----------
id_variable : str
identifier variable name.
ref_variable : str
lembedded representation of references variable name.
aut_profile_variable : str
embedded representation of author articles variable name.
year_variable : str
year variable name.
Returns
-------
None.
"""
self.id_variable = id_variable
self.ref_variable = ref_variable
self.year_variable = year_variable
self.entity = entity
self.embedding_dim = embedding_dim
Dataset.__init__(
self,
client_name = client_name,
db_name = db_name,
collection_name = collection_name ,
id_variable = id_variable,
year_variable = year_variable,
focal_year = focal_year)
self.path_score = "Result/shibayama/"
if not os.path.exists(self.path_score):
os.makedirs(self.path_score)
def compute_score(self,doc,entity):
"""
Parameters
----------
doc : dict
document from the embedded reference collection.
entity : list
'title_embedding' or 'abstract_embedding' or both.
Returns
-------
None.
"""
for ent in entity:
clean_refs = [ref for ref in doc[self.ref_variable] if ref[ent] and isinstance(ref[ent],list)]
n = len(clean_refs)
if n > 1:
doc_mat = np.zeros((n, self.embedding_dim))
for i in range(n):
item = clean_refs[i][ent]
if item:
doc_mat[i, :] = item
dist_list = cosine_similarity_dist(n,doc_mat)
nov_list = get_percentiles(dist_list)
references_novelty = {
'shibayama_{}'.format(ent) :nov_list,
'scores_array_{}'.format(ent) :dist_list
}
self.infos.update(references_novelty)
def get_indicator(self):
if self.client_name:
self.docs = self.collection.find({
self.ref_variable:{'$ne':None},
self.year_variable:self.focal_year
})
else:
self.docs = json.load(open("Data/docs/{}/{}.json".format(self.collection_name,self.focal_year)))
print('Getting score per paper ...')
# Iterate over every docs
list_of_insertion = []
for doc in tqdm.tqdm(self.docs):
self.infos = dict()
if doc[self.ref_variable] and len(doc[self.ref_variable])>1:
self.compute_score(doc, self.entity)
if self.infos:
if self.client_name:
list_of_insertion.append(pymongo.UpdateOne({self.id_variable: doc[self.id_variable]},
{'$set': {'shibayama': self.infos}},
upsert = True))
else:
list_of_insertion.append({self.id_variable: doc[self.id_variable],'shibayama': self.infos})
if self.client_name:
if "output" not in self.db.list_collection_names():
print("Init output collection with index on id_variable ...")
self.collection_output = self.db["output"]
self.collection_output.create_index([ (self.id_variable,1) ])
else:
self.collection_output = self.db["output"]
if list_of_insertion:
self.db['output'].bulk_write(list_of_insertion)
else:
if list_of_insertion:
with open(self.path_score + "/{}.json".format(self.focal_year), 'w') as outfile:
json.dump(list_of_insertion, outfile) | novelpy/indicators/Shibayama2021.py | import itertools
import numpy as np
from novelpy.utils.run_indicator_tools import Dataset
import pymongo
import tqdm
from sklearn.metrics.pairwise import cosine_similarity
import json
import os
def cosine_similarity_dist(n,doc_mat):
"""
Description
-----------
Compute a list of cosine similarity for all articles
Parameters
----------
n : int
number of articles.
doc_mat : np.array
array of articles representation.
Returns
-------
dist_list : list
list of distances.
"""
# Compute similarity
cos_sim = cosine_similarity(doc_mat)
dist_list = []
for i in range(n):
for j in range(i+1,n):
dist_list.append(1 - cos_sim[i][j])
return dist_list
def get_percentiles(dist_list):
"""
Description
-----------
Return percentiles of the novelty distribution
Parameters
----------
dist_list : list
list of distances.
Returns
-------
nov_list : dict
dict of novelty percentiles.
"""
nov_list = dict()
for q in [100, 99, 95, 90, 80, 50, 20, 10, 5, 1, 0]:
nov_list.update({str(q)+'%': np.percentile(dist_list, q)})
return nov_list
class Shibayama2021(Dataset):
def __init__(self,
id_variable,
year_variable,
ref_variable,
entity,
focal_year,
embedding_dim = 200,
client_name = None,
db_name = None,
collection_name = None):
"""
Description
-----------
Compute Shibayama et al (2021) novelty indicator and our alternative with author poximity
Parameters
----------
id_variable : str
identifier variable name.
ref_variable : str
lembedded representation of references variable name.
aut_profile_variable : str
embedded representation of author articles variable name.
year_variable : str
year variable name.
Returns
-------
None.
"""
self.id_variable = id_variable
self.ref_variable = ref_variable
self.year_variable = year_variable
self.entity = entity
self.embedding_dim = embedding_dim
Dataset.__init__(
self,
client_name = client_name,
db_name = db_name,
collection_name = collection_name ,
id_variable = id_variable,
year_variable = year_variable,
focal_year = focal_year)
self.path_score = "Result/shibayama/"
if not os.path.exists(self.path_score):
os.makedirs(self.path_score)
def compute_score(self,doc,entity):
"""
Parameters
----------
doc : dict
document from the embedded reference collection.
entity : list
'title_embedding' or 'abstract_embedding' or both.
Returns
-------
None.
"""
for ent in entity:
clean_refs = [ref for ref in doc[self.ref_variable] if ref[ent] and isinstance(ref[ent],list)]
n = len(clean_refs)
if n > 1:
doc_mat = np.zeros((n, self.embedding_dim))
for i in range(n):
item = clean_refs[i][ent]
if item:
doc_mat[i, :] = item
dist_list = cosine_similarity_dist(n,doc_mat)
nov_list = get_percentiles(dist_list)
references_novelty = {
'shibayama_{}'.format(ent) :nov_list,
'scores_array_{}'.format(ent) :dist_list
}
self.infos.update(references_novelty)
def get_indicator(self):
if self.client_name:
self.docs = self.collection.find({
self.ref_variable:{'$ne':None},
self.year_variable:self.focal_year
})
else:
self.docs = json.load(open("Data/docs/{}/{}.json".format(self.collection_name,self.focal_year)))
print('Getting score per paper ...')
# Iterate over every docs
list_of_insertion = []
for doc in tqdm.tqdm(self.docs):
self.infos = dict()
if doc[self.ref_variable] and len(doc[self.ref_variable])>1:
self.compute_score(doc, self.entity)
if self.infos:
if self.client_name:
list_of_insertion.append(pymongo.UpdateOne({self.id_variable: doc[self.id_variable]},
{'$set': {'shibayama': self.infos}},
upsert = True))
else:
list_of_insertion.append({self.id_variable: doc[self.id_variable],'shibayama': self.infos})
if self.client_name:
if "output" not in self.db.list_collection_names():
print("Init output collection with index on id_variable ...")
self.collection_output = self.db["output"]
self.collection_output.create_index([ (self.id_variable,1) ])
else:
self.collection_output = self.db["output"]
if list_of_insertion:
self.db['output'].bulk_write(list_of_insertion)
else:
if list_of_insertion:
with open(self.path_score + "/{}.json".format(self.focal_year), 'w') as outfile:
json.dump(list_of_insertion, outfile) | 0.719778 | 0.233138 |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateRecordRuleResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'rule_id': 'str',
'app_id': 'str',
'obs_addr': 'RecordObsFileAddr',
'record_formats': 'list[str]',
'hls_config': 'HLSRecordConfig',
'mp4_config': 'MP4RecordConfig',
'create_time': 'str',
'update_time': 'str',
'x_request_id': 'str'
}
attribute_map = {
'rule_id': 'rule_id',
'app_id': 'app_id',
'obs_addr': 'obs_addr',
'record_formats': 'record_formats',
'hls_config': 'hls_config',
'mp4_config': 'mp4_config',
'create_time': 'create_time',
'update_time': 'update_time',
'x_request_id': 'X-request-Id'
}
def __init__(self, rule_id=None, app_id=None, obs_addr=None, record_formats=None, hls_config=None, mp4_config=None, create_time=None, update_time=None, x_request_id=None):
"""CreateRecordRuleResponse - a model defined in huaweicloud sdk"""
super(CreateRecordRuleResponse, self).__init__()
self._rule_id = None
self._app_id = None
self._obs_addr = None
self._record_formats = None
self._hls_config = None
self._mp4_config = None
self._create_time = None
self._update_time = None
self._x_request_id = None
self.discriminator = None
if rule_id is not None:
self.rule_id = rule_id
if app_id is not None:
self.app_id = app_id
if obs_addr is not None:
self.obs_addr = obs_addr
if record_formats is not None:
self.record_formats = record_formats
if hls_config is not None:
self.hls_config = hls_config
if mp4_config is not None:
self.mp4_config = mp4_config
if create_time is not None:
self.create_time = create_time
if update_time is not None:
self.update_time = update_time
if x_request_id is not None:
self.x_request_id = x_request_id
@property
def rule_id(self):
"""Gets the rule_id of this CreateRecordRuleResponse.
规则id,由服务端返回。创建或修改规则的时候不携带
:return: The rule_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id):
"""Sets the rule_id of this CreateRecordRuleResponse.
规则id,由服务端返回。创建或修改规则的时候不携带
:param rule_id: The rule_id of this CreateRecordRuleResponse.
:type: str
"""
self._rule_id = rule_id
@property
def app_id(self):
"""Gets the app_id of this CreateRecordRuleResponse.
应用id
:return: The app_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this CreateRecordRuleResponse.
应用id
:param app_id: The app_id of this CreateRecordRuleResponse.
:type: str
"""
self._app_id = app_id
@property
def obs_addr(self):
"""Gets the obs_addr of this CreateRecordRuleResponse.
:return: The obs_addr of this CreateRecordRuleResponse.
:rtype: RecordObsFileAddr
"""
return self._obs_addr
@obs_addr.setter
def obs_addr(self, obs_addr):
"""Sets the obs_addr of this CreateRecordRuleResponse.
:param obs_addr: The obs_addr of this CreateRecordRuleResponse.
:type: RecordObsFileAddr
"""
self._obs_addr = obs_addr
@property
def record_formats(self):
"""Gets the record_formats of this CreateRecordRuleResponse.
录制格式:hls格式或者mp4格式
:return: The record_formats of this CreateRecordRuleResponse.
:rtype: list[str]
"""
return self._record_formats
@record_formats.setter
def record_formats(self, record_formats):
"""Sets the record_formats of this CreateRecordRuleResponse.
录制格式:hls格式或者mp4格式
:param record_formats: The record_formats of this CreateRecordRuleResponse.
:type: list[str]
"""
self._record_formats = record_formats
@property
def hls_config(self):
"""Gets the hls_config of this CreateRecordRuleResponse.
:return: The hls_config of this CreateRecordRuleResponse.
:rtype: HLSRecordConfig
"""
return self._hls_config
@hls_config.setter
def hls_config(self, hls_config):
"""Sets the hls_config of this CreateRecordRuleResponse.
:param hls_config: The hls_config of this CreateRecordRuleResponse.
:type: HLSRecordConfig
"""
self._hls_config = hls_config
@property
def mp4_config(self):
"""Gets the mp4_config of this CreateRecordRuleResponse.
:return: The mp4_config of this CreateRecordRuleResponse.
:rtype: MP4RecordConfig
"""
return self._mp4_config
@mp4_config.setter
def mp4_config(self, mp4_config):
"""Sets the mp4_config of this CreateRecordRuleResponse.
:param mp4_config: The mp4_config of this CreateRecordRuleResponse.
:type: MP4RecordConfig
"""
self._mp4_config = mp4_config
@property
def create_time(self):
"""Gets the create_time of this CreateRecordRuleResponse.
创建时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:return: The create_time of this CreateRecordRuleResponse.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this CreateRecordRuleResponse.
创建时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:param create_time: The create_time of this CreateRecordRuleResponse.
:type: str
"""
self._create_time = create_time
@property
def update_time(self):
"""Gets the update_time of this CreateRecordRuleResponse.
更新时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:return: The update_time of this CreateRecordRuleResponse.
:rtype: str
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this CreateRecordRuleResponse.
更新时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:param update_time: The update_time of this CreateRecordRuleResponse.
:type: str
"""
self._update_time = update_time
@property
def x_request_id(self):
"""Gets the x_request_id of this CreateRecordRuleResponse.
:return: The x_request_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._x_request_id
@x_request_id.setter
def x_request_id(self, x_request_id):
"""Sets the x_request_id of this CreateRecordRuleResponse.
:param x_request_id: The x_request_id of this CreateRecordRuleResponse.
:type: str
"""
self._x_request_id = x_request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateRecordRuleResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | huaweicloud-sdk-cloudrtc/huaweicloudsdkcloudrtc/v2/model/create_record_rule_response.py |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateRecordRuleResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'rule_id': 'str',
'app_id': 'str',
'obs_addr': 'RecordObsFileAddr',
'record_formats': 'list[str]',
'hls_config': 'HLSRecordConfig',
'mp4_config': 'MP4RecordConfig',
'create_time': 'str',
'update_time': 'str',
'x_request_id': 'str'
}
attribute_map = {
'rule_id': 'rule_id',
'app_id': 'app_id',
'obs_addr': 'obs_addr',
'record_formats': 'record_formats',
'hls_config': 'hls_config',
'mp4_config': 'mp4_config',
'create_time': 'create_time',
'update_time': 'update_time',
'x_request_id': 'X-request-Id'
}
def __init__(self, rule_id=None, app_id=None, obs_addr=None, record_formats=None, hls_config=None, mp4_config=None, create_time=None, update_time=None, x_request_id=None):
"""CreateRecordRuleResponse - a model defined in huaweicloud sdk"""
super(CreateRecordRuleResponse, self).__init__()
self._rule_id = None
self._app_id = None
self._obs_addr = None
self._record_formats = None
self._hls_config = None
self._mp4_config = None
self._create_time = None
self._update_time = None
self._x_request_id = None
self.discriminator = None
if rule_id is not None:
self.rule_id = rule_id
if app_id is not None:
self.app_id = app_id
if obs_addr is not None:
self.obs_addr = obs_addr
if record_formats is not None:
self.record_formats = record_formats
if hls_config is not None:
self.hls_config = hls_config
if mp4_config is not None:
self.mp4_config = mp4_config
if create_time is not None:
self.create_time = create_time
if update_time is not None:
self.update_time = update_time
if x_request_id is not None:
self.x_request_id = x_request_id
@property
def rule_id(self):
"""Gets the rule_id of this CreateRecordRuleResponse.
规则id,由服务端返回。创建或修改规则的时候不携带
:return: The rule_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id):
"""Sets the rule_id of this CreateRecordRuleResponse.
规则id,由服务端返回。创建或修改规则的时候不携带
:param rule_id: The rule_id of this CreateRecordRuleResponse.
:type: str
"""
self._rule_id = rule_id
@property
def app_id(self):
"""Gets the app_id of this CreateRecordRuleResponse.
应用id
:return: The app_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this CreateRecordRuleResponse.
应用id
:param app_id: The app_id of this CreateRecordRuleResponse.
:type: str
"""
self._app_id = app_id
@property
def obs_addr(self):
"""Gets the obs_addr of this CreateRecordRuleResponse.
:return: The obs_addr of this CreateRecordRuleResponse.
:rtype: RecordObsFileAddr
"""
return self._obs_addr
@obs_addr.setter
def obs_addr(self, obs_addr):
"""Sets the obs_addr of this CreateRecordRuleResponse.
:param obs_addr: The obs_addr of this CreateRecordRuleResponse.
:type: RecordObsFileAddr
"""
self._obs_addr = obs_addr
@property
def record_formats(self):
"""Gets the record_formats of this CreateRecordRuleResponse.
录制格式:hls格式或者mp4格式
:return: The record_formats of this CreateRecordRuleResponse.
:rtype: list[str]
"""
return self._record_formats
@record_formats.setter
def record_formats(self, record_formats):
"""Sets the record_formats of this CreateRecordRuleResponse.
录制格式:hls格式或者mp4格式
:param record_formats: The record_formats of this CreateRecordRuleResponse.
:type: list[str]
"""
self._record_formats = record_formats
@property
def hls_config(self):
"""Gets the hls_config of this CreateRecordRuleResponse.
:return: The hls_config of this CreateRecordRuleResponse.
:rtype: HLSRecordConfig
"""
return self._hls_config
@hls_config.setter
def hls_config(self, hls_config):
"""Sets the hls_config of this CreateRecordRuleResponse.
:param hls_config: The hls_config of this CreateRecordRuleResponse.
:type: HLSRecordConfig
"""
self._hls_config = hls_config
@property
def mp4_config(self):
"""Gets the mp4_config of this CreateRecordRuleResponse.
:return: The mp4_config of this CreateRecordRuleResponse.
:rtype: MP4RecordConfig
"""
return self._mp4_config
@mp4_config.setter
def mp4_config(self, mp4_config):
"""Sets the mp4_config of this CreateRecordRuleResponse.
:param mp4_config: The mp4_config of this CreateRecordRuleResponse.
:type: MP4RecordConfig
"""
self._mp4_config = mp4_config
@property
def create_time(self):
"""Gets the create_time of this CreateRecordRuleResponse.
创建时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:return: The create_time of this CreateRecordRuleResponse.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this CreateRecordRuleResponse.
创建时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:param create_time: The create_time of this CreateRecordRuleResponse.
:type: str
"""
self._create_time = create_time
@property
def update_time(self):
"""Gets the update_time of this CreateRecordRuleResponse.
更新时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:return: The update_time of this CreateRecordRuleResponse.
:rtype: str
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this CreateRecordRuleResponse.
更新时间,形如“2006-01-02T15:04:05.075Z”,时区为:UTC
:param update_time: The update_time of this CreateRecordRuleResponse.
:type: str
"""
self._update_time = update_time
@property
def x_request_id(self):
"""Gets the x_request_id of this CreateRecordRuleResponse.
:return: The x_request_id of this CreateRecordRuleResponse.
:rtype: str
"""
return self._x_request_id
@x_request_id.setter
def x_request_id(self, x_request_id):
"""Sets the x_request_id of this CreateRecordRuleResponse.
:param x_request_id: The x_request_id of this CreateRecordRuleResponse.
:type: str
"""
self._x_request_id = x_request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateRecordRuleResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.477554 | 0.08772 |
from collections import namedtuple
import functools
import re
from core.libs.peewee import *
from core.libs.peewee import CommaClause
from core.libs.peewee import EnclosedClause
from core.libs.peewee import Entity
from core.libs.peewee import Expression
from core.libs.peewee import Node
from core.libs.peewee import OP_EQ
class Operation(object):
"""Encapsulate a single schema altering operation."""
def __init__(self, migrator, method, *args, **kwargs):
self.migrator = migrator
self.method = method
self.args = args
self.kwargs = kwargs
def _parse_node(self, node):
compiler = self.migrator.database.compiler()
return compiler.parse_node(node)
def execute(self, node):
sql, params = self._parse_node(node)
self.migrator.database.execute_sql(sql, params)
def _handle_result(self, result):
if isinstance(result, Node):
self.execute(result)
elif isinstance(result, Operation):
result.run()
elif isinstance(result, (list, tuple)):
for item in result:
self._handle_result(item)
def run(self):
kwargs = self.kwargs.copy()
kwargs['generate'] = True
self._handle_result(
getattr(self.migrator, self.method)(*self.args, **kwargs))
def operation(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
generate = kwargs.pop('generate', False)
if generate:
return fn(self, *args, **kwargs)
return Operation(self, fn.__name__, *args, **kwargs)
return inner
class SchemaMigrator(object):
def __init__(self, database):
self.database = database
@classmethod
def from_database(cls, database):
if isinstance(database, PostgresqlDatabase):
return PostgresqlMigrator(database)
elif isinstance(database, MySQLDatabase):
return MySQLMigrator(database)
else:
return SqliteMigrator(database)
@operation
def apply_default(self, table, column_name, field):
default = field.default
if callable(default):
default = default()
return Clause(
SQL('UPDATE'),
Entity(table),
SQL('SET'),
Expression(
Entity(column_name),
OP_EQ,
Param(field.db_value(default)),
flat=True))
@operation
def alter_add_column(self, table, column_name, field):
# Make field null at first.
field_null, field.null = field.null, True
field.name = field.db_column = column_name
field_clause = self.database.compiler().field_definition(field)
field.null = field_null
parts = [
SQL('ALTER TABLE'),
Entity(table),
SQL('ADD COLUMN'),
field_clause]
if isinstance(field, ForeignKeyField):
parts.extend([
SQL('REFERENCES'),
Entity(field.rel_model._meta.db_table),
EnclosedClause(Entity(field.to_field.db_column))
])
return Clause(*parts)
@operation
def add_column(self, table, column_name, field):
# Adding a column is complicated by the fact that if there are rows
# present and the field is non-null, then we need to first add the
# column as a nullable field, then set the value, then add a not null
# constraint.
if not field.null and field.default is None:
raise ValueError('%s is not null but has no default' % column_name)
# Foreign key fields must explicitly specify a `to_field`.
if isinstance(field, ForeignKeyField) and not field.to_field:
raise ValueError('Foreign keys must specify a `to_field`.')
operations = [self.alter_add_column(table, column_name, field)]
# In the event the field is *not* nullable, update with the default
# value and set not null.
if not field.null:
operations.extend([
self.apply_default(table, column_name, field),
self.add_not_null(table, column_name)])
return operations
@operation
def drop_column(self, table, column_name, cascade=True):
nodes = [
SQL('ALTER TABLE'),
Entity(table),
SQL('DROP COLUMN'),
Entity(column_name)]
if cascade:
nodes.append(SQL('CASCADE'))
return Clause(*nodes)
@operation
def rename_column(self, table, old_name, new_name):
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('RENAME COLUMN'),
Entity(old_name),
SQL('TO'),
Entity(new_name))
def _alter_column(self, table, column):
return [
SQL('ALTER TABLE'),
Entity(table),
SQL('ALTER COLUMN'),
Entity(column)]
@operation
def add_not_null(self, table, column):
nodes = self._alter_column(table, column)
nodes.append(SQL('SET NOT NULL'))
return Clause(*nodes)
@operation
def drop_not_null(self, table, column):
nodes = self._alter_column(table, column)
nodes.append(SQL('DROP NOT NULL'))
return Clause(*nodes)
@operation
def rename_table(self, old_name, new_name):
return Clause(
SQL('ALTER TABLE'),
Entity(old_name),
SQL('RENAME TO'),
Entity(new_name))
@operation
def add_index(self, table, columns, unique=False):
compiler = self.database.compiler()
statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
return Clause(
SQL(statement),
Entity(compiler.index_name(table, columns)),
SQL('ON'),
Entity(table),
EnclosedClause(*[Entity(column) for column in columns]))
@operation
def drop_index(self, table, index_name):
return Clause(
SQL('DROP INDEX'),
Entity(index_name))
class PostgresqlMigrator(SchemaMigrator):
def _primary_key_columns(self, tbl):
query = """
SELECT pg_attribute.attname
FROM pg_index, pg_class, pg_attribute
WHERE
pg_class.oid = '%s'::regclass AND
indrelid = pg_class.oid AND
pg_attribute.attrelid = pg_class.oid AND
pg_attribute.attnum = any(pg_index.indkey) AND
indisprimary;
"""
cursor = self.database.execute_sql(query % tbl)
return [row[0] for row in cursor.fetchall()]
@operation
def rename_table(self, old_name, new_name):
pk_names = self._primary_key_columns(old_name)
ParentClass = super(PostgresqlMigrator, self)
operations = [
ParentClass.rename_table(old_name, new_name, generate=True)]
if len(pk_names) == 1:
# Check for existence of primary key sequence.
seq_name = '%s_%s_seq' % (old_name, pk_names[0])
query = """
SELECT 1
FROM information_schema.sequences
WHERE LOWER(sequence_name) = LOWER(%s)
"""
cursor = self.database.execute_sql(query, (seq_name,))
if bool(cursor.fetchone()):
new_seq_name = '%s_%s_seq' % (new_name, pk_names[0])
operations.append(ParentClass.rename_table(
seq_name, new_seq_name, generate=True))
return operations
_column_attributes = ('name', 'definition', 'null', 'pk', 'default', 'extra')
class MySQLColumn(namedtuple('_Column', _column_attributes)):
@property
def is_pk(self):
return self.pk == 'PRI'
@property
def is_unique(self):
return self.pk == 'UNI'
@property
def is_null(self):
return self.null == 'YES'
def sql(self, column_name=None, is_null=None):
if is_null is None:
is_null = self.is_null
if column_name is None:
column_name = self.name
parts = [
Entity(column_name),
SQL(self.definition)]
if self.is_unique:
parts.append(SQL('UNIQUE'))
if not is_null:
parts.append(SQL('NOT NULL'))
if self.is_pk:
parts.append(SQL('PRIMARY KEY'))
if self.extra:
parts.append(SQL(extra))
return Clause(*parts)
class MySQLMigrator(SchemaMigrator):
def _get_column_definition(self, table, column_name):
cursor = self.database.execute_sql('DESCRIBE %s;' % table)
rows = cursor.fetchall()
for row in rows:
column = MySQLColumn(*row)
if column.name == column_name:
return column
return False
@operation
def add_not_null(self, table, column):
column = self._get_column_definition(table, column)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('MODIFY'),
column.sql(is_null=False))
@operation
def drop_not_null(self, table, column):
column = self._get_column_definition(table, column)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('MODIFY'),
column.sql(is_null=True))
@operation
def rename_column(self, table, old_name, new_name):
column = self._get_column_definition(table, old_name)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('CHANGE'),
Entity(old_name),
column.sql(column_name=new_name))
@operation
def drop_index(self, table, index_name):
return Clause(
SQL('DROP INDEX'),
Entity(index_name),
SQL('ON'),
Entity(table))
class SqliteMigrator(SchemaMigrator):
"""
SQLite supports a subset of ALTER TABLE queries, view the docs for the
full details http://sqlite.org/lang_altertable.html
"""
column_re = re.compile('(.+?)\((.+)\)')
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
column_name_re = re.compile('"?([\w]+)')
fk_re = re.compile('FOREIGN KEY\s+\("?([\w]+)"?\)\s+', re.I)
def _get_column_names(self, table):
res = self.database.execute_sql('select * from "%s" limit 1' % table)
return [item[0] for item in res.description]
def _get_create_table(self, table):
res = self.database.execute_sql(
('select name, sql from sqlite_master '
'where type=? and LOWER(name)=?'),
['table', table.lower()])
return res.fetchone()
@operation
def _update_column(self, table, column_to_update, fn):
columns = set(column.name.lower()
for column in self.database.get_columns(table))
if column_to_update.lower() not in columns:
raise ValueError('Column "%s" does not exist on "%s"' %
(column_to_update, table))
# Get the SQL used to create the given table.
table, create_table = self._get_create_table(table)
# Get the indexes and SQL to re-create indexes.
indexes = self.database.get_indexes(table)
# Find any foreign keys we may need to remove.
foreign_keys = self.database.get_foreign_keys(table)
# Parse out the `CREATE TABLE` and column list portions of the query.
raw_create, raw_columns = self.column_re.search(create_table).groups()
# Clean up the individual column definitions.
column_defs = [
col.strip() for col in self.column_split_re.findall(raw_columns)]
new_column_defs = []
new_column_names = []
original_column_names = []
fk_columns = {}
for column_def in column_defs:
column_name, = self.column_name_re.match(column_def).groups()
if column_name == column_to_update:
new_column_def = fn(column_name, column_def)
if new_column_def:
new_column_defs.append(new_column_def)
original_column_names.append(column_name)
column_name, = self.column_name_re.match(
new_column_def).groups()
new_column_names.append(column_name)
else:
new_column_defs.append(column_def)
if not column_name.lower().startswith(('foreign', 'primary')):
new_column_names.append(column_name)
original_column_names.append(column_name)
# Create a mapping of original columns to new columns.
original_to_new = dict(zip(original_column_names, new_column_names))
new_column = original_to_new.get(column_to_update)
fk_filter_fn = lambda column_def: column_def
if not new_column:
# Remove any foreign keys associated with this column.
fk_filter_fn = lambda column_def: None
elif new_column != column_to_update:
# Update any foreign keys for this column.
fk_filter_fn = lambda column_def: self.fk_re.sub(
'FOREIGN KEY ("%s") ' % new_column,
column_def)
cleaned_columns = []
for column_def in new_column_defs:
match = self.fk_re.match(column_def)
if match is not None and match.groups()[0] == column_to_update:
column_def = fk_filter_fn(column_def)
if column_def:
cleaned_columns.append(column_def)
# Update the name of the new CREATE TABLE query.
temp_table = table + '__tmp__'
rgx = re.compile('("?)%s("?)' % table, re.I)
create = rgx.sub(
'\\1%s\\2' % temp_table,
raw_create)
# Create the new table.
columns = ', '.join(cleaned_columns)
queries = [
Clause(SQL('DROP TABLE IF EXISTS'), Entity(temp_table)),
SQL('%s (%s)' % (create.strip(), columns))]
# Populate new table.
populate_table = Clause(
SQL('INSERT INTO'),
Entity(temp_table),
EnclosedClause(*[Entity(col) for col in new_column_names]),
SQL('SELECT'),
CommaClause(*[Entity(col) for col in original_column_names]),
SQL('FROM'),
Entity(table))
queries.append(populate_table)
# Drop existing table and rename temp table.
queries.append(Clause(
SQL('DROP TABLE'),
Entity(table)))
queries.append(self.rename_table(temp_table, table))
# Re-create indexes.
for index in indexes:
# Auto-generated indexes in SQLite will not have associated SQL,
# so pass over them.
if not index.sql:
continue
if column_to_update in index.columns:
if new_column:
queries.append(
SQL(index.sql.replace(column_to_update, new_column)))
else:
queries.append(SQL(index.sql))
return queries
@operation
def drop_column(self, table, column_name, cascade=True):
return self._update_column(table, column_name, lambda a, b: None)
@operation
def rename_column(self, table, old_name, new_name):
def _rename(column_name, column_def):
return column_def.replace(column_name, new_name)
return self._update_column(table, old_name, _rename)
@operation
def add_not_null(self, table, column):
def _add_not_null(column_name, column_def):
return column_def + ' NOT NULL'
return self._update_column(table, column, _add_not_null)
@operation
def drop_not_null(self, table, column):
def _drop_not_null(column_name, column_def):
return column_def.replace('NOT NULL', '')
return self._update_column(table, column, _drop_not_null)
def migrate(*operations, **kwargs):
for operation in operations:
operation.run() | mercury/core/libs/playhouse/migrate.py | from collections import namedtuple
import functools
import re
from core.libs.peewee import *
from core.libs.peewee import CommaClause
from core.libs.peewee import EnclosedClause
from core.libs.peewee import Entity
from core.libs.peewee import Expression
from core.libs.peewee import Node
from core.libs.peewee import OP_EQ
class Operation(object):
"""Encapsulate a single schema altering operation."""
def __init__(self, migrator, method, *args, **kwargs):
self.migrator = migrator
self.method = method
self.args = args
self.kwargs = kwargs
def _parse_node(self, node):
compiler = self.migrator.database.compiler()
return compiler.parse_node(node)
def execute(self, node):
sql, params = self._parse_node(node)
self.migrator.database.execute_sql(sql, params)
def _handle_result(self, result):
if isinstance(result, Node):
self.execute(result)
elif isinstance(result, Operation):
result.run()
elif isinstance(result, (list, tuple)):
for item in result:
self._handle_result(item)
def run(self):
kwargs = self.kwargs.copy()
kwargs['generate'] = True
self._handle_result(
getattr(self.migrator, self.method)(*self.args, **kwargs))
def operation(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
generate = kwargs.pop('generate', False)
if generate:
return fn(self, *args, **kwargs)
return Operation(self, fn.__name__, *args, **kwargs)
return inner
class SchemaMigrator(object):
def __init__(self, database):
self.database = database
@classmethod
def from_database(cls, database):
if isinstance(database, PostgresqlDatabase):
return PostgresqlMigrator(database)
elif isinstance(database, MySQLDatabase):
return MySQLMigrator(database)
else:
return SqliteMigrator(database)
@operation
def apply_default(self, table, column_name, field):
default = field.default
if callable(default):
default = default()
return Clause(
SQL('UPDATE'),
Entity(table),
SQL('SET'),
Expression(
Entity(column_name),
OP_EQ,
Param(field.db_value(default)),
flat=True))
@operation
def alter_add_column(self, table, column_name, field):
# Make field null at first.
field_null, field.null = field.null, True
field.name = field.db_column = column_name
field_clause = self.database.compiler().field_definition(field)
field.null = field_null
parts = [
SQL('ALTER TABLE'),
Entity(table),
SQL('ADD COLUMN'),
field_clause]
if isinstance(field, ForeignKeyField):
parts.extend([
SQL('REFERENCES'),
Entity(field.rel_model._meta.db_table),
EnclosedClause(Entity(field.to_field.db_column))
])
return Clause(*parts)
@operation
def add_column(self, table, column_name, field):
# Adding a column is complicated by the fact that if there are rows
# present and the field is non-null, then we need to first add the
# column as a nullable field, then set the value, then add a not null
# constraint.
if not field.null and field.default is None:
raise ValueError('%s is not null but has no default' % column_name)
# Foreign key fields must explicitly specify a `to_field`.
if isinstance(field, ForeignKeyField) and not field.to_field:
raise ValueError('Foreign keys must specify a `to_field`.')
operations = [self.alter_add_column(table, column_name, field)]
# In the event the field is *not* nullable, update with the default
# value and set not null.
if not field.null:
operations.extend([
self.apply_default(table, column_name, field),
self.add_not_null(table, column_name)])
return operations
@operation
def drop_column(self, table, column_name, cascade=True):
nodes = [
SQL('ALTER TABLE'),
Entity(table),
SQL('DROP COLUMN'),
Entity(column_name)]
if cascade:
nodes.append(SQL('CASCADE'))
return Clause(*nodes)
@operation
def rename_column(self, table, old_name, new_name):
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('RENAME COLUMN'),
Entity(old_name),
SQL('TO'),
Entity(new_name))
def _alter_column(self, table, column):
return [
SQL('ALTER TABLE'),
Entity(table),
SQL('ALTER COLUMN'),
Entity(column)]
@operation
def add_not_null(self, table, column):
nodes = self._alter_column(table, column)
nodes.append(SQL('SET NOT NULL'))
return Clause(*nodes)
@operation
def drop_not_null(self, table, column):
nodes = self._alter_column(table, column)
nodes.append(SQL('DROP NOT NULL'))
return Clause(*nodes)
@operation
def rename_table(self, old_name, new_name):
return Clause(
SQL('ALTER TABLE'),
Entity(old_name),
SQL('RENAME TO'),
Entity(new_name))
@operation
def add_index(self, table, columns, unique=False):
compiler = self.database.compiler()
statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
return Clause(
SQL(statement),
Entity(compiler.index_name(table, columns)),
SQL('ON'),
Entity(table),
EnclosedClause(*[Entity(column) for column in columns]))
@operation
def drop_index(self, table, index_name):
return Clause(
SQL('DROP INDEX'),
Entity(index_name))
class PostgresqlMigrator(SchemaMigrator):
def _primary_key_columns(self, tbl):
query = """
SELECT pg_attribute.attname
FROM pg_index, pg_class, pg_attribute
WHERE
pg_class.oid = '%s'::regclass AND
indrelid = pg_class.oid AND
pg_attribute.attrelid = pg_class.oid AND
pg_attribute.attnum = any(pg_index.indkey) AND
indisprimary;
"""
cursor = self.database.execute_sql(query % tbl)
return [row[0] for row in cursor.fetchall()]
@operation
def rename_table(self, old_name, new_name):
pk_names = self._primary_key_columns(old_name)
ParentClass = super(PostgresqlMigrator, self)
operations = [
ParentClass.rename_table(old_name, new_name, generate=True)]
if len(pk_names) == 1:
# Check for existence of primary key sequence.
seq_name = '%s_%s_seq' % (old_name, pk_names[0])
query = """
SELECT 1
FROM information_schema.sequences
WHERE LOWER(sequence_name) = LOWER(%s)
"""
cursor = self.database.execute_sql(query, (seq_name,))
if bool(cursor.fetchone()):
new_seq_name = '%s_%s_seq' % (new_name, pk_names[0])
operations.append(ParentClass.rename_table(
seq_name, new_seq_name, generate=True))
return operations
_column_attributes = ('name', 'definition', 'null', 'pk', 'default', 'extra')
class MySQLColumn(namedtuple('_Column', _column_attributes)):
@property
def is_pk(self):
return self.pk == 'PRI'
@property
def is_unique(self):
return self.pk == 'UNI'
@property
def is_null(self):
return self.null == 'YES'
def sql(self, column_name=None, is_null=None):
if is_null is None:
is_null = self.is_null
if column_name is None:
column_name = self.name
parts = [
Entity(column_name),
SQL(self.definition)]
if self.is_unique:
parts.append(SQL('UNIQUE'))
if not is_null:
parts.append(SQL('NOT NULL'))
if self.is_pk:
parts.append(SQL('PRIMARY KEY'))
if self.extra:
parts.append(SQL(extra))
return Clause(*parts)
class MySQLMigrator(SchemaMigrator):
def _get_column_definition(self, table, column_name):
cursor = self.database.execute_sql('DESCRIBE %s;' % table)
rows = cursor.fetchall()
for row in rows:
column = MySQLColumn(*row)
if column.name == column_name:
return column
return False
@operation
def add_not_null(self, table, column):
column = self._get_column_definition(table, column)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('MODIFY'),
column.sql(is_null=False))
@operation
def drop_not_null(self, table, column):
column = self._get_column_definition(table, column)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('MODIFY'),
column.sql(is_null=True))
@operation
def rename_column(self, table, old_name, new_name):
column = self._get_column_definition(table, old_name)
return Clause(
SQL('ALTER TABLE'),
Entity(table),
SQL('CHANGE'),
Entity(old_name),
column.sql(column_name=new_name))
@operation
def drop_index(self, table, index_name):
return Clause(
SQL('DROP INDEX'),
Entity(index_name),
SQL('ON'),
Entity(table))
class SqliteMigrator(SchemaMigrator):
"""
SQLite supports a subset of ALTER TABLE queries, view the docs for the
full details http://sqlite.org/lang_altertable.html
"""
column_re = re.compile('(.+?)\((.+)\)')
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
column_name_re = re.compile('"?([\w]+)')
fk_re = re.compile('FOREIGN KEY\s+\("?([\w]+)"?\)\s+', re.I)
def _get_column_names(self, table):
res = self.database.execute_sql('select * from "%s" limit 1' % table)
return [item[0] for item in res.description]
def _get_create_table(self, table):
res = self.database.execute_sql(
('select name, sql from sqlite_master '
'where type=? and LOWER(name)=?'),
['table', table.lower()])
return res.fetchone()
@operation
def _update_column(self, table, column_to_update, fn):
columns = set(column.name.lower()
for column in self.database.get_columns(table))
if column_to_update.lower() not in columns:
raise ValueError('Column "%s" does not exist on "%s"' %
(column_to_update, table))
# Get the SQL used to create the given table.
table, create_table = self._get_create_table(table)
# Get the indexes and SQL to re-create indexes.
indexes = self.database.get_indexes(table)
# Find any foreign keys we may need to remove.
foreign_keys = self.database.get_foreign_keys(table)
# Parse out the `CREATE TABLE` and column list portions of the query.
raw_create, raw_columns = self.column_re.search(create_table).groups()
# Clean up the individual column definitions.
column_defs = [
col.strip() for col in self.column_split_re.findall(raw_columns)]
new_column_defs = []
new_column_names = []
original_column_names = []
fk_columns = {}
for column_def in column_defs:
column_name, = self.column_name_re.match(column_def).groups()
if column_name == column_to_update:
new_column_def = fn(column_name, column_def)
if new_column_def:
new_column_defs.append(new_column_def)
original_column_names.append(column_name)
column_name, = self.column_name_re.match(
new_column_def).groups()
new_column_names.append(column_name)
else:
new_column_defs.append(column_def)
if not column_name.lower().startswith(('foreign', 'primary')):
new_column_names.append(column_name)
original_column_names.append(column_name)
# Create a mapping of original columns to new columns.
original_to_new = dict(zip(original_column_names, new_column_names))
new_column = original_to_new.get(column_to_update)
fk_filter_fn = lambda column_def: column_def
if not new_column:
# Remove any foreign keys associated with this column.
fk_filter_fn = lambda column_def: None
elif new_column != column_to_update:
# Update any foreign keys for this column.
fk_filter_fn = lambda column_def: self.fk_re.sub(
'FOREIGN KEY ("%s") ' % new_column,
column_def)
cleaned_columns = []
for column_def in new_column_defs:
match = self.fk_re.match(column_def)
if match is not None and match.groups()[0] == column_to_update:
column_def = fk_filter_fn(column_def)
if column_def:
cleaned_columns.append(column_def)
# Update the name of the new CREATE TABLE query.
temp_table = table + '__tmp__'
rgx = re.compile('("?)%s("?)' % table, re.I)
create = rgx.sub(
'\\1%s\\2' % temp_table,
raw_create)
# Create the new table.
columns = ', '.join(cleaned_columns)
queries = [
Clause(SQL('DROP TABLE IF EXISTS'), Entity(temp_table)),
SQL('%s (%s)' % (create.strip(), columns))]
# Populate new table.
populate_table = Clause(
SQL('INSERT INTO'),
Entity(temp_table),
EnclosedClause(*[Entity(col) for col in new_column_names]),
SQL('SELECT'),
CommaClause(*[Entity(col) for col in original_column_names]),
SQL('FROM'),
Entity(table))
queries.append(populate_table)
# Drop existing table and rename temp table.
queries.append(Clause(
SQL('DROP TABLE'),
Entity(table)))
queries.append(self.rename_table(temp_table, table))
# Re-create indexes.
for index in indexes:
# Auto-generated indexes in SQLite will not have associated SQL,
# so pass over them.
if not index.sql:
continue
if column_to_update in index.columns:
if new_column:
queries.append(
SQL(index.sql.replace(column_to_update, new_column)))
else:
queries.append(SQL(index.sql))
return queries
@operation
def drop_column(self, table, column_name, cascade=True):
return self._update_column(table, column_name, lambda a, b: None)
@operation
def rename_column(self, table, old_name, new_name):
def _rename(column_name, column_def):
return column_def.replace(column_name, new_name)
return self._update_column(table, old_name, _rename)
@operation
def add_not_null(self, table, column):
def _add_not_null(column_name, column_def):
return column_def + ' NOT NULL'
return self._update_column(table, column, _add_not_null)
@operation
def drop_not_null(self, table, column):
def _drop_not_null(column_name, column_def):
return column_def.replace('NOT NULL', '')
return self._update_column(table, column, _drop_not_null)
def migrate(*operations, **kwargs):
for operation in operations:
operation.run() | 0.776114 | 0.116663 |
import ipaddress
import json
import re
import string
import sys
from datetime import datetime
# Phantom App imports
import phantom.app as phantom
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
from kennasecurity_consts import *
class RetVal(tuple):
    """Lightweight (status, data) pair returned by the REST helpers.

    The second element defaults to ``None`` so callers can return a bare
    status, e.g. ``return RetVal(phantom.APP_ERROR)``, and still unpack
    two values.
    """

    def __new__(cls, val1, val2=None):
        # Use `cls` rather than hard-coding RetVal so that any subclass
        # constructs instances of itself (standard tuple-subclass idiom).
        return super(RetVal, cls).__new__(cls, (val1, val2))
class KennaSecurityConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(KennaSecurityConnector, self).__init__()
self._state = None
self._risk_token = None
@staticmethod
def _process_empty_response(response, action_result):
""" This function is used to process empty response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# For given response codes, send success with empty response dict
status_codes = [200, 204]
if response.status_code in status_codes:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"),
None)
@staticmethod
def _process_html_response(response, action_result):
""" This function is used to process html response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
error_text = soup.text.encode('utf-8')
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except:
error_text = "Cannot parse error details"
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text.encode('utf-8'))
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
@staticmethod
def _process_json_response(response, action_result):
""" This function is used to process json response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# Try a json parse
try:
resp_json = response.json()
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}"
.format(str(e))), None)
# Please specify the status codes here
if 200 <= response.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
# You should process the error returned in the json
message = "Error from server. Status Code: {0} Data from server: {1}".format(
response.status_code, response.text.replace('{', '{{').replace('}', '}}'))
# Check for message in error codes of Kenna Security
error_codes = [400, 401, 404, 409, 412, 422, 429]
if response.status_code in error_codes:
if resp_json.get('message', ""):
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
resp_json['message'])
elif resp_json.get('error', ""):
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
resp_json['error'])
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
""" This function is used to process html response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# store the r_text in debug data, it will get dumped in the logs if the action fails
if hasattr(action_result, 'add_debug_data'):
action_result.add_debug_data({'r_status_code': response.status_code})
action_result.add_debug_data({'r_text': response.text})
action_result.add_debug_data({'r_headers': response.headers})
# Process each 'Content-Type' of response separately
# Process a json response
if 'json' in response.headers.get('Content-Type', ''):
return self._process_json_response(response, action_result)
# Process an HTML response, Do this no matter what the api talks.
# There is a high chance of a PROXY in between phantom and the rest of
# world, in case of errors, PROXY's return HTML, this function parses
# the error and adds it to the action_result.
if 'html' in response.headers.get('Content-Type', ''):
return self._process_html_response(response, action_result)
# it's not content-type that is to be parsed, handle an empty response
if not response.text:
return self._process_empty_response(response, action_result)
# everything else is actually an error at this point
message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
response.status_code, response.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get",
timeout=None, files=None):
""" Function that makes the REST call to the app.
:param endpoint: REST endpoint that needs to appended to the service address
:param action_result: object of ActionResult class
:param headers: request headers
:param params: request parameters
:param data: request body
:param method: GET/POST/PUT/DELETE/PATCH (Default will be GET)
:param timeout: Timeout for API call
:param files: File to be uploaded
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
response obtained by making an API call
"""
resp_json = None
if headers is None:
headers = {}
url = "{}{}".format(KENNA_API_BASE_URL, endpoint)
headers.update({
'X-Risk-Token': self._risk_token
})
if not self.get_action_identifier() == 'run_connector':
headers.update({
'Content-Type': 'application/json'
})
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)
try:
response = request_func(url, data=data, headers=headers, verify=True, params=params, timeout=timeout,
files=files)
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}"
.format(str(e))), resp_json)
return self._process_response(response, action_result)
def _handle_test_connectivity(self, param):
""" This function is used to test the connectivity of an asset with given credentials.
:param param: (not used in this method)
:return: status success/failure
"""
action_result = self.add_action_result(ActionResult(dict(param)))
self.send_progress(KENNA_MAKING_CONNECTION_MSG)
# test connectivity check on users endpoint
endpoint = KENNA_USERS_ENDPOINT
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result,
timeout=KENNA_TEST_CONNECTIVITY_TIMEOUT)
self.send_progress('')
# Something went wrong
if phantom.is_fail(ret_val):
self.save_progress(KENNA_TEST_CONNECTIVITY_FAILED_MSG)
return action_result.get_status()
self.save_progress(KENNA_USERS_FOUND_MSG)
self.save_progress(KENNA_TEST_CONNECTIVITY_PASSED_MSG)
return action_result.set_status(phantom.APP_SUCCESS)
def _modify_data_paths(self, item):
""" This function is used to modify data paths for domain and URL for assets and vulnerabilities.
:param item: Dictionary of asset/vulnerability
:return: modified asset/vulnerability
"""
if self.get_action_identifier() == 'list_devices':
asset = item
if asset.get('urls', {}).get('vulnerabilities', ""):
url = asset['urls']['vulnerabilities']
if phantom.is_url(url):
domain = url.split('/')[0]
url = 'https://{}'.format(url)
domains = {
'vulnerabilities': domain
}
asset['urls'].update({
'vulnerabilities': url
})
asset.update({
'domains': domains
})
return asset
else:
vulnerability = item
if vulnerability.get('urls', {}).get('asset', ""):
url = vulnerability['urls']['asset']
if phantom.is_url(url):
domain = url.split('/')[0]
url = 'https://{}'.format(url)
domains = {
'asset': domain
}
vulnerability['urls'].update({
'asset': url
})
vulnerability.update({
'domains': domains
})
return vulnerability
def _process_tags(self, tags):
""" This function is used to process comma seperated tags.
:param tags: Comma separated string of tags
:return: updated_list: Comma separated string of processed tags
"""
tags_list = tags.strip().split(',')
updated_tags = ""
for tag in tags_list:
tag = tag.strip()
if not tag == "":
if not updated_tags:
updated_tags = tag
updated_tags = '{},{}'.format(updated_tags, tag)
return updated_tags
def _is_mac(self, value):
""" This function is used to verify valid MAC for Kenna security.
:param value: Value of the filter
:return: status(true/false)
"""
# Update MAC as per Kenna security data format
value = value.replace("-", "").replace(":", "")
# Check for MAC length
if not len(value) == 12:
return False
# Check for valid hexadecimal character (0-9, a-f, A-F)
return all(c in string.hexdigits for c in value)
    def _filter_asset(self, action_result, params_asset, filter_value=None):
        """ This function is used to filter asset based on given filter.

        Pages through the asset search endpoint. For 'list_devices' every
        asset of every page is added to *action_result* and (SUCCESS, None)
        is returned; for all other actions the first asset whose locator
        matches *filter_value* (case-insensitive) is returned, or
        (APP_ERROR, None) when no page contains a match.

        :param action_result: object of ActionResult class
        :param params_asset: Dictionary of parameters to be sent for API call
            (mutated in place: a 'page' key is added/updated)
        :param filter_value: Value of the filter. NOTE(review): required for
            every action except 'list_devices' -- a None here would crash on
            .lower(); confirm callers always supply it.
        :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), asset obtained by making an API call
        """
        page = 1
        params_asset.update({
            'page': page
        })
        while True:
            ret_val, response = self._make_rest_call(endpoint=KENNA_ASSET_SEARCH_ENDPOINT, action_result=action_result,
                                                     params=params_asset)
            if phantom.is_fail(ret_val):
                return action_result.get_status(), None
            if not self.get_action_identifier() == 'list_devices':
                # Lookup mode: stop at the first asset whose locator matches
                for asset in response['assets']:
                    if asset['locator'].lower() == filter_value.lower():
                        return phantom.APP_SUCCESS, asset
            else:
                # Listing mode: collect every asset into the action result
                for asset in response['assets']:
                    asset = self._modify_data_paths(asset)
                    action_result.add_data(asset)
            # Check if current page is less than total pages and Kenna security page limit (i.e. 20)
            if response['meta']['page'] < response['meta']['pages'] and response['meta']['page'] < 20:
                page += 1
                params_asset.update({
                    'page': page
                })
            else:
                # list_devices succeeds once all pages are consumed; for a
                # lookup, falling out of the loop means nothing matched
                if self.get_action_identifier() == 'list_devices':
                    return phantom.APP_SUCCESS, None
                break
        return phantom.APP_ERROR, None
def _validate_date(self, due_date):
""" This function is used to validate date for due date as per YYYY-MM-DD format or valid iso8601 format.
:param due_date: Value of the date
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
regex = r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):' \
r'([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$'
match_iso8601 = re.compile(regex).match
try:
if match_iso8601(due_date) is not None:
return phantom.APP_SUCCESS
elif datetime.strptime(due_date, '%Y-%m-%d'):
return phantom.APP_SUCCESS
except:
return phantom.APP_ERROR
return phantom.APP_ERROR
def _verify_param(self, input_value, action_result):
""" This function is used to check that the input for connector is positive integer or a valid string.
In current phantom version 3.5.210, numeric input value can be string or int depending on the
value passed by user. So we need to verify that it is valid integer.
For e.g. if user passes 5 it will passed as an integer, but if user passes random
string it will be passed as an string.
:param input_value: Input parameter
:param action_result: object of ActionResult class
:return: ID of the connector
"""
if input_value.isdigit() and int(input_value) != 0:
return input_value
else:
try:
float(input_value)
return None
except ValueError:
self.debug_print(input_value)
status, connector = self._get_connector_id(action_result, input_value)
if phantom.is_fail(status):
return None
return connector['id']
def _get_connector_id(self, action_result, connector):
""" This function is used to get ID of connector from its name.
:param action_result: object of ActionResult class
:param connector: Name of the connector
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), connector obtained by making an API call
"""
ret_val, response = self._make_rest_call(endpoint=KENNA_CONNECTORS_ENDPOINT, action_result=action_result)
if phantom.is_fail(ret_val):
return action_result.get_status(), None
for conn in response['connectors']:
if conn['name'].lower() == connector.lower():
return phantom.APP_SUCCESS, conn
return phantom.APP_ERROR, None
    def _handle_list_patches(self, param):
        """ This function is used to handle the list patches action.

        Searches Kenna fixes either by vulnerability ID or by a single
        IP / hostname / MAC filter, and pages through every result page.

        :param param: Dictionary of input parameters
        :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        filter_type = param.get(KENNA_JSON_FILTER_TYPE, "")
        filter_value = param.get(KENNA_JSON_FILTER, "")
        vulnerability_id = param.get(KENNA_JSON_VULNERABILITY_ID, "")
        params_patches = {}
        endpoint = KENNA_FIXES_SEARCH_ENDPOINT
        # Check if valid ID is present; the ID takes precedence over filters
        if vulnerability_id:
            if not isinstance(vulnerability_id, int) or vulnerability_id <= 0:
                return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
            params_patches.update({
                'id[]': vulnerability_id
            })
        # If filter type is not set
        elif filter_value and not filter_type:
            return action_result.set_status(phantom.APP_ERROR, KENNA_FILTER_TYPE_MISSING_MSG)
        # If filter value is missing
        elif filter_type and not filter_value:
            return action_result.set_status(phantom.APP_ERROR, KENNA_FILTER_MISSING_MSG)
        # If both filter type and value are present
        elif filter_type and filter_value:
            # If filter type is IP (unicode() here implies a Python 2 target)
            if filter_type == KENNA_CONST_IP:
                try:
                    ipaddress.ip_address(unicode(filter_value))
                except:
                    return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                    .format(KENNA_CONST_IP))
                params_patches.update({
                    'q': '{}:{}'.format(KENNA_FILTER_IP, filter_value)
                })
            # If filter type is Hostname
            elif filter_type == KENNA_CONST_HOSTNAME:
                if not phantom.is_hostname(filter_value):
                    return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                    .format(KENNA_CONST_HOSTNAME))
                params_patches.update({
                    'q': '{}:{}'.format(KENNA_FILTER_HOSTNAME, filter_value)
                })
            # If filter type is MAC Address
            elif filter_type == KENNA_CONST_MAC_ADDR:
                if not self._is_mac(filter_value):
                    return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                    .format(KENNA_CONST_MAC_ADDR))
                # Kenna stores MACs without separators
                filter_value = filter_value.replace("-", "").replace(":", "")
                params_patches.update({
                    'q': '{}:{}'.format(KENNA_FILTER_MAC_ADDR, filter_value)
                })
        page = 1
        params_patches.update({
            'page': page,
            'per_page': 99
        })
        while True:
            ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result,
                                                     params=params_patches)
            if phantom.is_fail(ret_val):
                return action_result.get_status()
            if not response.get('fixes', []):
                return action_result.set_status(phantom.APP_ERROR, "No patches found")
            for fix in response['fixes']:
                action_result.add_data(fix)
            # Check if current page is less than total pages
            if response['meta']['page'] < response['meta']['pages']:
                page += 1
                params_patches.update({
                    'page': page
                })
            else:
                break
        summary = action_result.update_summary({})
        summary['total_patches'] = action_result.get_data_size()
        return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_update_device(self, param):
        """ This function is used to handle the update device action.

        The target asset may be identified by ID, IP or hostname (checked
        in that order). Non-tag fields are updated with one PUT call; tags,
        when supplied, are updated with a second PUT against the asset's
        tags endpoint.

        :param param: Dictionary of input parameters
        :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        device_id = param.get(KENNA_JSON_DEVICE_ID, "")
        ip = param.get(KENNA_JSON_IP, "")
        hostname = param.get(KENNA_JSON_HOSTNAME, "")
        active = param.get(KENNA_JSON_ACTIVE, "")
        notes = param.get(KENNA_JSON_NOTES, "")
        owner = param.get(KENNA_JSON_OWNER, "")
        tags = param.get(KENNA_JSON_TAGS, "")
        # If valid ID is present
        if device_id:
            if not isinstance(device_id, int) or device_id <= 0:
                return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
        # If IP is present (unicode() here implies a Python 2 target)
        elif ip:
            try:
                ipaddress.ip_address(unicode(ip))
            except:
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_IP))
            params_device = {
                'primary_locator[]': KENNA_FILTER_IP
            }
            # Check for ID related to given IP
            status_device, response_device = self._filter_asset(action_result, params_device, ip)
            if phantom.is_fail(status_device):
                return action_result.set_status(phantom.APP_ERROR, "Cannot find requested IP")
            device_id = response_device['id']
        # If hostname is present
        elif hostname:
            if not phantom.is_hostname(hostname):
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_HOSTNAME))
            params_device = {
                'primary_locator[]': KENNA_FILTER_HOSTNAME
            }
            # Check for ID related to given hostname
            status_device, response_device = self._filter_asset(action_result, params_device, hostname)
            if phantom.is_fail(status_device):
                return action_result.set_status(phantom.APP_ERROR, "Cannot find requested Host")
            device_id = response_device['id']
        # If none of the ID or IP or Hostname is provided
        elif not (device_id or ip or hostname):
            return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter needs to be provided")
        device = {}
        # Set active status (Kenna stores the inverse flag 'inactive')
        if active == KENNA_CONST_TRUE:
            device.update({
                'inactive': 'false'
            })
        elif active == KENNA_CONST_FALSE:
            device.update({
                'inactive': 'true'
            })
        # Set notes for asset
        if notes:
            device.update({
                'notes': notes
            })
        # Set owner for asset
        if owner:
            device.update({
                'owner': owner
            })
        if not device:
            return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter is required for updating device")
        data = {
            'asset': device
        }
        endpoint = KENNA_ASSET_ENDPOINT.format(id=device_id)
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                                 data=json.dumps(data))
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        # If tags are present, then pass the comma separated list as an input
        if tags:
            updated_tags = self._process_tags(tags)
            device_tags = {
                'tags': updated_tags
            }
            data_tags = {
                'asset': device_tags
            }
            # Endpoint for updating tags to the given asset
            endpoint = "{}/tags".format(KENNA_ASSET_ENDPOINT.format(id=device_id))
            ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                                     data=json.dumps(data_tags))
            if phantom.is_fail(ret_val):
                return action_result.get_status()
        return action_result.set_status(phantom.APP_SUCCESS, "Device with ID {} updated".format(device_id))
def _handle_list_devices(self, param):
""" This function is used to handle list devices action.
:param param: Dictionary of input parameters
:return: status success/failure
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
# Search string to be passed as a filter
search = param.get(KENNA_JSON_SEARCH, "")
params = {}
if search:
params.update({
'q': search
})
status, _ = self._filter_asset(action_result, params)
if phantom.is_fail(status):
return action_result.get_status()
summary = action_result.update_summary({})
summary['total_devices'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_run_connector(self, param):
""" This function is used to handle the run connector action.
:param param: Dictionary of input parameters
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
connector = param[KENNA_JSON_CONNECTOR]
vault_id = param[KENNA_JSON_VAULT_ID]
# Get connector id for given connector
id = self._verify_param(connector, action_result)
if not id:
return action_result.set_status(phantom.APP_ERROR, "Connector not found")
# Find vault path for given vault ID
vault_path = Vault.get_file_path(vault_id)
# check if vault path is accessible
if not vault_path:
return action_result.set_status(phantom.APP_ERROR, "Vault path not found")
# Set run value true for connector to be run automatically after successful file upload
data = {
'run': "true"
}
endpoint = KENNA_RUN_CONNECTOR_ENDPOINT.format(id=id)
with open(vault_path, 'r') as f:
# Set file to be uploaded
files = {
'file': f
}
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='post',
data=data, files=files)
if phantom.is_fail(ret_val):
return action_result.get_status()
action_result.add_data(response)
return action_result.set_status(phantom.APP_SUCCESS, "Connector run is successful")
def _handle_list_connectors(self, param):
""" This function is used to handle the list connectors action.
:param param: Dictionary of input parameters
:return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
endpoint = KENNA_CONNECTORS_ENDPOINT
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
if not response.get('connectors', []):
return action_result.set_status(phantom.APP_ERROR, "No connectors found")
# Add connectors to action result object
for connector in response['connectors']:
action_result.add_data(connector)
summary = action_result.update_summary({})
summary['total_connectors'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_update_vulnerability(self, param):
        """ This function is used to handle the update vulnerability action.

        Builds a partial update (status, priority, notes, due date) from the
        supplied parameters and PUTs it to the vulnerability endpoint.

        :param param: Dictionary of input parameters
        :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        vulnerability_id = param[KENNA_JSON_VULNERABILITY_ID]
        vulnerability_status = param.get(KENNA_JSON_VULNERABILITY_STATUS, "")
        notes = param.get(KENNA_JSON_NOTES, "")
        priority = param.get(KENNA_JSON_PRIORITY, "")
        due_date = param.get(KENNA_JSON_DUE_DATE)
        # Check for valid ID
        if not isinstance(vulnerability_id, int) or vulnerability_id <= 0:
            return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
        endpoint = KENNA_VULNERABILITIES_ENDPOINT.format(id=vulnerability_id)
        vulnerability = {}
        # Set status for vulnerability (maps the human-readable constant to
        # the API filter value)
        if vulnerability_status == KENNA_CONST_OPEN:
            vulnerability.update({
                'status': KENNA_FILTER_OPEN,
            })
        elif vulnerability_status == KENNA_CONST_CLOSED:
            vulnerability.update({
                'status': KENNA_FILTER_CLOSED,
            })
        elif vulnerability_status == KENNA_CONST_RISK_ACCEPTED:
            vulnerability.update({
                'status': KENNA_FILTER_RISK_ACCEPTED,
            })
        elif vulnerability_status == KENNA_CONST_FALSE_POSITIVE:
            vulnerability.update({
                'status': KENNA_FILTER_FALSE_POSITIVE,
            })
        # Set priority for vulnerability
        if priority == KENNA_CONST_TRUE:
            vulnerability.update({
                'prioritized': 'true',
            })
        elif priority == KENNA_CONST_FALSE:
            vulnerability.update({
                'prioritized': 'false',
            })
        # Set notes for vulnerability
        if notes:
            vulnerability.update({
                'notes': notes
            })
        # Set due date for vulnerability in YYYY-MM-DD or iso8601 UTC format
        if due_date:
            date_status = self._validate_date(due_date)
            if not date_status:
                return action_result.set_status(phantom.APP_ERROR, KENNA_DATE_VALIDATION_FAILED_MSG)
            vulnerability.update({
                'due_date': due_date
            })
        if not vulnerability:
            return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter is required for updating "
                                                               "vulnerability")
        data = {
            'vulnerability': vulnerability
        }
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                                 data=json.dumps(data))
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        return action_result.set_status(phantom.APP_SUCCESS, "Vulnerability with ID {} updated".
                                        format(vulnerability_id))
    def _handle_get_vulnerabilities(self, param):
        """ This function is used to handle the get vulnerabilities action.

        Locates the asset matching an IP / hostname / MAC filter, then
        fetches and lists that asset's vulnerabilities.

        :param param: Dictionary of input parameters
        :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        filter_type = param[KENNA_JSON_FILTER_TYPE]
        filter_value = param[KENNA_JSON_FILTER]
        params_asset = {}
        # Check if filter type is IP (unicode() here implies a Python 2 target)
        if filter_type == KENNA_CONST_IP:
            try:
                ipaddress.ip_address(unicode(filter_value))
            except:
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_IP))
            params_asset.update({
                'primary_locator[]': KENNA_FILTER_IP
            })
        # Check if filter type is Hostname
        elif filter_type == KENNA_CONST_HOSTNAME:
            if not phantom.is_hostname(filter_value):
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_HOSTNAME))
            params_asset.update({
                'primary_locator[]': KENNA_FILTER_HOSTNAME
            })
        # Check if filter type is MAC Address
        elif filter_type == KENNA_CONST_MAC_ADDR:
            if not self._is_mac(filter_value):
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_MAC_ADDR))
            params_asset.update({
                'primary_locator[]': KENNA_FILTER_MAC_ADDR
            })
            # Set MAC Address as per Kenna security data format
            filter_value = filter_value.replace("-", "").replace(":", "")
        # Get valid asset for given filters
        status_asset, response_asset = self._filter_asset(action_result, params_asset, filter_value)
        if phantom.is_fail(status_asset):
            return action_result.set_status(phantom.APP_ERROR, "Cannot find requested IP or Host or MAC")
        id = response_asset['id']
        ret_val, response = self._make_rest_call(endpoint=KENNA_GET_VULNERABILITIES_ENDPOINT.format(id=id),
                                                 action_result=action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        # Add vulnerabilities in action result object
        for vulnerability in response['vulnerabilities']:
            vulnerability = self._modify_data_paths(vulnerability)
            action_result.add_data(vulnerability)
        summary = action_result.update_summary({})
        summary['total_vulnerabilities'] = action_result.get_data_size()
        return action_result.set_status(phantom.APP_SUCCESS)
    def _handle_run_query(self, param):
        """ This function is used to handle run query action.

        Searches vulnerabilities either by a free-form query string, or by
        status and connector filters, paging through every result page.

        :param param: Dictionary of input parameters
        :return: status success/failure
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        search = param.get(KENNA_JSON_SEARCH, "")
        vulnerability_status = param.get(KENNA_JSON_VULNERABILITY_STATUS, "")
        connector_names = param.get(KENNA_JSON_CONNECTOR, "")
        params = {}
        endpoint = KENNA_VULNERABILITY_SEARCH_ENDPOINT
        # Check if search string is present; it takes precedence over the
        # status/connector filters
        if search:
            params.update({
                'q': search
            })
        else:
            # Set status for vulnerability
            if vulnerability_status == KENNA_CONST_OPEN:
                params.update({
                    'status[]': KENNA_FILTER_OPEN,
                })
            elif vulnerability_status == KENNA_CONST_CLOSED:
                params.update({
                    'status[]': KENNA_FILTER_CLOSED,
                })
            elif vulnerability_status == KENNA_CONST_RISK_ACCEPTED:
                params.update({
                    'status[]': KENNA_FILTER_RISK_ACCEPTED,
                })
            elif vulnerability_status == KENNA_CONST_FALSE_POSITIVE:
                params.update({
                    'status[]': KENNA_FILTER_FALSE_POSITIVE,
                })
            elif vulnerability_status == KENNA_CONST_ALL:
                params.update({
                    'status[]': [KENNA_FILTER_OPEN, KENNA_FILTER_CLOSED, KENNA_FILTER_RISK_ACCEPTED,
                                 KENNA_FILTER_FALSE_POSITIVE]
                })
            # Set connector for vulnerability
            if connector_names:
                params.update({
                    'connector_names[]': connector_names
                })
        page = 1
        params.update({
            'page': page
        })
        while True:
            ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, params=params)
            if phantom.is_fail(ret_val):
                return action_result.get_status()
            if not response.get('vulnerabilities', []):
                return action_result.set_status(phantom.APP_ERROR, "No vulnerabilities found")
            # Add vulnerabilities in action result object
            for vulnerability in response['vulnerabilities']:
                vulnerability = self._modify_data_paths(vulnerability)
                action_result.add_data(vulnerability)
            # Check if current page is less than total pages and Kenna security page limit (i.e. 20)
            if response['meta']['page'] < response['meta']['pages'] and response['meta']['page'] < 20:
                page += 1
                params.update({
                    'page': page
                })
            else:
                break
        summary = action_result.update_summary({})
        summary['total_vulnerabilities'] = action_result.get_data_size()
        return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
""" This function gets current action identifier and calls member function of its own to handle the action.
:param param: dictionary which contains information about the actions to be executed
:return: status success/failure
"""
self.debug_print("action_id", self.get_action_identifier())
# Dictionary mapping each action with its corresponding actions
action_mapping = {
'test_connectivity': self._handle_test_connectivity,
'list_patches': self._handle_list_patches,
'update_device': self._handle_update_device,
'list_devices': self._handle_list_devices,
'run_connector': self._handle_run_connector,
'list_connectors': self._handle_list_connectors,
'update_vulnerability': self._handle_update_vulnerability,
'get_vulnerabilities': self._handle_get_vulnerabilities,
'run_query': self._handle_run_query
}
action = self.get_action_identifier()
action_execution_status = phantom.APP_SUCCESS
if action in action_mapping.keys():
action_function = action_mapping[action]
action_execution_status = action_function(param)
return action_execution_status
def initialize(self):
""" This is an optional function that can be implemented by the AppConnector derived class. Since the
configuration dictionary is already validated by the time this function is called, it's a good place to do any
extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
called.
"""
self._state = self.load_state()
# get the asset config
config = self.get_config()
self._risk_token = config[KENNA_CONFIG_RISK_TOKEN]
return phantom.APP_SUCCESS
def finalize(self):
""" This function gets called once all the param dictionary elements are looped over and no more handle_action
calls are left to be made. It gives the AppConnector a chance to loop through all the results that were
accumulated by multiple handle_action function calls and create any summary if required. Another usage is
cleanup, disconnect from remote devices etc.
:return: status (success/failure)
"""
# Save the state, this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == '__main__':
    # Stand-alone test harness: logs into the phantom platform (optional),
    # then replays a recorded action JSON through the connector.
    import argparse
    import pudb
    pudb.set_trace()
    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
    args = argparser.parse_args()
    session_id = None
    username = args.username
    # Bug fix: these two credential lines had been corrupted by a secret
    # scrubber ("<PASSWORD>" placeholders, a syntax error); restore reading
    # the -p argument and the interactive getpass fallback.
    password = args.password
    verify = args.verify
    if username is not None and password is None:
        # User specified a username but not a password, so ask
        import getpass
        password = getpass.getpass("Password: ")
    if username and password:
        try:
            print("Accessing the Login page")
            r = requests.get(BaseConnector._get_phantom_base_url() + "login", verify=verify, timeout=60)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken={}'.format(csrftoken)
            headers['Referer'] = BaseConnector._get_phantom_base_url() + 'login'
            print("Logging into Platform to get the session id")
            r2 = requests.post(BaseConnector._get_phantom_base_url() + "login", verify=verify, data=data, headers=headers, timeout=60)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platfrom. Error: {}".format(str(e)))
            sys.exit(1)
    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = KennaSecurityConnector()
        connector.print_progress_message = True
        if session_id is not None:
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])
        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))
    sys.exit(0)

import ipaddress
import json
import re
import string
import sys
from datetime import datetime
# Phantom App imports
import phantom.app as phantom
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
from kennasecurity_consts import *
class RetVal(tuple):
    """ Lightweight (status, data) pair returned by the REST helpers;
    the data element defaults to None so a bare status can be returned. """

    def __new__(cls, val1, val2=None):
        return super(RetVal, cls).__new__(cls, (val1, val2))
class KennaSecurityConnector(BaseConnector):
    def __init__(self):
        """ Initialize connector state holders; real values are filled in
        later (_state in initialize(), _risk_token from the asset config). """
        # Call the BaseConnectors init first
        super(KennaSecurityConnector, self).__init__()
        self._state = None
        self._risk_token = None
@staticmethod
def _process_empty_response(response, action_result):
""" This function is used to process empty response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# For given response codes, send success with empty response dict
status_codes = [200, 204]
if response.status_code in status_codes:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"),
None)
@staticmethod
def _process_html_response(response, action_result):
""" This function is used to process html response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
error_text = soup.text.encode('utf-8')
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except:
error_text = "Cannot parse error details"
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text.encode('utf-8'))
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
@staticmethod
def _process_json_response(response, action_result):
""" This function is used to process json response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# Try a json parse
try:
resp_json = response.json()
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}"
.format(str(e))), None)
# Please specify the status codes here
if 200 <= response.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
# You should process the error returned in the json
message = "Error from server. Status Code: {0} Data from server: {1}".format(
response.status_code, response.text.replace('{', '{{').replace('}', '}}'))
# Check for message in error codes of Kenna Security
error_codes = [400, 401, 404, 409, 412, 422, 429]
if response.status_code in error_codes:
if resp_json.get('message', ""):
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
resp_json['message'])
elif resp_json.get('error', ""):
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
resp_json['error'])
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
    """ Route an HTTP response to the proper parser based on its Content-Type.

    :param response: response data
    :param action_result: object of Action Result
    :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
    """
    # Record the raw response details; they get dumped in the logs if the action fails
    if hasattr(action_result, 'add_debug_data'):
        for debug_key, debug_value in (('r_status_code', response.status_code),
                                       ('r_text', response.text),
                                       ('r_headers', response.headers)):
            action_result.add_debug_data({debug_key: debug_value})

    content_type = response.headers.get('Content-Type', '')

    # JSON responses are the expected API format
    if 'json' in content_type:
        return self._process_json_response(response, action_result)

    # Proxies between Phantom and the server often answer errors with HTML,
    # so HTML is parsed regardless of what the API itself normally returns
    if 'html' in content_type:
        return self._process_html_response(response, action_result)

    # No parseable content type: an empty body gets its own handling
    if not response.text:
        return self._process_empty_response(response, action_result)

    # Anything else at this point is an error
    escaped_body = response.text.replace('{', '{{').replace('}', '}}')
    message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
        response.status_code, escaped_body)
    return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get",
                    timeout=None, files=None):
    """ Function that makes the REST call to the app.
    :param endpoint: REST endpoint that needs to appended to the service address
    :param action_result: object of ActionResult class
    :param headers: request headers
    :param params: request parameters
    :param data: request body
    :param method: GET/POST/PUT/DELETE/PATCH (Default will be GET)
    :param timeout: Timeout for API call
    :param files: File to be uploaded
    :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
    response obtained by making an API call
    """
    resp_json = None
    if headers is None:
        headers = {}
    url = "{}{}".format(KENNA_API_BASE_URL, endpoint)
    # Every request is authenticated via the asset's risk token header
    headers.update({
        'X-Risk-Token': self._risk_token
    })
    # Skip the JSON Content-Type for run_connector — presumably because that
    # action uploads a file and requests must set the multipart header itself
    # (TODO confirm against _handle_run_connector's usage of `files`)
    if not self.get_action_identifier() == 'run_connector':
        headers.update({
            'Content-Type': 'application/json'
        })
    try:
        # Resolve e.g. "get"/"post"/"put" to the matching requests.* function
        request_func = getattr(requests, method)
    except AttributeError:
        return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)
    try:
        response = request_func(url, data=data, headers=headers, verify=True, params=params, timeout=timeout,
                                files=files)
    except Exception as e:
        # Connection-level failure (DNS, TLS, timeout, ...)
        return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}"
                                               .format(str(e))), resp_json)
    return self._process_response(response, action_result)
def _handle_test_connectivity(self, param):
    """ Validate the configured credentials by querying the users endpoint.

    :param param: (not used in this method)
    :return: status success/failure
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    self.send_progress(KENNA_MAKING_CONNECTION_MSG)
    # A lightweight call against the users endpoint proves the token works
    ret_val, response = self._make_rest_call(endpoint=KENNA_USERS_ENDPOINT, action_result=action_result,
                                             timeout=KENNA_TEST_CONNECTIVITY_TIMEOUT)
    self.send_progress('')
    if phantom.is_fail(ret_val):
        self.save_progress(KENNA_TEST_CONNECTIVITY_FAILED_MSG)
        return action_result.get_status()
    self.save_progress(KENNA_USERS_FOUND_MSG)
    self.save_progress(KENNA_TEST_CONNECTIVITY_PASSED_MSG)
    return action_result.set_status(phantom.APP_SUCCESS)
def _modify_data_paths(self, item):
    """ This function is used to modify data paths for domain and URL for assets and vulnerabilities.

    Assets (list_devices) carry a 'vulnerabilities' URL, vulnerabilities carry
    an 'asset' URL; in both cases the scheme-less URL is prefixed with https://
    and its leading host segment is recorded under a new 'domains' key.

    :param item: Dictionary of asset/vulnerability
    :return: modified asset/vulnerability
    """
    # Pick which urls sub-key to rewrite based on the running action
    url_key = 'vulnerabilities' if self.get_action_identifier() == 'list_devices' else 'asset'
    raw_url = item.get('urls', {}).get(url_key, "")
    if raw_url and phantom.is_url(raw_url):
        # The host is everything before the first '/'
        item['urls'].update({
            url_key: 'https://{}'.format(raw_url)
        })
        item.update({
            'domains': {url_key: raw_url.split('/')[0]}
        })
    return item
def _process_tags(self, tags):
""" This function is used to process comma seperated tags.
:param tags: Comma separated string of tags
:return: updated_list: Comma separated string of processed tags
"""
tags_list = tags.strip().split(',')
updated_tags = ""
for tag in tags_list:
tag = tag.strip()
if not tag == "":
if not updated_tags:
updated_tags = tag
updated_tags = '{},{}'.format(updated_tags, tag)
return updated_tags
def _is_mac(self, value):
""" This function is used to verify valid MAC for Kenna security.
:param value: Value of the filter
:return: status(true/false)
"""
# Update MAC as per Kenna security data format
value = value.replace("-", "").replace(":", "")
# Check for MAC length
if not len(value) == 12:
return False
# Check for valid hexadecimal character (0-9, a-f, A-F)
return all(c in string.hexdigits for c in value)
def _filter_asset(self, action_result, params_asset, filter_value=None):
    """ This function is used to filter asset based on given filter.

    Pages through the asset search endpoint. For list_devices every asset is
    added to the action result; for other actions the first asset whose
    'locator' matches filter_value (case-insensitively) is returned.

    :param action_result: object of ActionResult class
    :param params_asset: Dictionary of parameters to be sent for API call
    :param filter_value: Value of the filter; unused for list_devices
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), asset obtained by making an API call
    """
    page = 1
    params_asset.update({
        'page': page
    })
    while True:
        ret_val, response = self._make_rest_call(endpoint=KENNA_ASSET_SEARCH_ENDPOINT, action_result=action_result,
                                                 params=params_asset)
        if phantom.is_fail(ret_val):
            return action_result.get_status(), None
        if not self.get_action_identifier() == 'list_devices':
            # Single-asset lookup: stop at the first locator match
            for asset in response['assets']:
                if asset['locator'].lower() == filter_value.lower():
                    return phantom.APP_SUCCESS, asset
        else:
            # list_devices: accumulate every asset (URLs/domains normalized)
            for asset in response['assets']:
                asset = self._modify_data_paths(asset)
                action_result.add_data(asset)
        # Check if current page is less than total pages and Kenna security page limit (i.e. 20)
        if response['meta']['page'] < response['meta']['pages'] and response['meta']['page'] < 20:
            page += 1
            params_asset.update({
                'page': page
            })
        else:
            if self.get_action_identifier() == 'list_devices':
                # Exhausting the pages is success for a listing; no single asset to return
                return phantom.APP_SUCCESS, None
            break
    # No asset matched the filter within the paging limits
    return phantom.APP_ERROR, None
def _validate_date(self, due_date):
    """ Validate a due date as YYYY-MM-DD or a full iso8601 timestamp.

    :param due_date: Value of the date
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    iso8601_pattern = re.compile(
        r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):'
        r'([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$')
    try:
        # Full iso8601 timestamp first, then the bare-date fallback;
        # strptime raises on anything that is not a valid YYYY-MM-DD
        if iso8601_pattern.match(due_date) is not None:
            return phantom.APP_SUCCESS
        if datetime.strptime(due_date, '%Y-%m-%d'):
            return phantom.APP_SUCCESS
    except:
        return phantom.APP_ERROR
    return phantom.APP_ERROR
def _verify_param(self, input_value, action_result):
    """ This function is used to check that the input for connector is positive integer or a valid string.
    In current phantom version 3.5.210, numeric input value can be string or int depending on the
    value passed by user. So we need to verify that it is valid integer.
    For e.g. if user passes 5 it will passed as an integer, but if user passes random
    string it will be passed as an string.
    :param input_value: Input parameter
    :param action_result: object of ActionResult class
    :return: ID of the connector
    """
    # A positive integer string is assumed to already be a connector ID and
    # is returned unchanged (note: returned as the original string, not int)
    if input_value.isdigit() and int(input_value) != 0:
        return input_value
    else:
        try:
            # Other numeric values (e.g. "1.5", "-3") are neither valid IDs
            # nor plausible connector names -> reject
            float(input_value)
            return None
        except ValueError:
            self.debug_print(input_value)
            # Not numeric at all: treat the value as a connector name and
            # resolve it to an ID via the connectors endpoint
            status, connector = self._get_connector_id(action_result, input_value)
            if phantom.is_fail(status):
                return None
            return connector['id']
def _get_connector_id(self, action_result, connector):
    """ Look up a connector by its (case-insensitive) name.

    :param action_result: object of ActionResult class
    :param connector: Name of the connector
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR), connector obtained by making an API call
    """
    ret_val, response = self._make_rest_call(endpoint=KENNA_CONNECTORS_ENDPOINT, action_result=action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status(), None
    # First connector whose name matches, ignoring case
    target = connector.lower()
    matched = next((conn for conn in response['connectors'] if conn['name'].lower() == target), None)
    if matched is None:
        return phantom.APP_ERROR, None
    return phantom.APP_SUCCESS, matched
def _handle_list_patches(self, param):
    """ This function is used to handle the list patches action.

    Builds a fixes-search query from either a vulnerability ID or a
    (filter_type, filter_value) pair, then pages through all results.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    filter_type = param.get(KENNA_JSON_FILTER_TYPE, "")
    filter_value = param.get(KENNA_JSON_FILTER, "")
    vulnerability_id = param.get(KENNA_JSON_VULNERABILITY_ID, "")
    params_patches = {}
    endpoint = KENNA_FIXES_SEARCH_ENDPOINT
    # Check if valid ID is present; an ID takes precedence over any filter
    if vulnerability_id:
        if not isinstance(vulnerability_id, int) or vulnerability_id <= 0:
            return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
        params_patches.update({
            'id[]': vulnerability_id
        })
    # If filter type is not set
    elif filter_value and not filter_type:
        return action_result.set_status(phantom.APP_ERROR, KENNA_FILTER_TYPE_MISSING_MSG)
    # If filter value is missing
    elif filter_type and not filter_value:
        return action_result.set_status(phantom.APP_ERROR, KENNA_FILTER_MISSING_MSG)
    # If both filter type and value are present
    elif filter_type and filter_value:
        # If filter type is IP
        if filter_type == KENNA_CONST_IP:
            try:
                # Python 2 `unicode` coercion; ip_address raises on invalid IPs
                ipaddress.ip_address(unicode(filter_value))
            except:
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_IP))
            params_patches.update({
                'q': '{}:{}'.format(KENNA_FILTER_IP, filter_value)
            })
        # If filter type is Hostname
        elif filter_type == KENNA_CONST_HOSTNAME:
            if not phantom.is_hostname(filter_value):
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_HOSTNAME))
            params_patches.update({
                'q': '{}:{}'.format(KENNA_FILTER_HOSTNAME, filter_value)
            })
        # If filter type is MAC Address
        elif filter_type == KENNA_CONST_MAC_ADDR:
            if not self._is_mac(filter_value):
                return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                                .format(KENNA_CONST_MAC_ADDR))
            # Kenna expects MACs without separators
            filter_value = filter_value.replace("-", "").replace(":", "")
            params_patches.update({
                'q': '{}:{}'.format(KENNA_FILTER_MAC_ADDR, filter_value)
            })
    page = 1
    params_patches.update({
        'page': page,
        'per_page': 99
    })
    # Page through the fixes search until all pages are consumed
    while True:
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result,
                                                 params=params_patches)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        if not response.get('fixes', []):
            return action_result.set_status(phantom.APP_ERROR, "No patches found")
        for fix in response['fixes']:
            action_result.add_data(fix)
        # Check if current page is less than total pages
        if response['meta']['page'] < response['meta']['pages']:
            page += 1
            params_patches.update({
                'page': page
            })
        else:
            break
    summary = action_result.update_summary({})
    summary['total_patches'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _handle_update_device(self, param):
    """ This function is used to handle the update device action.

    Resolves the target asset from an explicit ID, an IP or a hostname,
    PUTs the updated fields, and optionally updates the asset's tags in a
    second call.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    device_id = param.get(KENNA_JSON_DEVICE_ID, "")
    ip = param.get(KENNA_JSON_IP, "")
    hostname = param.get(KENNA_JSON_HOSTNAME, "")
    active = param.get(KENNA_JSON_ACTIVE, "")
    notes = param.get(KENNA_JSON_NOTES, "")
    owner = param.get(KENNA_JSON_OWNER, "")
    tags = param.get(KENNA_JSON_TAGS, "")
    # If valid ID is present (ID takes precedence over IP/hostname)
    if device_id:
        if not isinstance(device_id, int) or device_id <= 0:
            return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
    # If IP is present
    elif ip:
        try:
            # Python 2 `unicode` coercion; ip_address raises on invalid IPs
            ipaddress.ip_address(unicode(ip))
        except:
            return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                            .format(KENNA_CONST_IP))
        params_device = {
            'primary_locator[]': KENNA_FILTER_IP
        }
        # Check for ID related to given IP
        status_device, response_device = self._filter_asset(action_result, params_device, ip)
        if phantom.is_fail(status_device):
            return action_result.set_status(phantom.APP_ERROR, "Cannot find requested IP")
        device_id = response_device['id']
    # If hostname is present
    elif hostname:
        if not phantom.is_hostname(hostname):
            return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                            .format(KENNA_CONST_HOSTNAME))
        params_device = {
            'primary_locator[]': KENNA_FILTER_HOSTNAME
        }
        # Check for ID related to given hostname
        status_device, response_device = self._filter_asset(action_result, params_device, hostname)
        if phantom.is_fail(status_device):
            return action_result.set_status(phantom.APP_ERROR, "Cannot find requested Host")
        device_id = response_device['id']
    # If none of the ID or IP or Hostname is provided
    elif not (device_id or ip or hostname):
        return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter needs to be provided")
    device = {}
    # Set active status (the API field is the inverted 'inactive' flag)
    if active == KENNA_CONST_TRUE:
        device.update({
            'inactive': 'false'
        })
    elif active == KENNA_CONST_FALSE:
        device.update({
            'inactive': 'true'
        })
    # Set notes for asset
    if notes:
        device.update({
            'notes': notes
        })
    # Set owner for asset
    if owner:
        device.update({
            'owner': owner
        })
    # NOTE(review): if only `tags` is supplied (no active/notes/owner), this
    # rejects the request before the tag update below is ever reached —
    # confirm whether a tags-only update should be allowed
    if not device:
        return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter is required for updating device")
    data = {
        'asset': device
    }
    endpoint = KENNA_ASSET_ENDPOINT.format(id=device_id)
    ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                             data=json.dumps(data))
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # If tags are present, then pass the comma separated list as an input
    if tags:
        updated_tags = self._process_tags(tags)
        device_tags = {
            'tags': updated_tags
        }
        data_tags = {
            'asset': device_tags
        }
        # Endpoint for updating tags to the given asset
        endpoint = "{}/tags".format(KENNA_ASSET_ENDPOINT.format(id=device_id))
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                                 data=json.dumps(data_tags))
        if phantom.is_fail(ret_val):
            return action_result.get_status()
    return action_result.set_status(phantom.APP_SUCCESS, "Device with ID {} updated".format(device_id))
def _handle_list_devices(self, param):
    """ List assets, optionally narrowed by a search string.

    :param param: Dictionary of input parameters
    :return: status success/failure
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    # A non-empty search string is forwarded as the 'q' query parameter
    search = param.get(KENNA_JSON_SEARCH, "")
    params = {'q': search} if search else {}
    # _filter_asset adds every matching asset to the action result for us
    status, _ = self._filter_asset(action_result, params)
    if phantom.is_fail(status):
        return action_result.get_status()
    summary = action_result.update_summary({})
    summary['total_devices'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _handle_run_connector(self, param):
    """ Upload a vault file to a connector and trigger a connector run.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    connector = param[KENNA_JSON_CONNECTOR]
    vault_id = param[KENNA_JSON_VAULT_ID]
    # Resolve the user-supplied connector (ID or name) to a connector ID
    connector_id = self._verify_param(connector, action_result)
    if not connector_id:
        return action_result.set_status(phantom.APP_ERROR, "Connector not found")
    # Resolve the vault ID to a local file path and make sure it is accessible
    vault_path = Vault.get_file_path(vault_id)
    if not vault_path:
        return action_result.set_status(phantom.APP_ERROR, "Vault path not found")
    # 'run': true makes the connector start automatically after a successful upload
    data = {
        'run': "true"
    }
    endpoint = KENNA_RUN_CONNECTOR_ENDPOINT.format(id=connector_id)
    with open(vault_path, 'r') as upload_file:
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='post',
                                                 data=data, files={'file': upload_file})
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    action_result.add_data(response)
    return action_result.set_status(phantom.APP_SUCCESS, "Connector run is successful")
def _handle_list_connectors(self, param):
    """ Retrieve every connector configured in Kenna and report the total.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    ret_val, response = self._make_rest_call(endpoint=KENNA_CONNECTORS_ENDPOINT, action_result=action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    connectors = response.get('connectors', [])
    if not connectors:
        return action_result.set_status(phantom.APP_ERROR, "No connectors found")
    # Record each connector in the action result
    for connector in connectors:
        action_result.add_data(connector)
    summary = action_result.update_summary({})
    summary['total_connectors'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _handle_update_vulnerability(self, param):
    """ This function is used to handle the get vulnerabilities action.

    Builds an update payload from the optional status / priority / notes /
    due-date parameters and PUTs it to the vulnerability endpoint.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    vulnerability_id = param[KENNA_JSON_VULNERABILITY_ID]
    vulnerability_status = param.get(KENNA_JSON_VULNERABILITY_STATUS, "")
    notes = param.get(KENNA_JSON_NOTES, "")
    priority = param.get(KENNA_JSON_PRIORITY, "")
    due_date = param.get(KENNA_JSON_DUE_DATE)
    # Check for valid ID (must be a positive integer)
    if not isinstance(vulnerability_id, int) or vulnerability_id <= 0:
        return action_result.set_status(phantom.APP_ERROR, KENNA_ID_VALIDATION_FAILED_MSG)
    endpoint = KENNA_VULNERABILITIES_ENDPOINT.format(id=vulnerability_id)
    vulnerability = {}
    # Set status for vulnerability (map UI constant -> API filter value)
    if vulnerability_status == KENNA_CONST_OPEN:
        vulnerability.update({
            'status': KENNA_FILTER_OPEN,
        })
    elif vulnerability_status == KENNA_CONST_CLOSED:
        vulnerability.update({
            'status': KENNA_FILTER_CLOSED,
        })
    elif vulnerability_status == KENNA_CONST_RISK_ACCEPTED:
        vulnerability.update({
            'status': KENNA_FILTER_RISK_ACCEPTED,
        })
    elif vulnerability_status == KENNA_CONST_FALSE_POSITIVE:
        vulnerability.update({
            'status': KENNA_FILTER_FALSE_POSITIVE,
        })
    # Set priority for vulnerability (API expects the string 'true'/'false')
    if priority == KENNA_CONST_TRUE:
        vulnerability.update({
            'prioritized': 'true',
        })
    elif priority == KENNA_CONST_FALSE:
        vulnerability.update({
            'prioritized': 'false',
        })
    # Set notes for vulnerability
    if notes:
        vulnerability.update({
            'notes': notes
        })
    # Set due date for vulnerability in YYYY-MM-DD or iso8601 UTC format
    if due_date:
        date_status = self._validate_date(due_date)
        if not date_status:
            return action_result.set_status(phantom.APP_ERROR, KENNA_DATE_VALIDATION_FAILED_MSG)
        vulnerability.update({
            'due_date': due_date
        })
    # Nothing to update -> reject rather than send an empty PUT
    if not vulnerability:
        return action_result.set_status(phantom.APP_ERROR, "Atleast one parameter is required for updating "
                                                           "vulnerability")
    data = {
        'vulnerability': vulnerability
    }
    ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, method='put',
                                             data=json.dumps(data))
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    return action_result.set_status(phantom.APP_SUCCESS, "Vulnerability with ID {} updated".
                                    format(vulnerability_id))
def _handle_get_vulnerabilities(self, param):
    """ This function is used to handle the get vulnerabilities action.

    Locates the asset matching the given IP / hostname / MAC filter, then
    fetches and reports that asset's vulnerabilities.

    :param param: Dictionary of input parameters
    :return: status(phantom.APP_SUCCESS/phantom.APP_ERROR)
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    filter_type = param[KENNA_JSON_FILTER_TYPE]
    filter_value = param[KENNA_JSON_FILTER]
    params_asset = {}
    # Check if filter type is IP
    if filter_type == KENNA_CONST_IP:
        try:
            # Python 2 `unicode` coercion; ip_address raises on invalid IPs
            ipaddress.ip_address(unicode(filter_value))
        except:
            return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                            .format(KENNA_CONST_IP))
        params_asset.update({
            'primary_locator[]': KENNA_FILTER_IP
        })
    # Check if filter type is Hostname
    elif filter_type == KENNA_CONST_HOSTNAME:
        if not phantom.is_hostname(filter_value):
            return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                            .format(KENNA_CONST_HOSTNAME))
        params_asset.update({
            'primary_locator[]': KENNA_FILTER_HOSTNAME
        })
    # Check if filter type is MAC Address
    elif filter_type == KENNA_CONST_MAC_ADDR:
        if not self._is_mac(filter_value):
            return action_result.set_status(phantom.APP_ERROR, KENNA_PARAM_VALIDATION_FAILED_MSG
                                            .format(KENNA_CONST_MAC_ADDR))
        params_asset.update({
            'primary_locator[]': KENNA_FILTER_MAC_ADDR
        })
        # Set MAC Address as per Kenna security data format
        filter_value = filter_value.replace("-", "").replace(":", "")
    # Get valid asset for given filters
    status_asset, response_asset = self._filter_asset(action_result, params_asset, filter_value)
    if phantom.is_fail(status_asset):
        return action_result.set_status(phantom.APP_ERROR, "Cannot find requested IP or Host or MAC")
    # Asset ID of the matched device (local name shadows the `id` builtin)
    id = response_asset['id']
    ret_val, response = self._make_rest_call(endpoint=KENNA_GET_VULNERABILITIES_ENDPOINT.format(id=id),
                                             action_result=action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # Add vulnerabilities in action result object (URLs/domains normalized)
    for vulnerability in response['vulnerabilities']:
        vulnerability = self._modify_data_paths(vulnerability)
        action_result.add_data(vulnerability)
    summary = action_result.update_summary({})
    summary['total_vulnerabilities'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _handle_run_query(self, param):
    """ This function is used to handle run query action.

    Searches vulnerabilities either by a free-form query string or by a
    status / connector-name filter, paging through all results.

    :param param: Dictionary of input parameters
    :return: status success/failure
    """
    self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
    action_result = self.add_action_result(ActionResult(dict(param)))
    search = param.get(KENNA_JSON_SEARCH, "")
    vulnerability_status = param.get(KENNA_JSON_VULNERABILITY_STATUS, "")
    connector_names = param.get(KENNA_JSON_CONNECTOR, "")
    params = {}
    endpoint = KENNA_VULNERABILITY_SEARCH_ENDPOINT
    # A search string overrides the status/connector filters entirely
    if search:
        params.update({
            'q': search
        })
    else:
        # Set status for vulnerability
        if vulnerability_status == KENNA_CONST_OPEN:
            params.update({
                'status[]': KENNA_FILTER_OPEN,
            })
        elif vulnerability_status == KENNA_CONST_CLOSED:
            params.update({
                'status[]': KENNA_FILTER_CLOSED,
            })
        elif vulnerability_status == KENNA_CONST_RISK_ACCEPTED:
            params.update({
                'status[]': KENNA_FILTER_RISK_ACCEPTED,
            })
        elif vulnerability_status == KENNA_CONST_FALSE_POSITIVE:
            params.update({
                'status[]': KENNA_FILTER_FALSE_POSITIVE,
            })
        elif vulnerability_status == KENNA_CONST_ALL:
            # 'All' expands to every status value
            params.update({
                'status[]': [KENNA_FILTER_OPEN, KENNA_FILTER_CLOSED, KENNA_FILTER_RISK_ACCEPTED,
                             KENNA_FILTER_FALSE_POSITIVE]
            })
        # Set connector for vulnerability
        if connector_names:
            params.update({
                'connector_names[]': connector_names
            })
    page = 1
    params.update({
        'page': page
    })
    # Page through the search results
    while True:
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, params=params)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        if not response.get('vulnerabilities', []):
            return action_result.set_status(phantom.APP_ERROR, "No vulnerabilities found")
        # Add vulnerabilities in action result object
        for vulnerability in response['vulnerabilities']:
            vulnerability = self._modify_data_paths(vulnerability)
            action_result.add_data(vulnerability)
        # Check if current page is less than total pages and Kenna security page limit (i.e. 20)
        if response['meta']['page'] < response['meta']['pages'] and response['meta']['page'] < 20:
            page += 1
            params.update({
                'page': page
            })
        else:
            break
    summary = action_result.update_summary({})
    summary['total_vulnerabilities'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
    """ Dispatch the current action identifier to its handler method.

    :param param: dictionary which contains information about the actions to be executed
    :return: status success/failure
    """
    self.debug_print("action_id", self.get_action_identifier())
    # Map each supported action identifier to its handler
    action_mapping = {
        'test_connectivity': self._handle_test_connectivity,
        'list_patches': self._handle_list_patches,
        'update_device': self._handle_update_device,
        'list_devices': self._handle_list_devices,
        'run_connector': self._handle_run_connector,
        'list_connectors': self._handle_list_connectors,
        'update_vulnerability': self._handle_update_vulnerability,
        'get_vulnerabilities': self._handle_get_vulnerabilities,
        'run_query': self._handle_run_query,
    }
    handler = action_mapping.get(self.get_action_identifier())
    if handler is None:
        # Unknown actions are treated as a no-op success
        return phantom.APP_SUCCESS
    return handler(param)
def initialize(self):
    """ Load persisted state and cache the configured risk token.

    Called once before any handle_action call; returning phantom.APP_ERROR
    here would prevent handle_action from running at all.
    """
    self._state = self.load_state()
    # Cache the API token from the asset config; it is sent as the
    # X-Risk-Token header on every REST call
    self._risk_token = self.get_config()[KENNA_CONFIG_RISK_TOKEN]
    return phantom.APP_SUCCESS
def finalize(self):
    """ Persist state after the last handle_action call.

    Saving here keeps the state available across actions and app upgrades;
    this is also the place for any cleanup/disconnect logic.

    :return: status (success/failure)
    """
    self.save_state(self._state)
    return phantom.APP_SUCCESS
if __name__ == '__main__':
    # Standalone test harness: authenticates against the local Phantom
    # platform (optional) and replays a recorded action JSON file.
    import argparse

    import pudb
    pudb.set_trace()

    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
    args = argparser.parse_args()
    session_id = None

    username = args.username
    # Bug fix: this line previously read "password = <PASSWORD>" (a redaction
    # artifact, not valid Python); read the CLI argument instead
    password = args.password
    verify = args.verify

    if username is not None and password is None:
        # User specified a username but not a password, so prompt for one
        # (bug fix: "get<PASSWORD>.getpass" restored to getpass.getpass)
        import getpass
        password = getpass.getpass("Password: ")

    if username and password:
        try:
            print("Accessing the Login page")
            r = requests.get(BaseConnector._get_phantom_base_url() + "login", verify=verify, timeout=60)
            csrftoken = r.cookies['csrftoken']

            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken

            headers = dict()
            headers['Cookie'] = 'csrftoken={}'.format(csrftoken)
            headers['Referer'] = BaseConnector._get_phantom_base_url() + 'login'

            print("Logging into Platform to get the session id")
            r2 = requests.post(BaseConnector._get_phantom_base_url() + "login", verify=verify, data=data,
                               headers=headers, timeout=60)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            # Typo fix: "platfrom" -> "platform"
            print("Unable to get session id from the platform. Error: {}".format(str(e)))
            sys.exit(1)

    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))

        connector = KennaSecurityConnector()
        connector.print_progress_message = True

        if session_id is not None:
            # Reuse the platform session for REST calls made by the connector
            in_json['user_session_token'] = session_id
            connector._set_csrf_info(csrftoken, headers['Referer'])

        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))

    sys.exit(0)
import collections
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim
def get_depth_fn(depth_multiplier, min_depth):
  """Builds a callable to compute depth (output channels) of conv filters.

  Args:
    depth_multiplier: a multiplier for the nominal depth.
    min_depth: a lower bound on the depth of filters.

  Returns:
    A callable that takes in a nominal depth and returns the depth to use.
  """
  def multiply_depth(depth):
    # Scale the nominal depth, truncating to int, but never drop below min_depth
    scaled = int(depth * depth_multiplier)
    return scaled if scaled > min_depth else min_depth
  return multiply_depth
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
                                  min_depth, insert_1x1_conv, image_features):
  """Generates multi resolution feature maps from input image features.

  Generates multi-scale feature maps for detection as in the SSD papers by
  Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
  More specifically, it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as a
     feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the current
     implementation only supports generating new layers using convolution of
     stride 2 resulting in a spatial resolution reduction by a factor of 2.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128],
    'anchor_strides': [16, 32, 64, -1, -1, -1]
  }

  Args:
    feature_map_layout: Dictionary of specifications for the feature map
      layouts in the following format (Inception V2/V3 respectively):
      {
        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128],
        'anchor_strides': [16, 32, 64, -1, -1, -1]
      }
      or
      {
        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128],
        'anchor_strides': [16, 32, 64, -1, -1, -1]
      }
      If 'from_layer' is specified, the specified feature map is directly used
      as a box predictor layer, and the layer_depth is directly infered from
      the feature map (instead of using the provided 'layer_depth' parameter).
      In this case, our convention is to set 'layer_depth' to -1 for clarity.
      Otherwise, if 'from_layer' is an empty string, then the box predictor
      layer will be built from the previous layer using convolution
      operations. Note that the current implementation only supports
      generating new layers using convolutions of stride 2 (resulting in a
      spatial resolution reduction by a factor of 2), and will be extended to
      a more flexible design. Finally, the optional 'anchor_strides' can be
      used to specify the anchor stride at each layer where 'from_layer' is
      specified. Our convention is to set 'anchor_strides' to -1 whenever at
      the positions that 'from_layer' is an empty string, and anchor strides
      at these layers will be inferred from the previous layer's anchor
      strides and the current layer's stride length. In the case where
      'anchor_strides' is not specified, the anchor strides will default to
      the image width and height divided by the number of anchors.
    depth_multiplier: Depth multiplier for convolutional layers.
    min_depth: Minimum depth for convolutional layers.
    insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
      should be inserted before shrinking the feature map.
    image_features: A dictionary of handles to activation tensors from the
      base feature extractor.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: if the number entries in 'from_layer' and
      'layer_depth' do not match.
    ValueError: if the generated layer does not have the same resolution
      as specified.
  """
  depth_fn = get_depth_fn(depth_multiplier, min_depth)

  feature_map_keys = []
  feature_maps = []
  # Name of the most recent reused base-extractor layer; used as a prefix for
  # the scopes of any new layers derived from it.
  base_from_layer = ''
  feature_map_strides = None
  use_depthwise = False
  if 'anchor_strides' in feature_map_layout:
    feature_map_strides = (feature_map_layout['anchor_strides'])
  if 'use_depthwise' in feature_map_layout:
    use_depthwise = feature_map_layout['use_depthwise']
  for index, (from_layer, layer_depth) in enumerate(
      zip(feature_map_layout['from_layer'], feature_map_layout['layer_depth'])):
    if from_layer:
      # Explicit layer name: reuse the base extractor's activation as-is.
      feature_map = image_features[from_layer]
      base_from_layer = from_layer
      feature_map_keys.append(from_layer)
    else:
      # Empty name: derive a new map from the previous one via a stride-2 conv.
      pre_layer = feature_maps[-1]
      intermediate_layer = pre_layer
      if insert_1x1_conv:
        # Optional bottleneck at half the target depth before the 3x3 conv.
        layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
            base_from_layer, index, depth_fn(layer_depth / 2))
        intermediate_layer = slim.conv2d(
            pre_layer,
            depth_fn(layer_depth / 2), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      stride = 2
      layer_name = '{}_2_Conv2d_{}_3x3_s2_{}'.format(
          base_from_layer, index, depth_fn(layer_depth))
      if use_depthwise:
        # Depthwise 3x3 (stride 2) followed by a 1x1 pointwise projection.
        feature_map = slim.separable_conv2d(
            ops.pad_to_multiple(intermediate_layer, stride),
            None, [3, 3],
            depth_multiplier=1,
            padding='SAME',
            stride=stride,
            scope=layer_name + '_depthwise')
        feature_map = slim.conv2d(
            feature_map,
            depth_fn(layer_depth), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      else:
        feature_map = slim.conv2d(
            ops.pad_to_multiple(intermediate_layer, stride),
            depth_fn(layer_depth), [3, 3],
            padding='SAME',
            stride=stride,
            scope=layer_name)
      if (index > 0 and feature_map_strides and
          feature_map_strides[index - 1] > 0):
        # Derive this layer's anchor stride from the previous layer's stride.
        feature_map_strides[index] = (
            stride * feature_map_strides[index - 1])
      feature_map_keys.append(layer_name)
    feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim
def get_depth_fn(depth_multiplier, min_depth):
  """Builds a callable that scales a nominal conv-filter depth.

  The returned callable multiplies its argument by ``depth_multiplier``,
  truncates the product to an integer, and clamps the result so it is
  never below ``min_depth``.

  Args:
    depth_multiplier: a multiplier for the nominal depth.
    min_depth: a lower bound on the depth of filters.

  Returns:
    A callable that takes in a nominal depth and returns the depth to use.
  """
  def scaled_depth(nominal_depth):
    return max(int(nominal_depth * depth_multiplier), min_depth)
  return scaled_depth
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1]
}
If 'from_layer' is specified, the specified feature map is directly used
as a box predictor layer, and the layer_depth is directly infered from the
feature map (instead of using the provided 'layer_depth' parameter). In
this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Finally, the optional 'anchor_strides' can be used to specify the
anchor stride at each layer where 'from_layer' is specified. Our
convention is to set 'anchor_strides' to -1 whenever at the positions that
'from_layer' is an empty string, and anchor strides at these layers will
be inferred from the previous layer's anchor strides and the current
layer's stride length. In the case where 'anchor_strides' is not
specified, the anchor strides will default to the image width and height
divided by the number of anchors.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if the number entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
feature_map_strides = None
use_depthwise = False
if 'anchor_strides' in feature_map_layout:
feature_map_strides = (feature_map_layout['anchor_strides'])
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, (from_layer, layer_depth) in enumerate(
zip(feature_map_layout['from_layer'], feature_map_layout['layer_depth'])):
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth / 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
stride = 2
layer_name = '{}_2_Conv2d_{}_3x3_s2_{}'.format(
base_from_layer, index, depth_fn(layer_depth))
if use_depthwise:
feature_map = slim.separable_conv2d(
ops.pad_to_multiple(intermediate_layer, stride),
None, [3, 3],
depth_multiplier=1,
padding='SAME',
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
else:
feature_map = slim.conv2d(
ops.pad_to_multiple(intermediate_layer, stride),
depth_fn(layer_depth), [3, 3],
padding='SAME',
stride=stride,
scope=layer_name)
if (index > 0 and feature_map_strides and
feature_map_strides[index - 1] > 0):
feature_map_strides[index] = (
stride * feature_map_strides[index - 1])
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)]) | 0.897471 | 0.64855 |
from typing import Set, List, Dict, Tuple
import requests
import datetime as dt
from bs4 import BeautifulSoup as bs
import re
import calendar
from selenium.common.exceptions import TimeoutException
import config as conf
import pickle as pk
import os.path
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support.ui import WebDriverWait
def read_files(path: str) -> Set[str]:
    """Return the set of whitespace-stripped lines in *path*.

    Any failure to open or read the file (missing file, permissions, ...)
    yields an empty set rather than raising.
    """
    try:
        with open(path, 'r') as handle:
            raw_lines = handle.readlines()
    except Exception:
        return set()
    return {line.strip() for line in raw_lines}
def insert_new_url(d, date: dt.datetime, media: str, url: str):
    """Append *url* under d[date][media], creating missing levels.

    Mutates *d* (a dict of date -> {media name -> [urls]}) in place.
    Replaces the original nested ``if x not in d.keys()`` ladder with a
    single setdefault chain — same behavior, one expression.
    """
    d.setdefault(date, {}).setdefault(media, []).append(url)
def configure_firefox_driver():
    """Create and return a headless Firefox Selenium WebDriver.

    The caller is responsible for releasing the driver (driver.close()).
    """
    # Add additional Options to the webdriver
    firefox_options = FirefoxOptions()
    # add the argument and make the browser Headless.
    firefox_options.add_argument("--headless")
    driver = webdriver.Firefox(options=firefox_options)
    return driver
def scrape_media_text(url: str) -> str:
    """Fetch *url* and return the concatenated text of its <p> elements.

    AP News pages are JavaScript-rendered, so they are fetched through a
    headless Selenium driver; all other outlets are fetched with requests
    and parsed with BeautifulSoup.  Paragraphs are joined with a leading
    space each, so the result starts with a space when non-empty.
    """
    # Parent tags whose <p> children should be ignored (boilerplate).
    blacklist = [
        'style',
        'script',
        'header'
    ]
    out = ""
    if "https://apnews.com/" in url:
        driver = configure_firefox_driver()
        try:
            driver.get(url)
            # Wait up to 3 minutes for the JS-rendered article body.
            WebDriverWait(driver, 180).until(
                lambda h: h.find_element_by_class_name('Article').is_displayed()
            )
            ps = driver.find_elements_by_tag_name('p')
            for p in ps:
                out += " {}".format(p.text)
        finally:
            driver.close()
    else:
        page = requests.get(url)
        html = page.text
        soup = bs(html, 'html5lib')
        if "https://www.bbc.com" in url:
            # BBC wraps boilerplate text in <span>; skip those paragraphs.
            blacklist.append("span")
        elements = [t for t in soup.find_all('p') if t.parent.name not in blacklist]
        for i, s in enumerate(elements):
            out += " {}".format(s.getText())
    return out
def parse_dates(urls: Set[str]) -> Dict[dt.datetime, Dict[str, List[str]]]:
    """Group article *urls* by publication date and media outlet.

    The date is taken from the URL path where possible (EWN, The Guardian),
    from the rendered page via Selenium for AP News, and from the article
    markup for BBC.  URLs from unrecognized outlets are silently skipped.

    Returns:
        A dict mapping datetime -> {outlet name -> [urls]}.
    """
    out_dict = {}
    for url in urls:
        print("Scraping " + url)
        if "https://ewn.co.za/" in url:
            # BUG FIX: the original used url.strip("https://ewn.co.za/"),
            # but str.strip removes a *character set* from both ends, not a
            # prefix — it only worked by accident because EWN paths start
            # with a digit. Split on the prefix to expose "YYYY/MM/DD".
            raw_dt = url.split("https://ewn.co.za/", 1)[1][:10].split('/')
            date = dt.datetime(int(raw_dt[0]), int(raw_dt[1]), int(raw_dt[2]))
            insert_new_url(out_dict, date, "EWN", url)
        elif "https://apnews.com/" in url:
            driver = configure_firefox_driver()
            try:
                driver.get(url)
                WebDriverWait(driver, 60).until(
                    lambda h: h.find_element_by_class_name('CardHeadline').is_displayed()
                )
                # Rendered timestamp looks like "September 27, 2013 ...".
                p = driver.find_element_by_css_selector('span[data-key="timestamp"]')
                gs = re.match(r"^(\w+)[ ](\d+)[, ]+(\d+).*$", p.text)
                month_num = list(calendar.month_name).index(gs.group(1))
                day_num = gs.group(2)
                year_num = gs.group(3)
                date = dt.datetime(int(year_num), int(month_num), int(day_num))
                insert_new_url(out_dict, date, "Associated Press", url)
            finally:
                driver.close()
        elif "https://www.bbc.com/" in url:
            page = requests.get(url)
            soup = bs(page.text, 'html.parser')
            # <time data-testid="timestamp" datetime="YYYY-MM-DDT...">
            match = soup.select('time[data-testid="timestamp"]')[0]
            gs = re.match(r"^.*datetime=\"(\d+)-(\d+)-(\d+)T.*$", str(match))
            day_num = gs.group(3)
            month_num = gs.group(2)
            year_num = gs.group(1)
            date = dt.datetime(int(year_num), int(month_num), int(day_num))
            insert_new_url(out_dict, date, "BBC", url)
        elif "https://www.theguardian.com/" in url:
            # Guardian URLs embed /YYYY/mon/DD/ directly in the path.
            gs = re.match(r"^https://www.theguardian.com/[\w\-\/]*/(\d+)/(\w+)/(\d+)/.*$", url)
            day_num = gs.group(3)
            month_num = list(calendar.month_name).index(str(gs.group(2)).title())
            year_num = gs.group(1)
            date = dt.datetime(int(year_num), int(month_num), int(day_num))
            insert_new_url(out_dict, date, "The Guardian", url)
    return out_dict
def serialize(path: str, obj: any):
    """Pickle *obj* to the file at *path*, overwriting any existing file."""
    out_file = open(path, "wb")
    try:
        pk.dump(obj, out_file)
    finally:
        out_file.close()
def deserialize(path: str) -> any:
    """Load and return the pickled object stored at *path*."""
    in_file = open(path, "rb")
    try:
        return pk.load(in_file)
    finally:
        in_file.close()
def get_data() -> Dict[dt.datetime, Dict[str, List[str]]]:
    """
    Reads the URLs from "./resources/media_articles.txt" and organizes these by date and news outlet.
    :return a dictionary with keys being datetime and values being strings to lists of URLs.
    """
    # On-disk cache: repeated runs reload the pickle instead of re-parsing.
    # NOTE(review): a stale pickle is never invalidated — delete the file to
    # force a re-parse.
    mu_pickle = "./media_urls.pickle"
    print("Getting media objects... ", end="")
    if os.path.isfile(mu_pickle):
        out_dict = deserialize(mu_pickle)
    else:
        urls = read_files(conf.MEDIA_URLS_PATH)
        out_dict = parse_dates(urls)
        serialize(mu_pickle, out_dict)
    print("Success!")
    return out_dict
def get_scraped_text(media_data: Dict[dt.datetime, Dict[str, List[str]]]) -> Dict[dt.datetime, Dict[str, List[
        Tuple[str, str]]]]:
    """Download the article text for every URL in *media_data*.

    Returns the same date -> outlet mapping, with each URL replaced by a
    (url, text) tuple.  URLs whose scrape times out are dropped; scrapes
    returning fewer than 5 characters are reported but still kept.
    """
    scraped_data = {}
    for (date, media_map) in media_data.items():
        data_per_media_outlet = {}
        for (media_outlet, urls) in media_map.items():
            data_per_media_outlet[media_outlet] = []
            for u in urls:
                try:
                    text = scrape_media_text(u)
                    if len(text) < 5:
                        # Likely an empty or blocked page; flag but keep it.
                        print("Unsuccessful scrape {}".format(u))
                    data_per_media_outlet[media_outlet].append((u, text))
                except TimeoutException:
                    # Selenium wait expired; skip this URL entirely.
                    print("Unsuccessful scrape {} (timeout)".format(u))
        scraped_data[date] = data_per_media_outlet
    return scraped_data
def scraped_data(media_data: Dict[dt.datetime, Dict[str, List[str]]]) -> Dict[dt.datetime, Dict[str, List[
        Tuple[str, str]]]]:
    """Return scraped article text for *media_data*, cached on disk.

    Loads "./scraped_data.pickle" when present; otherwise scrapes every
    URL via get_scraped_text() and writes the pickle for subsequent runs.

    Fix: the original bound its result to a local named ``scraped_data``,
    shadowing this function's own name (and the module-level variable the
    main guard rebinds); the locals are renamed for clarity.
    """
    cache_path = "./scraped_data.pickle"
    print("Scraping media objects... ", end="")
    if os.path.isfile(cache_path):
        result = deserialize(cache_path)
    else:
        result = get_scraped_text(media_data)
        serialize(cache_path, result)
    print("Success!")
    return result
def export_to_csv(scraped_data: Dict[dt.datetime, Dict[str, List[Tuple[str, str]]]]):
    """Flatten *scraped_data* into one JSON object per line.

    NOTE(review): despite the name, this writes JSON Lines (one object per
    line) to '../media.json' in the parent directory — not CSV.
    """
    import json
    with open('../media.json', 'w') as f:
        for (date, media_data) in scraped_data.items():
            for (media, articles) in media_data.items():
                for (u, a) in articles:
                    payload = {
                        'date': date.strftime('%Y-%m-%d'),
                        'media_outlet': media,
                        'url': u,
                        'text': a.strip()
                    }
                    f.write(json.dumps(payload) + "\n")
if __name__ == "__main__":
media_data = get_data()
scraped_data = scraped_data(media_data)
export_to_csv(scraped_data) | scraping/import_media.py | from typing import Set, List, Dict, Tuple
import requests
import datetime as dt
from bs4 import BeautifulSoup as bs
import re
import calendar
from selenium.common.exceptions import TimeoutException
import config as conf
import pickle as pk
import os.path
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support.ui import WebDriverWait
def read_files(path: str) -> Set[str]:
try:
with open(path, 'r') as f:
return set([str(l).strip() for l in f.readlines()])
except Exception as _:
return set([])
def insert_new_url(d, date: dt.datetime, media: str, url: str):
if date not in d.keys():
d[date] = {media: [url]}
else:
media_dict = d[date]
if media not in media_dict.keys():
media_dict[media] = [url]
else:
media_dict[media].append(url)
def configure_firefox_driver():
# Add additional Options to the webdriver
firefox_options = FirefoxOptions()
# add the argument and make the browser Headless.
firefox_options.add_argument("--headless")
driver = webdriver.Firefox(options=firefox_options)
return driver
def scrape_media_text(url: str) -> str:
blacklist = [
'style',
'script',
'header'
]
out = ""
if "https://apnews.com/" in url:
driver = configure_firefox_driver()
try:
driver.get(url)
WebDriverWait(driver, 180).until(
lambda h: h.find_element_by_class_name('Article').is_displayed()
)
ps = driver.find_elements_by_tag_name('p')
for p in ps:
out += " {}".format(p.text)
finally:
driver.close()
else:
page = requests.get(url)
html = page.text
soup = bs(html, 'html5lib')
if "https://www.bbc.com" in url:
blacklist.append("span")
elements = [t for t in soup.find_all('p') if t.parent.name not in blacklist]
for i, s in enumerate(elements):
out += " {}".format(s.getText())
return out
def parse_dates(urls: Set[str]) -> Dict[dt.datetime, Dict[str, List[str]]]:
out_dict = {}
for url in urls:
print("Scraping " + url)
if "https://ewn.co.za/" in url:
raw_dt = url.strip("https://ewn.co.za/")[:10].split('/')
date = dt.datetime(int(raw_dt[0]), int(raw_dt[1]), int(raw_dt[2]))
insert_new_url(out_dict, date, "EWN", url)
elif "https://apnews.com/" in url:
driver = configure_firefox_driver()
try:
driver.get(url)
WebDriverWait(driver, 60).until(
lambda h: h.find_element_by_class_name('CardHeadline').is_displayed()
)
p = driver.find_element_by_css_selector('span[data-key="timestamp"]')
gs = re.match(r"^(\w+)[ ](\d+)[, ]+(\d+).*$", p.text)
month_num = list(calendar.month_name).index(gs.group(1))
day_num = gs.group(2)
year_num = gs.group(3)
date = dt.datetime(int(year_num), int(month_num), int(day_num))
insert_new_url(out_dict, date, "Associated Press", url)
finally:
driver.close()
elif "https://www.bbc.com/" in url:
page = requests.get(url)
soup = bs(page.text, 'html.parser')
match = soup.select('time[data-testid="timestamp"]')[0]
gs = re.match(r"^.*datetime=\"(\d+)-(\d+)-(\d+)T.*$", str(match))
day_num = gs.group(3)
month_num = gs.group(2)
year_num = gs.group(1)
date = dt.datetime(int(year_num), int(month_num), int(day_num))
insert_new_url(out_dict, date, "BBC", url)
elif "https://www.theguardian.com/" in url:
gs = re.match(r"^https://www.theguardian.com/[\w\-\/]*/(\d+)/(\w+)/(\d+)/.*$", url)
day_num = gs.group(3)
month_num = list(calendar.month_name).index(str(gs.group(2)).title())
year_num = gs.group(1)
date = dt.datetime(int(year_num), int(month_num), int(day_num))
insert_new_url(out_dict, date, "The Guardian", url)
return out_dict
def serialize(path: str, obj: any):
with open(path, "wb") as mu:
pk.dump(obj, mu)
def deserialize(path: str) -> any:
with open(path, "rb") as mu:
return pk.load(mu)
def get_data() -> Dict[dt.datetime, Dict[str, List[str]]]:
"""
Reads the URLs from "./resources/media_articles.txt" and organizes these by date and news outlet.
:return a dictionary with keys being datetime and values being strings to lists of URLs.
"""
mu_pickle = "./media_urls.pickle"
print("Getting media objects... ", end="")
if os.path.isfile(mu_pickle):
out_dict = deserialize(mu_pickle)
else:
urls = read_files(conf.MEDIA_URLS_PATH)
out_dict = parse_dates(urls)
serialize(mu_pickle, out_dict)
print("Success!")
return out_dict
def get_scraped_text(media_data: Dict[dt.datetime, Dict[str, List[str]]]) -> Dict[dt.datetime, Dict[str, List[
Tuple[str, str]]]]:
scraped_data = {}
for (date, media_map) in media_data.items():
data_per_media_outlet = {}
for (media_outlet, urls) in media_map.items():
data_per_media_outlet[media_outlet] = []
for u in urls:
try:
text = scrape_media_text(u)
if len(text) < 5:
print("Unsuccessful scrape {}".format(u))
data_per_media_outlet[media_outlet].append((u, text))
except TimeoutException:
print("Unsuccessful scrape {} (timeout)".format(u))
scraped_data[date] = data_per_media_outlet
return scraped_data
def scraped_data(media_data: Dict[dt.datetime, Dict[str, List[str]]]) -> Dict[dt.datetime, Dict[str, List[
Tuple[str, str]]]]:
mu_pickle = "./scraped_data.pickle"
print("Scraping media objects... ", end="")
if os.path.isfile(mu_pickle):
scraped_data = deserialize(mu_pickle)
else:
scraped_data = get_scraped_text(media_data)
serialize(mu_pickle, scraped_data)
print("Success!")
return scraped_data
def export_to_csv(scraped_data: Dict[dt.datetime, Dict[str, List[Tuple[str, str]]]]):
import json
with open('../media.json', 'w') as f:
for (date, media_data) in scraped_data.items():
for (media, articles) in media_data.items():
for (u, a) in articles:
payload = {
'date': date.strftime('%Y-%m-%d'),
'media_outlet': media,
'url': u,
'text': a.strip()
}
f.write(json.dumps(payload) + "\n")
if __name__ == "__main__":
media_data = get_data()
scraped_data = scraped_data(media_data)
export_to_csv(scraped_data) | 0.397471 | 0.164449 |
"""Tests for the McAfee AV Log parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mcafeeav as mcafeeav_formatter
from plaso.lib import eventdata
from plaso.parsers import mcafeeav
from plaso.parsers import test_lib
class McafeeAccessProtectionUnitTest(test_lib.ParserTestCase):
  """Tests for the McAfee AV Log parser."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._parser = mcafeeav.McafeeAccessProtectionParser()

  def testParse(self):
    """Tests the Parse function."""
    # Fix: assertEquals is a deprecated alias — use assertEqual throughout.
    test_file = self._GetTestFilePath(['AccessProtectionLog.txt'])
    event_queue_consumer = self._ParseFile(self._parser, test_file)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # The file contains 14 lines which results in 14 event objects.
    self.assertEqual(len(event_objects), 14)

    # Test that the UTF-8 byte order mark gets removed from the first line.
    event_object = event_objects[0]
    self.assertEqual(event_object.timestamp, 1380292946000000)

    # Test this entry:
    # 9/27/2013	2:42:26 PM	Blocked by Access Protection rule
    # SOMEDOMAIN\someUser	C:\Windows\System32\procexp64.exe	C:\Program Files
    # (x86)\McAfee\Common Framework\UdaterUI.exe	Common Standard
    # Protection:Prevent termination of McAfee processes	Action blocked :
    # Terminate
    event_object = event_objects[1]
    self.assertEqual(event_object.timestamp, 1380292959000000)
    self.assertEqual(event_object.username, u'SOMEDOMAIN\\someUser')
    self.assertEqual(
        event_object.full_path, u'C:\\Windows\\System32\\procexp64.exe')

    expected_msg = (
        u'File Name: C:\\Windows\\System32\\procexp64.exe '
        u'User: SOMEDOMAIN\\someUser '
        u'C:\\Program Files (x86)\\McAfee\\Common Framework\\Frame'
        u'workService.exe '
        u'Blocked by Access Protection rule '
        u'Common Standard Protection:Prevent termination of McAfee processes '
        u'Action blocked : Terminate')
    expected_msg_short = (
        u'C:\\Windows\\System32\\procexp64.exe '
        u'Action blocked : Terminate')

    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main() | plaso/parsers/mcafeeav_test.py | """Tests for the McAfee AV Log parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mcafeeav as mcafeeav_formatter
from plaso.lib import eventdata
from plaso.parsers import mcafeeav
from plaso.parsers import test_lib
class McafeeAccessProtectionUnitTest(test_lib.ParserTestCase):
"""Tests for the McAfee AV Log parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = mcafeeav.McafeeAccessProtectionParser()
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath(['AccessProtectionLog.txt'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# The file contains 14 lines which results in 14 event objects.
self.assertEquals(len(event_objects), 14)
# Test that the UTF-8 byte order mark gets removed from the first line.
event_object = event_objects[0]
self.assertEquals(event_object.timestamp, 1380292946000000)
# Test this entry:
# 9/27/2013 2:42:26 PM Blocked by Access Protection rule
# SOMEDOMAIN\someUser C:\Windows\System32\procexp64.exe C:\Program Files
# (x86)\McAfee\Common Framework\UdaterUI.exe Common Standard
# Protection:Prevent termination of McAfee processes Action blocked :
# Terminate
event_object = event_objects[1]
self.assertEquals(event_object.timestamp, 1380292959000000)
self.assertEquals(event_object.username, u'SOMEDOMAIN\\someUser')
self.assertEquals(
event_object.full_path, u'C:\\Windows\\System32\\procexp64.exe')
expected_msg = (
u'File Name: C:\\Windows\\System32\\procexp64.exe '
u'User: SOMEDOMAIN\\someUser '
u'C:\\Program Files (x86)\\McAfee\\Common Framework\\Frame'
u'workService.exe '
u'Blocked by Access Protection rule '
u'Common Standard Protection:Prevent termination of McAfee processes '
u'Action blocked : Terminate')
expected_msg_short = (
u'C:\\Windows\\System32\\procexp64.exe '
u'Action blocked : Terminate')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main() | 0.726037 | 0.357287 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
  """A named tuple describing a ResNet block.

  Its parts are:
    scope: The scope of the `Block`.
    unit_fn: The ResNet unit function which takes as input a `Tensor` and
      returns another `Tensor` with the output of the ResNet unit.
    args: A list of length equal to the number of units in the `Block`. The
      list contains one (depth, depth_bottleneck, stride) tuple for each unit
      in the block to serve as argument to unit_fn.
  """
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with
      the input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    # Identity: nothing to subsample.
    return inputs
  # A 1x1 max-pool with stride `factor` keeps every factor-th activation.
  return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, then we do explicit zero-padding, followed by conv2d with
  'VALID' padding.

  Note that
     net = conv2d_same(inputs, num_outputs, 3, stride=stride)
  is equivalent to
     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
                                    padding='SAME')
     net = subsample(net, factor=stride)
  whereas
     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
                                    padding='SAME')
  is different when the input's height or width is even, which is why we add
  the current function. For more details, see
  ResnetUtilsTest.testConv2DSameEven().

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        rate=rate,
        padding='SAME',
        scope=scope
    )
  else:
    # Pad explicitly so the amount of padding is independent of input size,
    # then convolve with 'VALID'. The effective kernel size accounts for the
    # atrous (dilation) rate.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
    )
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope
    )
@add_arg_scope
def stack_blocks_dense(
    net, blocks, output_stride=None, outputs_collections=None
):
  """Stacks ResNet `Blocks` and controls output feature density.

  First, this function creates scopes for the ResNet in the form of
  'block_name/unit_1', 'block_name/unit_2', etc.

  Second, this function allows the user to explicitly control the ResNet
  output_stride, which is the ratio of the input to output spatial resolution.
  This is useful for dense prediction tasks such as semantic segmentation or
  object detection.

  Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This
  results to a nominal ResNet output_stride equal to 8. If we set the
  output_stride to half the nominal network stride (e.g., output_stride=4),
  then we compute responses twice.

  Control of the output feature density is implemented by atrous convolution.

  Args:
    net: A `Tensor` of size [batch, height, width, channels].
    blocks: A list of length equal to the number of ResNet `Blocks`. Each
      element is a ResNet `Block` object describing the units in the `Block`.
    output_stride: If `None`, then the output will be computed at the nominal
      network stride. If output_stride is not `None`, it specifies the
      requested ratio of input to output spatial resolution, which needs to be
      equal to the product of unit strides from the start up to some level of
      the ResNet. For example, if the ResNet employs units with strides
      1, 2, 1, 3, 4, 1, then valid values for the output_stride are
      1, 2, 6, 24 or None (which is equivalent to output_stride=24).
    outputs_collections: Collection to add the ResNet block outputs.

  Returns:
    net: Output tensor with stride equal to the specified output_stride.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # The current_stride variable keeps track of the effective stride of the
  # activations. This allows us to invoke atrous convolution whenever applying
  # the next residual unit would result in the activations having stride larger
  # than the target output_stride.
  current_stride = 1

  # The atrous convolution rate parameter.
  rate = 1

  for block in blocks:
    with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
      for i, unit in enumerate(block.args):
        if output_stride is not None and current_stride > output_stride:
          raise ValueError(
              'The target output_stride cannot be reached.'
          )
        with variable_scope.variable_scope(
            'unit_%d' % (i + 1), values=[net]
        ):
          unit_depth, unit_depth_bottleneck, unit_stride = unit
          # If we have reached the target output_stride, then we need to
          # employ atrous convolution with stride=1 and multiply the atrous
          # rate by the current unit's stride for use in subsequent layers.
          if output_stride is not None and current_stride == output_stride:
            net = block.unit_fn(
                net,
                depth=unit_depth,
                depth_bottleneck=unit_depth_bottleneck,
                stride=1,
                rate=rate
            )
            rate *= unit_stride
          else:
            net = block.unit_fn(
                net,
                depth=unit_depth,
                depth_bottleneck=unit_depth_bottleneck,
                stride=unit_stride,
                rate=1
            )
            current_stride *= unit_stride
      net = utils.collect_named_outputs(
          outputs_collections, sc.name, net
      )
  if output_stride is not None and current_stride != output_stride:
    raise ValueError('The target output_stride cannot be reached.')

  return net
def resnet_arg_scope(
    is_training=True,
    weight_decay=0.0001,
    batch_norm_decay=0.997,
    batch_norm_epsilon=1e-5,
    batch_norm_scale=True
):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params
  ):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing a ResNet block.

    Its parts are:
      scope: The variable scope name of the `Block`.
      unit_fn: The ResNet unit function which takes as input a `Tensor` and
        returns another `Tensor` with the output of the ResNet unit.
      args: A list of length equal to the number of units in the `Block`. The
        list contains one (depth, depth_bottleneck, stride) tuple for each unit
        in the block to serve as argument to unit_fn.
    """
def subsample(inputs, factor, scope=None):
    """Subsamples the input along the spatial dimensions.

    Args:
      inputs: A `Tensor` of size [batch, height_in, width_in, channels].
      factor: The subsampling factor.
      scope: Optional variable_scope.

    Returns:
      A `Tensor` of size [batch, height_out, width_out, channels]: the input
      itself when `factor == 1`, otherwise the input subsampled by `factor`.
    """
    # A 1x1 max pool with stride `factor` keeps every `factor`-th activation,
    # which implements parameter-free spatial subsampling.
    return inputs if factor == 1 else layers.max_pool2d(
        inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D convolution with 'SAME' padding.

    When stride > 1, we do explicit zero-padding followed by conv2d with
    'VALID' padding. Note that

       net = conv2d_same(inputs, num_outputs, 3, stride=stride)

    is equivalent to

       net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
                                      padding='SAME')
       net = subsample(net, factor=stride)

    whereas

       net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
                                      padding='SAME')

    is different when the input's height or width is even, which is why we add
    the current function. For details, see ResnetUtilsTest.testConv2DSameEven().

    Args:
      inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
      num_outputs: An integer, the number of output filters.
      kernel_size: An int with the kernel_size of the filters.
      stride: An integer, the output stride.
      rate: An integer, rate for atrous convolution.
      scope: Scope.

    Returns:
      output: A 4-D tensor of size [batch, height_out, width_out, channels]
        with the convolution output.
    """
    # stride == 1 needs no manual padding: 'SAME' already aligns correctly.
    if stride == 1:
        return layers_lib.conv2d(
            inputs,
            num_outputs,
            kernel_size,
            stride=1,
            rate=rate,
            padding='SAME',
            scope=scope
        )
    # Atrous convolution enlarges the effective kernel footprint; pad for the
    # effective size, splitting any odd remainder toward the trailing edge.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
    )
    return layers_lib.conv2d(
        padded,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope
    )
@add_arg_scope
def stack_blocks_dense(
    net, blocks, output_stride=None, outputs_collections=None
):
    """Stacks ResNet `Blocks` and controls output feature density.

    First, this function creates scopes for the ResNet in the form of
    'block_name/unit_1', 'block_name/unit_2', etc.

    Second, this function allows the user to explicitly control the ResNet
    output_stride, which is the ratio of the input to output spatial
    resolution. This is useful for dense prediction tasks such as semantic
    segmentation or object detection.

    Most ResNets consist of 4 ResNet blocks and subsample the activations by a
    factor of 2 when transitioning between consecutive ResNet blocks. If we set
    the output_stride to half the nominal network stride (e.g.,
    output_stride=4), then we compute responses twice.

    Control of the output feature density is implemented by atrous convolution.

    Args:
      net: A `Tensor` of size [batch, height, width, channels].
      blocks: A list of length equal to the number of ResNet `Blocks`. Each
        element is a ResNet `Block` object describing the units in the `Block`.
      output_stride: If `None`, then the output will be computed at the nominal
        network stride. If output_stride is not `None`, it specifies the
        requested ratio of input to output spatial resolution, which needs to
        be equal to the product of unit strides from the start up to some level
        of the ResNet. For example, if the ResNet employs units with strides
        1, 2, 1, 3, 4, 1, then valid values for the output_stride are
        1, 2, 6, 24 or None (which is equivalent to output_stride=24).
      outputs_collections: Collection to add the ResNet block outputs.

    Returns:
      net: Output tensor with stride equal to the specified output_stride.

    Raises:
      ValueError: If the target output_stride is not valid (not reachable as a
        product of the leading unit strides).
    """
    # The current_stride variable keeps track of the effective stride of the
    # activations. This allows us to invoke atrous convolution whenever applying
    # the next residual unit would result in the activations having stride
    # larger than the target output_stride.
    current_stride = 1
    # The atrous convolution rate parameter.
    rate = 1
    for block in blocks:
        with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError(
                        'The target output_stride cannot be reached.'
                    )
                with variable_scope.variable_scope(
                    'unit_%d' % (i + 1), values=[net]
                ):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    # If we have reached the target output_stride, then we need
                    # to employ atrous convolution with stride=1 and multiply
                    # the atrous rate by the current unit's stride for use in
                    # subsequent layers.
                    if output_stride is not None and current_stride == output_stride:
                        net = block.unit_fn(
                            net,
                            depth=unit_depth,
                            depth_bottleneck=unit_depth_bottleneck,
                            stride=1,
                            rate=rate
                        )
                        rate *= unit_stride
                    else:
                        net = block.unit_fn(
                            net,
                            depth=unit_depth,
                            depth_bottleneck=unit_depth_bottleneck,
                            stride=unit_stride,
                            rate=1
                        )
                        current_stride *= unit_stride
            # Publish the block's final activation under the block scope name.
            net = utils.collect_named_outputs(
                outputs_collections, sc.name, net
            )
    # If the requested stride was never reached exactly, the blocks' strides
    # cannot produce it -- fail loudly rather than return a wrong density.
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return net
def resnet_arg_scope(
    is_training=True,
    weight_decay=0.0001,
    batch_norm_decay=0.997,
    batch_norm_epsilon=1e-5,
    batch_norm_scale=True
):
    """Defines the default ResNet arg scope.

    TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

    Args:
      is_training: Whether or not we are training the parameters in the batch
        normalization layers of the model.
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_decay: The moving average decay when estimating layer
        activation statistics in batch normalization.
      batch_norm_epsilon: Small constant to prevent division by zero when
        normalizing activations by their variance in batch normalization.
      batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale
        the activations in the batch normalization layer.

    Returns:
      An `arg_scope` to use for the resnet models.
    """
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
    }
    # Default conv2d settings: L2 weight decay, MSRA initialization, ReLU, and
    # batch normalization configured by batch_norm_params.
    conv2d_defaults = dict(
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params,
    )
    with arg_scope([layers_lib.conv2d], **conv2d_defaults):
        with arg_scope([layers.batch_norm], **batch_norm_params):
            # padding='SAME' for pool1 makes feature alignment easier for dense
            # prediction tasks; this is also what
            # https://github.com/facebook/fb.resnet.torch does. The original
            # 'Deep Residual Learning for Image Recognition' code uses
            # padding='VALID' for pool1; switch to that by setting
            # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
            with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
from os.path import join, realpath
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from decimal import Decimal
import math
import logging; logging.basicConfig(level=logging.ERROR)
import pandas as pd
from typing import List
import unittest
from hummingsim.backtest.backtest_market import BacktestMarket
from hummingsim.backtest.market import (
AssetType,
Market,
MarketConfig,
QuantizationParams
)
from hummingsim.backtest.mock_order_book_loader import MockOrderBookLoader
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
MarketEvent,
TradeType,
OrderType,
OrderFilledEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
TradeFee
)
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.strategy.dev_5_vwap import Dev5TwapTradeStrategy
import sys; sys.path.insert(0, realpath(join(__file__, "../../")))
class TWAPUnitTest(unittest.TestCase):
    """Backtest unit tests for Dev5TwapTradeStrategy driven in VWAP mode.

    NOTE(review): every strategy under test is constructed with is_vwap=True,
    so despite the TWAP-flavoured class name this appears to exercise the VWAP
    sizing behaviour (order size capped by order_percent_of_volume) -- confirm
    against the strategy implementation.
    """

    # One simulated hour of backtest time, advanced in 10-second ticks.
    start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
    end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")
    start_timestamp: float = start.timestamp()
    end_timestamp: float = end.timestamp()
    # [trading pair, base asset, quote asset]
    maker_symbols: List[str] = ["COINALPHA-WETH", "COINALPHA", "WETH"]
    clock_tick_size = 10

    def setUp(self):
        """Builds a funded backtest market over a balanced mock order book and
        instantiates the four strategy variants (limit/market x buy/sell) plus
        event loggers for fills, cancels, and completed orders."""
        self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
        self.market: BacktestMarket = BacktestMarket()
        self.maker_data: MockOrderBookLoader = MockOrderBookLoader(*self.maker_symbols)
        self.mid_price = 100
        self.time_delay = 15  # seconds between successive strategy orders
        self.cancel_order_wait_time = 45  # seconds before an open limit order is cancelled
        self.maker_data.set_balanced_order_book(mid_price=self.mid_price, min_price=1,
                                                max_price=200, price_step_size=1, volume_step_size=10)
        self.market.add_data(self.maker_data)
        self.market.set_balance("COINALPHA", 500)
        self.market.set_balance("WETH", 500000000000)
        self.market.set_balance("QETH", 500)
        self.market.set_quantization_param(
            QuantizationParams(
                self.maker_symbols[0], 6, 6, 6, 6
            )
        )
        self.market_info: MarketTradingPairTuple = MarketTradingPairTuple(
            *(
                [self.market] + self.maker_symbols
            )
        )
        # Define strategies to test
        self.limit_buy_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("99"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            is_vwap=True,
            percent_slippage=50.0,
            order_percent_of_volume=0.5,
            order_amount=Decimal("100.0")
        )
        self.limit_sell_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("101"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            is_vwap=True,
            percent_slippage=50.0,
            order_percent_of_volume=0.5,
            order_amount=Decimal("100.0")
        )
        self.market_buy_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            is_vwap=True,
            percent_slippage=50.0,
            order_percent_of_volume=0.5,
            order_amount=Decimal("100.0")
        )
        self.market_sell_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            is_vwap=True,
            percent_slippage=50.0,
            order_percent_of_volume=0.5,
            order_amount=Decimal("100.0")
        )
        self.clock.add_iterator(self.market)
        self.maker_order_fill_logger: EventLogger = EventLogger()
        self.cancel_order_logger: EventLogger = EventLogger()
        self.buy_order_completed_logger: EventLogger = EventLogger()
        self.sell_order_completed_logger: EventLogger = EventLogger()
        self.market.add_listener(MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger)
        self.market.add_listener(MarketEvent.SellOrderCompleted, self.sell_order_completed_logger)
        self.market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger)
        self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)

    @staticmethod
    def simulate_limit_order_fill(market: Market, limit_order: LimitOrder):
        """Simulates a complete fill of `limit_order` against `market`.

        Adjusts the market's base/quote balances as if the order traded in
        full, then fires the OrderFilled and Buy/SellOrderCompleted events so
        listeners see the same sequence a real fill would produce.
        """
        quote_currency_traded: Decimal = limit_order.price * limit_order.quantity
        base_currency_traded: Decimal = limit_order.quantity
        quote_currency: str = limit_order.quote_currency
        base_currency: str = limit_order.base_currency
        config: MarketConfig = market.config
        if limit_order.is_buy:
            # Buy: pay quote, receive base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) - quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) + base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.BUY,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.BuyOrderCompleted, BuyOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                base_currency if config.buy_fees_asset is AssetType.BASE_CURRENCY else quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))
        else:
            # Sell: receive quote, pay base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) + quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) - base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.SELL,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.SellOrderCompleted, SellOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                base_currency if config.sell_fees_asset is AssetType.BASE_CURRENCY else quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))

    def test_limit_buy_order(self):
        """Limit buy: first order on the first tick at the limit price, a
        follow-up order after time_delay, and cancellation after
        cancel_order_wait_time."""
        self.clock.add_iterator(self.limit_buy_strategy)
        # test whether number of orders is one at start
        # check whether the order is buy
        # check whether the price is correct
        # check whether amount is correct
        order_time_1 = self.start_timestamp + self.clock_tick_size
        self.clock.backtest_til(order_time_1)
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order.price)
        # VWAP sizing caps each slice below the total order_amount of 100.
        self.assertLessEqual(bid_order.quantity, 100)
        # Simulate market fill for limit buy and limit sell
        self.simulate_limit_order_fill(self.market, bid_order)
        fill_events = self.maker_order_fill_logger.event_log
        self.assertEqual(1, len(fill_events))
        bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
        self.assertEqual(1, len(bid_fills))
        # test whether number of orders is two after time delay
        # check whether the order is buy
        # check whether the price is correct
        # check whether amount is correct
        self.assertLess(bid_order.quantity, 100)
        order_time_2 = order_time_1 + self.clock_tick_size * math.ceil(self.time_delay / self.clock_tick_size)
        self.clock.backtest_til(order_time_2)
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order_2: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order_2.price)
        # The second slice must not exceed the remaining unfilled amount.
        self.assertLessEqual(bid_order_2.quantity, 100 - bid_order.quantity)
        # Check whether order is cancelled after cancel_order_wait_time
        cancel_time_2 = order_time_2 + self.cancel_order_wait_time
        self.clock.backtest_til(cancel_time_2)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))

    def test_limit_sell_order(self):
        """Limit sell: mirror of test_limit_buy_order on the ask side."""
        self.clock.add_iterator(self.limit_sell_strategy)
        # check no orders are placed before time delay
        # NOTE(review): this inspects limit_buy_strategy.active_asks, which is
        # trivially empty for a buy strategy -- the intent was presumably
        # limit_sell_strategy; verify before "fixing", since at this timestamp
        # the sell strategy may already have placed its first order.
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        self.assertEqual(0, len(self.limit_buy_strategy.active_asks))
        # test whether number of orders is one at start
        # check whether the order is sell
        # check whether the price is correct
        # check whether amount is correct
        order_time_1 = self.start_timestamp + self.clock_tick_size
        self.clock.backtest_til(order_time_1)
        self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
        ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
        self.assertEqual(Decimal("101"), ask_order.price)
        self.assertLessEqual(ask_order.quantity, 100)
        # Simulate market fill for limit buy and limit sell
        self.simulate_limit_order_fill(self.market, ask_order)
        fill_events = self.maker_order_fill_logger.event_log
        self.assertEqual(1, len(fill_events))
        ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
        self.assertEqual(1, len(ask_fills))
        # test whether number of orders is two after time delay
        # check whether the price is correct
        # check whether amount is correct
        self.assertLess(ask_order.quantity, 100)
        order_time_2 = order_time_1 + self.clock_tick_size * math.ceil(self.time_delay / self.clock_tick_size)
        self.clock.backtest_til(order_time_2)
        self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
        ask_order_2: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
        self.assertEqual(Decimal("101"), ask_order_2.price)
        self.assertLessEqual(ask_order_2.quantity, 100 - ask_order.quantity)
        # Check whether order is cancelled after cancel_order_wait_time
        cancel_time_2 = order_time_2 + self.cancel_order_wait_time
        self.clock.backtest_til(cancel_time_2)
        self.assertEqual(0, len(self.limit_sell_strategy.active_asks))

    def test_market_buy_order(self):
        """Market buy: no order before the delay; exactly one completed buy of
        at most order_amount afterwards."""
        self.clock.add_iterator(self.market_buy_strategy)
        # check no orders are placed before time delay
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        # test whether number of orders is one after one time delay
        # check whether the order is buy
        # check whether the size is correct
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size * 2)
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(1, len(market_buy_events))
        amount: Decimal = sum(t.base_asset_amount for t in market_buy_events)
        self.assertLessEqual(amount, Decimal("100"))
        self.buy_order_completed_logger.clear()

    def test_market_sell_order(self):
        """Market sell: mirror of test_market_buy_order on the sell side."""
        self.clock.add_iterator(self.market_sell_strategy)
        # check no orders are placed before time delay
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(0, len(market_sell_events))
        # test whether number of orders is one
        # check whether the order is sell
        # check whether the size is correct
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size * 2)
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(1, len(market_sell_events))
        amount: Decimal = sum(t.base_asset_amount for t in market_sell_events)
        self.assertLessEqual(amount, Decimal("100"))
        self.sell_order_completed_logger.clear()

    def test_order_filled_events(self):
        """Runs buy and sell limit strategies together and checks that one
        fill event of each side is logged after simulated fills."""
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.add_iterator(self.limit_sell_strategy)
        # check no orders are placed before time delay
        self.clock.backtest_til(self.start_timestamp)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        # test whether number of orders is one
        # check whether the price is correct
        # check whether amount is correct
        # NOTE(review): math.ceil(clock_tick_size / time_delay) == 1 here, so
        # this only advances the clock by one second -- the expression looks
        # inverted (compare the tick-aligned delay math in the other tests);
        # confirm the intended timestamp.
        self.clock.backtest_til(self.start_timestamp + math.ceil(self.clock_tick_size / self.time_delay))
        self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
        ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
        self.assertEqual(Decimal("101"), ask_order.price)
        self.assertLessEqual(ask_order.price, Decimal("101"))
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order.price)
        self.assertLessEqual(bid_order.price, Decimal("100"))
        # Simulate market fill for limit buy and limit sell
        self.simulate_limit_order_fill(self.market, bid_order)
        self.simulate_limit_order_fill(self.market, ask_order)
        fill_events = self.maker_order_fill_logger.event_log
        self.assertEqual(2, len(fill_events))
        # NOTE(review): the bid_fills/ask_fills names look swapped (bid_fills
        # collects SELL fills and vice versa); the counts still pass, but the
        # naming misleads.
        bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
        ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
        self.assertEqual(1, len(bid_fills))
        self.assertEqual(1, len(ask_fills))

    def test_with_insufficient_balance(self):
        """Strategies must not place orders when the funding asset is empty."""
        # Zero the quote balance (WETH) and check the buy strategies place no orders.
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.add_iterator(self.market_buy_strategy)
        self.market.set_balance("WETH", 0)
        end_ts = self.start_timestamp + self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        # Zero the base balance (COINALPHA) and check the sell strategies place no orders.
        self.clock.add_iterator(self.limit_sell_strategy)
        self.clock.add_iterator(self.market_sell_strategy)
        self.market.set_balance("COINALPHA", 0)
        end_ts += self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_sell_strategy.active_asks))
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(0, len(market_sell_events))
from os.path import join, realpath
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from decimal import Decimal
import math
import logging; logging.basicConfig(level=logging.ERROR)
import pandas as pd
from typing import List
import unittest
from hummingsim.backtest.backtest_market import BacktestMarket
from hummingsim.backtest.market import (
AssetType,
Market,
MarketConfig,
QuantizationParams
)
from hummingsim.backtest.mock_order_book_loader import MockOrderBookLoader
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
MarketEvent,
TradeType,
OrderType,
OrderFilledEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
TradeFee
)
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.strategy.dev_5_vwap import Dev5TwapTradeStrategy
import sys; sys.path.insert(0, realpath(join(__file__, "../../")))
class TWAPUnitTest(unittest.TestCase):
start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")
start_timestamp: float = start.timestamp()
end_timestamp: float = end.timestamp()
maker_symbols: List[str] = ["COINALPHA-WETH", "COINALPHA", "WETH"]
clock_tick_size = 10
def setUp(self):
self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
self.market: BacktestMarket = BacktestMarket()
self.maker_data: MockOrderBookLoader = MockOrderBookLoader(*self.maker_symbols)
self.mid_price = 100
self.time_delay = 15
self.cancel_order_wait_time = 45
self.maker_data.set_balanced_order_book(mid_price=self.mid_price, min_price=1,
max_price=200, price_step_size=1, volume_step_size=10)
self.market.add_data(self.maker_data)
self.market.set_balance("COINALPHA", 500)
self.market.set_balance("WETH", 500000000000)
self.market.set_balance("QETH", 500)
self.market.set_quantization_param(
QuantizationParams(
self.maker_symbols[0], 6, 6, 6, 6
)
)
self.market_info: MarketTradingPairTuple = MarketTradingPairTuple(
*(
[self.market] + self.maker_symbols
)
)
# Define strategies to test
self.limit_buy_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
[self.market_info],
order_type="limit",
order_price=Decimal("99"),
cancel_order_wait_time=self.cancel_order_wait_time,
is_buy=True,
time_delay=self.time_delay,
is_vwap=True,
percent_slippage=50.0,
order_percent_of_volume=0.5,
order_amount=Decimal("100.0")
)
self.limit_sell_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
[self.market_info],
order_type="limit",
order_price=Decimal("101"),
cancel_order_wait_time=self.cancel_order_wait_time,
is_buy=False,
time_delay=self.time_delay,
is_vwap=True,
percent_slippage=50.0,
order_percent_of_volume=0.5,
order_amount=Decimal("100.0")
)
self.market_buy_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
[self.market_info],
order_type="market",
order_price=None,
cancel_order_wait_time=self.cancel_order_wait_time,
is_buy=True,
time_delay=self.time_delay,
is_vwap=True,
percent_slippage=50.0,
order_percent_of_volume=0.5,
order_amount=Decimal("100.0")
)
self.market_sell_strategy: Dev5TwapTradeStrategy = Dev5TwapTradeStrategy(
[self.market_info],
order_type="market",
order_price=None,
cancel_order_wait_time=self.cancel_order_wait_time,
is_buy=False,
time_delay=self.time_delay,
is_vwap=True,
percent_slippage=50.0,
order_percent_of_volume=0.5,
order_amount=Decimal("100.0")
)
self.clock.add_iterator(self.market)
self.maker_order_fill_logger: EventLogger = EventLogger()
self.cancel_order_logger: EventLogger = EventLogger()
self.buy_order_completed_logger: EventLogger = EventLogger()
self.sell_order_completed_logger: EventLogger = EventLogger()
self.market.add_listener(MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger)
self.market.add_listener(MarketEvent.SellOrderCompleted, self.sell_order_completed_logger)
self.market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger)
self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)
@staticmethod
def simulate_limit_order_fill(market: Market, limit_order: LimitOrder):
quote_currency_traded: Decimal = limit_order.price * limit_order.quantity
base_currency_traded: Decimal = limit_order.quantity
quote_currency: str = limit_order.quote_currency
base_currency: str = limit_order.base_currency
config: MarketConfig = market.config
if limit_order.is_buy:
market.set_balance(quote_currency, market.get_balance(quote_currency) - quote_currency_traded)
market.set_balance(base_currency, market.get_balance(base_currency) + base_currency_traded)
market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
market.current_timestamp,
limit_order.client_order_id,
limit_order.trading_pair,
TradeType.BUY,
OrderType.LIMIT,
limit_order.price,
limit_order.quantity,
TradeFee(Decimal("0"))
))
market.trigger_event(MarketEvent.BuyOrderCompleted, BuyOrderCompletedEvent(
market.current_timestamp,
limit_order.client_order_id,
base_currency,
quote_currency,
base_currency if config.buy_fees_asset is AssetType.BASE_CURRENCY else quote_currency,
base_currency_traded,
quote_currency_traded,
Decimal("0"),
OrderType.LIMIT
))
else:
market.set_balance(quote_currency, market.get_balance(quote_currency) + quote_currency_traded)
market.set_balance(base_currency, market.get_balance(base_currency) - base_currency_traded)
market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
market.current_timestamp,
limit_order.client_order_id,
limit_order.trading_pair,
TradeType.SELL,
OrderType.LIMIT,
limit_order.price,
limit_order.quantity,
TradeFee(Decimal("0"))
))
market.trigger_event(MarketEvent.SellOrderCompleted, SellOrderCompletedEvent(
market.current_timestamp,
limit_order.client_order_id,
base_currency,
quote_currency,
base_currency if config.sell_fees_asset is AssetType.BASE_CURRENCY else quote_currency,
base_currency_traded,
quote_currency_traded,
Decimal("0"),
OrderType.LIMIT
))
def test_limit_buy_order(self):
self.clock.add_iterator(self.limit_buy_strategy)
# test whether number of orders is one at start
# check whether the order is buy
# check whether the price is correct
# check whether amount is correct
order_time_1 = self.start_timestamp + self.clock_tick_size
self.clock.backtest_til(order_time_1)
self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
self.assertEqual(Decimal("99"), bid_order.price)
self.assertLessEqual(bid_order.quantity, 100)
# Simulate market fill for limit buy and limit sell
self.simulate_limit_order_fill(self.market, bid_order)
fill_events = self.maker_order_fill_logger.event_log
self.assertEqual(1, len(fill_events))
bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
self.assertEqual(1, len(bid_fills))
# test whether number of orders is two after time delay
# check whether the order is buy
# check whether the price is correct
# check whether amount is correct
self.assertLess(bid_order.quantity, 100)
order_time_2 = order_time_1 + self.clock_tick_size * math.ceil(self.time_delay / self.clock_tick_size)
self.clock.backtest_til(order_time_2)
self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
bid_order_2: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
self.assertEqual(Decimal("99"), bid_order_2.price)
self.assertLessEqual(bid_order_2.quantity, 100 - bid_order.quantity)
# Check whether order is cancelled after cancel_order_wait_time
cancel_time_2 = order_time_2 + self.cancel_order_wait_time
self.clock.backtest_til(cancel_time_2)
self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
def test_limit_sell_order(self):
self.clock.add_iterator(self.limit_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
self.assertEqual(0, len(self.limit_buy_strategy.active_asks))
# test whether number of orders is one at start
# check whether the order is sell
# check whether the price is correct
# check whether amount is correct
order_time_1 = self.start_timestamp + self.clock_tick_size
self.clock.backtest_til(order_time_1)
self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
self.assertEqual(Decimal("101"), ask_order.price)
self.assertLessEqual(ask_order.quantity, 100)
# Simulate market fill for limit buy and limit sell
self.simulate_limit_order_fill(self.market, ask_order)
fill_events = self.maker_order_fill_logger.event_log
self.assertEqual(1, len(fill_events))
ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
self.assertEqual(1, len(ask_fills))
# test whether number of orders is two after time delay
# check whether the order is buy
# check whether the price is correct
# check whether amount is correct
self.assertLess(ask_order.quantity, 100)
order_time_2 = order_time_1 + self.clock_tick_size * math.ceil(self.time_delay / self.clock_tick_size)
self.clock.backtest_til(order_time_2)
self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
ask_order_2: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
self.assertEqual(Decimal("101"), ask_order_2.price)
self.assertLessEqual(ask_order_2.quantity, 100 - ask_order.quantity)
# Check whether order is cancelled after cancel_order_wait_time
cancel_time_2 = order_time_2 + self.cancel_order_wait_time
self.clock.backtest_til(cancel_time_2)
self.assertEqual(0, len(self.limit_sell_strategy.active_asks))
def test_market_buy_order(self):
self.clock.add_iterator(self.market_buy_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(0, len(market_buy_events))
# test whether number of orders is one after one time delay
# check whether the order is buy
# check whether the size is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size * 2)
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(1, len(market_buy_events))
amount: Decimal = sum(t.base_asset_amount for t in market_buy_events)
self.assertLessEqual(amount, Decimal("100"))
self.buy_order_completed_logger.clear()
def test_market_sell_order(self):
self.clock.add_iterator(self.market_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
if isinstance(t, SellOrderCompletedEvent)]
self.assertEqual(0, len(market_sell_events))
# test whether number of orders is one
# check whether the order is sell
# check whether the size is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size * 2)
market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
if isinstance(t, SellOrderCompletedEvent)]
self.assertEqual(1, len(market_sell_events))
amount: Decimal = sum(t.base_asset_amount for t in market_sell_events)
self.assertLessEqual(amount, Decimal("100"))
self.sell_order_completed_logger.clear()
def test_order_filled_events(self):
self.clock.add_iterator(self.limit_buy_strategy)
self.clock.add_iterator(self.limit_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp)
self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
# test whether number of orders is one
# check whether the order is sell
# check whether the price is correct
# check whether amount is correct
self.clock.backtest_til(self.start_timestamp + math.ceil(self.clock_tick_size / self.time_delay))
self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
self.assertEqual(Decimal("101"), ask_order.price)
self.assertLessEqual(ask_order.price, Decimal("101"))
self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
self.assertEqual(Decimal("99"), bid_order.price)
self.assertLessEqual(bid_order.price, Decimal("100"))
# Simulate market fill for limit buy and limit sell
self.simulate_limit_order_fill(self.market, bid_order)
self.simulate_limit_order_fill(self.market, ask_order)
fill_events = self.maker_order_fill_logger.event_log
self.assertEqual(2, len(fill_events))
bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
self.assertEqual(1, len(bid_fills))
self.assertEqual(1, len(ask_fills))
def test_with_insufficient_balance(self):
# Set base balance to zero and check if sell strategies don't place orders
self.clock.add_iterator(self.limit_buy_strategy)
self.clock.add_iterator(self.market_buy_strategy)
self.market.set_balance("WETH", 0)
end_ts = self.start_timestamp + self.clock_tick_size + self.time_delay
self.clock.backtest_til(end_ts)
self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(0, len(market_buy_events))
self.clock.add_iterator(self.limit_sell_strategy)
self.clock.add_iterator(self.market_sell_strategy)
self.market.set_balance("COINALPHA", 0)
end_ts += self.clock_tick_size + self.time_delay
self.clock.backtest_til(end_ts)
self.assertEqual(0, len(self.limit_sell_strategy.active_asks))
market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
if isinstance(t, SellOrderCompletedEvent)]
self.assertEqual(0, len(market_sell_events)) | 0.595257 | 0.172939 |
from collections import namedtuple
import netCDF4
import numpy as np
import numpy.ma as ma
from cloudnetpy.instruments.ceilometer import Ceilometer
from cloudnetpy import utils
instrument_info = namedtuple('instrument_info',
['calibration_factor',
'overlap_function_params',
'is_range_corrected'])
# TODO: should be a separate config file or accessible over http api
CEILOMETER_INFO = {
'punta-arenas': instrument_info(
calibration_factor=1e-12,
overlap_function_params=None,
is_range_corrected=True),
'mace-head': instrument_info(
calibration_factor=5.2e-15,
overlap_function_params=(500, 200),
is_range_corrected=False),
'bucharest': instrument_info(
calibration_factor=5e-12,
overlap_function_params=None,
is_range_corrected=True),
'granada': instrument_info(
calibration_factor=5.2e-12,
overlap_function_params=None,
is_range_corrected=True),
'lindenberg': instrument_info(
calibration_factor=2.5e-11,
overlap_function_params=None,
is_range_corrected=True),
}
class JenoptikCeilo(Ceilometer):
"""Class for Jenoptik chm15k ceilometer."""
def __init__(self, file_name, site_name):
super().__init__(file_name)
self.model = 'Jenoptik CHM15k'
self.dataset = netCDF4.Dataset(self.file_name)
self.variables = self.dataset.variables
self.noise_params = (70, 2e-14, 0.3e-6, (1e-9, 4e-9))
self.calibration_info = _read_calibration_info(site_name)
def read_ceilometer_file(self):
"""Reads data and metadata from Jenoptik netCDF file."""
self.range = self._calc_range()
self.time = self._convert_time()
self.date = self._read_date()
self.backscatter = self._convert_backscatter()
self.metadata = self._read_metadata()
def _calc_range(self):
"""Assumes 'range' means the upper limit of range gate."""
ceilo_range = self._getvar('range')
return ceilo_range - utils.mdiff(ceilo_range)/2
def _convert_time(self):
time = self.variables['time']
try:
assert all(np.diff(time) > 0)
except AssertionError:
raise RuntimeError('Inconsistent ceilometer time stamps.')
if max(time) > 24:
time = utils.seconds2hours(time)
return time
def _read_date(self):
return [self.dataset.year, self.dataset.month, self.dataset.day]
def _convert_backscatter(self):
"""Steps to convert Jenoptik SNR to raw beta."""
beta_raw = self._getvar('beta_raw')
if not self.calibration_info.is_range_corrected:
beta_raw *= self.range ** 2
overlap_function = _get_overlap(self.range, self.calibration_info)
beta_raw /= overlap_function
beta_raw *= self.calibration_info.calibration_factor
return beta_raw
def _getvar(self, *args):
"""Reads data of variable (array or scalar) from netcdf-file."""
for arg in args:
if arg in self.variables:
var = self.variables[arg]
return var[0] if utils.isscalar(var) else var[:]
def _read_metadata(self):
meta = {'tilt_angle': self._getvar('zenith')}
return meta
def _get_overlap(range_ceilo, calibration_info):
"""Approximative overlap function."""
params = calibration_info.overlap_function_params or (0, 1)
return utils.array_to_probability(range_ceilo, *params)
def _read_calibration_info(site_name):
if 'punta' in site_name.lower():
return CEILOMETER_INFO['punta-arenas']
elif 'mace' in site_name.lower():
return CEILOMETER_INFO['mace-head']
else:
return CEILOMETER_INFO[site_name.lower()] | cloudnetpy/instruments/jenoptik.py | from collections import namedtuple
import netCDF4
import numpy as np
import numpy.ma as ma
from cloudnetpy.instruments.ceilometer import Ceilometer
from cloudnetpy import utils
instrument_info = namedtuple('instrument_info',
['calibration_factor',
'overlap_function_params',
'is_range_corrected'])
# TODO: should be a separate config file or accessible over http api
CEILOMETER_INFO = {
'punta-arenas': instrument_info(
calibration_factor=1e-12,
overlap_function_params=None,
is_range_corrected=True),
'mace-head': instrument_info(
calibration_factor=5.2e-15,
overlap_function_params=(500, 200),
is_range_corrected=False),
'bucharest': instrument_info(
calibration_factor=5e-12,
overlap_function_params=None,
is_range_corrected=True),
'granada': instrument_info(
calibration_factor=5.2e-12,
overlap_function_params=None,
is_range_corrected=True),
'lindenberg': instrument_info(
calibration_factor=2.5e-11,
overlap_function_params=None,
is_range_corrected=True),
}
class JenoptikCeilo(Ceilometer):
"""Class for Jenoptik chm15k ceilometer."""
def __init__(self, file_name, site_name):
super().__init__(file_name)
self.model = 'Jenoptik CHM15k'
self.dataset = netCDF4.Dataset(self.file_name)
self.variables = self.dataset.variables
self.noise_params = (70, 2e-14, 0.3e-6, (1e-9, 4e-9))
self.calibration_info = _read_calibration_info(site_name)
def read_ceilometer_file(self):
"""Reads data and metadata from Jenoptik netCDF file."""
self.range = self._calc_range()
self.time = self._convert_time()
self.date = self._read_date()
self.backscatter = self._convert_backscatter()
self.metadata = self._read_metadata()
def _calc_range(self):
"""Assumes 'range' means the upper limit of range gate."""
ceilo_range = self._getvar('range')
return ceilo_range - utils.mdiff(ceilo_range)/2
def _convert_time(self):
time = self.variables['time']
try:
assert all(np.diff(time) > 0)
except AssertionError:
raise RuntimeError('Inconsistent ceilometer time stamps.')
if max(time) > 24:
time = utils.seconds2hours(time)
return time
def _read_date(self):
return [self.dataset.year, self.dataset.month, self.dataset.day]
def _convert_backscatter(self):
"""Steps to convert Jenoptik SNR to raw beta."""
beta_raw = self._getvar('beta_raw')
if not self.calibration_info.is_range_corrected:
beta_raw *= self.range ** 2
overlap_function = _get_overlap(self.range, self.calibration_info)
beta_raw /= overlap_function
beta_raw *= self.calibration_info.calibration_factor
return beta_raw
def _getvar(self, *args):
"""Reads data of variable (array or scalar) from netcdf-file."""
for arg in args:
if arg in self.variables:
var = self.variables[arg]
return var[0] if utils.isscalar(var) else var[:]
def _read_metadata(self):
meta = {'tilt_angle': self._getvar('zenith')}
return meta
def _get_overlap(range_ceilo, calibration_info):
"""Approximative overlap function."""
params = calibration_info.overlap_function_params or (0, 1)
return utils.array_to_probability(range_ceilo, *params)
def _read_calibration_info(site_name):
if 'punta' in site_name.lower():
return CEILOMETER_INFO['punta-arenas']
elif 'mace' in site_name.lower():
return CEILOMETER_INFO['mace-head']
else:
return CEILOMETER_INFO[site_name.lower()] | 0.573201 | 0.3398 |
import os
import numpy as np
import torch
import collections
from utils.logger import logger
from utils.logger import create_stats_ordered_dict
from utils import utils
class Trainer(object):
def __init__(self, agent, expl_env, eval_env, replay_buffer, device, start_timesteps=25e3):
self.agent = agent
self.device = device
self.expl_env = expl_env
self.eval_env = eval_env
self.replay_buffer = replay_buffer
self.max_episode_steps = 1000
self.start_timesteps = int(start_timesteps)
def train(self, num_epochs=1000, num_iters_per_epoch=1000):
total_timesteps = 0
episode_num = 0
episode_reward = 0
episode_timesteps = 0
evaluations = []
state, done = self.expl_env.reset(), False
for curr_epoch in range(num_epochs):
for _ in range(num_iters_per_epoch):
if total_timesteps < self.start_timesteps:
action = self.expl_env.action_space.sample()
else:
action = self.agent.sample_action(np.array(state))
next_state, reward, done, _ = self.expl_env.step(action)
done_bool = float(done) if episode_timesteps < self.max_episode_steps else 0
episode_timesteps += 1
total_timesteps += 1
self.replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
if total_timesteps >= self.start_timesteps:
loss = self.agent.train_from_batch(self.replay_buffer)
if done or (episode_timesteps == self.max_episode_steps):
# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
# utils.print_banner(f"Total T: {total_timesteps + 1} Episode Num: {episode_num + 1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
# Reset environment
state, done = self.expl_env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# TODO: add code
# Evaluate episode
utils.print_banner(f"Train step: {total_timesteps}", separator="*", num_star=90)
eval_res = self.eval_policy()
evaluations.append(eval_res)
# np.save(os.path.join(output_dir, "eval"), evaluations)
logger.record_tabular('Training Epochs', curr_epoch)
logger.record_tabular('GF1 Loss', loss['gf1_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('GF2 Loss', loss['gf2_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Actor Loss', loss['actor_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Policy log_pi',
loss['log_pi'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Average Episodic Reward', eval_res)
logger.dump_tabular()
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(self, eval_episodes=10):
avg_reward = 0.
for _ in range(eval_episodes):
state, done = self.eval_env.reset(), False
while not done:
action = self.agent.sample_action(np.array(state))
state, reward, done, _ = self.eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
utils.print_banner(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
return avg_reward | trainer.py | import os
import numpy as np
import torch
import collections
from utils.logger import logger
from utils.logger import create_stats_ordered_dict
from utils import utils
class Trainer(object):
def __init__(self, agent, expl_env, eval_env, replay_buffer, device, start_timesteps=25e3):
self.agent = agent
self.device = device
self.expl_env = expl_env
self.eval_env = eval_env
self.replay_buffer = replay_buffer
self.max_episode_steps = 1000
self.start_timesteps = int(start_timesteps)
def train(self, num_epochs=1000, num_iters_per_epoch=1000):
total_timesteps = 0
episode_num = 0
episode_reward = 0
episode_timesteps = 0
evaluations = []
state, done = self.expl_env.reset(), False
for curr_epoch in range(num_epochs):
for _ in range(num_iters_per_epoch):
if total_timesteps < self.start_timesteps:
action = self.expl_env.action_space.sample()
else:
action = self.agent.sample_action(np.array(state))
next_state, reward, done, _ = self.expl_env.step(action)
done_bool = float(done) if episode_timesteps < self.max_episode_steps else 0
episode_timesteps += 1
total_timesteps += 1
self.replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
if total_timesteps >= self.start_timesteps:
loss = self.agent.train_from_batch(self.replay_buffer)
if done or (episode_timesteps == self.max_episode_steps):
# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
# utils.print_banner(f"Total T: {total_timesteps + 1} Episode Num: {episode_num + 1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
# Reset environment
state, done = self.expl_env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# TODO: add code
# Evaluate episode
utils.print_banner(f"Train step: {total_timesteps}", separator="*", num_star=90)
eval_res = self.eval_policy()
evaluations.append(eval_res)
# np.save(os.path.join(output_dir, "eval"), evaluations)
logger.record_tabular('Training Epochs', curr_epoch)
logger.record_tabular('GF1 Loss', loss['gf1_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('GF2 Loss', loss['gf2_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Actor Loss', loss['actor_loss'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Policy log_pi',
loss['log_pi'] if total_timesteps >= self.start_timesteps else 0.)
logger.record_tabular('Average Episodic Reward', eval_res)
logger.dump_tabular()
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(self, eval_episodes=10):
avg_reward = 0.
for _ in range(eval_episodes):
state, done = self.eval_env.reset(), False
while not done:
action = self.agent.sample_action(np.array(state))
state, reward, done, _ = self.eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
utils.print_banner(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
return avg_reward | 0.44553 | 0.249699 |
import abc
from typing import List
import pandas as pd
from genomics_data_index.configuration.connector.DataIndexConnection import DataIndexConnection
from genomics_data_index.storage.SampleSet import SampleSet, AllSampleSet
class FeaturesComparator(abc.ABC):
FEATURES_SELECTIONS = ['all', 'unique']
def __init__(self, connection: DataIndexConnection, include_unknown_samples: bool,
include_unknown_no_present_samples: bool):
self._connection = connection
self._include_unknown_samples = include_unknown_samples
self._include_unknown_no_present_samples = include_unknown_no_present_samples
def _get_total(self, samples: SampleSet) -> int:
if isinstance(samples, AllSampleSet):
return self._connection.sample_service.count_samples()
else:
return len(samples)
@property
@abc.abstractmethod
def feature_id_columns(self) -> List[str]:
pass
@property
@abc.abstractmethod
def summary_columns(self) -> List[str]:
pass
@property
@abc.abstractmethod
def index_name(self) -> str:
pass
def _join_additional_columns(self, features_df: pd.DataFrame) -> pd.DataFrame:
return features_df
@abc.abstractmethod
def features_comparison(self, selected_samples: SampleSet,
sample_categories: List[SampleSet],
category_prefixes: List[str] = None,
category_samples_threshold: int = None,
unit: str = 'percent') -> pd.DataFrame:
"""
Creates a dataframe which compares different categories of samples with each other with respect to features.
For example, if kind=='mutations', compare_kind == 'percent' and there are two sample_categories then
this will return dataframe like:
| Mutation | Total | Category1_percent | Category2_percent | Category1_total | Category2_total |
| ref:100:A:T | 10 | 50% | 100% | 8 | 2 |
| ref:200:CT:C | 10 | 100% | 0% | 8 | 2 |
| ... | ... | ... | ... | 8 | 2 |
Here, "Category1_percent" is the percent of samples in Category1 that have this mutation/feature
(50% or 4 out of 8 samples in Category1). "Category2_percent" is the percent of samples in Category2 with the
feature (100% or 2 out of 2 samples in Category2).
"Category1_total" and "Category2_total" are the total samples in each category. "Total" is the total
samples in the overall query that form the universe from which we are defining "Category1" and "Category2".
Note: since categories are defined based on sample sets, there is no enforcement that categories are
mutually exclusive (that is, "Category1_total" + "Category2_total" will not always equal "Total"). This
is done on purpose in case the categories you wish to compare are not mutually exclusive.
:param selected_samples: The set of selected samples of which sample_categories will form subsets of.
:param sample_categories: The different categories to compare.
:param category_prefixes: The prefixes to use for the different categories (defaults to Category1, Category2, ...).
:param category_samples_threshold: A threshold on the number of samples in a category for it to be considered.
:param unit: The type of data to compare in each category (either 'percent', 'proportion', or 'count').
:return: A dataframe comparing each category with respect to the differences in features.
"""
pass
@abc.abstractmethod
def summary(self, sample_set: SampleSet) -> pd.DataFrame:
"""
Given a samples set, summarizes the features for all samples in this set.
:param sample_set: The set of samples to summarize features in.
:return: A dataframe summarizing features in this set of samples.
"""
pass
def unique_summary(self, sample_set: SampleSet, other_set: SampleSet) -> pd.DataFrame:
"""
Given a samples set, summarizes the features for all samples in this set that are not in another set
(i.e., are unique to the given sample set compared to the other set).
:param sample_set: The set of samples to summarize features in.
:param other_set: The set of samples features should not appear in.
:return: A dataframe summarizing unique features in this set of samples.
"""
features_df = self.summary(sample_set)
features_complement_df = self.summary(other_set)
features_merged_df = features_df.merge(features_complement_df, left_index=True, right_index=True,
how='left', indicator=True, suffixes=('_x', '_y'))
rename_dict = {col + '_x': col for col in self.summary_columns}
features_unique_df = features_merged_df[features_merged_df['_merge'] == 'left_only'].rename(rename_dict,
axis='columns')
features_unique_df = features_unique_df[self.summary_columns]
return self._join_additional_columns(features_unique_df) | genomics_data_index/api/query/features/FeaturesComparator.py | import abc
from typing import List
import pandas as pd
from genomics_data_index.configuration.connector.DataIndexConnection import DataIndexConnection
from genomics_data_index.storage.SampleSet import SampleSet, AllSampleSet
class FeaturesComparator(abc.ABC):
FEATURES_SELECTIONS = ['all', 'unique']
def __init__(self, connection: DataIndexConnection, include_unknown_samples: bool,
include_unknown_no_present_samples: bool):
self._connection = connection
self._include_unknown_samples = include_unknown_samples
self._include_unknown_no_present_samples = include_unknown_no_present_samples
def _get_total(self, samples: SampleSet) -> int:
if isinstance(samples, AllSampleSet):
return self._connection.sample_service.count_samples()
else:
return len(samples)
@property
@abc.abstractmethod
def feature_id_columns(self) -> List[str]:
pass
@property
@abc.abstractmethod
def summary_columns(self) -> List[str]:
pass
@property
@abc.abstractmethod
def index_name(self) -> str:
pass
def _join_additional_columns(self, features_df: pd.DataFrame) -> pd.DataFrame:
return features_df
@abc.abstractmethod
def features_comparison(self, selected_samples: SampleSet,
sample_categories: List[SampleSet],
category_prefixes: List[str] = None,
category_samples_threshold: int = None,
unit: str = 'percent') -> pd.DataFrame:
"""
Creates a dataframe which compares different categories of samples with each other with respect to features.
For example, if kind=='mutations', compare_kind == 'percent' and there are two sample_categories then
this will return dataframe like:
| Mutation | Total | Category1_percent | Category2_percent | Category1_total | Category2_total |
| ref:100:A:T | 10 | 50% | 100% | 8 | 2 |
| ref:200:CT:C | 10 | 100% | 0% | 8 | 2 |
| ... | ... | ... | ... | 8 | 2 |
Here, "Category1_percent" is the percent of samples in Category1 that have this mutation/feature
(50% or 4 out of 8 samples in Category1). "Category2_percent" is the percent of samples in Category2 with the
feature (100% or 2 out of 2 samples in Category2).
"Category1_total" and "Category2_total" are the total samples in each category. "Total" is the total
samples in the overall query that form the universe from which we are defining "Category1" and "Category2".
Note: since categories are defined based on sample sets, there is no enforcement that categories are
mutually exclusive (that is, "Category1_total" + "Category2_total" will not always equal "Total"). This
is done on purpose in case the categories you wish to compare are not mutually exclusive.
:param selected_samples: The set of selected samples of which sample_categories will form subsets of.
:param sample_categories: The different categories to compare.
:param category_prefixes: The prefixes to use for the different categories (defaults to Category1, Category2, ...).
:param category_samples_threshold: A threshold on the number of samples in a category for it to be considered.
:param unit: The type of data to compare in each category (either 'percent', 'proportion', or 'count').
:return: A dataframe comparing each category with respect to the differences in features.
"""
pass
@abc.abstractmethod
def summary(self, sample_set: SampleSet) -> pd.DataFrame:
"""
Given a samples set, summarizes the features for all samples in this set.
:param sample_set: The set of samples to summarize features in.
:return: A dataframe summarizing features in this set of samples.
"""
pass
def unique_summary(self, sample_set: SampleSet, other_set: SampleSet) -> pd.DataFrame:
"""
Given a samples set, summarizes the features for all samples in this set that are not in another set
(i.e., are unique to the given sample set compared to the other set).
:param sample_set: The set of samples to summarize features in.
:param other_set: The set of samples features should not appear in.
:return: A dataframe summarizing unique features in this set of samples.
"""
features_df = self.summary(sample_set)
features_complement_df = self.summary(other_set)
features_merged_df = features_df.merge(features_complement_df, left_index=True, right_index=True,
how='left', indicator=True, suffixes=('_x', '_y'))
rename_dict = {col + '_x': col for col in self.summary_columns}
features_unique_df = features_merged_df[features_merged_df['_merge'] == 'left_only'].rename(rename_dict,
axis='columns')
features_unique_df = features_unique_df[self.summary_columns]
return self._join_additional_columns(features_unique_df) | 0.85753 | 0.54577 |
import math
from unittest import mock
import numpy as np
import pybullet
import pybullet_utils.bullet_client as bc
import pytest
from direct.showbase.ShowBase import ShowBase
import random
from pathlib import Path
import multiprocessing as mp
from smarts.core.chassis import AckermannChassis
from smarts.core.controllers import (
TrajectoryTrackingController,
TrajectoryTrackingControllerState,
)
from smarts.core.coordinates import Heading, Pose
from smarts.core.scenario import Start
from smarts.core.vehicle import Vehicle
time_step = 0.1
@pytest.fixture
def bullet_client(TIMESTEP_SEC=time_step):
client = bc.BulletClient(pybullet.DIRECT)
client.resetSimulation()
client.setGravity(0, 0, -9.8)
client.setPhysicsEngineParameter(
fixedTimeStep=TIMESTEP_SEC, numSubSteps=int(TIMESTEP_SEC / (1 / 240)),
)
path = Path(__file__).parent / "../models/plane.urdf"
path = str(path.absolute())
plane_body_id = client.loadURDF(path, useFixedBase=True)
yield client
client.disconnect()
@pytest.fixture
def vehicle(bullet_client, TIMESTEP_SEC=time_step):
pose = Pose.from_center((0, 0, 0), Heading(0))
vehicle1 = Vehicle(
id="vehicle",
pose=pose,
showbase=mock.MagicMock(),
chassis=AckermannChassis(pose=pose, bullet_client=bullet_client,),
)
return vehicle1
# We test 3 values for radius of the circular trajectory
@pytest.fixture(params=np.arange(10, 30, 10))
def radius(request):
return request.param
# We test 3 values for vehicle yaw rate of the circular trajectory
@pytest.fixture(params=np.arange(0.1, 0.21, 0.05))
def omega(request):
return request.param
# We use circular trajectory with different radius and yaw rate
def build_trajectory(radius, omega, step_num, TIMESTEP_SEC=time_step):
num_trajectory_points = 15
R = radius
omega_1 = omega
omega_2 = omega
if step_num > 3.14 / (TIMESTEP_SEC * omega_1):
Omega = omega_2
alph = ((omega_1 - omega_2) / omega_2) * 3.14 / (TIMESTEP_SEC * omega_1)
else:
Omega = omega_1
alph = 0
desheadi = step_num * Omega * TIMESTEP_SEC
trajectory = [
[
-(R - R * math.cos((step_num + i + alph) * Omega * TIMESTEP_SEC))
for i in range(num_trajectory_points)
],
[
R * math.sin((step_num + i + alph) * Omega * TIMESTEP_SEC)
for i in range(num_trajectory_points)
],
[
(step_num + i + alph) * Omega * TIMESTEP_SEC
for i in range(num_trajectory_points)
],
[R * Omega for i in range(num_trajectory_points)],
]
return trajectory
def step_with_vehicle_commands(
bullet_client, vehicle, radius, omega, TIMESTEP_SEC=time_step
):
prev_friction_sum = None
# Proceed till the end of half of the circle.
n_steps = int(0.5 * 3.14 / (omega * TIMESTEP_SEC))
controller_state = TrajectoryTrackingControllerState()
for step_num in range(n_steps):
desired_trajectory = build_trajectory(radius, omega, step_num)
TrajectoryTrackingController.perform_trajectory_tracking_PD(
desired_trajectory, vehicle, controller_state, dt_sec=TIMESTEP_SEC,
)
bullet_client.stepSimulation()
final_error = math.sqrt(
(vehicle.position[0] - desired_trajectory[0][0]) ** 2
+ (vehicle.position[1] - desired_trajectory[1][0]) ** 2
)
return final_error
def test_trajectory_tracking(bullet_client, vehicle, radius, omega):
final_error = step_with_vehicle_commands(bullet_client, vehicle, radius, omega)
assert final_error <= 10 | smarts/core/tests/test_trajectory_controller.py | import math
from unittest import mock
import numpy as np
import pybullet
import pybullet_utils.bullet_client as bc
import pytest
from direct.showbase.ShowBase import ShowBase
import random
from pathlib import Path
import multiprocessing as mp
from smarts.core.chassis import AckermannChassis
from smarts.core.controllers import (
TrajectoryTrackingController,
TrajectoryTrackingControllerState,
)
from smarts.core.coordinates import Heading, Pose
from smarts.core.scenario import Start
from smarts.core.vehicle import Vehicle
time_step = 0.1
@pytest.fixture
def bullet_client(TIMESTEP_SEC=time_step):
client = bc.BulletClient(pybullet.DIRECT)
client.resetSimulation()
client.setGravity(0, 0, -9.8)
client.setPhysicsEngineParameter(
fixedTimeStep=TIMESTEP_SEC, numSubSteps=int(TIMESTEP_SEC / (1 / 240)),
)
path = Path(__file__).parent / "../models/plane.urdf"
path = str(path.absolute())
plane_body_id = client.loadURDF(path, useFixedBase=True)
yield client
client.disconnect()
@pytest.fixture
def vehicle(bullet_client, TIMESTEP_SEC=time_step):
pose = Pose.from_center((0, 0, 0), Heading(0))
vehicle1 = Vehicle(
id="vehicle",
pose=pose,
showbase=mock.MagicMock(),
chassis=AckermannChassis(pose=pose, bullet_client=bullet_client,),
)
return vehicle1
# We test 3 values for radius of the circular trajectory
@pytest.fixture(params=np.arange(10, 30, 10))
def radius(request):
return request.param
# We test 3 values for vehicle yaw rate of the circular trajectory
@pytest.fixture(params=np.arange(0.1, 0.21, 0.05))
def omega(request):
return request.param
# We use circular trajectory with different radius and yaw rate
def build_trajectory(radius, omega, step_num, TIMESTEP_SEC=time_step):
num_trajectory_points = 15
R = radius
omega_1 = omega
omega_2 = omega
if step_num > 3.14 / (TIMESTEP_SEC * omega_1):
Omega = omega_2
alph = ((omega_1 - omega_2) / omega_2) * 3.14 / (TIMESTEP_SEC * omega_1)
else:
Omega = omega_1
alph = 0
desheadi = step_num * Omega * TIMESTEP_SEC
trajectory = [
[
-(R - R * math.cos((step_num + i + alph) * Omega * TIMESTEP_SEC))
for i in range(num_trajectory_points)
],
[
R * math.sin((step_num + i + alph) * Omega * TIMESTEP_SEC)
for i in range(num_trajectory_points)
],
[
(step_num + i + alph) * Omega * TIMESTEP_SEC
for i in range(num_trajectory_points)
],
[R * Omega for i in range(num_trajectory_points)],
]
return trajectory
def step_with_vehicle_commands(
bullet_client, vehicle, radius, omega, TIMESTEP_SEC=time_step
):
prev_friction_sum = None
# Proceed till the end of half of the circle.
n_steps = int(0.5 * 3.14 / (omega * TIMESTEP_SEC))
controller_state = TrajectoryTrackingControllerState()
for step_num in range(n_steps):
desired_trajectory = build_trajectory(radius, omega, step_num)
TrajectoryTrackingController.perform_trajectory_tracking_PD(
desired_trajectory, vehicle, controller_state, dt_sec=TIMESTEP_SEC,
)
bullet_client.stepSimulation()
final_error = math.sqrt(
(vehicle.position[0] - desired_trajectory[0][0]) ** 2
+ (vehicle.position[1] - desired_trajectory[1][0]) ** 2
)
return final_error
def test_trajectory_tracking(bullet_client, vehicle, radius, omega):
    """The tracked position must end within 10 m of the reference."""
    tracking_error = step_with_vehicle_commands(
        bullet_client, vehicle, radius, omega
    )
    assert tracking_error <= 10
from multiprocessing import Process, Queue
from fizzbuzz_utils import fizzbuzz as fizzbuzz
from fizzbuzz_utils import printer as printer
from fizzbuzz_utils import even as even
from fizzbuzz_utils import numbers as numbers
# Marker object sent through the queues to signal end-of-stream to each stage.
class PLPipeSentinel: pass
def pl_run_numbers(pl_stream, pl_out_queue):
    """Source stage: feed every item of ``pl_stream`` into the queue,
    then send a sentinel to mark end-of-stream."""
    for item in pl_stream:
        pl_out_queue.put(item)
    pl_out_queue.put(PLPipeSentinel())
def pl_run_even(pl_in_queue, pl_out_queue):
    """Filter stage: forward only the items the ``even`` predicate accepts;
    propagate the sentinel downstream and terminate on it."""
    is_even = None
    count = None
    while True:
        item = pl_in_queue.get()
        if isinstance(item, PLPipeSentinel):
            # Forward an end-of-stream marker and stop this stage.
            if pl_out_queue is not None:
                pl_out_queue.put(PLPipeSentinel())
            break
        is_even, count = even(number=item, counter=count)
        if not is_even:
            continue
        if pl_out_queue is not None:
            pl_out_queue.put(item)
def pl_run_fizzbuzz(pl_in_queue, pl_out_queue):
    """Map stage: apply ``fizzbuzz`` to every incoming number; propagate
    the sentinel downstream and terminate on it."""
    number = None
    while True:
        item = pl_in_queue.get()
        done = isinstance(item, PLPipeSentinel)
        if done:
            result = PLPipeSentinel()
        else:
            result = fizzbuzz(number=item, fizz="fizz", buzz="buzz")
            number = result
        if pl_out_queue is not None:
            pl_out_queue.put(result)
        if done:
            break
def pl_run_printer(pl_in_queue, pl_out_queue):
    """Sink stage: hand every incoming item to ``printer``; terminates on
    the sentinel (its output queue is normally ``None``)."""
    while True:
        item = pl_in_queue.get()
        done = isinstance(item, PLPipeSentinel)
        if done:
            result = PLPipeSentinel()
        else:
            result = printer(number=item)
        if pl_out_queue is not None:
            pl_out_queue.put(result)
        if done:
            break
if __name__ == "__main__":
    # Wire up the pipeline: numbers -> even -> fizzbuzz -> printer.
    pl_data = numbers()
    pl_in_even = Queue()
    pl_in_fizzbuzz = Queue()
    pl_in_printer = Queue()
    pl_numbers_process = Process(target=pl_run_numbers, args=(pl_data, pl_in_even))
    pl_even_process = Process(target=pl_run_even, args=(pl_in_even, pl_in_fizzbuzz))
    pl_fizzbuzz_process = Process(
        target=pl_run_fizzbuzz, args=(pl_in_fizzbuzz, pl_in_printer)
    )
    pl_printer_process = Process(target=pl_run_printer, args=(pl_in_printer, None))
    processes = [
        pl_numbers_process,
        pl_even_process,
        pl_fizzbuzz_process,
        pl_printer_process,
    ]
    for proc in processes:
        proc.start()
    # BUG FIX: the source process was started but never joined, which could
    # leave a zombie process behind when the pipeline finished.  Join every
    # stage, source included.
    for proc in processes:
        proc.join()
from fizzbuzz_utils import fizzbuzz as fizzbuzz
from fizzbuzz_utils import printer as printer
from fizzbuzz_utils import even as even
from fizzbuzz_utils import numbers as numbers
class PLPipeSentinel: pass
def pl_run_numbers(pl_stream, pl_out_queue):
for pl_data in pl_stream:
pl_out_queue.put(pl_data)
pl_out_queue.put(PLPipeSentinel())
def pl_run_even(pl_in_queue, pl_out_queue):
is_even = None
count = None
while 1:
pl_inp = pl_in_queue.get()
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_result = even(number=pl_inp, counter=count)
is_even, count = pl_result
if is_even:
pl_outp = pl_inp
else:
continue
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
def pl_run_fizzbuzz(pl_in_queue, pl_out_queue):
number = None
while 1:
pl_inp = pl_in_queue.get()
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_outp = fizzbuzz(number=pl_inp, fizz="fizz", buzz="buzz")
number = pl_outp
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
def pl_run_printer(pl_in_queue, pl_out_queue):
while 1:
pl_inp = pl_in_queue.get()
number = pl_inp
if isinstance(pl_inp, PLPipeSentinel):
pl_outp = PLPipeSentinel()
if not isinstance(pl_inp, PLPipeSentinel):
pl_outp = printer(number=number)
if pl_out_queue is not None:
pl_out_queue.put(pl_outp)
if isinstance(pl_inp, PLPipeSentinel):
break
if __name__ == "__main__":
pl_data = numbers()
pl_in_even = Queue()
pl_in_fizzbuzz = Queue()
pl_in_printer = Queue()
pl_numbers_process = Process(target=pl_run_numbers, args=(pl_data, pl_in_even))
pl_even_process = Process(target=pl_run_even, args=(pl_in_even,pl_in_fizzbuzz,))
pl_fizzbuzz_process = Process(target=pl_run_fizzbuzz, args=(pl_in_fizzbuzz,pl_in_printer,))
pl_printer_process = Process(target=pl_run_printer, args=(pl_in_printer,None,))
pl_numbers_process.start()
pl_even_process.start()
pl_fizzbuzz_process.start()
pl_printer_process.start()
pl_even_process.join()
pl_fizzbuzz_process.join()
pl_printer_process.join() | 0.205535 | 0.206134 |
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, Tuple
import numpy as np
from pymatgen.electronic_structure.core import Spin
from tabulate import tabulate
from amset.constants import (
boltzmann_au,
coulomb_to_au,
ev_to_hartree,
gpa_to_au,
m_to_bohr,
s_to_au,
)
from amset.core.data import AmsetData, check_nbands_equal
from amset.interpolation.deformation import DeformationPotentialInterpolator
from amset.scattering.common import calculate_inverse_screening_length_sq
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
logger = logging.getLogger(__name__)
class AbstractElasticScattering(ABC):
    """Base class for elastic carrier-scattering mechanisms.

    Concrete subclasses provide ``prefactor`` (q-independent scaling per
    doping/temperature) and ``factor`` (dependence on the scattering
    wave vector q); together these define the mechanism's rate.
    """

    # short identifier for the mechanism, e.g. "ADP", "IMP", "PIE"
    name: str
    # keys that must be present in the materials-properties dict
    required_properties: Tuple[str]

    def __init__(self, properties, doping, temperatures, nbands):
        self.properties = properties
        self.doping = doping
        self.temperatures = temperatures
        # nbands maps Spin -> number of bands; its keys give the spin channels
        self.nbands = nbands
        self.spins = list(nbands.keys())

    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Construct the scatterer from an ``AmsetData`` calculation object."""
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
        )

    @abstractmethod
    def prefactor(self, spin: Spin, b_idx: int):
        """Return the q-independent part of the rate (shape is
        mechanism-specific — see subclasses)."""
        pass

    @abstractmethod
    def factor(
        self,
        unit_q: np.ndarray,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Return the q-dependent part of the rate for the given band/k-point."""
        pass

    def to_reference(self):
        # Picklable state used to rebuild the object (e.g. in worker processes).
        return self.properties, self.doping, self.temperatures, self.nbands

    @classmethod
    def from_reference(cls, properties, doping, temperatures, nbands):
        # Inverse of to_reference().
        return cls(properties, doping, temperatures, nbands)

    @classmethod
    def get_properties(cls, materials_properties):
        # Keep only the properties this mechanism declares as required.
        return {p: materials_properties[p] for p in cls.required_properties}

    @staticmethod
    def get_nbands(amset_data):
        return {s: len(amset_data.energies[s]) for s in amset_data.spins}
class AcousticDeformationPotentialScattering(AbstractElasticScattering):
    """Acoustic deformation-potential (ADP) scattering.

    The deformation potential may be supplied as a file path (k- and
    band-dependent interpolator), a single number (metals), or a
    (valence, conduction) pair (semiconductors); values are converted from
    eV to Hartree on construction.
    """

    name = "ADP"
    required_properties = ("deformation_potential", "elastic_constant")

    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        deformation_potential,
        vb_idx,
        is_metal,
        fermi_levels,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        # temperature dependence is applied later, in prefactor()
        self._prefactor = boltzmann_au * s_to_au
        # elastic constant converted from GPa to atomic units
        self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au
        self.deformation_potential = deformation_potential
        self.vb_idx = vb_idx
        self.is_metal = is_metal
        self.fermi_levels = fermi_levels

    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Normalize the user-supplied deformation potential and build the
        scatterer.

        Four input cases are handled: a file path (interpolator), a tuple
        for a metal (valence value used, with a warning), a scalar, and a
        (valence, conduction) pair for a semiconductor.
        """
        vb_idx = amset_data.vb_idx
        is_metal = amset_data.is_metal
        deformation_potential = materials_properties["deformation_potential"]
        if isinstance(deformation_potential, (str, Path)):
            # k-dependent potential loaded from file; scaled to Hartree
            deformation_potential = DeformationPotentialInterpolator.from_file(
                deformation_potential, scale=ev_to_hartree
            )
            equal = check_nbands_equal(deformation_potential, amset_data)
            if not equal:
                raise RuntimeError(
                    "Deformation potential file does not contain the correct number of"
                    " bands\nEnsure it was generated using the same energy_cutoff as "
                    "this AMSET run."
                )
        elif is_metal and isinstance(deformation_potential, tuple):
            logger.warning(
                "System is metallic but deformation potentials for both "
                "the valence and conduction bands have been set... using the "
                "valence band potential for all bands"
            )
            deformation_potential = deformation_potential[0] * ev_to_hartree
        elif is_metal:
            deformation_potential = deformation_potential * ev_to_hartree
        elif not is_metal and not isinstance(deformation_potential, tuple):
            logger.warning(
                "System is semiconducting but only one deformation "
                "potential has been set... using this potential for all bands."
            )
            deformation_potential = (
                deformation_potential * ev_to_hartree,
                deformation_potential * ev_to_hartree,
            )
        else:
            deformation_potential = (
                deformation_potential[0] * ev_to_hartree,
                deformation_potential[1] * ev_to_hartree,
            )
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            deformation_potential,
            vb_idx,
            is_metal,
            amset_data.fermi_levels,
        )

    def prefactor(self, spin: Spin, b_idx: int):
        """kT prefactor broadcast to shape (ndops, ntemps)."""
        prefactor = (
            self._prefactor
            * self.temperatures[None, :]
            * np.ones((len(self.doping), len(self.temperatures)))
        )
        return prefactor

    def factor(
        self,
        unit_q: np.ndarray,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Deformation coupling projected onto the three acoustic modes,
        each weighted by the inverse of its Christoffel eigenvalue."""
        christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
        # eigh returns eigenvalues in ascending order; the last (stiffest)
        # mode is taken as longitudinal — NOTE(review): assumes the
        # longitudinal branch always has the largest eigenvalue.
        (
            (c_trans_a, c_trans_b, c_long),
            (v_trans_a, v_trans_b, v_long),
        ) = solve_christoffel_equation(christoffel_tensors)
        if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
            # k-dependent tensor potential: project onto each mode's strain
            deform = self.deformation_potential.interpolate(spin, [band_idx], [kpoint])
            deform = np.abs(deform[0])
            # NOTE(review): adds the outer product of the group velocity to
            # the deformation tensor — confirm intended units/sign.
            deform += np.outer(velocity, velocity)  # velocity correction
            strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
                unit_q, v_long, v_trans_a, v_trans_b
            )
            factor = (
                np.tensordot(strain_long, deform) ** 2 / c_long
                + np.tensordot(strain_trans_a, deform) ** 2 / c_trans_a
                + np.tensordot(strain_trans_b, deform) ** 2 / c_trans_b
            )
        elif self.is_metal:
            # scalar potential: only the longitudinal mode contributes
            factor = self.deformation_potential ** 2 / c_long
        else:
            # pick the conduction (1) or valence (0) potential by band index
            def_idx = 1 if band_idx > self.vb_idx[spin] else 0
            factor = self.deformation_potential[def_idx] ** 2 / c_long
        # broadcast to (ndops, ntemps, nq)
        return factor[None, None] * np.ones(self.fermi_levels.shape + norm_q_sq.shape)

    def to_reference(self):
        """Picklable state; flags whether the potential is an interpolator."""
        base_reference = super().to_reference()
        if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
            deformation_reference = self.deformation_potential.to_reference()
            is_interpolator = True
        else:
            deformation_reference = self.deformation_potential
            is_interpolator = False
        return base_reference + (
            deformation_reference,
            self.vb_idx,
            self.is_metal,
            self.fermi_levels,
            is_interpolator,
        )

    @classmethod
    def from_reference(
        cls,
        properties,
        doping,
        temperatures,
        nbands,
        deformation_reference,
        vb_idx,
        is_metal,
        fermi_levels,
        is_interpolator,
    ):
        """Inverse of to_reference()."""
        if is_interpolator:
            deformation_potential = DeformationPotentialInterpolator.from_reference(
                *deformation_reference
            )
        else:
            deformation_potential = deformation_reference
        return cls(
            properties,
            doping,
            temperatures,
            nbands,
            deformation_potential,
            vb_idx,
            is_metal,
            fermi_levels,
        )
def prepare_acoustic_strains(unit_q, v_long, v_trans_a, v_trans_b):
    """Orient polarization vectors along q and build unit strain tensors.

    The polarizations are flipped in place (all three share the sign of the
    longitudinal mode's alignment with q) so that the longitudinal
    polarization points along the propagation direction.
    """
    # per-row dot product gives the alignment sign of v_long with q
    alignment = np.einsum("ij,ij->i", unit_q, v_long)
    flip = np.sign(alignment)[:, None]
    v_long *= flip
    v_trans_a *= flip
    v_trans_b *= flip
    return (
        get_unit_strain_tensors(unit_q, v_long),
        get_unit_strain_tensors(unit_q, v_trans_a),
        get_unit_strain_tensors(unit_q, v_trans_b),
    )


def get_christoffel_tensors(elastic_constant, unit_q):
    """Contract the rank-4 elastic tensor with q on its first and last index."""
    return np.einsum("ni,ijkl,nl->njk", unit_q, elastic_constant, unit_q)


def solve_christoffel_equation(christoffel_tensors):
    """Eigendecompose each symmetric Christoffel tensor.

    Returns eigenvalues with shape (3, nq) and eigenvectors with shape
    (3, nq, 3), i.e. indexed by mode first (ascending eigenvalue order).
    """
    values, vectors = np.linalg.eigh(christoffel_tensors)
    return np.swapaxes(values, 0, 1), np.moveaxis(vectors, 2, 0)


def get_unit_strain_tensors(propagation_vectors, polarization_vectors):
    """Per-q outer product of propagation and polarization vectors."""
    return np.einsum("ni,nj->nij", propagation_vectors, polarization_vectors)
class IonizedImpurityScattering(AbstractElasticScattering):
    """Ionized-impurity (IMP) scattering via a screened Coulomb interaction."""

    name = "IMP"
    required_properties = ("defect_charge", "static_dielectric", "compensation_factor")

    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        q_concentration,
        inverse_screening_length_sq,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        # q_concentration holds Z^2 * N_imp per (doping, temperature) point
        self._prefactor = q_concentration * s_to_au
        self.inverse_screening_length_sq = inverse_screening_length_sq

    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Derive impurity concentrations and screening lengths from the
        calculated carrier concentrations; logs a summary table."""
        from amset.constants import bohr_to_cm
        # screening uses the orientation-averaged static dielectric constant
        avg_diel = np.linalg.eigvalsh(materials_properties["static_dielectric"]).mean()
        inverse_screening_length_sq = calculate_inverse_screening_length_sq(
            amset_data, avg_diel
        )
        defect_charge = materials_properties["defect_charge"]
        comp_factor = materials_properties["compensation_factor"]
        imp_info = []
        q_concentration = np.zeros(amset_data.fermi_levels.shape)
        for n, t in np.ndindex(inverse_screening_length_sq.shape):
            n_conc = np.abs(amset_data.electron_conc[n, t])
            p_conc = np.abs(amset_data.hole_conc[n, t])
            # net carrier density / defect charge gives the impurity count,
            # scaled by the user-supplied compensation factor
            excess_impurities = abs((n_conc - p_conc) / defect_charge)
            impurity_concentration = excess_impurities * comp_factor
            q_concentration[n, t] = defect_charge ** 2 * impurity_concentration
            imp_info.append(
                (
                    amset_data.doping[n] * (1 / bohr_to_cm) ** 3,
                    amset_data.temperatures[t],
                    inverse_screening_length_sq[n, t],
                    impurity_concentration * (1 / bohr_to_cm) ** 3,
                )
            )
        logger.info("Inverse screening length (β) and impurity concentration (Nᵢᵢ):")
        table = tabulate(
            imp_info,
            headers=("conc [cm⁻³]", "temp [K]", "β² [a₀⁻²]", "Nᵢᵢ [cm⁻³]"),
            numalign="right",
            stralign="center",
            floatfmt=(".2e", ".1f", ".2e", ".2e"),
        )
        logger.info(table)
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            q_concentration,
            inverse_screening_length_sq,
        )

    def prefactor(self, spin: Spin, b_idx: int):
        # need to return prefactor with shape (nspins, ndops, ntemps, nbands)
        return self._prefactor

    def factor(
        self,
        unit_q: np.ndarray,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Screened Coulomb q-dependence: 1 / (ε_q² (|q|² + β²)²)."""
        # direction-dependent static dielectric along q (ε / 4π)
        static_tensor = self.properties["static_dielectric"] / (4 * np.pi)
        static_diel = np.einsum("ij,ij->i", unit_q, np.dot(static_tensor, unit_q.T).T)
        diel_factor = (1 / static_diel) ** 2
        # broadcast over (ndops, ntemps, nq)
        return (
            diel_factor[None, None]
            / (norm_q_sq[None, None] + self.inverse_screening_length_sq[..., None]) ** 2
        )
class PiezoelectricScattering(AbstractElasticScattering):
    """Piezoelectric (PIE) scattering.

    Uses the h piezoelectric coefficient (stress-voltage form) contracted
    with the acoustic-mode strain tensors obtained from the Christoffel
    equation, with optional free-carrier screening.
    """

    name = "PIE"
    required_properties = (
        "piezoelectric_constant",
        "elastic_constant",
        "high_frequency_dielectric",
        "free_carrier_screening",
    )

    def __init__(
        self,
        properties,
        doping,
        temperatures,
        nbands,
        piezoelectric_constant,
        inverse_screening_length_sq,
    ):
        super().__init__(properties, doping, temperatures, nbands)
        self.piezoelectric_constant = piezoelectric_constant
        self.inverse_screening_length_sq = inverse_screening_length_sq
        # kT per temperature, converted to a rate in 1/s
        self._prefactor = self.temperatures[None, :] * boltzmann_au * s_to_au
        self._shape = np.ones((len(self.doping), len(self.temperatures)))
        # elastic constant converted from GPa to atomic units
        self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au

    @classmethod
    def from_amset_data(
        cls, materials_properties: Dict[str, Any], amset_data: AmsetData
    ):
        """Convert the piezoelectric tensor to atomic units and build the
        scatterer, computing screening lengths if requested."""
        shape = (len(amset_data.doping), len(amset_data.temperatures))
        # BUG FIX: the previous in-place `e *= ...` mutated the array stored
        # in materials_properties, so constructing this class twice would
        # apply the unit conversion twice.  Convert on a fresh array instead.
        e = materials_properties["piezoelectric_constant"] * (
            coulomb_to_au / m_to_bohr ** 2
        )
        dielectric = materials_properties["high_frequency_dielectric"] / (4 * np.pi)
        inv_dielectric = np.linalg.inv(dielectric)
        # use h piezoelectric coefficient (Stress-Voltage)
        piezoelectric_constant = np.einsum("mn,mkl->nkl", inv_dielectric, e)
        if materials_properties["free_carrier_screening"]:
            avg_diel = np.linalg.eigvalsh(
                materials_properties["high_frequency_dielectric"]
            ).mean()
            inverse_screening_length_sq = calculate_inverse_screening_length_sq(
                amset_data, avg_diel
            )
        else:
            # fill with small value for numerical convergence
            inverse_screening_length_sq = np.full(shape, 1e-12)
        return cls(
            cls.get_properties(materials_properties),
            amset_data.doping,
            amset_data.temperatures,
            cls.get_nbands(amset_data),
            piezoelectric_constant,
            inverse_screening_length_sq,
        )

    def prefactor(self, spin: Spin, b_idx: int):
        # need to return prefactor with shape (ndops, ntemps)
        return self._prefactor * self._shape

    def factor(
        self,
        unit_q: np.ndarray,
        norm_q_sq: np.ndarray,
        spin: Spin,
        band_idx: int,
        kpoint: np.ndarray,
        velocity: np.ndarray,
    ):
        """Piezoelectric coupling of q to each acoustic mode, weighted by
        the inverse mode stiffness and screened by (|q|² + β²)."""
        christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
        (
            (c_trans_a, c_trans_b, c_long),
            (v_trans_a, v_trans_b, v_long),
        ) = solve_christoffel_equation(christoffel_tensors)
        strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
            unit_q, v_long, v_trans_a, v_trans_b
        )
        # contract the h coefficient with the unit q direction
        qh = np.einsum("ijk,nj->nik", self.piezoelectric_constant, unit_q)
        # einsum is double dot product along first axis
        factor = (
            np.einsum("nij,nij->n", strain_long, qh) ** 2 / c_long
            + np.einsum("nij,nij->n", strain_trans_a, qh) ** 2 / c_trans_a
            + np.einsum("nij,nij->n", strain_trans_b, qh) ** 2 / c_trans_b
        )
        # broadcast to (ndops, ntemps, nq) and apply screening
        return (
            factor[None, None]
            * np.ones(self._shape.shape + norm_q_sq.shape)
            / (norm_q_sq[None, None, :] + self.inverse_screening_length_sq[..., None])
        )
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, Tuple
import numpy as np
from pymatgen.electronic_structure.core import Spin
from tabulate import tabulate
from amset.constants import (
boltzmann_au,
coulomb_to_au,
ev_to_hartree,
gpa_to_au,
m_to_bohr,
s_to_au,
)
from amset.core.data import AmsetData, check_nbands_equal
from amset.interpolation.deformation import DeformationPotentialInterpolator
from amset.scattering.common import calculate_inverse_screening_length_sq
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
logger = logging.getLogger(__name__)
class AbstractElasticScattering(ABC):
name: str
required_properties: Tuple[str]
def __init__(self, properties, doping, temperatures, nbands):
self.properties = properties
self.doping = doping
self.temperatures = temperatures
self.nbands = nbands
self.spins = list(nbands.keys())
@classmethod
def from_amset_data(
cls, materials_properties: Dict[str, Any], amset_data: AmsetData
):
return cls(
cls.get_properties(materials_properties),
amset_data.doping,
amset_data.temperatures,
cls.get_nbands(amset_data),
)
@abstractmethod
def prefactor(self, spin: Spin, b_idx: int):
pass
@abstractmethod
def factor(
self,
unit_q: np.array,
norm_q_sq: np.ndarray,
spin: Spin,
band_idx: int,
kpoint: np.ndarray,
velocity: np.ndarray,
):
pass
def to_reference(self):
return self.properties, self.doping, self.temperatures, self.nbands
@classmethod
def from_reference(cls, properties, doping, temperatures, nbands):
return cls(properties, doping, temperatures, nbands)
@classmethod
def get_properties(cls, materials_properties):
return {p: materials_properties[p] for p in cls.required_properties}
@staticmethod
def get_nbands(amset_data):
return {s: len(amset_data.energies[s]) for s in amset_data.spins}
class AcousticDeformationPotentialScattering(AbstractElasticScattering):
name = "ADP"
required_properties = ("deformation_potential", "elastic_constant")
def __init__(
self,
properties,
doping,
temperatures,
nbands,
deformation_potential,
vb_idx,
is_metal,
fermi_levels,
):
super().__init__(properties, doping, temperatures, nbands)
self._prefactor = boltzmann_au * s_to_au
self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au
self.deformation_potential = deformation_potential
self.vb_idx = vb_idx
self.is_metal = is_metal
self.fermi_levels = fermi_levels
@classmethod
def from_amset_data(
cls, materials_properties: Dict[str, Any], amset_data: AmsetData
):
vb_idx = amset_data.vb_idx
is_metal = amset_data.is_metal
deformation_potential = materials_properties["deformation_potential"]
if isinstance(deformation_potential, (str, Path)):
deformation_potential = DeformationPotentialInterpolator.from_file(
deformation_potential, scale=ev_to_hartree
)
equal = check_nbands_equal(deformation_potential, amset_data)
if not equal:
raise RuntimeError(
"Deformation potential file does not contain the correct number of"
" bands\nEnsure it was generated using the same energy_cutoff as "
"this AMSET run."
)
elif is_metal and isinstance(deformation_potential, tuple):
logger.warning(
"System is metallic but deformation potentials for both "
"the valence and conduction bands have been set... using the "
"valence band potential for all bands"
)
deformation_potential = deformation_potential[0] * ev_to_hartree
elif is_metal:
deformation_potential = deformation_potential * ev_to_hartree
elif not is_metal and not isinstance(deformation_potential, tuple):
logger.warning(
"System is semiconducting but only one deformation "
"potential has been set... using this potential for all bands."
)
deformation_potential = (
deformation_potential * ev_to_hartree,
deformation_potential * ev_to_hartree,
)
else:
deformation_potential = (
deformation_potential[0] * ev_to_hartree,
deformation_potential[1] * ev_to_hartree,
)
return cls(
cls.get_properties(materials_properties),
amset_data.doping,
amset_data.temperatures,
cls.get_nbands(amset_data),
deformation_potential,
vb_idx,
is_metal,
amset_data.fermi_levels,
)
def prefactor(self, spin: Spin, b_idx: int):
prefactor = (
self._prefactor
* self.temperatures[None, :]
* np.ones((len(self.doping), len(self.temperatures)))
)
return prefactor
def factor(
self,
unit_q: np.array,
norm_q_sq: np.ndarray,
spin: Spin,
band_idx: int,
kpoint: np.ndarray,
velocity: np.ndarray,
):
christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
(
(c_trans_a, c_trans_b, c_long),
(v_trans_a, v_trans_b, v_long),
) = solve_christoffel_equation(christoffel_tensors)
if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
deform = self.deformation_potential.interpolate(spin, [band_idx], [kpoint])
deform = np.abs(deform[0])
deform += np.outer(velocity, velocity) # velocity correction
strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
unit_q, v_long, v_trans_a, v_trans_b
)
factor = (
np.tensordot(strain_long, deform) ** 2 / c_long
+ np.tensordot(strain_trans_a, deform) ** 2 / c_trans_a
+ np.tensordot(strain_trans_b, deform) ** 2 / c_trans_b
)
elif self.is_metal:
factor = self.deformation_potential ** 2 / c_long
else:
def_idx = 1 if band_idx > self.vb_idx[spin] else 0
factor = self.deformation_potential[def_idx] ** 2 / c_long
return factor[None, None] * np.ones(self.fermi_levels.shape + norm_q_sq.shape)
def to_reference(self):
base_reference = super().to_reference()
if isinstance(self.deformation_potential, DeformationPotentialInterpolator):
deformation_reference = self.deformation_potential.to_reference()
is_interpolator = True
else:
deformation_reference = self.deformation_potential
is_interpolator = False
return base_reference + (
deformation_reference,
self.vb_idx,
self.is_metal,
self.fermi_levels,
is_interpolator,
)
@classmethod
def from_reference(
cls,
properties,
doping,
temperatures,
nbands,
deformation_reference,
vb_idx,
is_metal,
fermi_levels,
is_interpolator,
):
if is_interpolator:
deformation_potential = DeformationPotentialInterpolator.from_reference(
*deformation_reference
)
else:
deformation_potential = deformation_reference
return cls(
properties,
doping,
temperatures,
nbands,
deformation_potential,
vb_idx,
is_metal,
fermi_levels,
)
def prepare_acoustic_strains(unit_q, v_long, v_trans_a, v_trans_b):
# orient v_long and unit_q to face the same direction
# the einsum is just pairwise dot product along the first axis
sign = np.sign(np.einsum("ij,ij->i", unit_q, v_long))[:, None]
v_long *= sign
v_trans_a *= sign
v_trans_b *= sign
strain_long = get_unit_strain_tensors(unit_q, v_long)
strain_trans_a = get_unit_strain_tensors(unit_q, v_trans_a)
strain_trans_b = get_unit_strain_tensors(unit_q, v_trans_b)
return strain_long, strain_trans_a, strain_trans_b
def get_christoffel_tensors(elastic_constant, unit_q):
return np.einsum("ijkl,ni,nl->njk", elastic_constant, unit_q, unit_q)
def solve_christoffel_equation(christoffel_tensors):
eigenvalues, eigenvectors = np.linalg.eigh(christoffel_tensors)
return eigenvalues.T, eigenvectors.transpose(2, 0, 1)
def get_unit_strain_tensors(propagation_vectors, polarization_vectors):
return propagation_vectors[:, :, None] * polarization_vectors[:, None, :]
class IonizedImpurityScattering(AbstractElasticScattering):
name = "IMP"
required_properties = ("defect_charge", "static_dielectric", "compensation_factor")
def __init__(
self,
properties,
doping,
temperatures,
nbands,
q_concentration,
inverse_screening_length_sq,
):
super().__init__(properties, doping, temperatures, nbands)
self._prefactor = q_concentration * s_to_au
self.inverse_screening_length_sq = inverse_screening_length_sq
@classmethod
def from_amset_data(
cls, materials_properties: Dict[str, Any], amset_data: AmsetData
):
from amset.constants import bohr_to_cm
avg_diel = np.linalg.eigvalsh(materials_properties["static_dielectric"]).mean()
inverse_screening_length_sq = calculate_inverse_screening_length_sq(
amset_data, avg_diel
)
defect_charge = materials_properties["defect_charge"]
comp_factor = materials_properties["compensation_factor"]
imp_info = []
q_concentration = np.zeros(amset_data.fermi_levels.shape)
for n, t in np.ndindex(inverse_screening_length_sq.shape):
n_conc = np.abs(amset_data.electron_conc[n, t])
p_conc = np.abs(amset_data.hole_conc[n, t])
excess_impurities = abs((n_conc - p_conc) / defect_charge)
impurity_concentration = excess_impurities * comp_factor
q_concentration[n, t] = defect_charge ** 2 * impurity_concentration
imp_info.append(
(
amset_data.doping[n] * (1 / bohr_to_cm) ** 3,
amset_data.temperatures[t],
inverse_screening_length_sq[n, t],
impurity_concentration * (1 / bohr_to_cm) ** 3,
)
)
logger.info("Inverse screening length (β) and impurity concentration (Nᵢᵢ):")
table = tabulate(
imp_info,
headers=("conc [cm⁻³]", "temp [K]", "β² [a₀⁻²]", "Nᵢᵢ [cm⁻³]"),
numalign="right",
stralign="center",
floatfmt=(".2e", ".1f", ".2e", ".2e"),
)
logger.info(table)
return cls(
cls.get_properties(materials_properties),
amset_data.doping,
amset_data.temperatures,
cls.get_nbands(amset_data),
q_concentration,
inverse_screening_length_sq,
)
def prefactor(self, spin: Spin, b_idx: int):
# need to return prefactor with shape (nspins, ndops, ntemps, nbands)
return self._prefactor
def factor(
self,
unit_q: np.array,
norm_q_sq: np.ndarray,
spin: Spin,
band_idx: int,
kpoint: np.ndarray,
velocity: np.ndarray,
):
static_tensor = self.properties["static_dielectric"] / (4 * np.pi)
static_diel = np.einsum("ij,ij->i", unit_q, np.dot(static_tensor, unit_q.T).T)
diel_factor = (1 / static_diel) ** 2
return (
diel_factor[None, None]
/ (norm_q_sq[None, None] + self.inverse_screening_length_sq[..., None]) ** 2
)
class PiezoelectricScattering(AbstractElasticScattering):
name = "PIE"
required_properties = (
"piezoelectric_constant",
"elastic_constant",
"high_frequency_dielectric",
"free_carrier_screening",
)
def __init__(
self,
properties,
doping,
temperatures,
nbands,
piezoelectric_constant,
inverse_screening_length_sq,
):
super().__init__(properties, doping, temperatures, nbands)
self.piezoelectric_constant = piezoelectric_constant
self.inverse_screening_length_sq = inverse_screening_length_sq
self._prefactor = self.temperatures[None, :] * boltzmann_au * s_to_au
self._shape = np.ones((len(self.doping), len(self.temperatures)))
self.elastic_constant = self.properties["elastic_constant"] * gpa_to_au
@classmethod
def from_amset_data(
cls, materials_properties: Dict[str, Any], amset_data: AmsetData
):
# convert dielectric to atomic units
shape = (len(amset_data.doping), len(amset_data.temperatures))
e = materials_properties["piezoelectric_constant"]
e *= coulomb_to_au / m_to_bohr ** 2 # convert to atomic units
dielectric = materials_properties["high_frequency_dielectric"] / (4 * np.pi)
inv_dielectric = np.linalg.inv(dielectric)
# use h piezoelectric coefficient (Stress-Voltage)
piezoelectric_constant = np.einsum("mn,mkl->nkl", inv_dielectric, e)
if materials_properties["free_carrier_screening"]:
avg_diel = np.linalg.eigvalsh(
materials_properties["high_frequency_dielectric"]
).mean()
inverse_screening_length_sq = calculate_inverse_screening_length_sq(
amset_data, avg_diel
)
else:
# fill with small value for numerical convergence
inverse_screening_length_sq = np.full(shape, 1e-12)
return cls(
cls.get_properties(materials_properties),
amset_data.doping,
amset_data.temperatures,
cls.get_nbands(amset_data),
piezoelectric_constant,
inverse_screening_length_sq,
)
def prefactor(self, spin: Spin, b_idx: int):
# need to return prefactor with shape (ndops, ntemps)
return self._prefactor * self._shape
def factor(
self,
unit_q: np.array,
norm_q_sq: np.ndarray,
spin: Spin,
band_idx: int,
kpoint: np.ndarray,
velocity: np.ndarray,
):
christoffel_tensors = get_christoffel_tensors(self.elastic_constant, unit_q)
(
(c_trans_a, c_trans_b, c_long),
(v_trans_a, v_trans_b, v_long),
) = solve_christoffel_equation(christoffel_tensors)
strain_long, strain_trans_a, strain_trans_b = prepare_acoustic_strains(
unit_q, v_long, v_trans_a, v_trans_b
)
qh = np.einsum("ijk,nj->nik", self.piezoelectric_constant, unit_q)
# einsum is double dot product along first axis
factor = (
np.einsum("nij,nij->n", strain_long, qh) ** 2 / c_long
+ np.einsum("nij,nij->n", strain_trans_a, qh) ** 2 / c_trans_a
+ np.einsum("nij,nij->n", strain_trans_b, qh) ** 2 / c_trans_b
)
return (
factor[None, None]
* np.ones(self._shape.shape + norm_q_sq.shape)
/ (norm_q_sq[None, None, :] + self.inverse_screening_length_sq[..., None])
) | 0.864739 | 0.231549 |
from mesos.exceptions import MesosException
class Encoder():
    """
    Encode an arbitrary message type into a 'RecordIO' message.

    The constructor takes a callable 'serialize(message)' that must turn
    any message handed to 'encode()' into a 'bytes' object.  'encode()'
    then frames that payload as '<length>\\n<payload>'.

    :param serialize: a function turning a message into a 'bytes' payload
    :type serialize: function
    """
    def __init__(self, serialize):
        self.serialize = serialize

    def encode(self, message):
        """
        Serialize 'message' and wrap it in a 'RecordIO' frame.

        :param message: the message to serialize and frame
        :type message: object
        :returns: a '<length>\\n' header followed by the serialized payload
        :rtype: bytes
        """
        payload = self.serialize(message)
        if not isinstance(payload, bytes):
            raise MesosException("Calling 'serialize(message)' must"
                                 " return a 'bytes' object")
        header = "{}\n".format(len(payload)).encode("UTF-8")
        return header + payload
class Decoder():
    """
    Decode a 'RecordIO' message back to an arbitrary message type.

    This class encapsulates the process of decoding a message
    previously encoded with 'RecordIO' back to an arbitrary
    message type. Its constructor takes a deserialization
    function of the form 'deserialize(data)' which receives the
    complete 'UTF-8' encoded payload of one 'RecordIO' frame.

    The 'decode(data)' method takes a 'UTF-8' encoded byte array
    as input and buffers it across subsequent calls to
    construct fully framed 'RecordIO' messages, which are decoded
    and returned in a list.

    :param deserialize: a function to deserialize the payload of a
                        'RecordIO' frame built up by subsequent calls
                        to 'decode(data)'
    :type deserialize: function
    """
    # Parser states: reading the '<length>\n' header, reading the record
    # payload, or permanently failed after a malformed header.
    HEADER = 0
    RECORD = 1
    FAILED = 2

    def __init__(self, deserialize):
        self.deserialize = deserialize
        self.state = self.HEADER
        # Idiom fix: b"" instead of bytes("", "UTF-8").
        self.buffer = b""
        self.length = 0

    def decode(self, data):
        """
        Decode a 'RecordIO' formatted message to its original type.

        :param data: an array of 'UTF-8' encoded bytes that make up a
               partial 'RecordIO' message. Subsequent calls to this
               function maintain state to build up a full 'RecordIO'
               message and decode it
        :type data: bytes
        :returns: a list of deserialized messages
        :rtype: list
        :raises MesosException: if 'data' is not bytes, or the decoder has
               previously entered the FAILED state
        """
        if not isinstance(data, bytes):
            # BUG FIX: corrected the garbled "must of of type" wording.
            raise MesosException("Parameter 'data' must be of type 'bytes'")
        if self.state == self.FAILED:
            raise MesosException("Decoder is in a FAILED state")
        records = []
        for c in data:
            if self.state == self.HEADER:
                if c != ord('\n'):
                    self.buffer += bytes([c])
                    continue
                try:
                    self.length = int(self.buffer.decode("UTF-8"))
                except Exception as exception:
                    self.state = self.FAILED
                    # BUG FIX: the original message concatenation was missing
                    # a space ("length'<buf>'"); also chain the cause.
                    raise MesosException(
                        "Failed to decode length '{buffer}': {error}"
                        .format(buffer=self.buffer, error=exception)
                    ) from exception
                self.buffer = b""
                self.state = self.RECORD
                # Note that for 0 length records, we immediately decode.
                if self.length <= 0:
                    records.append(self.deserialize(self.buffer))
                    self.state = self.HEADER
            elif self.state == self.RECORD:
                assert self.length
                assert len(self.buffer) < self.length
                self.buffer += bytes([c])
                if len(self.buffer) == self.length:
                    records.append(self.deserialize(self.buffer))
                    self.buffer = b""
                    self.state = self.HEADER
        return records
class Encoder():
    """Encode an arbitrary message type into a 'RecordIO' frame.

    The constructor takes a serialization function of the form
    'serialize(message)'. That function is responsible for turning
    whatever message type is passed to 'encode()' into a 'UTF-8'
    encoded byte array. 'encode(message)' then frames the result in
    'RecordIO' format: the payload length as ASCII digits, a newline,
    and the payload itself.

    :param serialize: a function serializing any message passed to
                      'encode()' into a 'UTF-8' encoded byte array
    :type serialize: function
    """

    def __init__(self, serialize):
        self.serialize = serialize

    def encode(self, message):
        """Serialize 'message' and wrap it in a 'RecordIO' frame.

        :param message: a message to serialize and then wrap in
                        a 'RecordIO' frame
        :type message: object
        :returns: a serialized message wrapped in a 'RecordIO' frame
        :rtype: bytes
        """
        payload = self.serialize(message)
        if not isinstance(payload, bytes):
            raise MesosException("Calling 'serialize(message)' must"
                                 " return a 'bytes' object")
        # Frame header: "<length>\n" followed by the payload bytes.
        header = (str(len(payload)) + "\n").encode("UTF-8")
        return header + payload
class Decoder():
    """
    Decode a 'RecordIO' message back to an arbitrary message type.
    This class encapsulates the process of decoding a message
    previously encoded with 'RecordIO' back to an arbitrary
    message type. Its constructor takes a deserialization
    function of the form 'deserialize(data)'. This
    deserialization function is responsible for knowing how to
    take a fully constructed 'RecordIO' message containing a
    'UTF-8' encoded byte array and deserialize it back into the
    original message type.
    The 'decode(data)' message takes a 'UTF-8' encoded byte array
    as input and buffers it across subsequent calls to
    construct a set of fully constructed 'RecordIO' messages that
    are decoded and returned in a list.
    :param deserialize: a function to deserialize from 'RecordIO'
                        messages built up by subsequent calls
                        to 'decode(data)'
    :type deserialize: function
    """

    # Parser states: HEADER while accumulating the ASCII length line,
    # RECORD while accumulating a record's payload bytes, FAILED after
    # a malformed length header (the decoder is then unusable).
    HEADER = 0
    RECORD = 1
    FAILED = 2

    def __init__(self, deserialize):
        self.deserialize = deserialize
        # Start out expecting a length header.
        self.state = self.HEADER
        # Bytes accumulated so far for the current header or record.
        self.buffer = bytes("", "UTF-8")
        # Declared length of the record currently being read.
        self.length = 0
def decode(self, data):
"""
Decode a 'RecordIO' formatted message to its original type.
:param data: an array of 'UTF-8' encoded bytes that make up a
partial 'RecordIO' message. Subsequent calls to this
function maintain state to build up a full 'RecordIO'
message and decode it
:type data: bytes
:returns: a list of deserialized messages
:rtype: list
"""
if not isinstance(data, bytes):
raise MesosException("Parameter 'data' must of of type 'bytes'")
if self.state == self.FAILED:
raise MesosException("Decoder is in a FAILED state")
records = []
for c in data:
if self.state == self.HEADER:
if c != ord('\n'):
self.buffer += bytes([c])
continue
try:
self.length = int(self.buffer.decode("UTF-8"))
except Exception as exception:
self.state = self.FAILED
raise MesosException("Failed to decode length"
"'{buffer}': {error}"
.format(buffer=self.buffer,
error=exception))
self.buffer = bytes("", "UTF-8")
self.state = self.RECORD
# Note that for 0 length records, we immediately decode.
if self.length <= 0:
records.append(self.deserialize(self.buffer))
self.state = self.HEADER
elif self.state == self.RECORD:
assert self.length
assert len(self.buffer) < self.length
self.buffer += bytes([c])
if len(self.buffer) == self.length:
records.append(self.deserialize(self.buffer))
self.buffer = bytes("", "UTF-8")
self.state = self.HEADER
return records | 0.919265 | 0.509825 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
    """Form for signing in an existing user."""

    # Registered e-mail address; length bound matches the 64-char limit
    # used by the other forms in this module.
    email = StringField('Email', validators=[DataRequired(), Length(1,64),
                                             Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # Persistent-session checkbox; presumably passed to the login view's
    # remember-me handling — confirm against the auth views.
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
    """Sign-up form: e-mail, username, and password with confirmation.

    The inline validators reject e-mail addresses and usernames that
    already exist in the ``User`` table (WTForms calls any method named
    ``validate_<fieldname>`` automatically).
    """

    email = StringField('Email', validators=[DataRequired(), Length(1,64), Email()])
    username = StringField('Username', validators=[DataRequired(), Length(1,64),
                           Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                  'Usernames must have only letters, numbers, dots or '
                                  'underscores')])
    # Fix: message read 'Password must match' (grammar; now consistent
    # with ChangePasswordForm's wording).
    password = PasswordField('Password', validators=[DataRequired(),
                             EqualTo('password2', message='Passwords must match')])
    # Fix: label was a corrupted '<PASSWORD>' placeholder.
    password2 = PasswordField('Confirm Password', validators=[DataRequired()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Reject e-mail addresses that are already registered."""
        # NOTE(review): unlike ChangeEmailForm.validate_email this does not
        # .lower() the address — confirm whether e-mails are stored
        # normalized before changing it.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered')

    def validate_username(self, field):
        """Reject usernames that are already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use. Pick another username.')
class ChangePasswordForm(FlaskForm):
    """Change-password form for a logged-in user.

    Requires the current password plus the new password entered twice.
    """

    # Fix: all three labels were corrupted '<PASSWORD>' placeholders;
    # restored to descriptive labels.
    old_password = PasswordField('Old Password', validators=[DataRequired()])
    password = PasswordField('New Password', validators=[
        DataRequired(), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm New Password', validators=[DataRequired()])
    submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
    """Request a password-reset e-mail for a registered address."""

    email = StringField("Your Registered Email",validators=[
        DataRequired(), Length(1,64), Email()
    ])
    submit = SubmitField('Submit')
class PasswordResetForm(FlaskForm):
    """Set a new password after following a reset link."""

    # Fix: message read 'Password must match' (grammar; now consistent
    # with ChangePasswordForm's wording).
    password = PasswordField('Create New Password', validators=[
        DataRequired(), EqualTo('password2', message='Passwords must match')])
    # Fix: label was a corrupted '<PASSWORD>' placeholder.
    password2 = PasswordField('Confirm New Password', validators=[DataRequired()])
    submit = SubmitField('Update Password')
class ChangeEmailForm(FlaskForm):
email = StringField('Enter new email', validators=[
DataRequired(), Email()
])
password = PasswordField('Enter your password', validators=[DataRequired()])
submit = SubmitField('Update Email')
def validate_email(self, field):
if User.query.filter_by(email=field.data.lower()).first():
raise ValidationError('Email already registered') | app/auth/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1,64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1,64), Email()])
username = StringField('Username', validators=[DataRequired(), Length(1,64),
Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, numbers, dots or '
'underscores')])
password = PasswordField('Password', validators=[DataRequired(),
EqualTo('password2', message='Password must match')])
password2 = PasswordField('<PASSWORD>', validators=[DataRequired()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use. Pick another username.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('<PASSWORD>', validators=[DataRequired()])
password = PasswordField('<PASSWORD>', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('<PASSWORD>', validators=[DataRequired()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
email = StringField("Your Registered Email",validators=[
DataRequired(), Length(1,64), Email()
])
submit = SubmitField('Submit')
class PasswordResetForm(FlaskForm):
password = PasswordField('Create New Password', validators=[
DataRequired(), EqualTo('password2', message='Password must match')])
password2 = PasswordField('<PASSWORD>', validators=[DataRequired()])
submit = SubmitField('Update Password')
class ChangeEmailForm(FlaskForm):
email = StringField('Enter new email', validators=[
DataRequired(), Email()
])
password = PasswordField('Enter your password', validators=[DataRequired()])
submit = SubmitField('Update Email')
def validate_email(self, field):
if User.query.filter_by(email=field.data.lower()).first():
raise ValidationError('Email already registered') | 0.388966 | 0.130535 |
import warnings
import numpy as np
from nengo.base import Process
from nengo.dists import DistributionParam, Gaussian
from nengo.exceptions import ValidationError
from nengo.params import BoolParam, DictParam, EnumParam, NdarrayParam, NumberParam
from nengo.synapses import LinearFilter, Lowpass, SynapseParam
from nengo.utils.numpy import array_hash, clip, is_number, rfftfreq
class WhiteNoise(Process):
    """Full-spectrum white noise process.

    Parameters
    ----------
    dist : Distribution, optional
        Distribution from which noise samples are drawn.
    scale : bool, optional
        Whether to scale the white noise for integration. Integrating white
        noise requires a time constant of ``sqrt(dt)`` instead of ``dt`` on
        the noise term [1]_, so that the magnitude of the integrated noise
        does not change with ``dt``.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.

    References
    ----------
    .. [1] Gillespie, D.T. (1996) Exact numerical simulation of the Ornstein-
       Uhlenbeck process and its integral. Phys. Rev. E 54, pp. 2084-91.
    """

    dist = DistributionParam("dist")
    scale = BoolParam("scale")

    def __init__(self, dist=Gaussian(mean=0, std=1), scale=True, **kwargs):
        super().__init__(default_size_in=0, **kwargs)
        self.dist = dist
        self.scale = scale

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0,)
        assert len(shape_out) == 1

        sample_dist = self.dist
        do_scale = self.scale
        # dt / sqrt(dt) == sqrt(dt), so dividing by sqrt(dt) here yields the
        # sqrt(dt) time constant needed when the noise is later integrated.
        gain = 1.0 / np.sqrt(dt)

        def step_whitenoise(_):
            sample = sample_dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if do_scale:
                return gain * sample
            return sample

        return step_whitenoise
class FilteredNoise(Process):
    """White noise passed through a synapse filter.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse used to filter the underlying white noise.
    dist : Distribution, optional
        The distribution the raw white noise is drawn from.
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to ``dt``.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = SynapseParam("synapse")
    dist = DistributionParam("dist")
    scale = BoolParam("scale")

    def __init__(
        self,
        synapse=Lowpass(tau=0.005),
        dist=Gaussian(mean=0, std=1),
        scale=True,
        **kwargs,
    ):
        super().__init__(default_size_in=0, **kwargs)
        self.synapse = synapse
        self.dist = dist
        self.scale = scale

    def make_state(self, shape_in, shape_out, dt, dtype=None):
        # All process state is owned by the synapse filter.
        return self.synapse.make_state(shape_out, shape_out, dt, dtype=dtype)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0,)
        assert len(shape_out) == 1

        sample_dist = self.dist
        do_scale = self.scale
        # 1/sqrt(dt) gain gives the sqrt(dt) time constant needed for
        # dt-invariant integration of the noise.
        gain = 1.0 / np.sqrt(dt)
        filter_step = self.synapse.make_step(shape_out, shape_out, dt, rng, state)

        def step_filterednoise(t):
            sample = sample_dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if do_scale:
                sample *= gain
            return filter_step(t, sample)

        return step_filterednoise
class BrownNoise(FilteredNoise):
    """Brown noise process (aka Brownian noise, red noise, Wiener process).

    Implemented as white noise fed through a perfect integrator.

    Parameters
    ----------
    dist : Distribution, optional
        The distribution the underlying white noise is drawn from.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    def __init__(self, dist=Gaussian(mean=0, std=1), **kwargs):
        # Transfer function 1/s (an integrator), discretized with Euler's
        # method, turns the white-noise input into Brownian motion.
        integrator = LinearFilter([1], [1, 0], method="euler")
        super().__init__(synapse=integrator, dist=dist, **kwargs)
class WhiteSignal(Process):
    """An ideal low-pass filtered white noise process.

    This signal is created in the frequency domain, and designed to have
    exactly equal power at all frequencies below the cut-off frequency,
    and no power above the cut-off.
    The signal is naturally periodic, so it can be used beyond its period
    while still being continuous with continuous derivatives.

    Parameters
    ----------
    period : float
        A white noise signal with this period will be generated.
        Samples will repeat after this duration.
    high : float
        The cut-off frequency of the low-pass filter, in Hz.
        Must not exceed the Nyquist frequency for the simulation
        timestep, which is ``0.5 / dt``.
    rms : float, optional
        The root mean square power of the filtered signal
    y0 : float, optional
        Align the phase of each output dimension to begin at the value
        that is closest (in absolute value) to y0.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    period = NumberParam("period", low=0, low_open=True)
    high = NumberParam("high", low=0, low_open=True)
    rms = NumberParam("rms", low=0, low_open=True)
    y0 = NumberParam("y0", optional=True)

    def __init__(self, period, high, rms=0.5, y0=None, **kwargs):
        super().__init__(default_size_in=0, **kwargs)
        self.period = period
        self.high = high
        self.rms = rms
        self.y0 = y0

        # A cutoff below the fundamental frequency (1/period) would zero out
        # every frequency bin, leaving an all-zero signal.
        if self.high is not None and self.high < 1.0 / self.period:
            raise ValidationError(
                "Make ``high >= 1. / period`` to produce a non-zero signal",
                attr="high",
                obj=self,
            )

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0,)

        nyquist_cutoff = 0.5 / dt
        if self.high > nyquist_cutoff:
            raise ValidationError(
                "High must not exceed the Nyquist frequency "
                f"for the given dt ({nyquist_cutoff:0.3f})",
                attr="high",
                obj=self,
            )

        # Build the signal in the frequency domain: one complex Gaussian
        # coefficient per rfft bin of a period-long signal of 2*n samples.
        n_coefficients = int(np.ceil(self.period / dt / 2.0))
        shape = (n_coefficients + 1,) + shape_out
        sigma = self.rms * np.sqrt(0.5)
        coefficients = 1j * rng.normal(0.0, sigma, size=shape)
        coefficients += rng.normal(0.0, sigma, size=shape)
        coefficients[0] = 0.0  # zero the DC component
        coefficients[-1].imag = 0.0  # Nyquist bin must be real for a real signal
        # Remove all power above the cut-off frequency.
        set_to_zero = rfftfreq(2 * n_coefficients, d=dt) > self.high
        coefficients[set_to_zero] = 0.0
        # Rescale so the overall RMS power is unaffected by the zeroed bins.
        power_correction = np.sqrt(
            1.0 - np.sum(set_to_zero, dtype=float) / n_coefficients
        )
        if power_correction > 0.0:
            coefficients /= power_correction
        coefficients *= np.sqrt(2 * n_coefficients)
        # Inverse real FFT turns the spectrum into the time-domain signal.
        signal = np.fft.irfft(coefficients, axis=0)

        if self.y0 is not None:
            # Starts each dimension off where it is closest to y0
            def shift(x):
                offset = np.argmin(abs(self.y0 - x))
                return np.roll(x, -offset + 1)  # +1 since t starts at dt

            signal = np.apply_along_axis(shift, 0, signal)

        def step_whitesignal(t):
            # Index the precomputed signal by timestep, wrapping to make it
            # periodic.
            i = int(round(t / dt))
            return signal[i % signal.shape[0]]

        return step_whitesignal
class PresentInput(Process):
    """Cycle through a series of inputs, showing each for a fixed duration.

    Parameters
    ----------
    inputs : array_like
        Inputs to present; each row is one input, flattened on output.
    presentation_time : float
        Duration (in seconds) each input is shown for.
    """

    inputs = NdarrayParam("inputs", shape=("...",))
    presentation_time = NumberParam("presentation_time", low=0, low_open=True)

    def __init__(self, inputs, presentation_time, **kwargs):
        self.inputs = inputs
        self.presentation_time = presentation_time
        super().__init__(
            default_size_in=0, default_size_out=self.inputs[0].size, **kwargs
        )

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0,)
        assert shape_out == (self.inputs[0].size,)

        n_inputs = len(self.inputs)
        flat_inputs = self.inputs.reshape(n_inputs, -1)
        t_present = float(self.presentation_time)

        def step_presentinput(t):
            # (t - dt) because simulation time starts at dt; the small
            # epsilon guards against floating-point rounding at the
            # presentation boundaries. Wrap around to cycle the inputs.
            index = int((t - dt) / t_present + 1e-7)
            return flat_inputs[index % n_inputs]

        return step_presentinput
class PiecewiseDataParam(DictParam):
    """Piecewise-specific validation for the data dictionary.

    In the `.Piecewise` data dict, the keys are points in time (float) and
    values are numerical constants or callables of the same dimensionality.
    """

    equatable = True

    def coerce(self, instance, data):  # pylint: disable=arguments-renamed
        data = super().coerce(instance, data)

        # Flattened size shared by all values; None until the first value.
        size_out = None
        for time, value in data.items():
            if not is_number(time):
                raise ValidationError(
                    f"Keys must be times (floats or ints), not '{type(time).__name__}'",
                    attr="data",
                    obj=instance,
                )

            # figure out the length of this item
            if callable(value):
                # Callables stay callable in the dict; they are invoked here
                # only to validate the output and measure its size.
                try:
                    value = np.ravel(value(time))
                except Exception as e:
                    raise ValidationError(
                        f"callable object for time step {time:0.3f} "
                        "should return a numerical constant",
                        attr="data",
                        obj=instance,
                    ) from e
            else:
                # Constants are flattened and written back into the dict.
                # (Safe while iterating: only values change, never keys.)
                value = np.ravel(value)
                data[time] = value

            size = value.size

            # make sure this is the same size as previous items
            if size != size_out and size_out is not None:
                raise ValidationError(
                    f"time {time} has size {size} instead of {size_out}",
                    attr="data",
                    obj=instance,
                )
            size_out = size

        return data

    def hashvalue(self, instance):
        # Order-independent hash over (time, value-hash) pairs, so equal
        # dicts hash equally regardless of insertion order.
        return hash(
            frozenset(tuple((k, array_hash(v)) for k, v in self.data[instance].items()))
        )
class Piecewise(Process):
"""A piecewise function with different options for interpolation.
Given an input dictionary of ``{0: 0, 0.5: -1, 0.75: 0.5, 1: 0}``,
this process will emit the numerical values (0, -1, 0.5, 0)
starting at the corresponding time points (0, 0.5, 0.75, 1).
The keys in the input dictionary must be times (float or int).
The values in the dictionary can be floats, lists of floats,
or numpy arrays. All lists or numpy arrays must be of the same length,
as the output shape of the process will be determined by the shape
of the values.
Interpolation on the data points using `scipy.interpolate` is also
supported. The default interpolation is 'zero', which creates a
piecewise function whose values change at the specified time points.
So the above example would be shortcut for:
.. testcode::
def function(t):
if t < 0.5:
return 0
elif t < 0.75:
return -1
elif t < 1:
return 0.5
else:
return 0
For times before the first specified time, an array of zeros (of
the correct length) will be emitted.
This means that the above can be simplified to:
.. testcode::
from nengo.processes import Piecewise
Piecewise({0.5: -1, 0.75: 0.5, 1: 0})
Parameters
----------
data : dict
A dictionary mapping times to the values that should be emitted
at those times. Times must be numbers (ints or floats), while values
can be numbers, lists of numbers, numpy arrays of numbers,
or callables that return any of those options.
interpolation : str, optional
One of 'linear', 'nearest', 'slinear', 'quadratic', 'cubic', or 'zero'.
Specifies how to interpolate between times with specified value.
'zero' creates a plain piecewise function whose values begin at
corresponding time points, while all other options interpolate
as described in `scipy.interpolate`.
Attributes
----------
data : dict
A dictionary mapping times to the values that should be emitted
at those times. Times are numbers (ints or floats), while values
can be numbers, lists of numbers, numpy arrays of numbers,
or callables that return any of those options.
interpolation : str
One of 'linear', 'nearest', 'slinear', 'quadratic', 'cubic', or 'zero'.
Specifies how to interpolate between times with specified value.
'zero' creates a plain piecewise function whose values change at
corresponding time points, while all other options interpolate
as described in `scipy.interpolate`.
Examples
--------
.. testcode::
from nengo.processes import Piecewise
process = Piecewise({0.5: 1, 0.75: -1, 1: 0})
with nengo.Network() as model:
u = nengo.Node(process, size_out=process.default_size_out)
up = nengo.Probe(u)
with nengo.Simulator(model, progress_bar=False) as sim:
sim.run(1.5)
f = sim.data[up]
t = sim.trange()
print(f[t == 0.2])
print(f[t == 0.58])
.. testoutput::
[[ 0.]]
[[ 1.]]
"""
data = PiecewiseDataParam("data", readonly=True)
interpolation = EnumParam(
"interpolation",
values=("zero", "linear", "nearest", "slinear", "quadratic", "cubic"),
)
    def __init__(self, data, interpolation="zero", **kwargs):
        self.data = data

        # All modes except 'zero' interpolate with scipy. Fall back to
        # 'zero' (with a warning) when a value is callable (cannot be
        # interpolated between) or when scipy is not installed.
        needs_scipy = ("linear", "nearest", "slinear", "quadratic", "cubic")
        if interpolation in needs_scipy:
            self.sp_interpolate = None
            if any(callable(val) for val in self.data.values()):
                warnings.warn(
                    f"'{interpolation}' interpolation cannot be applied because "
                    "a callable was supplied for some piece of the "
                    "function. Using 'zero' interpolation instead."
                )
                interpolation = "zero"
            else:
                try:
                    import scipy.interpolate  # pylint: disable=import-outside-toplevel

                    self.sp_interpolate = scipy.interpolate
                except ImportError:
                    warnings.warn(
                        f"'{interpolation}' interpolation cannot be applied because "
                        "scipy is not installed. Using 'zero' "
                        "interpolation instead."
                    )
                    interpolation = "zero"
        # NOTE(review): when interpolation == "zero" from the start,
        # self.sp_interpolate is never assigned; make_step only reads it on
        # the non-'zero' path, which always sets it — confirm before reuse.
        self.interpolation = interpolation

        # size_out inspects the (already validated) data dict.
        super().__init__(default_size_in=0, default_size_out=self.size_out, **kwargs)
@property
def size_out(self):
time, value = next(iter(self.data.items()))
value = np.ravel(value(time)) if callable(value) else value
return value.size
def make_step(self, shape_in, shape_out, dt, rng, state):
tp, yp = zip(*sorted(self.data.items()))
assert shape_in == (0,)
assert shape_out == (self.size_out,)
if self.interpolation == "zero":
def step_piecewise(t):
ti = clip(np.searchsorted(tp, t + 0.5 * dt) - 1, -1, len(yp) - 1)
if ti == -1:
return np.zeros(shape_out)
return np.ravel(yp[ti](t)) if callable(yp[ti]) else yp[ti]
else:
assert self.sp_interpolate is not None
if self.interpolation == "cubic" and 0 not in tp:
warnings.warn(
"'cubic' interpolation may fail if data not specified for t=0.0"
)
f = self.sp_interpolate.interp1d(
tp,
yp,
axis=0,
kind=self.interpolation,
bounds_error=False,
fill_value=0.0,
)
def step_piecewise(t):
return np.ravel(f(t))
return step_piecewise | nengo/processes.py | import warnings
import numpy as np
from nengo.base import Process
from nengo.dists import DistributionParam, Gaussian
from nengo.exceptions import ValidationError
from nengo.params import BoolParam, DictParam, EnumParam, NdarrayParam, NumberParam
from nengo.synapses import LinearFilter, Lowpass, SynapseParam
from nengo.utils.numpy import array_hash, clip, is_number, rfftfreq
class WhiteNoise(Process):
"""Full-spectrum white noise process.
Parameters
----------
dist : Distribution, optional
The distribution from which to draw samples.
scale : bool, optional
Whether to scale the white noise for integration. Integrating white
noise requires using a time constant of ``sqrt(dt)`` instead of ``dt``
on the noise term [1]_, to ensure the magnitude of the integrated
noise does not change with ``dt``.
seed : int, optional
Random number seed. Ensures noise will be the same each run.
References
----------
.. [1] <NAME>. (1996) Exact numerical simulation of the Ornstein-
Uhlenbeck process and its integral. Phys. Rev. E 54, pp. 2084-91.
"""
dist = DistributionParam("dist")
scale = BoolParam("scale")
def __init__(self, dist=Gaussian(mean=0, std=1), scale=True, **kwargs):
super().__init__(default_size_in=0, **kwargs)
self.dist = dist
self.scale = scale
def make_step(self, shape_in, shape_out, dt, rng, state):
assert shape_in == (0,)
assert len(shape_out) == 1
dist = self.dist
scale = self.scale
alpha = 1.0 / np.sqrt(dt)
# ^ need sqrt(dt) when integrating, so divide by sqrt(dt) here,
# since dt / sqrt(dt) = sqrt(dt).
def step_whitenoise(_):
x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
return alpha * x if scale else x
return step_whitenoise
class FilteredNoise(Process):
"""Filtered white noise process.
This process takes white noise and filters it using the provided synapse.
Parameters
----------
synapse : Synapse, optional
The synapse to use to filter the noise.
dist : Distribution, optional
The distribution used to generate the white noise.
scale : bool, optional
Whether to scale the white noise for integration, making the output
signal invariant to ``dt``.
seed : int, optional
Random number seed. Ensures noise will be the same each run.
"""
synapse = SynapseParam("synapse")
dist = DistributionParam("dist")
scale = BoolParam("scale")
def __init__(
self,
synapse=Lowpass(tau=0.005),
dist=Gaussian(mean=0, std=1),
scale=True,
**kwargs,
):
super().__init__(default_size_in=0, **kwargs)
self.synapse = synapse
self.dist = dist
self.scale = scale
def make_state(self, shape_in, shape_out, dt, dtype=None):
return self.synapse.make_state(shape_out, shape_out, dt, dtype=dtype)
def make_step(self, shape_in, shape_out, dt, rng, state):
assert shape_in == (0,)
assert len(shape_out) == 1
dist = self.dist
scale = self.scale
alpha = 1.0 / np.sqrt(dt)
filter_step = self.synapse.make_step(shape_out, shape_out, dt, rng, state)
def step_filterednoise(t):
x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
if scale:
x *= alpha
return filter_step(t, x)
return step_filterednoise
class BrownNoise(FilteredNoise):
"""Brown noise process (aka Brownian noise, red noise, Wiener process).
This process is the integral of white noise.
Parameters
----------
dist : Distribution, optional
The distribution used to generate the white noise.
seed : int, optional
Random number seed. Ensures noise will be the same each run.
"""
def __init__(self, dist=Gaussian(mean=0, std=1), **kwargs):
super().__init__(
synapse=LinearFilter([1], [1, 0], method="euler"), dist=dist, **kwargs
)
class WhiteSignal(Process):
"""An ideal low-pass filtered white noise process.
This signal is created in the frequency domain, and designed to have
exactly equal power at all frequencies below the cut-off frequency,
and no power above the cut-off.
The signal is naturally periodic, so it can be used beyond its period
while still being continuous with continuous derivatives.
Parameters
----------
period : float
A white noise signal with this period will be generated.
Samples will repeat after this duration.
high : float
The cut-off frequency of the low-pass filter, in Hz.
Must not exceed the Nyquist frequency for the simulation
timestep, which is ``0.5 / dt``.
rms : float, optional
The root mean square power of the filtered signal
y0 : float, optional
Align the phase of each output dimension to begin at the value
that is closest (in absolute value) to y0.
seed : int, optional
Random number seed. Ensures noise will be the same each run.
"""
period = NumberParam("period", low=0, low_open=True)
high = NumberParam("high", low=0, low_open=True)
rms = NumberParam("rms", low=0, low_open=True)
y0 = NumberParam("y0", optional=True)
def __init__(self, period, high, rms=0.5, y0=None, **kwargs):
super().__init__(default_size_in=0, **kwargs)
self.period = period
self.high = high
self.rms = rms
self.y0 = y0
if self.high is not None and self.high < 1.0 / self.period:
raise ValidationError(
"Make ``high >= 1. / period`` to produce a non-zero signal",
attr="high",
obj=self,
)
def make_step(self, shape_in, shape_out, dt, rng, state):
assert shape_in == (0,)
nyquist_cutoff = 0.5 / dt
if self.high > nyquist_cutoff:
raise ValidationError(
"High must not exceed the Nyquist frequency "
f"for the given dt ({nyquist_cutoff:0.3f})",
attr="high",
obj=self,
)
n_coefficients = int(np.ceil(self.period / dt / 2.0))
shape = (n_coefficients + 1,) + shape_out
sigma = self.rms * np.sqrt(0.5)
coefficients = 1j * rng.normal(0.0, sigma, size=shape)
coefficients += rng.normal(0.0, sigma, size=shape)
coefficients[0] = 0.0
coefficients[-1].imag = 0.0
set_to_zero = rfftfreq(2 * n_coefficients, d=dt) > self.high
coefficients[set_to_zero] = 0.0
power_correction = np.sqrt(
1.0 - np.sum(set_to_zero, dtype=float) / n_coefficients
)
if power_correction > 0.0:
coefficients /= power_correction
coefficients *= np.sqrt(2 * n_coefficients)
signal = np.fft.irfft(coefficients, axis=0)
if self.y0 is not None:
# Starts each dimension off where it is closest to y0
def shift(x):
offset = np.argmin(abs(self.y0 - x))
return np.roll(x, -offset + 1) # +1 since t starts at dt
signal = np.apply_along_axis(shift, 0, signal)
def step_whitesignal(t):
i = int(round(t / dt))
return signal[i % signal.shape[0]]
return step_whitesignal
class PresentInput(Process):
"""Present a series of inputs, each for the same fixed length of time.
Parameters
----------
inputs : array_like
Inputs to present, where each row is an input. Rows will be flattened.
presentation_time : float
Show each input for this amount of time (in seconds).
"""
inputs = NdarrayParam("inputs", shape=("...",))
presentation_time = NumberParam("presentation_time", low=0, low_open=True)
def __init__(self, inputs, presentation_time, **kwargs):
self.inputs = inputs
self.presentation_time = presentation_time
super().__init__(
default_size_in=0, default_size_out=self.inputs[0].size, **kwargs
)
def make_step(self, shape_in, shape_out, dt, rng, state):
assert shape_in == (0,)
assert shape_out == (self.inputs[0].size,)
n = len(self.inputs)
inputs = self.inputs.reshape(n, -1)
presentation_time = float(self.presentation_time)
def step_presentinput(t):
i = int((t - dt) / presentation_time + 1e-7)
return inputs[i % n]
return step_presentinput
class PiecewiseDataParam(DictParam):
"""Piecewise-specific validation for the data dictionary.
In the `.Piecewise` data dict, the keys are points in time (float) and
values are numerical constants or callables of the same dimensionality.
"""
equatable = True
def coerce(self, instance, data): # pylint: disable=arguments-renamed
data = super().coerce(instance, data)
size_out = None
for time, value in data.items():
if not is_number(time):
raise ValidationError(
f"Keys must be times (floats or ints), not '{type(time).__name__}'",
attr="data",
obj=instance,
)
# figure out the length of this item
if callable(value):
try:
value = np.ravel(value(time))
except Exception as e:
raise ValidationError(
f"callable object for time step {time:0.3f} "
"should return a numerical constant",
attr="data",
obj=instance,
) from e
else:
value = np.ravel(value)
data[time] = value
size = value.size
# make sure this is the same size as previous items
if size != size_out and size_out is not None:
raise ValidationError(
f"time {time} has size {size} instead of {size_out}",
attr="data",
obj=instance,
)
size_out = size
return data
def hashvalue(self, instance):
return hash(
frozenset(tuple((k, array_hash(v)) for k, v in self.data[instance].items()))
)
class Piecewise(Process):
"""A piecewise function with different options for interpolation.
Given an input dictionary of ``{0: 0, 0.5: -1, 0.75: 0.5, 1: 0}``,
this process will emit the numerical values (0, -1, 0.5, 0)
starting at the corresponding time points (0, 0.5, 0.75, 1).
The keys in the input dictionary must be times (float or int).
The values in the dictionary can be floats, lists of floats,
or numpy arrays. All lists or numpy arrays must be of the same length,
as the output shape of the process will be determined by the shape
of the values.
Interpolation on the data points using `scipy.interpolate` is also
supported. The default interpolation is 'zero', which creates a
piecewise function whose values change at the specified time points.
So the above example would be shortcut for:
.. testcode::
def function(t):
if t < 0.5:
return 0
elif t < 0.75:
return -1
elif t < 1:
return 0.5
else:
return 0
For times before the first specified time, an array of zeros (of
the correct length) will be emitted.
This means that the above can be simplified to:
.. testcode::
from nengo.processes import Piecewise
Piecewise({0.5: -1, 0.75: 0.5, 1: 0})
Parameters
----------
data : dict
A dictionary mapping times to the values that should be emitted
at those times. Times must be numbers (ints or floats), while values
can be numbers, lists of numbers, numpy arrays of numbers,
or callables that return any of those options.
interpolation : str, optional
One of 'linear', 'nearest', 'slinear', 'quadratic', 'cubic', or 'zero'.
Specifies how to interpolate between times with specified value.
'zero' creates a plain piecewise function whose values begin at
corresponding time points, while all other options interpolate
as described in `scipy.interpolate`.
Attributes
----------
data : dict
A dictionary mapping times to the values that should be emitted
at those times. Times are numbers (ints or floats), while values
can be numbers, lists of numbers, numpy arrays of numbers,
or callables that return any of those options.
interpolation : str
One of 'linear', 'nearest', 'slinear', 'quadratic', 'cubic', or 'zero'.
Specifies how to interpolate between times with specified value.
'zero' creates a plain piecewise function whose values change at
corresponding time points, while all other options interpolate
as described in `scipy.interpolate`.
Examples
--------
.. testcode::
from nengo.processes import Piecewise
process = Piecewise({0.5: 1, 0.75: -1, 1: 0})
with nengo.Network() as model:
u = nengo.Node(process, size_out=process.default_size_out)
up = nengo.Probe(u)
with nengo.Simulator(model, progress_bar=False) as sim:
sim.run(1.5)
f = sim.data[up]
t = sim.trange()
print(f[t == 0.2])
print(f[t == 0.58])
.. testoutput::
[[ 0.]]
[[ 1.]]
"""
data = PiecewiseDataParam("data", readonly=True)
interpolation = EnumParam(
"interpolation",
values=("zero", "linear", "nearest", "slinear", "quadratic", "cubic"),
)
def __init__(self, data, interpolation="zero", **kwargs):
self.data = data
needs_scipy = ("linear", "nearest", "slinear", "quadratic", "cubic")
if interpolation in needs_scipy:
self.sp_interpolate = None
if any(callable(val) for val in self.data.values()):
warnings.warn(
f"'{interpolation}' interpolation cannot be applied because "
"a callable was supplied for some piece of the "
"function. Using 'zero' interpolation instead."
)
interpolation = "zero"
else:
try:
import scipy.interpolate # pylint: disable=import-outside-toplevel
self.sp_interpolate = scipy.interpolate
except ImportError:
warnings.warn(
f"'{interpolation}' interpolation cannot be applied because "
"scipy is not installed. Using 'zero' "
"interpolation instead."
)
interpolation = "zero"
self.interpolation = interpolation
super().__init__(default_size_in=0, default_size_out=self.size_out, **kwargs)
@property
def size_out(self):
time, value = next(iter(self.data.items()))
value = np.ravel(value(time)) if callable(value) else value
return value.size
def make_step(self, shape_in, shape_out, dt, rng, state):
tp, yp = zip(*sorted(self.data.items()))
assert shape_in == (0,)
assert shape_out == (self.size_out,)
if self.interpolation == "zero":
def step_piecewise(t):
ti = clip(np.searchsorted(tp, t + 0.5 * dt) - 1, -1, len(yp) - 1)
if ti == -1:
return np.zeros(shape_out)
return np.ravel(yp[ti](t)) if callable(yp[ti]) else yp[ti]
else:
assert self.sp_interpolate is not None
if self.interpolation == "cubic" and 0 not in tp:
warnings.warn(
"'cubic' interpolation may fail if data not specified for t=0.0"
)
f = self.sp_interpolate.interp1d(
tp,
yp,
axis=0,
kind=self.interpolation,
bounds_error=False,
fill_value=0.0,
)
def step_piecewise(t):
return np.ravel(f(t))
return step_piecewise | 0.885792 | 0.681584 |
import itertools
from itertools import combinations
import copy
from functools import reduce
from operator import mul
import numpy as np
from qiskit.exceptions import QiskitError
class SpecialPolynomial:
"""Multivariate polynomial with special form.
Maximum degree 3, n Z_2 variables, coefficients in Z_8.
"""
def __init__(self, n_vars):
"""Construct the zero polynomial on n_vars variables."""
# 1 constant term
# n linear terms x_1, ..., x_n
# {n choose 2} quadratic terms x_1x_2, x_1x_3, ..., x_{n-1}x_n
# {n choose 3} cubic terms x_1x_2x_3, ..., x_{n-2}x_{n-1}x_n
# and coefficients in Z_8
if n_vars < 1:
raise QiskitError("n_vars for SpecialPolynomial is too small.")
self.n_vars = n_vars
self.nc2 = int(n_vars * (n_vars - 1) / 2)
self.nc3 = int(n_vars * (n_vars - 1) * (n_vars - 2) / 6)
self.weight_0 = 0
self.weight_1 = np.zeros(n_vars, dtype=np.int8)
self.weight_2 = np.zeros(self.nc2, dtype=np.int8)
self.weight_3 = np.zeros(self.nc3, dtype=np.int8)
def mul_monomial(self, indices):
"""Multiply by a monomial given by indices.
Returns the product.
"""
length = len(indices)
if length >= 4:
raise QiskitError("There is no term with on more than 3 indices.")
indices_arr = np.array(indices)
if (indices_arr < 0).any() and (indices_arr > self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing!")
result = SpecialPolynomial(self.n_vars)
if length == 0:
result = copy.deepcopy(self)
else:
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
new_term = list(set(term).union(set(indices)))
result.set_term(new_term, (result.get_term(new_term) + value) % 8)
return result
def __mul__(self, other):
"""Multiply two polynomials."""
if not isinstance(other, SpecialPolynomial):
other = int(other)
result = SpecialPolynomial(self.n_vars)
if isinstance(other, int):
result.weight_0 = (self.weight_0 * other) % 8
result.weight_1 = (self.weight_1 * other) % 8
result.weight_2 = (self.weight_2 * other) % 8
result.weight_3 = (self.weight_3 * other) % 8
else:
if self.n_vars != other.n_vars:
raise QiskitError("Multiplication on different n_vars.")
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = other.get_term(term)
if value != 0:
temp = copy.deepcopy(self)
temp = temp.mul_monomial(term)
temp = temp * value
result = result + temp
return result
def __rmul__(self, other):
"""Right multiplication.
This operation is commutative.
"""
return self.__mul__(other)
def __add__(self, other):
"""Add two polynomials."""
if not isinstance(other, SpecialPolynomial):
raise QiskitError("Element to add is not a SpecialPolynomial.")
if self.n_vars != other.n_vars:
raise QiskitError("Addition on different n_vars.")
result = SpecialPolynomial(self.n_vars)
result.weight_0 = (self.weight_0 + other.weight_0) % 8
result.weight_1 = (self.weight_1 + other.weight_1) % 8
result.weight_2 = (self.weight_2 + other.weight_2) % 8
result.weight_3 = (self.weight_3 + other.weight_3) % 8
return result
def evaluate(self, xval):
"""Evaluate the multinomial at xval.
if xval is a length n z2 vector, return element of Z8.
if xval is a length n vector of multinomials, return
a multinomial. The multinomials must all be on n vars.
"""
if len(xval) != self.n_vars:
raise QiskitError("Evaluate on wrong number of variables.")
check_int = list(map(lambda x: isinstance(x, int), xval))
check_poly = list(map(lambda x: isinstance(x, SpecialPolynomial), xval))
if False in check_int and False in check_poly:
raise QiskitError("Evaluate on a wrong type.")
is_int = False not in check_int
if not is_int:
if False in [i.n_vars == self.n_vars for i in xval]:
raise QiskitError("Evaluate on incompatible polynomials.")
else:
xval = xval % 2
# Examine each term of this polynomial
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
# Set the initial result and start for each term
if is_int:
result = 0
start = 1
else:
result = SpecialPolynomial(self.n_vars)
start = SpecialPolynomial(self.n_vars)
start.weight_0 = 1
# Compute the new terms and accumulate
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
if value != 0:
newterm = reduce(mul, [xval[j] for j in term], start)
result = result + value * newterm
if isinstance(result, int):
result = result % 8
return result
def set_pj(self, indices):
"""Set to special form polynomial on subset of variables.
p_J(x) := sum_{a subseteq J,|a| neq 0} (-2)^{|a|-1}x^a
"""
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
indices = sorted(indices)
subsets_2 = itertools.combinations(indices, 2)
subsets_3 = itertools.combinations(indices, 3)
self.weight_0 = 0
self.weight_1 = np.zeros(self.n_vars)
self.weight_2 = np.zeros(self.nc2)
self.weight_3 = np.zeros(self.nc3)
for j in indices:
self.set_term([j], 1)
for j in subsets_2:
self.set_term(list(j), 6)
for j in subsets_3:
self.set_term(list(j), 4)
def get_term(self, indices):
"""Get the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
"""
length = len(indices)
if length >= 4:
return 0
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
if length == 0:
return self.weight_0
if length == 1:
return self.weight_1[indices[0]]
if length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars - ((indices[0] + 1) * indices[0]) / 2)
offset_2 = int(indices[1] - indices[0] - 1)
return self.weight_2[offset_1 + offset_2]
# handle length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(
self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6 - offset_1 - offset_2 - offset_3
)
return self.weight_3[offset]
def set_term(self, indices, value):
"""Set the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
The value is reduced modulo 8.
"""
length = len(indices)
if length >= 4:
return
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
value = value % 8
if length == 0:
self.weight_0 = value
elif length == 1:
self.weight_1[indices[0]] = value
elif length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars - ((indices[0] + 1) * indices[0]) / 2)
offset_2 = int(indices[1] - indices[0] - 1)
self.weight_2[offset_1 + offset_2] = value
else: # length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(
self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6
- offset_1
- offset_2
- offset_3
)
self.weight_3[offset] = value
@property
def key(self):
"""Return a string representation."""
tup = (self.weight_0, tuple(self.weight_1), tuple(self.weight_2), tuple(self.weight_3))
return tup
def __eq__(self, x):
"""Test equality."""
return isinstance(x, SpecialPolynomial) and self.key == x.key
def __str__(self):
"""Return formatted string representation."""
out = str(self.weight_0)
for i in range(self.n_vars):
value = self.get_term([i])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i)
for i in range(self.n_vars - 1):
for j in range(i + 1, self.n_vars):
value = self.get_term([i, j])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i) + "*x_" + str(j)
for i in range(self.n_vars - 2):
for j in range(i + 1, self.n_vars - 1):
for k in range(j + 1, self.n_vars):
value = self.get_term([i, j, k])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i) + "*x_" + str(j) + "*x_" + str(k)
return out | qiskit/quantum_info/operators/dihedral/polynomial.py | import itertools
from itertools import combinations
import copy
from functools import reduce
from operator import mul
import numpy as np
from qiskit.exceptions import QiskitError
class SpecialPolynomial:
"""Multivariate polynomial with special form.
Maximum degree 3, n Z_2 variables, coefficients in Z_8.
"""
def __init__(self, n_vars):
"""Construct the zero polynomial on n_vars variables."""
# 1 constant term
# n linear terms x_1, ..., x_n
# {n choose 2} quadratic terms x_1x_2, x_1x_3, ..., x_{n-1}x_n
# {n choose 3} cubic terms x_1x_2x_3, ..., x_{n-2}x_{n-1}x_n
# and coefficients in Z_8
if n_vars < 1:
raise QiskitError("n_vars for SpecialPolynomial is too small.")
self.n_vars = n_vars
self.nc2 = int(n_vars * (n_vars - 1) / 2)
self.nc3 = int(n_vars * (n_vars - 1) * (n_vars - 2) / 6)
self.weight_0 = 0
self.weight_1 = np.zeros(n_vars, dtype=np.int8)
self.weight_2 = np.zeros(self.nc2, dtype=np.int8)
self.weight_3 = np.zeros(self.nc3, dtype=np.int8)
def mul_monomial(self, indices):
"""Multiply by a monomial given by indices.
Returns the product.
"""
length = len(indices)
if length >= 4:
raise QiskitError("There is no term with on more than 3 indices.")
indices_arr = np.array(indices)
if (indices_arr < 0).any() and (indices_arr > self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing!")
result = SpecialPolynomial(self.n_vars)
if length == 0:
result = copy.deepcopy(self)
else:
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
new_term = list(set(term).union(set(indices)))
result.set_term(new_term, (result.get_term(new_term) + value) % 8)
return result
def __mul__(self, other):
"""Multiply two polynomials."""
if not isinstance(other, SpecialPolynomial):
other = int(other)
result = SpecialPolynomial(self.n_vars)
if isinstance(other, int):
result.weight_0 = (self.weight_0 * other) % 8
result.weight_1 = (self.weight_1 * other) % 8
result.weight_2 = (self.weight_2 * other) % 8
result.weight_3 = (self.weight_3 * other) % 8
else:
if self.n_vars != other.n_vars:
raise QiskitError("Multiplication on different n_vars.")
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = other.get_term(term)
if value != 0:
temp = copy.deepcopy(self)
temp = temp.mul_monomial(term)
temp = temp * value
result = result + temp
return result
def __rmul__(self, other):
"""Right multiplication.
This operation is commutative.
"""
return self.__mul__(other)
def __add__(self, other):
"""Add two polynomials."""
if not isinstance(other, SpecialPolynomial):
raise QiskitError("Element to add is not a SpecialPolynomial.")
if self.n_vars != other.n_vars:
raise QiskitError("Addition on different n_vars.")
result = SpecialPolynomial(self.n_vars)
result.weight_0 = (self.weight_0 + other.weight_0) % 8
result.weight_1 = (self.weight_1 + other.weight_1) % 8
result.weight_2 = (self.weight_2 + other.weight_2) % 8
result.weight_3 = (self.weight_3 + other.weight_3) % 8
return result
def evaluate(self, xval):
"""Evaluate the multinomial at xval.
if xval is a length n z2 vector, return element of Z8.
if xval is a length n vector of multinomials, return
a multinomial. The multinomials must all be on n vars.
"""
if len(xval) != self.n_vars:
raise QiskitError("Evaluate on wrong number of variables.")
check_int = list(map(lambda x: isinstance(x, int), xval))
check_poly = list(map(lambda x: isinstance(x, SpecialPolynomial), xval))
if False in check_int and False in check_poly:
raise QiskitError("Evaluate on a wrong type.")
is_int = False not in check_int
if not is_int:
if False in [i.n_vars == self.n_vars for i in xval]:
raise QiskitError("Evaluate on incompatible polynomials.")
else:
xval = xval % 2
# Examine each term of this polynomial
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
# Set the initial result and start for each term
if is_int:
result = 0
start = 1
else:
result = SpecialPolynomial(self.n_vars)
start = SpecialPolynomial(self.n_vars)
start.weight_0 = 1
# Compute the new terms and accumulate
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
if value != 0:
newterm = reduce(mul, [xval[j] for j in term], start)
result = result + value * newterm
if isinstance(result, int):
result = result % 8
return result
def set_pj(self, indices):
"""Set to special form polynomial on subset of variables.
p_J(x) := sum_{a subseteq J,|a| neq 0} (-2)^{|a|-1}x^a
"""
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
indices = sorted(indices)
subsets_2 = itertools.combinations(indices, 2)
subsets_3 = itertools.combinations(indices, 3)
self.weight_0 = 0
self.weight_1 = np.zeros(self.n_vars)
self.weight_2 = np.zeros(self.nc2)
self.weight_3 = np.zeros(self.nc3)
for j in indices:
self.set_term([j], 1)
for j in subsets_2:
self.set_term(list(j), 6)
for j in subsets_3:
self.set_term(list(j), 4)
def get_term(self, indices):
"""Get the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
"""
length = len(indices)
if length >= 4:
return 0
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
if length == 0:
return self.weight_0
if length == 1:
return self.weight_1[indices[0]]
if length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars - ((indices[0] + 1) * indices[0]) / 2)
offset_2 = int(indices[1] - indices[0] - 1)
return self.weight_2[offset_1 + offset_2]
# handle length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(
self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6 - offset_1 - offset_2 - offset_3
)
return self.weight_3[offset]
def set_term(self, indices, value):
"""Set the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
The value is reduced modulo 8.
"""
length = len(indices)
if length >= 4:
return
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
value = value % 8
if length == 0:
self.weight_0 = value
elif length == 1:
self.weight_1[indices[0]] = value
elif length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars - ((indices[0] + 1) * indices[0]) / 2)
offset_2 = int(indices[1] - indices[0] - 1)
self.weight_2[offset_1 + offset_2] = value
else: # length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(
self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6
- offset_1
- offset_2
- offset_3
)
self.weight_3[offset] = value
@property
def key(self):
"""Return a string representation."""
tup = (self.weight_0, tuple(self.weight_1), tuple(self.weight_2), tuple(self.weight_3))
return tup
def __eq__(self, x):
"""Test equality."""
return isinstance(x, SpecialPolynomial) and self.key == x.key
def __str__(self):
"""Return formatted string representation."""
out = str(self.weight_0)
for i in range(self.n_vars):
value = self.get_term([i])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i)
for i in range(self.n_vars - 1):
for j in range(i + 1, self.n_vars):
value = self.get_term([i, j])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i) + "*x_" + str(j)
for i in range(self.n_vars - 2):
for j in range(i + 1, self.n_vars - 1):
for k in range(j + 1, self.n_vars):
value = self.get_term([i, j, k])
if value != 0:
out += " + "
if value != 1:
out += str(value) + "*"
out += "x_" + str(i) + "*x_" + str(j) + "*x_" + str(k)
return out | 0.838051 | 0.397471 |
# Multiplayer Asteroids-style game built on cocos2d / pyglet (Python 2).
import cocos
import pyglet
import random
import socket
import math
# Seed the PRNG once at import time so asteroid/player placement varies
# between runs.
random.seed()
class UILayer(cocos.layer.Layer):
    """Heads-up display layer showing the local player's remaining lives."""

    lives_remaining_legend = 'Lives: '

    def __init__(self):
        """Create the lives label near the top-left of the window."""
        super( UILayer, self ).__init__()
        win_width, win_height = cocos.director.director.get_window_size()
        self.lives_remaining_label = cocos.text.Label(
            UILayer.lives_remaining_legend + str(0),
            font_name = 'Arial',
            font_size = 20,
            anchor_x = 'center',
            anchor_y = 'center',
            color = (255, 255, 255, 255))
        self.lives_remaining_label.position = (win_width * 0.1,
                                               win_height * 0.95)
        self.add(self.lives_remaining_label, z=10)

    def updateLivesRemaining(self, number):
        """Refresh the label text to show `number` lives."""
        new_text = UILayer.lives_remaining_legend + str(number)
        self.lives_remaining_label.element.text = new_text
class KeyboardInputLayer(cocos.layer.Layer):
    """Layer that tracks which keyboard keys are currently held down."""

    # cocos only delivers input events to layers that set this flag.
    # Omitting it silently disables all keyboard handling.
    is_event_handler = True

    def __init__(self):
        """Start with no keys pressed."""
        super(KeyboardInputLayer, self).__init__()
        self.keys_being_pressed = set()

    def on_key_press(self, key, modifiers):
        """Record `key` as currently held down."""
        self.keys_being_pressed.add(key)

    def on_key_release(self, key, modifiers):
        """Forget `key`; a release for an untracked key is ignored."""
        self.keys_being_pressed.discard(key)
class PlayLayer(KeyboardInputLayer):
    """Main gameplay layer: background, sprite batch, players, asteroids.

    Player sprites are keyed by player_id; ownID identifies the local
    machine's player (presumably player_ids are host IP strings — the
    remote side is not visible here, TODO confirm).
    """
    background_image_name = 'nebula_1024x768.png'
    background_image = pyglet.resource.image(background_image_name)
    # Identify the local player by this host's IP address.
    ownID = socket.gethostbyname(socket.gethostname())
    def __init__( self ):
        """Create the sprite batch and the centered background sprite."""
        super( PlayLayer, self ).__init__()
        self.players = {}  # maps player_id -> Player sprite
        self.batch = cocos.batch.BatchNode()
        self.add(self.batch)
        width, height = cocos.director.director.get_window_size()
        self.add(
            cocos.sprite.Sprite(PlayLayer.background_image_name,
                                position=(width * 0.5, height * 0.5)),
            z=-1)
    def updateLivesRemaining(self, number):
        """Forward the lives count to the UILayer ancestor, if any."""
        ui_layer = self.get_ancestor(UILayer)
        if ui_layer:
            ui_layer.updateLivesRemaining(number)
    def addExplosion(self, position):
        """Spawn an explosion animation at `position`."""
        new_explosion = Explosion()
        new_explosion.position = position
        self.batch.add(new_explosion)
        new_explosion.start()
    def addAsteroids(self, count=8):
        """Create `count` asteroids and add them to the batch."""
        for i in range(0, count):
            new_asteroid = Asteroid()
            self.batch.add(new_asteroid)
            new_asteroid.start()
    def addPlayer(self, player_id):
        """Add a new player, or respawn an existing one, for `player_id`.

        Remote players (ids other than ownID) are tinted yellow; for the
        local player the lives display is refreshed instead.
        """
        new_player = None
        if player_id in self.players:
            # Existing player: reposition and re-register for collisions.
            new_player = self.players[player_id]
            new_player.setRandomPosition()
            new_player.onRespawn()
        else:
            new_player = Player(player_id)
            self.players[player_id] = new_player
            new_player.start()
            self.batch.add(new_player)
        new_player.motion_vector = (0, 0)
        new_player.shouldDie = False
        if PlayLayer.ownID != player_id:
            new_player.color = (255, 255, 0)
        else:
            self.updateLivesRemaining(new_player.lives_remaining)
    def fireBulletForPlayer(self, player_id):
        """Spawn a bullet just ahead of the player's ship, heading forward."""
        if player_id in self.players:
            # Don't shoot if not in the game at the moment
            player = self.players[player_id]
            if not player.shouldDie:
                dx, dy = player.getHeadingVector()
                x, y = player.position
                x += dx * player.radius * 1.5  # Move bullet out of ship
                y += dy * player.radius * 1.5  # Move bullet out of ship
                new_bullet = Bullet(position=(x, y),
                                    motion_vector=(dx, dy))
                self.batch.add(new_bullet)
                new_bullet.start()
        else:
            print 'Error: fire for unknown player,', player_id
    def rotatePlayer(self, player_id, deg):
        """Rotate the player's ship by `deg` degrees over 0.05 seconds."""
        if player_id in self.players:
            player = self.players[player_id]
            player.do(cocos.actions.RotateBy(deg, 0.05))
    def thrustPlayer(self, player_id):
        """Apply forward thrust to the given player's ship."""
        if player_id in self.players:
            player = self.players[player_id]
            player.thrust()
class GameSpriteAction(cocos.actions.Action):
    """Forwards each frame's step(dt) call to the action's target.

    Attaching one of these to a GameSprite gives the sprite a hook
    that runs its own logic every time the display updates.
    """

    def step(self, dt):
        """Delegate the per-frame update to the target sprite."""
        self.target.step(dt)
class GameSprite(cocos.sprite.Sprite):
    """Base sprite shared by almost every game object.

    Each instance has:
      - a unique identifier (self.id), registered in live_instances
      - a motion vector describing its movement per unit time
      - a collision radius used for circle-vs-circle hit tests
      - a shouldDie flag signalling removal from the game

    Instances move automatically according to their motion vector, and
    positions "wrap": an instance moving off one screen edge reappears
    at the opposite edge.
    """
    next_unique_id = 1  # next id handed out when none is supplied
    live_instances = {} # map unique_id to instance with that id
    @staticmethod
    def handleCollisions():
        """Run pairwise circle hit tests over all live instances.

        Both members of a colliding pair receive onCollision(), so each
        object decides its own fate independently.
        """
        objects = GameSprite.live_instances.values()
        for object in objects:
            for other_object in objects:
                if other_object.id != object.id and \
                   object.isHitByCircle(other_object.position,\
                                        other_object.radius):
                    object.onCollision(other_object)
    @staticmethod
    def getInstances(klass):
        """Return all live instances that are instances of `klass`."""
        result = []
        for object in GameSprite.live_instances.values():
            if isinstance(object, klass):
                result.append(object)
        return result
    def __init__(self, image, id=None, position=(0, 0), rotation=0,
                 scale=1, opacity = 255, color=(255, 255, 255),
                 anchor=None):
        """Create the sprite and register it in live_instances.

        If `id` is falsy, a fresh unique id is assigned automatically.
        NOTE(review): next_unique_id is incremented even when an explicit
        id is supplied — harmless (ids stay unique) but wasteful.
        """
        super( GameSprite, self ).__init__( image, position, rotation,
                                            scale, opacity, color, anchor)
        if not id:
            self.id = GameSprite.next_unique_id
        else:
            self.id = id
        GameSprite.next_unique_id += 1
        self.motion_vector = (0,0) # No motion by default
        self.radius = 3 # Small default radius
        self.shouldDie = False
        self.type = '_'  # single-character type tag used in getInfo()
        GameSprite.live_instances[self.id] = self
    def start(self):
        """Begin per-frame updates by attaching a GameSpriteAction."""
        self.do(GameSpriteAction())
    def getInfo(self):
        """Return a serializable dict snapshot of this sprite's state."""
        x, y = self.position
        rot_deg = self.rotation
        return {'id':self.id,
                'type':self.type,
                'pos':(int(x), int(y)),
                'rot_deg': int(rot_deg),
                'shouldDie' : self.shouldDie }
    def updateWithInfo(self, info):
        """Apply a state dict of the shape produced by getInfo()."""
        self.position = info['pos']
        self.rotation = info['rot_deg']
        self.shouldDie = info['shouldDie']
    def getVelocityMultiplier(self):
        """ Return a multiplier for use when calculating motion per
        unit time.
        """
        return 1
    def setRandomPosition(self):
        # Place the sprite uniformly at random within the window.
        width, height = cocos.director.director.get_window_size()
        self.position = (random.random() * width,
                         random.random() * height)
    def markForDeath(self):
        """Flag the sprite for removal on its next step()."""
        self.shouldDie = True
    def isHitByCircle(self, center, radius):
        """ Returns True if and only if the receiver's circle
        calculated using the receiver's position and radius
        overlaps the circle calculated using the center and radius
        arguments to this method.
        """
        # Compare squared distances to avoid a sqrt.
        total_radius = self.radius + radius
        total_radius_squared = total_radius * total_radius
        x, y = self.position
        delta_x = center[0] - x
        delta_y = center[1] - y
        distance_squared = delta_x * delta_x + delta_y * delta_y
        return distance_squared < total_radius_squared
    def processCollision(self, other_object):
        """Show an explosion; return True meaning the hit is fatal."""
        playLayer = self.get_ancestor(PlayLayer)
        if playLayer:
            playLayer.addExplosion(self.position)
        return True
    def onRespawn(self):
        """ Adds the receiver back into collision detection set after
        receiver has respawned """
        GameSprite.live_instances[self.id] = self
        self.do(GameSpriteAction())
    def onCollision(self, other_object):
        """Mark for death if processCollision says the hit is fatal."""
        if self.processCollision(other_object):
            self.markForDeath()
    def step(self, dt):
        """ Perform any updates that should occur after dt seconds
        from the last update.
        """
        if self.shouldDie:
            # Remove from the scene graph and the collision registry.
            self.stop()
            self.kill()
            if self.id in GameSprite.live_instances:
                del GameSprite.live_instances[self.id];
        else:
            width, height = cocos.director.director.get_window_size()
            dx = self.motion_vector[0] * self.getVelocityMultiplier()
            dy = self.motion_vector[1] * self.getVelocityMultiplier()
            x = self.position[0] + dx * dt
            y = self.position[1] + dy * dt
            # Wrap around the screen edges.
            if x < 0: x += width
            elif x > width: x -= width
            if y < 0: y += height
            elif y > height: y -= height
            self.position = (x, y)
class Asteroid(GameSprite):
    """An animated asteroid that drifts in a random direction.

    Asteroids ignore collisions with other asteroids but explode on
    contact with anything else.
    """
    # Don't call class variable 'image' because it masks pyglet
    # Sprite class variable and accessors.
    sprite_image = pyglet.resource.image('asteroidSpriteSheet.png')
    grid = pyglet.image.ImageGrid(sprite_image, 6, 5)
    textures = pyglet.image.TextureGrid(grid)
    textures_list = textures[:]
    frame_period = 0.05  # seconds per animation frame
    animation = pyglet.image.Animation.from_image_sequence(
        textures_list, frame_period, loop=True)
    velocity_multiplier = 200
    def __init__(self, id=None, position=(0, 0), rotation=0, scale=2,
                 opacity = 255, color=(255, 255, 255), anchor = None):
        """Create an asteroid at a random position with a random drift.

        BUGFIX: the `scale` argument used to be accepted but silently
        ignored (a literal 2 was passed to the superclass).  It is now
        honored; the default is 2, matching the previous behavior for
        callers that did not pass a scale.
        """
        image = Asteroid.animation
        super( Asteroid, self ).__init__(image, id, position, rotation,
                                         scale, opacity, color, anchor)
        self.motion_vector = (self.getRandomMotionMagnitude(),
                              self.getRandomMotionMagnitude())
        self.setRandomPosition()
        self.type = 'a'
        # Collision radius is a bit less than half the sprite width.
        self.radius = self.image.get_max_width() * self.scale * 0.4
    def getVelocityMultiplier(self):
        """Asteroids move faster than the GameSprite default of 1."""
        return Asteroid.velocity_multiplier
    def getRandomMotionMagnitude(self):
        """Return a random motion component in [-0.5, 0.5)."""
        return random.random() - 0.5
    def processCollision(self, other_object):
        """ Overrides inherited version to prevent asteroid collisions
        with other asteroids. """
        result = not isinstance(other_object, Asteroid)
        if result:
            # Let inherited behavior rule the day
            super( Asteroid, self ).processCollision(other_object)
        return result
class Player(GameSprite):
    """A player's ship, identified across the game by player_id."""
    # Don't call class variable 'image' because it masks pyglet
    # Sprite class variable and accessors.
    ship_image = pyglet.resource.image('ship.png')
    ship_foreward_image = pyglet.resource.image('shipForward.png')
    thrust_multiplier = 200
    max_velocity_squared = 500 * 500  # speed cap, compared against |v|^2
    initial_lives = 3
    def __init__( self, player_id=None, id=None, position=(0, 0),
            rotation=0, scale=1, opacity = 255, color=(255, 255, 255),
            anchor=None):
        """Create a ship at a random position with a random heading."""
        num_lives=Player.initial_lives
        image = Player.ship_image
        super( Player, self ).__init__(image, id, position, rotation,
                                       scale, opacity, color, anchor)
        self.player_id = player_id
        self.is_thrusting = False
        self.setRandomPosition()
        self.rotation = random.random() * 360.0 # deg.
        self.type = 'p'
        self.radius = \
            self.image.width * self.scale * 0.4 # bit less than half
        self.lives_remaining = num_lives
    def getInfo(self):
        # Extend the base state dict with player-specific fields.
        result = super( Player, self ).getInfo()
        result['player_id'] = self.player_id
        result['is_thrusting'] = self.is_thrusting
        result['lives'] = self.lives_remaining
        return result
    def updateWithInfo(self, info):
        """Apply a state dict; log (but tolerate) missing fields."""
        super( Player, self ).updateWithInfo(info)
        if 'player_id' in info: self.player_id = info['player_id']
        else: print 'Error: ', info
        if 'is_thrusting' in info: self.is_thrusting = info['is_thrusting']
        else: print 'Error: ', info
        if 'lives' in info: self.lives_remaining = info['lives']
        else: print 'Error: ', info
    def thrust(self):
        """Accelerate along the current heading, capping the speed."""
        dx, dy = self.getHeadingVector()
        vx, vy = self.motion_vector
        vx += dx
        vy += dy
        # Limit magnitude of velocity
        if Player.max_velocity_squared < (vx * vx + vy * vy):
            vx *= 0.8
            vy *= 0.8
        self.motion_vector = (vx, vy)
        self.is_thrusting = True
    def step(self, dt):
        """Move as usual; show the thrust sprite for one frame after thrust()."""
        super( Player, self ).step(dt)
        if self.is_thrusting:
            self.image = Player.ship_foreward_image
            self.is_thrusting = False
        else:
            self.image = Player.ship_image
    def getHeadingVector(self):
        """Return a unit (dx, dy) vector for the ship's current rotation.

        Rotation is negated because sprite rotation is clockwise while
        math angles are counter-clockwise.
        """
        rad = math.radians(-self.rotation)
        return (math.cos(rad), math.sin(rad))
    def processCollision(self, other_object):
        """Lose a life; if any remain, schedule a respawn in 5 seconds."""
        self.lives_remaining -= 1
        if 0 <= self.lives_remaining:
            playLayer = self.get_ancestor(PlayLayer)
            if playLayer:
                # CallFuncS passes the action's target (the PlayLayer)
                # as the first argument to addPlayer.
                playLayer.do(cocos.actions.Delay(5) + \
                             cocos.actions.CallFuncS(\
                                 PlayLayer.addPlayer, self.player_id))
        return super( Player, self ).processCollision(other_object)
class Bullet(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
bullet_image = pyglet.resource.image('bullet.png')
lifetime = 1.0 #second
speed = 600
def __init__( self, id=None, position=(0,0), motion_vector=(0,0),
rotation=0, scale=1, opacity = 255,
color=(255, 255, 255), anchor=None ):
""" """
super( Bullet, self ).__init__(Bullet.bullet_image, id,
position, rotation, scale, opacity, color, anchor)
self.motion_vector = motion_vector
self.type = 'b'
# remove bullet from its parent at end of its lifetime
self.do(cocos.actions.Delay(Bullet.lifetime) +\
cocos.actions.CallFuncS(Bullet.markForDeath))
def getVelocityMultiplier(self):
""" """
return Bullet.speed
def processCollision(self, other_object):
""" Overrides inherited version to prevent bullet collisions
with other bullets and prevent bullets from exploding. """
return not isinstance(other_object, Bullet)
class Explosion(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
small_image = pyglet.resource.image('explosionSmall.png')
small_grid = pyglet.image.ImageGrid(small_image, 5, 5)
small_textures = pyglet.image.TextureGrid(small_grid)
small_textures_list = small_textures[:]
frame_period = 0.05
small_animation = pyglet.image.Animation.from_image_sequence(
small_textures_list, frame_period, loop=True)
duration = len(small_textures_list) * frame_period
default_opacity = 128
def __init__(self, id=None, position=(0, 0), rotation=0, scale=1,
opacity = 255, color=(255, 255, 255), anchor = None):
""" """
image = Explosion.small_animation
opacity = Explosion.default_opacity
scale = 2
super( Explosion, self ).__init__(image, id, position,
rotation, scale, opacity, color, anchor)
self.type = 'e'
self.do(cocos.actions.Delay(Explosion.duration) + \
cocos.actions.CallFuncS(Explosion.markForDeath))
def processCollision(self, other_object):
""" Overrides inherited version to prevent collisions
with anything. """
return False
if __name__ == "__main__":
assert False | CommonLayers.py | import cocos
import pyglet
import random
import socket
import math
random.seed()
class UILayer(cocos.layer.Layer):
"""
"""
lives_remaining_legend = 'Lives: '
def __init__(self):
""" """
super( UILayer, self ).__init__()
width, height = cocos.director.director.get_window_size()
labelPos = (width * 0.1, height * 0.95)
self.lives_remaining_label = cocos.text.Label(
UILayer.lives_remaining_legend + str(0),
font_name = 'Arial',
font_size = 20,
anchor_x = 'center',
anchor_y = 'center',
color = (255, 255, 255, 255))
self.lives_remaining_label.position = labelPos
self.add(self.lives_remaining_label, z=10)
def updateLivesRemaining(self, number):
""" """
self.lives_remaining_label.element.text = \
UILayer.lives_remaining_legend + str(number)
class KeyboardInputLayer(cocos.layer.Layer):
"""
"""
# You need to tell cocos that your layer is for handling input!
# This is key (no pun intended)!
# If you don't include this you'll be scratching your head wondering why your game isn't accepting input
is_event_handler = True
def __init__(self):
""" """
super(KeyboardInputLayer, self).__init__()
self.keys_being_pressed = set()
def on_key_press(self, key, modifiers):
""" """
self.keys_being_pressed.add(key)
def on_key_release(self, key, modifiers):
""" """
if key in self.keys_being_pressed:
self.keys_being_pressed.remove(key)
class PlayLayer(KeyboardInputLayer):
"""
"""
background_image_name = 'nebula_1024x768.png'
background_image = pyglet.resource.image(background_image_name)
ownID = socket.gethostbyname(socket.gethostname())
def __init__( self ):
""" """
super( PlayLayer, self ).__init__()
self.players = {}
self.batch = cocos.batch.BatchNode()
self.add(self.batch)
width, height = cocos.director.director.get_window_size()
self.add(
cocos.sprite.Sprite(PlayLayer.background_image_name,
position=(width * 0.5, height * 0.5)),
z=-1)
def updateLivesRemaining(self, number):
""" """
ui_layer = self.get_ancestor(UILayer)
if ui_layer:
ui_layer.updateLivesRemaining(number)
def addExplosion(self, position):
""" """
new_explosion = Explosion()
new_explosion.position = position
self.batch.add(new_explosion)
new_explosion.start()
def addAsteroids(self, count=8):
""" """
for i in range(0, count):
new_asteroid = Asteroid()
self.batch.add(new_asteroid)
new_asteroid.start()
def addPlayer(self, player_id):
""" """
new_player = None
if player_id in self.players:
new_player = self.players[player_id]
new_player.setRandomPosition()
new_player.onRespawn()
#print 'respawning ', player_id
else:
new_player = Player(player_id)
self.players[player_id] = new_player
new_player.start()
self.batch.add(new_player)
new_player.motion_vector = (0, 0)
new_player.shouldDie = False
if PlayLayer.ownID != player_id:
new_player.color = (255, 255, 0)
else:
self.updateLivesRemaining(new_player.lives_remaining)
def fireBulletForPlayer(self, player_id):
""" """
if player_id in self.players:
# Don't shoot if not in teh game at the moment
player = self.players[player_id]
if not player.shouldDie:
dx, dy = player.getHeadingVector()
x, y = player.position
x += dx * player.radius * 1.5 # Move bullet out of ship
y += dy * player.radius * 1.5 # Move bullet out of ship
new_bullet = Bullet(position=(x, y),
motion_vector=(dx, dy))
self.batch.add(new_bullet)
new_bullet.start()
else:
print 'Error: fire for unknown player,', player_id
def rotatePlayer(self, player_id, deg):
""" """
if player_id in self.players:
player = self.players[player_id]
player.do(cocos.actions.RotateBy(deg, 0.05))
def thrustPlayer(self, player_id):
""" """
if player_id in self.players:
player = self.players[player_id]
player.thrust()
class GameSpriteAction(cocos.actions.Action):
"""
This class exists to forward the step(dt) method call to the
receiver's target object. It is a hook that enables targets to
perform logic each time the display is updated.
"""
def step(self, dt):
""" """
self.target.step(dt)
class GameSprite(cocos.sprite.Sprite):
"""
This class exists to provide several features shared by almost
every game object.
Each instance has the following:
A unique identifier
A motion vector to describe how the instances should move.
A radius used to detect collisions with other GameSprite
instances
A flag, shouldDie, used to signal when the instance should be
removed from the game.
Instances automatically move according to each instance's
motion vector. Positions "wrap" meaning that if an instance moves
off the screen, it reappears on the opposite side of the screen.
"""
next_unique_id = 1
live_instances = {} # map unique_id to instance with that id
@staticmethod
def handleCollisions():
""" """
objects = GameSprite.live_instances.values()
for object in objects:
for other_object in objects:
if other_object.id != object.id and \
object.isHitByCircle(other_object.position,\
other_object.radius):
object.onCollision(other_object)
@staticmethod
def getInstances(klass):
""" """
result = []
for object in GameSprite.live_instances.values():
if isinstance(object, klass):
result.append(object)
return result
def __init__(self, image, id=None, position=(0, 0), rotation=0,
scale=1, opacity = 255, color=(255, 255, 255),
anchor=None):
""" """
super( GameSprite, self ).__init__( image, position, rotation,
scale, opacity, color, anchor)
if not id:
self.id = GameSprite.next_unique_id
else:
self.id = id
GameSprite.next_unique_id += 1
self.motion_vector = (0,0) # No motion by default
self.radius = 3 # Small default radius
self.shouldDie = False
self.type = '_'
GameSprite.live_instances[self.id] = self
def start(self):
""" """
self.do(GameSpriteAction())
def getInfo(self):
""" """
x, y = self.position
rot_deg = self.rotation
return {'id':self.id,
'type':self.type,
'pos':(int(x), int(y)),
'rot_deg': int(rot_deg),
'shouldDie' : self.shouldDie }
def updateWithInfo(self, info):
""" """
self.position = info['pos']
self.rotation = info['rot_deg']
self.shouldDie = info['shouldDie']
def getVelocityMultiplier(self):
""" Return a multiplier for use when calculating motion per
unit time.
"""
return 1
def setRandomPosition(self):
width, height = cocos.director.director.get_window_size()
self.position = (random.random() * width,
random.random() * height)
def markForDeath(self):
""" """
self.shouldDie = True
def isHitByCircle(self, center, radius):
""" Returns True if and only if the receiver's circle
calculated using the receiver's position and radius
overlaps the circle calculated using the center and radius
arguments to this method.
"""
total_radius = self.radius + radius
total_radius_squared = total_radius * total_radius
x, y = self.position
delta_x = center[0] - x
delta_y = center[1] - y
distance_squared = delta_x * delta_x + delta_y * delta_y
return distance_squared < total_radius_squared
def processCollision(self, other_object):
""" """
playLayer = self.get_ancestor(PlayLayer)
if playLayer:
playLayer.addExplosion(self.position)
return True
def onRespawn(self):
""" Adds the receiver back into collision detection set after
receiver has respawned """
GameSprite.live_instances[self.id] = self
self.do(GameSpriteAction())
def onCollision(self, other_object):
""" """
if self.processCollision(other_object):
self.markForDeath()
def step(self, dt):
""" Perform any updates that should occur after dt seconds
from the last update.
"""
if self.shouldDie:
self.stop()
self.kill()
if self.id in GameSprite.live_instances:
del GameSprite.live_instances[self.id];
else:
width, height = cocos.director.director.get_window_size()
dx = self.motion_vector[0] * self.getVelocityMultiplier()
dy = self.motion_vector[1] * self.getVelocityMultiplier()
x = self.position[0] + dx * dt
y = self.position[1] + dy * dt
if x < 0: x += width
elif x > width: x -= width
if y < 0: y += height
elif y > height: y -= height
self.position = (x, y)
class Asteroid(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
sprite_image = pyglet.resource.image('asteroidSpriteSheet.png')
grid = pyglet.image.ImageGrid(sprite_image, 6, 5)
textures = pyglet.image.TextureGrid(grid)
textures_list = textures[:]
frame_period = 0.05
animation = pyglet.image.Animation.from_image_sequence(
textures_list, frame_period, loop=True)
velocity_multiplier = 200
def __init__(self, id=None, position=(0, 0), rotation=0, scale=1,
opacity = 255, color=(255, 255, 255), anchor = None):
""" """
image = Asteroid.animation
super( Asteroid, self ).__init__(image, id, position, rotation,
2, opacity, color, anchor)
#self.scale = 2
self.motion_vector = (self.getRandomMotionMagnitude(),
self.getRandomMotionMagnitude())
self.setRandomPosition()
self.type = 'a'
# Radius is a bit less than half width
self.radius = self.image.get_max_width() * self.scale * 0.4
def getVelocityMultiplier(self):
""" """
return Asteroid.velocity_multiplier
def getRandomMotionMagnitude(self):
""" """
return random.random() - 0.5
def processCollision(self, other_object):
""" Overrides inherited version to prevent asteroid collisions
with other asteroids. """
result = not isinstance(other_object, Asteroid)
if result:
# Let inherited behavior rule the day
super( Asteroid, self ).processCollision(other_object)
return result
class Player(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
ship_image = pyglet.resource.image('ship.png')
ship_foreward_image = pyglet.resource.image('shipForward.png')
thrust_multiplier = 200
max_velocity_squared = 500 * 500
initial_lives = 3
def __init__( self, player_id=None, id=None, position=(0, 0),
rotation=0, scale=1, opacity = 255, color=(255, 255, 255),
anchor=None):
""" """
num_lives=Player.initial_lives
image = Player.ship_image
super( Player, self ).__init__(image, id, position, rotation,
scale, opacity, color, anchor)
self.player_id = player_id
self.is_thrusting = False
self.setRandomPosition()
self.rotation = random.random() * 360.0 # deg.
self.type = 'p'
self.radius = \
self.image.width * self.scale * 0.4 # bit less than half
self.lives_remaining = num_lives
def getInfo(self):
result = super( Player, self ).getInfo()
result['player_id'] = self.player_id
result['is_thrusting'] = self.is_thrusting
result['lives'] = self.lives_remaining
return result
def updateWithInfo(self, info):
""" """
super( Player, self ).updateWithInfo(info)
if 'player_id' in info: self.player_id = info['player_id']
else: print 'Error: ', info
if 'is_thrusting' in info: self.is_thrusting = info['is_thrusting']
else: print 'Error: ', info
if 'lives' in info: self.lives_remaining = info['lives']
else: print 'Error: ', info
def thrust(self):
""" """
dx, dy = self.getHeadingVector()
vx, vy = self.motion_vector
vx += dx
vy += dy
# Limit magnitude of velocity
if Player.max_velocity_squared < (vx * vx + vy * vy):
vx *= 0.8
vy *= 0.8
self.motion_vector = (vx, vy)
self.is_thrusting = True
def step(self, dt):
""" """
super( Player, self ).step(dt)
if self.is_thrusting:
self.image = Player.ship_foreward_image
self.is_thrusting = False
else:
self.image = Player.ship_image
def getHeadingVector(self):
""" """
rad = math.radians(-self.rotation)
return (math.cos(rad), math.sin(rad))
def processCollision(self, other_object):
""" """
self.lives_remaining -= 1
if 0 <= self.lives_remaining:
playLayer = self.get_ancestor(PlayLayer)
if playLayer:
playLayer.do(cocos.actions.Delay(5) + \
cocos.actions.CallFuncS(\
PlayLayer.addPlayer, self.player_id))
return super( Player, self ).processCollision(other_object)
class Bullet(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
bullet_image = pyglet.resource.image('bullet.png')
lifetime = 1.0 #second
speed = 600
def __init__( self, id=None, position=(0,0), motion_vector=(0,0),
rotation=0, scale=1, opacity = 255,
color=(255, 255, 255), anchor=None ):
""" """
super( Bullet, self ).__init__(Bullet.bullet_image, id,
position, rotation, scale, opacity, color, anchor)
self.motion_vector = motion_vector
self.type = 'b'
# remove bullet from its parent at end of its lifetime
self.do(cocos.actions.Delay(Bullet.lifetime) +\
cocos.actions.CallFuncS(Bullet.markForDeath))
def getVelocityMultiplier(self):
""" """
return Bullet.speed
def processCollision(self, other_object):
""" Overrides inherited version to prevent bullet collisions
with other bullets and prevent bullets from exploding. """
return not isinstance(other_object, Bullet)
class Explosion(GameSprite):
"""
"""
# Don't call calls variable 'image' because it masks pyglet
# Sprite class variable and accessors
small_image = pyglet.resource.image('explosionSmall.png')
small_grid = pyglet.image.ImageGrid(small_image, 5, 5)
small_textures = pyglet.image.TextureGrid(small_grid)
small_textures_list = small_textures[:]
frame_period = 0.05
small_animation = pyglet.image.Animation.from_image_sequence(
small_textures_list, frame_period, loop=True)
duration = len(small_textures_list) * frame_period
default_opacity = 128
def __init__(self, id=None, position=(0, 0), rotation=0, scale=1,
opacity = 255, color=(255, 255, 255), anchor = None):
""" """
image = Explosion.small_animation
opacity = Explosion.default_opacity
scale = 2
super( Explosion, self ).__init__(image, id, position,
rotation, scale, opacity, color, anchor)
self.type = 'e'
self.do(cocos.actions.Delay(Explosion.duration) + \
cocos.actions.CallFuncS(Explosion.markForDeath))
def processCollision(self, other_object):
""" Overrides inherited version to prevent collisions
with anything. """
return False
if __name__ == "__main__":
assert False | 0.512449 | 0.185062 |
import logging
import hmac
import hashlib
from datetime import datetime
import requests
import json
from socketio import AsyncClient
from .const import (
DOMAIN,
VOIPCENTER_API_ENDPOINT,
VOIPCENTER_WS_ENDPOINT
)
_LOGGER = logging.getLogger(__name__)
class VoipcenterApi:
"""
Simple class to implement the Voipcenter API.
Currently only supports getting and updating function indicators.
This should become a more extensive python library uploaded to pypi.
"""
def __init__(self, hass, apiid, key, klantnummer, password, ws_username=None):
self._hass = hass
self._apiid = apiid
self._apikey = key
self._klantnummer = klantnummer
self._hashedpassword = hashlib.sha256(hashlib.md5(password.encode('utf-8')).hexdigest().encode('utf-8')).hexdigest()
self._session = hass.helpers.aiohttp_client.async_get_clientsession()
self._ws_username = ws_username
self._sio = AsyncClient(logger=_LOGGER, engineio_logger=_LOGGER, ssl_verify=False)
async def connect_ws(self):
"""
Connect the websocket and generate events
"""
if self._ws_username is None:
_LOGGER.warning("No websocket username configured, cannot connect")
return
try:
credentials = await self.get_ws_pass(self._ws_username)
except:
_LOGGER.error("No credentials!")
return
if ("username" not in credentials) or ("wskey" not in credentials):
_LOGGER.error(credentials)
return
# connect event callback
async def _async_on_connect():
_LOGGER.info("Websocket connected with sid %s", self._sio.sid)
self._sio.on("connect", _async_on_connect)
# disconnect event callback
async def _async_on_disconnect():
_LOGGER.info("disconnect")
self._sio.on("disconnect", _async_on_disconnect)
# serveroutput event callback
async def _async_on_serveroutput(data):
_LOGGER.debug(data)
_LOGGER.info("Trying to register...")
await self._sio.emit("register", { "username": credentials["username"], "wskey": credentials["wskey"] })
self._sio.on("serveroutput", _async_on_serveroutput)
# Notify event callback
async def _async_on_notify(data):
_LOGGER.debug("Notify received")
_LOGGER.debug(data)
self._hass.bus.fire(DOMAIN, data)
self._sio.on("notify", _async_on_notify)
await self._sio.connect(VOIPCENTER_WS_ENDPOINT)
async def disconnect_ws(self):
"""
Disconnect the websocket
"""
if self._ws_username is None:
_LOGGER.debug("No websocket client")
return
await self._sio.disconnect()
def generate_dig(self, values):
"""
Generate the pass value.
Here we assume that we are using python 3.6 and that the
values of the dictionary are keeping their order.
"""
passtobehashed = ''
for key, value in values.items():
passtobehashed = passtobehashed + key + value
return hmac.new(self._apikey.encode('utf-8'), msg=passtobehashed.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
async def get_fi(self, id):
url = VOIPCENTER_API_ENDPOINT + 'get.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'indicator',
'id' : id,
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.get(url, params=values)
return await r.json(content_type='text/json charset=utf-8')
async def activate_fi(self, id, activate):
url = VOIPCENTER_API_ENDPOINT + 'update.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'indicator',
'id' : id,
'p' : '1' if activate else '0',
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.get(url, params=values)
d = await r.json(content_type='text/json charset=utf-8')
return d['body']['status'] == '1'
async def get_ws_pass(self, ws_username):
url = VOIPCENTER_API_ENDPOINT + 'get.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'wskey',
'id' : ws_username,
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.post(url, data=values)
return await r.json(content_type='text/json charset=utf-8')
def get_device_info(self):
return {
"identifiers": {(DOMAIN, self._klantnummer)},
"name": self._klantnummer,
"model": "unknown",
"sw_version": "1.0.0",
"manufacturer": "voipcenter"
} | custom_components/voipcenter/voipcenter.py | import logging
import hmac
import hashlib
from datetime import datetime
import requests
import json
from socketio import AsyncClient
from .const import (
DOMAIN,
VOIPCENTER_API_ENDPOINT,
VOIPCENTER_WS_ENDPOINT
)
_LOGGER = logging.getLogger(__name__)
class VoipcenterApi:
"""
Simple class to implement the Voipcenter API.
Currently only supports getting and updating function indicators.
This should become a more extensive python library uploaded to pypi.
"""
def __init__(self, hass, apiid, key, klantnummer, password, ws_username=None):
self._hass = hass
self._apiid = apiid
self._apikey = key
self._klantnummer = klantnummer
self._hashedpassword = hashlib.sha256(hashlib.md5(password.encode('utf-8')).hexdigest().encode('utf-8')).hexdigest()
self._session = hass.helpers.aiohttp_client.async_get_clientsession()
self._ws_username = ws_username
self._sio = AsyncClient(logger=_LOGGER, engineio_logger=_LOGGER, ssl_verify=False)
async def connect_ws(self):
"""
Connect the websocket and generate events
"""
if self._ws_username is None:
_LOGGER.warning("No websocket username configured, cannot connect")
return
try:
credentials = await self.get_ws_pass(self._ws_username)
except:
_LOGGER.error("No credentials!")
return
if ("username" not in credentials) or ("wskey" not in credentials):
_LOGGER.error(credentials)
return
# connect event callback
async def _async_on_connect():
_LOGGER.info("Websocket connected with sid %s", self._sio.sid)
self._sio.on("connect", _async_on_connect)
# disconnect event callback
async def _async_on_disconnect():
_LOGGER.info("disconnect")
self._sio.on("disconnect", _async_on_disconnect)
# serveroutput event callback
async def _async_on_serveroutput(data):
_LOGGER.debug(data)
_LOGGER.info("Trying to register...")
await self._sio.emit("register", { "username": credentials["username"], "wskey": credentials["wskey"] })
self._sio.on("serveroutput", _async_on_serveroutput)
# Notify event callback
async def _async_on_notify(data):
_LOGGER.debug("Notify received")
_LOGGER.debug(data)
self._hass.bus.fire(DOMAIN, data)
self._sio.on("notify", _async_on_notify)
await self._sio.connect(VOIPCENTER_WS_ENDPOINT)
async def disconnect_ws(self):
"""
Disconnect the websocket
"""
if self._ws_username is None:
_LOGGER.debug("No websocket client")
return
await self._sio.disconnect()
def generate_dig(self, values):
"""
Generate the pass value.
Here we assume that we are using python 3.6 and that the
values of the dictionary are keeping their order.
"""
passtobehashed = ''
for key, value in values.items():
passtobehashed = passtobehashed + key + value
return hmac.new(self._apikey.encode('utf-8'), msg=passtobehashed.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
async def get_fi(self, id):
url = VOIPCENTER_API_ENDPOINT + 'get.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'indicator',
'id' : id,
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.get(url, params=values)
return await r.json(content_type='text/json charset=utf-8')
async def activate_fi(self, id, activate):
url = VOIPCENTER_API_ENDPOINT + 'update.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'indicator',
'id' : id,
'p' : '1' if activate else '0',
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.get(url, params=values)
d = await r.json(content_type='text/json charset=utf-8')
return d['body']['status'] == '1'
async def get_ws_pass(self, ws_username):
url = VOIPCENTER_API_ENDPOINT + 'get.php'
values = {
'apiID' : self._apiid,
'knummer' : self._klantnummer,
'pwd' : <PASSWORD>,
'm' : 'wskey',
'id' : ws_username,
'f' : 'json',
'ts': datetime.now().strftime('%s')
}
values['pass'] = self.generate_dig(values)
r = await self._session.post(url, data=values)
return await r.json(content_type='text/json charset=utf-8')
def get_device_info(self):
return {
"identifiers": {(DOMAIN, self._klantnummer)},
"name": self._klantnummer,
"model": "unknown",
"sw_version": "1.0.0",
"manufacturer": "voipcenter"
} | 0.493897 | 0.12921 |
import json
import unittest
import mock
from airflow.exceptions import AirflowException
from airflow.providers.slack.operators.slack import SlackAPIPostOperator
class TestSlackAPIPostOperator(unittest.TestCase):
def setUp(self):
self.test_username = 'test_username'
self.test_channel = '#test_slack_channel'
self.test_text = 'test_text'
self.test_icon_url = 'test_icon_url'
self.test_attachments = [
{
"fallback": "Required plain-text summary of the attachment.",
"color": "#36a64f",
"pretext": "Optional text that appears above the attachment block",
"author_name": "<NAME>",
"author_link": "http://flickr.com/bobby/",
"author_icon": "http://flickr.com/icons/bobby.jpg",
"title": "Slack API Documentation",
"title_link": "https://api.slack.com/",
"text": "Optional text that appears within the attachment",
"fields": [
{
"title": "Priority",
"value": "High",
"short": 'false'
}
],
"image_url": "http://my-website.com/path/to/image.jpg",
"thumb_url": "http://example.com/path/to/thumb.png",
"footer": "Slack API",
"footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png",
"ts": 123456789
}
]
self.test_blocks = [
{
"type": "section",
"text": {
"text": "A message *with some bold text* and _some italicized text_.",
"type": "mrkdwn"
},
"fields": [
{
"type": "mrkdwn",
"text": "High"
},
{
"type": "plain_text",
"emoji": True,
"text": "String"
}
]
}
]
self.test_attachments_in_json = json.dumps(self.test_attachments)
self.test_blocks_in_json = json.dumps(self.test_blocks)
self.test_api_params = {'key': 'value'}
self.expected_method = 'chat.postMessage'
self.expected_api_params = {
'channel': self.test_channel,
'username': self.test_username,
'text': self.test_text,
'icon_url': self.test_icon_url,
'attachments': self.test_attachments_in_json,
'blocks': self.test_blocks_in_json,
}
def __construct_operator(self, test_token, test_slack_conn_id, test_api_params=None):
return SlackAPIPostOperator(
task_id='slack',
username=self.test_username,
token=test_token,
slack_conn_id=test_slack_conn_id,
channel=self.test_channel,
text=self.test_text,
icon_url=self.test_icon_url,
attachments=self.test_attachments,
blocks=self.test_blocks,
api_params=test_api_params,
)
@mock.patch('airflow.providers.slack.operators.slack.SlackHook')
def test_execute_with_token_only(self, slack_hook_class_mock):
slack_hook_mock = mock.Mock()
slack_hook_class_mock.return_value = slack_hook_mock
test_token = 'test_token'
slack_api_post_operator = self.__construct_operator(test_token, None)
slack_api_post_operator.execute()
slack_hook_class_mock.assert_called_once_with(token=test_token, slack_conn_id=None)
slack_hook_mock.call.assert_called_once_with(self.expected_method, self.expected_api_params)
slack_hook_mock.reset_mock()
slack_hook_class_mock.reset_mock()
slack_api_post_operator = self.__construct_operator(test_token, None, self.test_api_params)
slack_api_post_operator.execute()
slack_hook_class_mock.assert_called_once_with(token=test_token, slack_conn_id=None)
slack_hook_mock.call.assert_called_once_with(self.expected_method, self.test_api_params)
@mock.patch('airflow.providers.slack.operators.slack.SlackHook')
def test_execute_with_slack_conn_id_only(self, slack_hook_class_mock):
slack_hook_mock = mock.Mock()
slack_hook_class_mock.return_value = slack_hook_mock
test_slack_conn_id = 'test_slack_conn_id'
slack_api_post_operator = self.__construct_operator(None, test_slack_conn_id)
slack_api_post_operator.execute()
slack_hook_class_mock.assert_called_once_with(token=None, slack_conn_id=test_slack_conn_id)
slack_hook_mock.call.assert_called_once_with(self.expected_method, self.expected_api_params)
def test_init_with_invalid_params(self):
test_token = 'test_token'
test_slack_conn_id = 'test_slack_conn_id'
self.assertRaises(AirflowException, self.__construct_operator, test_token, test_slack_conn_id)
self.assertRaises(AirflowException, self.__construct_operator, None, None)
def test_init_with_valid_params(self):
test_token = 'test_token'
test_slack_conn_id = 'test_slack_conn_id'
slack_api_post_operator = self.__construct_operator(test_token, None, self.test_api_params)
self.assertEqual(slack_api_post_operator.token, test_token)
self.assertEqual(slack_api_post_operator.slack_conn_id, None)
self.assertEqual(slack_api_post_operator.method, self.expected_method)
self.assertEqual(slack_api_post_operator.text, self.test_text)
self.assertEqual(slack_api_post_operator.channel, self.test_channel)
self.assertEqual(slack_api_post_operator.api_params, self.test_api_params)
self.assertEqual(slack_api_post_operator.username, self.test_username)
self.assertEqual(slack_api_post_operator.icon_url, self.test_icon_url)
self.assertEqual(slack_api_post_operator.attachments, self.test_attachments)
self.assertEqual(slack_api_post_operator.blocks, self.test_blocks)
slack_api_post_operator = self.__construct_operator(None, test_slack_conn_id)
self.assertEqual(slack_api_post_operator.token, None)
self.assertEqual(slack_api_post_operator.slack_conn_id, test_slack_conn_id)
@mock.patch('airflow.providers.slack.operators.slack.SlackHook')
def test_api_call_params_with_default_args(self, mock_hook):
test_slack_conn_id = 'test_slack_conn_id'
slack_api_post_operator = SlackAPIPostOperator(
task_id='slack',
username=self.test_username,
slack_conn_id=test_slack_conn_id,
)
slack_api_post_operator.execute()
expected_api_params = {
'channel': "#general",
'username': self.test_username,
'text': 'No message has been set.\n'
'Here is a cat video instead\n'
'https://www.youtube.com/watch?v=J---aiyznGQ',
'icon_url': "https://raw.githubusercontent.com/apache/"
"airflow/master/airflow/www/static/pin_100.png",
'attachments': '[]',
'blocks': '[]',
}
self.assertEqual(expected_api_params, slack_api_post_operator.api_params) | tests/providers/slack/operators/test_slack.py |
import json
import unittest
import mock
from airflow.exceptions import AirflowException
from airflow.providers.slack.operators.slack import SlackAPIPostOperator
class TestSlackAPIPostOperator(unittest.TestCase):
    """Tests for SlackAPIPostOperator construction, validation and execution."""

    def setUp(self):
        # Representative fixture values for every operator parameter.
        self.test_username = 'test_username'
        self.test_channel = '#test_slack_channel'
        self.test_text = 'test_text'
        self.test_icon_url = 'test_icon_url'
        self.test_attachments = [
            {
                "fallback": "Required plain-text summary of the attachment.",
                "color": "#36a64f",
                "pretext": "Optional text that appears above the attachment block",
                "author_name": "<NAME>",
                "author_link": "http://flickr.com/bobby/",
                "author_icon": "http://flickr.com/icons/bobby.jpg",
                "title": "Slack API Documentation",
                "title_link": "https://api.slack.com/",
                "text": "Optional text that appears within the attachment",
                "fields": [
                    {
                        "title": "Priority",
                        "value": "High",
                        "short": 'false'
                    }
                ],
                "image_url": "http://my-website.com/path/to/image.jpg",
                "thumb_url": "http://example.com/path/to/thumb.png",
                "footer": "Slack API",
                "footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png",
                "ts": 123456789
            }
        ]
        self.test_blocks = [
            {
                "type": "section",
                "text": {
                    "text": "A message *with some bold text* and _some italicized text_.",
                    "type": "mrkdwn"
                },
                "fields": [
                    {
                        "type": "mrkdwn",
                        "text": "High"
                    },
                    {
                        "type": "plain_text",
                        "emoji": True,
                        "text": "String"
                    }
                ]
            }
        ]
        # The operator serializes attachments/blocks to JSON before the API call.
        self.test_attachments_in_json = json.dumps(self.test_attachments)
        self.test_blocks_in_json = json.dumps(self.test_blocks)
        self.test_api_params = {'key': 'value'}
        self.expected_method = 'chat.postMessage'
        self.expected_api_params = {
            'channel': self.test_channel,
            'username': self.test_username,
            'text': self.test_text,
            'icon_url': self.test_icon_url,
            'attachments': self.test_attachments_in_json,
            'blocks': self.test_blocks_in_json,
        }

    def __construct_operator(self, test_token, test_slack_conn_id, test_api_params=None):
        """Build an operator with the fixture values and the given credentials."""
        return SlackAPIPostOperator(
            task_id='slack',
            username=self.test_username,
            token=test_token,
            slack_conn_id=test_slack_conn_id,
            channel=self.test_channel,
            text=self.test_text,
            icon_url=self.test_icon_url,
            attachments=self.test_attachments,
            blocks=self.test_blocks,
            api_params=test_api_params,
        )

    @mock.patch('airflow.providers.slack.operators.slack.SlackHook')
    def test_execute_with_token_only(self, slack_hook_class_mock):
        """execute() passes the token through and calls the expected Slack method."""
        slack_hook_mock = mock.Mock()
        slack_hook_class_mock.return_value = slack_hook_mock
        test_token = 'test_token'
        slack_api_post_operator = self.__construct_operator(test_token, None)
        slack_api_post_operator.execute()
        slack_hook_class_mock.assert_called_once_with(token=test_token, slack_conn_id=None)
        slack_hook_mock.call.assert_called_once_with(self.expected_method, self.expected_api_params)
        # Explicit api_params must be forwarded verbatim instead of being rebuilt.
        slack_hook_mock.reset_mock()
        slack_hook_class_mock.reset_mock()
        slack_api_post_operator = self.__construct_operator(test_token, None, self.test_api_params)
        slack_api_post_operator.execute()
        slack_hook_class_mock.assert_called_once_with(token=test_token, slack_conn_id=None)
        slack_hook_mock.call.assert_called_once_with(self.expected_method, self.test_api_params)

    @mock.patch('airflow.providers.slack.operators.slack.SlackHook')
    def test_execute_with_slack_conn_id_only(self, slack_hook_class_mock):
        """execute() works with a connection id and no token."""
        slack_hook_mock = mock.Mock()
        slack_hook_class_mock.return_value = slack_hook_mock
        test_slack_conn_id = 'test_slack_conn_id'
        slack_api_post_operator = self.__construct_operator(None, test_slack_conn_id)
        slack_api_post_operator.execute()
        slack_hook_class_mock.assert_called_once_with(token=None, slack_conn_id=test_slack_conn_id)
        slack_hook_mock.call.assert_called_once_with(self.expected_method, self.expected_api_params)

    def test_init_with_invalid_params(self):
        """Providing both or neither credential must raise AirflowException."""
        test_token = 'test_token'
        test_slack_conn_id = 'test_slack_conn_id'
        self.assertRaises(AirflowException, self.__construct_operator, test_token, test_slack_conn_id)
        self.assertRaises(AirflowException, self.__construct_operator, None, None)

    def test_init_with_valid_params(self):
        """All constructor arguments are stored unchanged on the operator."""
        test_token = 'test_token'
        test_slack_conn_id = 'test_slack_conn_id'
        slack_api_post_operator = self.__construct_operator(test_token, None, self.test_api_params)
        self.assertEqual(slack_api_post_operator.token, test_token)
        self.assertEqual(slack_api_post_operator.slack_conn_id, None)
        self.assertEqual(slack_api_post_operator.method, self.expected_method)
        self.assertEqual(slack_api_post_operator.text, self.test_text)
        self.assertEqual(slack_api_post_operator.channel, self.test_channel)
        self.assertEqual(slack_api_post_operator.api_params, self.test_api_params)
        self.assertEqual(slack_api_post_operator.username, self.test_username)
        self.assertEqual(slack_api_post_operator.icon_url, self.test_icon_url)
        self.assertEqual(slack_api_post_operator.attachments, self.test_attachments)
        self.assertEqual(slack_api_post_operator.blocks, self.test_blocks)
        slack_api_post_operator = self.__construct_operator(None, test_slack_conn_id)
        self.assertEqual(slack_api_post_operator.token, None)
        self.assertEqual(slack_api_post_operator.slack_conn_id, test_slack_conn_id)

    @mock.patch('airflow.providers.slack.operators.slack.SlackHook')
    def test_api_call_params_with_default_args(self, mock_hook):
        """An operator built with only a connection id uses the documented defaults."""
        test_slack_conn_id = 'test_slack_conn_id'
        slack_api_post_operator = SlackAPIPostOperator(
            task_id='slack',
            username=self.test_username,
            slack_conn_id=test_slack_conn_id,
        )
        slack_api_post_operator.execute()
        expected_api_params = {
            'channel': "#general",
            'username': self.test_username,
            'text': 'No message has been set.\n'
                    'Here is a cat video instead\n'
                    'https://www.youtube.com/watch?v=J---aiyznGQ',
            'icon_url': "https://raw.githubusercontent.com/apache/"
                        "airflow/master/airflow/www/static/pin_100.png",
            'attachments': '[]',
            'blocks': '[]',
        }
        self.assertEqual(expected_api_params, slack_api_post_operator.api_params)
import logging
import time
import usb.core
import usb.util
from .util import *
_MEM_IDENT = 0x0904
_MEM_WRITE = 0x0914
_MEM_READ = 0x0924
_CRC8_POLYNOMIAL = 0x31
_CRC8_INIT = 0xFF
_CMD_NOOP = 0b000
_CMD_GET = 0b001
_CMD_SET = 0b010
_CMD_SAVE = 0b100
_CMD_RESET = 0b111
_WAIT = 0.1
_NAME_RO = [
'power_errors',
'power_measure_12',
'power_measure_34',
]
_NAME_ADDR = dict(
data_state = 0x05,
# power_errors = 0x06,
power_limits = 0x07,
power_measure_12 = 0x08,
power_measure_34 = 0x09,
highspeed_disable = 0x10,
loop_delay = 0x11,
external_heartbeat = 0x12,
)
def _generate_crc(data):
crc = _CRC8_INIT
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x80:
crc = (crc << 1) ^ _CRC8_POLYNOMIAL
else:
crc <<= 1
return crc & 0xFF
class USBHubConfig:
    """Reads and writes the configuration mailbox of a Capable Robot USB hub.

    Communication happens through three 4-byte registers on the hub: an
    identity register and a pair of command/response mailboxes, each
    protected by a trailing CRC-8 byte.
    """

    def __init__(self, hub, clear=False):
        self.hub = hub
        # Cached 4-byte IDENT register: [firmware, cp_major, cp_minor, cp_patch].
        self._version = None
        if clear:
            self.clear()

    def _ident(self):
        # Fetch and cache the IDENT register on first use.
        if self._version is None:
            buf, _ = self.hub.register_read(addr=_MEM_IDENT, length=4)
            self._version = buf
        return self._version

    @property
    def version(self):
        """Firmware version byte."""
        return self._ident()[0]

    @property
    def circuitpython_version(self):
        """CircuitPython version as a dotted string, e.g. '5.2.0'."""
        return ".".join(str(v) for v in self._ident()[1:4])

    def _read(self):
        # Return the 3 payload bytes of the read mailbox when its CRC is valid,
        # acknowledging by zeroing the mailbox; return None on CRC mismatch.
        buf, _ = self.hub.register_read(addr=_MEM_READ, length=4)
        if _generate_crc(buf[0:3]) == buf[3]:
            self.hub.register_write(addr=_MEM_READ, buf=[0, 0, 0, 0])
            return buf[0:3]
        return None

    def _write_okay(self):
        # The write mailbox is free once the hub has consumed the previous
        # command (command field back to NOOP).
        buf, _ = self.hub.register_read(addr=_MEM_WRITE, length=4)
        return buf[0] >> 5 == _CMD_NOOP

    def _write(self, buf):
        # Append the CRC byte and push the 4-byte frame to the write mailbox.
        crc = _generate_crc(buf[0:3])
        return self.hub.register_write(addr=_MEM_WRITE, buf=buf[0:3] + [crc])

    def read(self):
        """Return (cmd, name_addr, value) from the read mailbox.

        Returns (_CMD_NOOP, None, None) when no valid frame is pending.
        """
        buf = self._read()
        if buf is None:
            return _CMD_NOOP, None, None
        cmd = buf[0] >> 5
        name = buf[0] & 0b11111
        value = buf[1] << 8 | buf[2]
        return cmd, name, value

    def write(self, cmd, name=None, value=0):
        """Send a command frame, blocking until the write mailbox is free.

        :param cmd: one of the _CMD_* codes (3 bits)
        :param name: optional parameter name from _NAME_ADDR
        :param value: 16-bit payload value
        :raises ValueError: if the parameter's address exceeds the 5-bit field
        """
        if name is None:
            name_addr = 0
        else:
            name_addr = _NAME_ADDR[name]
        if name_addr > 0b11111:
            # Previously only logged; sending anyway would corrupt the command
            # byte, so refuse outright. Unreachable with the current _NAME_ADDR.
            raise ValueError("Address of name '{}' is above 5 bit limit".format(name))
        while not self._write_okay():
            time.sleep(_WAIT)
        self._write([cmd << 5 | name_addr, (value >> 8) & 0xFF, value & 0xFF])

    def clear(self):
        """Zero both mailboxes, discarding any pending frames."""
        self.hub.register_write(addr=_MEM_READ, buf=[0, 0, 0, 0])
        self.hub.register_write(addr=_MEM_WRITE, buf=[0, 0, 0, 0])

    def device_info(self):
        """Return firmware version and CircuitPython version parts (as strings)."""
        return dict(
            firmware = self.version,
            circuitpython = self.circuitpython_version.split(".")
        )

    def reset(self, target="usb"):
        """Reset part of the hub: 'usb', 'mcu', or 'bootloader'."""
        targets = ["usb", "mcu", "bootloader"]
        self.write(_CMD_RESET, value=targets.index(target))

    def save(self):
        """Persist the current configuration to the hub's config.ini file.

        Returns the hub's success flag (0 on failure), or None when the
        firmware is too old to support filesystem saves.
        """
        info = self.device_info()
        # device_info() reports version parts as strings; the original code
        # compared them against ints, so the gate never fired. Compare as ints
        # and require at least CircuitPython 5.2.
        major, minor = (int(v) for v in info["circuitpython"][:2])
        if (major, minor) < (5, 2):
            logging.error("MCU must be upgraded to CircuitPython 5.2.0 or newer for filesystem saves to work.")
            return
        self.write(_CMD_SAVE)
        out = self.read()
        while out[0] != _CMD_SAVE:
            time.sleep(_WAIT)
            out = self.read()
        if out[2] == 0:
            logging.error("Save of the config.ini file failed.")
            logging.error("Please unmount the CIRCUITPY volume and try again.")
        return out[2]

    def get(self, name):
        """Fetch the current value of parameter *name* from the hub."""
        self.write(_CMD_GET, name=name)
        out = self.read()
        while out[0] != _CMD_GET:
            time.sleep(_WAIT)
            out = self.read()
        return out[2]

    def set(self, name, value):
        """Write *value* to parameter *name*; returns the hub's echoed value.

        :raises ValueError: if *name* is read-only
        """
        if name in _NAME_RO:
            raise ValueError("Cannot set read-only parameter '{}'".format(name))
        self.write(_CMD_SET, name=name, value=value)
        out = self.read()
        while out[0] != _CMD_SET:
            time.sleep(_WAIT)
            out = self.read()
        return out[2]
import logging
import time
import usb.core
import usb.util
from .util import *
_MEM_IDENT = 0x0904
_MEM_WRITE = 0x0914
_MEM_READ = 0x0924
_CRC8_POLYNOMIAL = 0x31
_CRC8_INIT = 0xFF
_CMD_NOOP = 0b000
_CMD_GET = 0b001
_CMD_SET = 0b010
_CMD_SAVE = 0b100
_CMD_RESET = 0b111
_WAIT = 0.1
_NAME_RO = [
'power_errors',
'power_measure_12',
'power_measure_34',
]
_NAME_ADDR = dict(
data_state = 0x05,
# power_errors = 0x06,
power_limits = 0x07,
power_measure_12 = 0x08,
power_measure_34 = 0x09,
highspeed_disable = 0x10,
loop_delay = 0x11,
external_heartbeat = 0x12,
)
def _generate_crc(data):
crc = _CRC8_INIT
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x80:
crc = (crc << 1) ^ _CRC8_POLYNOMIAL
else:
crc <<= 1
return crc & 0xFF
class USBHubConfig:
    """Reads and writes the configuration mailbox of a Capable Robot USB hub."""

    def __init__(self, hub, clear=False):
        self.hub = hub
        # Cached IDENT register: [firmware, cp_major, cp_minor, cp_patch].
        self._version = None
        if clear:
            self.clear()

    def _ident(self):
        # Fetch and cache the IDENT register on first use.
        if self._version is None:
            buf, _ = self.hub.register_read(addr=_MEM_IDENT, length=4)
            self._version = buf
        return self._version

    @property
    def version(self):
        """Firmware version byte."""
        return self._ident()[0]

    @property
    def circuitpython_version(self):
        """CircuitPython version as a dotted string."""
        return ".".join(str(v) for v in self._ident()[1:4])

    def _read(self):
        # Return 3 payload bytes when the CRC matches (and ack), else None.
        buf, _ = self.hub.register_read(addr=_MEM_READ, length=4)
        if _generate_crc(buf[0:3]) == buf[3]:
            self.hub.register_write(addr=_MEM_READ, buf=[0, 0, 0, 0])
            return buf[0:3]
        return None

    def _write_okay(self):
        # Mailbox is free when the previous command was consumed (NOOP).
        buf, _ = self.hub.register_read(addr=_MEM_WRITE, length=4)
        return buf[0] >> 5 == _CMD_NOOP

    def _write(self, buf):
        # Append CRC and push the 4-byte frame.
        crc = _generate_crc(buf[0:3])
        return self.hub.register_write(addr=_MEM_WRITE, buf=buf[0:3] + [crc])

    def read(self):
        """Return (cmd, name_addr, value), or (_CMD_NOOP, None, None) when idle."""
        buf = self._read()
        if buf is None:
            return _CMD_NOOP, None, None
        cmd = buf[0] >> 5
        name = buf[0] & 0b11111
        value = buf[1] << 8 | buf[2]
        return cmd, name, value

    def write(self, cmd, name=None, value=0):
        """Send a command frame, blocking until the write mailbox is free."""
        if name is None:
            name_addr = 0
        else:
            name_addr = _NAME_ADDR[name]
        if name_addr > 0b11111:
            # Refuse rather than send a corrupted command byte (was only logged).
            raise ValueError("Address of name '{}' is above 5 bit limit".format(name))
        while not self._write_okay():
            time.sleep(_WAIT)
        self._write([cmd << 5 | name_addr, (value >> 8) & 0xFF, value & 0xFF])

    def clear(self):
        """Zero both mailboxes, discarding pending frames."""
        self.hub.register_write(addr=_MEM_READ, buf=[0, 0, 0, 0])
        self.hub.register_write(addr=_MEM_WRITE, buf=[0, 0, 0, 0])

    def device_info(self):
        """Return firmware version and CircuitPython version parts (strings)."""
        return dict(
            firmware = self.version,
            circuitpython = self.circuitpython_version.split(".")
        )

    def reset(self, target="usb"):
        """Reset part of the hub: 'usb', 'mcu', or 'bootloader'."""
        targets = ["usb", "mcu", "bootloader"]
        self.write(_CMD_RESET, value=targets.index(target))

    def save(self):
        """Persist the configuration to config.ini; returns the success flag."""
        info = self.device_info()
        # Version parts are strings; the original int comparison never fired.
        major, minor = (int(v) for v in info["circuitpython"][:2])
        if (major, minor) < (5, 2):
            logging.error("MCU must be upgraded to CircuitPython 5.2.0 or newer for filesystem saves to work.")
            return
        self.write(_CMD_SAVE)
        out = self.read()
        while out[0] != _CMD_SAVE:
            time.sleep(_WAIT)
            out = self.read()
        if out[2] == 0:
            logging.error("Save of the config.ini file failed.")
            logging.error("Please unmount the CIRCUITPY volume and try again.")
        return out[2]

    def get(self, name):
        """Fetch the current value of parameter *name*."""
        self.write(_CMD_GET, name=name)
        out = self.read()
        while out[0] != _CMD_GET:
            time.sleep(_WAIT)
            out = self.read()
        return out[2]

    def set(self, name, value):
        """Write *value* to parameter *name*; raises ValueError for read-only names."""
        if name in _NAME_RO:
            raise ValueError("Cannot set read-only parameter '{}'".format(name))
        self.write(_CMD_SET, name=name, value=value)
        out = self.read()
        while out[0] != _CMD_SET:
            time.sleep(_WAIT)
            out = self.read()
        return out[2]
# URL patterns for the virtualization app (clusters, cluster types/groups,
# virtual machines and their interfaces).
from django.urls import path

from extras.views import ObjectChangeLogView
from ipam.views import ServiceCreateView

from . import views
from .models import Cluster, ClusterGroup, ClusterType, VirtualMachine

app_name = 'virtualization'
urlpatterns = [

    # Cluster types
    path(r'cluster-types/', views.ClusterTypeListView.as_view(), name='clustertype_list'),
    path(r'cluster-types/add/', views.ClusterTypeCreateView.as_view(), name='clustertype_add'),
    path(r'cluster-types/import/', views.ClusterTypeBulkImportView.as_view(), name='clustertype_import'),
    path(r'cluster-types/delete/', views.ClusterTypeBulkDeleteView.as_view(), name='clustertype_bulk_delete'),
    path(r'cluster-types/<slug:slug>/edit/', views.ClusterTypeEditView.as_view(), name='clustertype_edit'),
    path(r'cluster-types/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='clustertype_changelog', kwargs={'model': ClusterType}),

    # Cluster groups
    path(r'cluster-groups/', views.ClusterGroupListView.as_view(), name='clustergroup_list'),
    path(r'cluster-groups/add/', views.ClusterGroupCreateView.as_view(), name='clustergroup_add'),
    path(r'cluster-groups/import/', views.ClusterGroupBulkImportView.as_view(), name='clustergroup_import'),
    path(r'cluster-groups/delete/', views.ClusterGroupBulkDeleteView.as_view(), name='clustergroup_bulk_delete'),
    path(r'cluster-groups/<slug:slug>/edit/', views.ClusterGroupEditView.as_view(), name='clustergroup_edit'),
    path(r'cluster-groups/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='clustergroup_changelog', kwargs={'model': ClusterGroup}),

    # Clusters
    path(r'clusters/', views.ClusterListView.as_view(), name='cluster_list'),
    path(r'clusters/add/', views.ClusterCreateView.as_view(), name='cluster_add'),
    path(r'clusters/import/', views.ClusterBulkImportView.as_view(), name='cluster_import'),
    path(r'clusters/edit/', views.ClusterBulkEditView.as_view(), name='cluster_bulk_edit'),
    path(r'clusters/delete/', views.ClusterBulkDeleteView.as_view(), name='cluster_bulk_delete'),
    path(r'clusters/<int:pk>/', views.ClusterView.as_view(), name='cluster'),
    path(r'clusters/<int:pk>/edit/', views.ClusterEditView.as_view(), name='cluster_edit'),
    path(r'clusters/<int:pk>/delete/', views.ClusterDeleteView.as_view(), name='cluster_delete'),
    path(r'clusters/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='cluster_changelog', kwargs={'model': Cluster}),
    path(r'clusters/<int:pk>/devices/add/', views.ClusterAddDevicesView.as_view(), name='cluster_add_devices'),
    path(r'clusters/<int:pk>/devices/remove/', views.ClusterRemoveDevicesView.as_view(), name='cluster_remove_devices'),

    # Virtual machines
    path(r'virtual-machines/', views.VirtualMachineListView.as_view(), name='virtualmachine_list'),
    path(r'virtual-machines/add/', views.VirtualMachineCreateView.as_view(), name='virtualmachine_add'),
    path(r'virtual-machines/import/', views.VirtualMachineBulkImportView.as_view(), name='virtualmachine_import'),
    path(r'virtual-machines/edit/', views.VirtualMachineBulkEditView.as_view(), name='virtualmachine_bulk_edit'),
    path(r'virtual-machines/delete/', views.VirtualMachineBulkDeleteView.as_view(), name='virtualmachine_bulk_delete'),
    path(r'virtual-machines/<int:pk>/', views.VirtualMachineView.as_view(), name='virtualmachine'),
    path(r'virtual-machines/<int:pk>/edit/', views.VirtualMachineEditView.as_view(), name='virtualmachine_edit'),
    path(r'virtual-machines/<int:pk>/delete/', views.VirtualMachineDeleteView.as_view(), name='virtualmachine_delete'),
    path(r'virtual-machines/<int:pk>/config-context/', views.VirtualMachineConfigContextView.as_view(), name='virtualmachine_configcontext'),
    path(r'virtual-machines/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='virtualmachine_changelog', kwargs={'model': VirtualMachine}),
    path(r'virtual-machines/<int:virtualmachine>/services/assign/', ServiceCreateView.as_view(), name='virtualmachine_service_assign'),

    # VM interfaces
    path(r'virtual-machines/interfaces/add/', views.VirtualMachineBulkAddInterfaceView.as_view(), name='virtualmachine_bulk_add_interface'),
    path(r'virtual-machines/<int:pk>/interfaces/add/', views.InterfaceCreateView.as_view(), name='interface_add'),
    path(r'virtual-machines/<int:pk>/interfaces/edit/', views.InterfaceBulkEditView.as_view(), name='interface_bulk_edit'),
    path(r'virtual-machines/<int:pk>/interfaces/delete/', views.InterfaceBulkDeleteView.as_view(), name='interface_bulk_delete'),
    path(r'vm-interfaces/<int:pk>/edit/', views.InterfaceEditView.as_view(), name='interface_edit'),
    path(r'vm-interfaces/<int:pk>/delete/', views.InterfaceDeleteView.as_view(), name='interface_delete'),

]
# URL patterns for the virtualization app. NOTE(review): this copy's
# `from django.urls import path` line was fused onto the preceding dataset
# row; restored here so the module parses.
from django.urls import path

from extras.views import ObjectChangeLogView
from ipam.views import ServiceCreateView

from . import views
from .models import Cluster, ClusterGroup, ClusterType, VirtualMachine

app_name = 'virtualization'
urlpatterns = [

    # Cluster types
    path(r'cluster-types/', views.ClusterTypeListView.as_view(), name='clustertype_list'),
    path(r'cluster-types/add/', views.ClusterTypeCreateView.as_view(), name='clustertype_add'),
    path(r'cluster-types/import/', views.ClusterTypeBulkImportView.as_view(), name='clustertype_import'),
    path(r'cluster-types/delete/', views.ClusterTypeBulkDeleteView.as_view(), name='clustertype_bulk_delete'),
    path(r'cluster-types/<slug:slug>/edit/', views.ClusterTypeEditView.as_view(), name='clustertype_edit'),
    path(r'cluster-types/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='clustertype_changelog', kwargs={'model': ClusterType}),

    # Cluster groups
    path(r'cluster-groups/', views.ClusterGroupListView.as_view(), name='clustergroup_list'),
    path(r'cluster-groups/add/', views.ClusterGroupCreateView.as_view(), name='clustergroup_add'),
    path(r'cluster-groups/import/', views.ClusterGroupBulkImportView.as_view(), name='clustergroup_import'),
    path(r'cluster-groups/delete/', views.ClusterGroupBulkDeleteView.as_view(), name='clustergroup_bulk_delete'),
    path(r'cluster-groups/<slug:slug>/edit/', views.ClusterGroupEditView.as_view(), name='clustergroup_edit'),
    path(r'cluster-groups/<slug:slug>/changelog/', ObjectChangeLogView.as_view(), name='clustergroup_changelog', kwargs={'model': ClusterGroup}),

    # Clusters
    path(r'clusters/', views.ClusterListView.as_view(), name='cluster_list'),
    path(r'clusters/add/', views.ClusterCreateView.as_view(), name='cluster_add'),
    path(r'clusters/import/', views.ClusterBulkImportView.as_view(), name='cluster_import'),
    path(r'clusters/edit/', views.ClusterBulkEditView.as_view(), name='cluster_bulk_edit'),
    path(r'clusters/delete/', views.ClusterBulkDeleteView.as_view(), name='cluster_bulk_delete'),
    path(r'clusters/<int:pk>/', views.ClusterView.as_view(), name='cluster'),
    path(r'clusters/<int:pk>/edit/', views.ClusterEditView.as_view(), name='cluster_edit'),
    path(r'clusters/<int:pk>/delete/', views.ClusterDeleteView.as_view(), name='cluster_delete'),
    path(r'clusters/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='cluster_changelog', kwargs={'model': Cluster}),
    path(r'clusters/<int:pk>/devices/add/', views.ClusterAddDevicesView.as_view(), name='cluster_add_devices'),
    path(r'clusters/<int:pk>/devices/remove/', views.ClusterRemoveDevicesView.as_view(), name='cluster_remove_devices'),

    # Virtual machines
    path(r'virtual-machines/', views.VirtualMachineListView.as_view(), name='virtualmachine_list'),
    path(r'virtual-machines/add/', views.VirtualMachineCreateView.as_view(), name='virtualmachine_add'),
    path(r'virtual-machines/import/', views.VirtualMachineBulkImportView.as_view(), name='virtualmachine_import'),
    path(r'virtual-machines/edit/', views.VirtualMachineBulkEditView.as_view(), name='virtualmachine_bulk_edit'),
    path(r'virtual-machines/delete/', views.VirtualMachineBulkDeleteView.as_view(), name='virtualmachine_bulk_delete'),
    path(r'virtual-machines/<int:pk>/', views.VirtualMachineView.as_view(), name='virtualmachine'),
    path(r'virtual-machines/<int:pk>/edit/', views.VirtualMachineEditView.as_view(), name='virtualmachine_edit'),
    path(r'virtual-machines/<int:pk>/delete/', views.VirtualMachineDeleteView.as_view(), name='virtualmachine_delete'),
    path(r'virtual-machines/<int:pk>/config-context/', views.VirtualMachineConfigContextView.as_view(), name='virtualmachine_configcontext'),
    path(r'virtual-machines/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='virtualmachine_changelog', kwargs={'model': VirtualMachine}),
    path(r'virtual-machines/<int:virtualmachine>/services/assign/', ServiceCreateView.as_view(), name='virtualmachine_service_assign'),

    # VM interfaces
    path(r'virtual-machines/interfaces/add/', views.VirtualMachineBulkAddInterfaceView.as_view(), name='virtualmachine_bulk_add_interface'),
    path(r'virtual-machines/<int:pk>/interfaces/add/', views.InterfaceCreateView.as_view(), name='interface_add'),
    path(r'virtual-machines/<int:pk>/interfaces/edit/', views.InterfaceBulkEditView.as_view(), name='interface_bulk_edit'),
    path(r'virtual-machines/<int:pk>/interfaces/delete/', views.InterfaceBulkDeleteView.as_view(), name='interface_bulk_delete'),
    path(r'vm-interfaces/<int:pk>/edit/', views.InterfaceEditView.as_view(), name='interface_edit'),
    path(r'vm-interfaces/<int:pk>/delete/', views.InterfaceDeleteView.as_view(), name='interface_delete'),

]
import unittest
from meu_grafo_matriz_adjacencia_nao_dir import *
from bibgrafo.grafo_exceptions import *
class TestGrafo(unittest.TestCase):
    """Tests for MeuGrafo (undirected graph, adjacency-matrix implementation)."""

    def setUp(self):
        # Graph of Paraíba cities (with parallel edges between C-E and P-C).
        self.g_p = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
        self.g_p.adicionaAresta('a1', 'J', 'C')
        self.g_p.adicionaAresta('a2', 'C', 'E')
        self.g_p.adicionaAresta('a3', 'C', 'E')
        self.g_p.adicionaAresta('a4', 'P', 'C')
        self.g_p.adicionaAresta('a5', 'P', 'C')
        self.g_p.adicionaAresta('a6', 'T', 'C')
        self.g_p.adicionaAresta('a7', 'M', 'C')
        self.g_p.adicionaAresta('a8', 'M', 'T')
        self.g_p.adicionaAresta('a9', 'T', 'Z')
        # Paraíba graph without parallel edges.
        self.g_p_sem_paralelas = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
        self.g_p_sem_paralelas.adicionaAresta('a1', 'J', 'C')
        self.g_p_sem_paralelas.adicionaAresta('a2', 'C', 'E')
        self.g_p_sem_paralelas.adicionaAresta('a3', 'P', 'C')
        self.g_p_sem_paralelas.adicionaAresta('a4', 'T', 'C')
        self.g_p_sem_paralelas.adicionaAresta('a5', 'M', 'C')
        self.g_p_sem_paralelas.adicionaAresta('a6', 'M', 'T')
        self.g_p_sem_paralelas.adicionaAresta('a7', 'T', 'Z')
        # Complete graphs (K4, K2, K1).
        self.g_c = MeuGrafo(['J', 'C', 'E', 'P'])
        self.g_c.adicionaAresta('a1','J','C')
        self.g_c.adicionaAresta('a2', 'J', 'E')
        self.g_c.adicionaAresta('a3', 'J', 'P')
        self.g_c.adicionaAresta('a4', 'E', 'C')
        self.g_c.adicionaAresta('a5', 'P', 'C')
        self.g_c.adicionaAresta('a6', 'P', 'E')
        self.g_c2 = MeuGrafo(['Nina', 'Maria'])
        self.g_c2.adicionaAresta('amiga', 'Nina', 'Maria')
        self.g_c3 = MeuGrafo(['J'])
        # Graphs with self-loops.
        self.g_l1 = MeuGrafo(['A', 'B', 'C', 'D'])
        self.g_l1.adicionaAresta('a1', 'A', 'A')
        self.g_l1.adicionaAresta('a2', 'A', 'B')
        self.g_l1.adicionaAresta('a3', 'A', 'A')
        self.g_l2 = MeuGrafo(['A', 'B', 'C', 'D'])
        self.g_l2.adicionaAresta('a1', 'A', 'B')
        self.g_l2.adicionaAresta('a2', 'B', 'B')
        self.g_l2.adicionaAresta('a3', 'B', 'A')
        self.g_l3 = MeuGrafo(['A', 'B', 'C', 'D'])
        self.g_l3.adicionaAresta('a1', 'C', 'A')
        self.g_l3.adicionaAresta('a2', 'C', 'C')
        self.g_l3.adicionaAresta('a3', 'D', 'D')
        self.g_l3.adicionaAresta('a4', 'D', 'D')
        self.g_l4 = MeuGrafo(['D'])
        self.g_l4.adicionaAresta('a1', 'D', 'D')
        self.g_l5 = MeuGrafo(['C', 'D'])
        self.g_l5.adicionaAresta('a1', 'D', 'C')
        self.g_l5.adicionaAresta('a2', 'C', 'C')
        # Disconnected graph.
        self.g_d = MeuGrafo(['A', 'B', 'C', 'D'])
        self.g_d.adicionaAresta('asd', 'A', 'B')
        # Larger undirected graph.
        self.g_nd = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
        self.g_nd.adicionaAresta('a1', 'A', 'B')
        self.g_nd.adicionaAresta('a2', 'A', 'G')
        self.g_nd.adicionaAresta('a3', 'A', 'J')
        self.g_nd.adicionaAresta('a4', 'G', 'K')
        self.g_nd.adicionaAresta('a5', 'J', 'K')
        self.g_nd.adicionaAresta('a6', 'G', 'J')
        self.g_nd.adicionaAresta('a7', 'I', 'J')
        self.g_nd.adicionaAresta('a8', 'G', 'I')
        self.g_nd.adicionaAresta('a9', 'G', 'H')
        self.g_nd.adicionaAresta('a10', 'F', 'H')
        self.g_nd.adicionaAresta('a11', 'B', 'F')
        self.g_nd.adicionaAresta('a12', 'B', 'G')
        self.g_nd.adicionaAresta('a13', 'B', 'C')
        self.g_nd.adicionaAresta('a14', 'C', 'D')
        self.g_nd.adicionaAresta('a15', 'D', 'E')
        self.g_nd.adicionaAresta('a16', 'B', 'D')
        self.g_nd.adicionaAresta('a17', 'B', 'E')

    def test_adiciona_aresta(self):
        """Valid edges are accepted; malformed or duplicate edges raise."""
        self.assertTrue(self.g_p.adicionaAresta('a10', 'J', 'C'))
        with self.assertRaises(ArestaInvalidaException):
            self.assertTrue(self.g_p.adicionaAresta('b1', '', 'C'))
        with self.assertRaises(ArestaInvalidaException):
            self.assertTrue(self.g_p.adicionaAresta('b1', 'A', 'C'))
        with self.assertRaises(ArestaInvalidaException):
            self.g_p.adicionaAresta('')
        with self.assertRaises(ArestaInvalidaException):
            self.g_p.adicionaAresta('aa-bb')
        with self.assertRaises(ArestaInvalidaException):
            self.g_p.adicionaAresta('x', 'J', 'V')
        with self.assertRaises(ArestaInvalidaException):
            self.g_p.adicionaAresta('a1', 'J', 'C')

    def test_vertices_nao_adjacentes(self):
        """Non-adjacent vertex pairs are reported as 'X-Y' strings."""
        self.assertEqual(self.g_p.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T', 'J-Z',
                                                              'C-Z', 'E-P', 'E-M', 'E-T', 'E-Z', 'P-M',
                                                              'P-T', 'P-Z', 'M-Z'])
        self.assertEqual(self.g_p_sem_paralelas.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T',
                                                                            'J-Z', 'C-Z', 'E-P', 'E-M',
                                                                            'E-T', 'E-Z', 'P-M', 'P-T',
                                                                            'P-Z', 'M-Z'])
        self.assertEqual(self.g_c.vertices_nao_adjacentes(), [])
        self.assertEqual(self.g_l1.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
        self.assertEqual(self.g_l2.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
        self.assertEqual(self.g_l3.vertices_nao_adjacentes(), ['A-B', 'A-D', 'B-C', 'B-D', 'C-D'])
        self.assertEqual(self.g_nd.vertices_nao_adjacentes(), ['A-C', 'A-D', 'A-E', 'A-F', 'A-H', 'A-I',
                                                               'A-K', 'B-H', 'B-I', 'B-J', 'B-K', 'C-E',
                                                               'C-F', 'C-G', 'C-H', 'C-I', 'C-J', 'C-K',
                                                               'D-F', 'D-G', 'D-H', 'D-I', 'D-J', 'D-K',
                                                               'E-F', 'E-G', 'E-H', 'E-I', 'E-J', 'E-K',
                                                               'F-G', 'F-I', 'F-J', 'F-K', 'H-I', 'H-J',
                                                               'H-K', 'I-K'])

    def test_ha_laco(self):
        """ha_laco() is True exactly when the graph has a self-loop."""
        self.assertFalse(self.g_p.ha_laco())
        self.assertFalse(self.g_p_sem_paralelas.ha_laco())
        self.assertFalse(self.g_c2.ha_laco())
        self.assertTrue(self.g_l1.ha_laco())
        self.assertTrue(self.g_l2.ha_laco())
        self.assertTrue(self.g_l3.ha_laco())
        self.assertTrue(self.g_l4.ha_laco())
        self.assertTrue(self.g_l5.ha_laco())

    def test_grau(self):
        """Vertex degree; unknown vertices raise VerticeInvalidoException."""
        # Paraíba graph
        self.assertEqual(self.g_p.grau('J'), 1)
        self.assertEqual(self.g_p.grau('C'), 7)
        self.assertEqual(self.g_p.grau('E'), 2)
        self.assertEqual(self.g_p.grau('P'), 2)
        self.assertEqual(self.g_p.grau('M'), 2)
        self.assertEqual(self.g_p.grau('T'), 3)
        self.assertEqual(self.g_p.grau('Z'), 1)
        with self.assertRaises(VerticeInvalidoException):
            self.assertEqual(self.g_p.grau('G'), 5)
        self.assertEqual(self.g_d.grau('A'), 1)
        self.assertEqual(self.g_d.grau('C'), 0)
        self.assertNotEqual(self.g_d.grau('D'), 2)
        # Complete graphs
        self.assertEqual(self.g_c.grau('J'), 3)
        self.assertEqual(self.g_c.grau('C'), 3)
        self.assertEqual(self.g_c.grau('E'), 3)
        self.assertEqual(self.g_c.grau('P'), 3)
        # With self-loops. NOTE(review): per these expected values each loop
        # counts twice toward the degree (e.g. g_l4 single loop -> degree 2),
        # despite the original comment claiming it counts once.
        self.assertEqual(self.g_l1.grau('A'), 5)
        self.assertEqual(self.g_l2.grau('B'), 4)
        self.assertEqual(self.g_l4.grau('D'), 2)

    def test_ha_paralelas(self):
        """ha_paralelas() is True exactly when the graph has parallel edges."""
        self.assertTrue(self.g_p.ha_paralelas())
        self.assertFalse(self.g_p_sem_paralelas.ha_paralelas())
        self.assertFalse(self.g_c.ha_paralelas())
        self.assertFalse(self.g_c2.ha_paralelas())
        self.assertFalse(self.g_c3.ha_paralelas())
        self.assertTrue(self.g_l1.ha_paralelas())

    def test_arestas_sobre_vertice(self):
        """Edge labels incident to a vertex; unknown vertices raise."""
        self.assertEqual(set(self.g_p.arestas_sobre_vertice('J')), set(['a1']))
        self.assertEqual(set(self.g_p.arestas_sobre_vertice('C')), set(['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7']))
        self.assertEqual(set(self.g_p.arestas_sobre_vertice('M')), set(['a7', 'a8']))
        self.assertEqual(set(self.g_l2.arestas_sobre_vertice('B')), set(['a1', 'a2', 'a3']))
        self.assertEqual(set(self.g_d.arestas_sobre_vertice('C')), set())
        self.assertEqual(set(self.g_d.arestas_sobre_vertice('A')), set(['asd']))
        with self.assertRaises(VerticeInvalidoException):
            self.g_p.arestas_sobre_vertice('A')

    def test_eh_completo(self):
        """eh_completo() is True only for complete graphs (loops disqualify)."""
        self.assertFalse(self.g_p.eh_completo())
        self.assertFalse((self.g_p_sem_paralelas.eh_completo()))
        self.assertTrue((self.g_c.eh_completo()))
        self.assertTrue((self.g_c2.eh_completo()))
        self.assertTrue((self.g_c3.eh_completo()))
        self.assertFalse((self.g_l1.eh_completo()))
        self.assertFalse((self.g_l2.eh_completo()))
        self.assertFalse((self.g_l3.eh_completo()))
        self.assertFalse((self.g_l4.eh_completo()))
        self.assertFalse((self.g_l5.eh_completo()))
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
from meu_grafo_matriz_adjacencia_nao_dir import *
from bibgrafo.grafo_exceptions import *
class TestGrafo(unittest.TestCase):
def setUp(self):
    """Build the fixture graphs used by every test."""
    # Graph of Paraíba cities (with parallel edges).
    self.g_p = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
    self.g_p.adicionaAresta('a1', 'J', 'C')
    self.g_p.adicionaAresta('a2', 'C', 'E')
    self.g_p.adicionaAresta('a3', 'C', 'E')
    self.g_p.adicionaAresta('a4', 'P', 'C')
    self.g_p.adicionaAresta('a5', 'P', 'C')
    self.g_p.adicionaAresta('a6', 'T', 'C')
    self.g_p.adicionaAresta('a7', 'M', 'C')
    self.g_p.adicionaAresta('a8', 'M', 'T')
    self.g_p.adicionaAresta('a9', 'T', 'Z')
    # Paraíba graph without parallel edges.
    self.g_p_sem_paralelas = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
    self.g_p_sem_paralelas.adicionaAresta('a1', 'J', 'C')
    self.g_p_sem_paralelas.adicionaAresta('a2', 'C', 'E')
    self.g_p_sem_paralelas.adicionaAresta('a3', 'P', 'C')
    self.g_p_sem_paralelas.adicionaAresta('a4', 'T', 'C')
    self.g_p_sem_paralelas.adicionaAresta('a5', 'M', 'C')
    self.g_p_sem_paralelas.adicionaAresta('a6', 'M', 'T')
    self.g_p_sem_paralelas.adicionaAresta('a7', 'T', 'Z')
    # Complete graphs (K4, K2, K1).
    self.g_c = MeuGrafo(['J', 'C', 'E', 'P'])
    self.g_c.adicionaAresta('a1','J','C')
    self.g_c.adicionaAresta('a2', 'J', 'E')
    self.g_c.adicionaAresta('a3', 'J', 'P')
    self.g_c.adicionaAresta('a4', 'E', 'C')
    self.g_c.adicionaAresta('a5', 'P', 'C')
    self.g_c.adicionaAresta('a6', 'P', 'E')
    self.g_c2 = MeuGrafo(['Nina', 'Maria'])
    self.g_c2.adicionaAresta('amiga', 'Nina', 'Maria')
    self.g_c3 = MeuGrafo(['J'])
    # Graphs with self-loops.
    self.g_l1 = MeuGrafo(['A', 'B', 'C', 'D'])
    self.g_l1.adicionaAresta('a1', 'A', 'A')
    self.g_l1.adicionaAresta('a2', 'A', 'B')
    self.g_l1.adicionaAresta('a3', 'A', 'A')
    self.g_l2 = MeuGrafo(['A', 'B', 'C', 'D'])
    self.g_l2.adicionaAresta('a1', 'A', 'B')
    self.g_l2.adicionaAresta('a2', 'B', 'B')
    self.g_l2.adicionaAresta('a3', 'B', 'A')
    self.g_l3 = MeuGrafo(['A', 'B', 'C', 'D'])
    self.g_l3.adicionaAresta('a1', 'C', 'A')
    self.g_l3.adicionaAresta('a2', 'C', 'C')
    self.g_l3.adicionaAresta('a3', 'D', 'D')
    self.g_l3.adicionaAresta('a4', 'D', 'D')
    self.g_l4 = MeuGrafo(['D'])
    self.g_l4.adicionaAresta('a1', 'D', 'D')
    self.g_l5 = MeuGrafo(['C', 'D'])
    self.g_l5.adicionaAresta('a1', 'D', 'C')
    self.g_l5.adicionaAresta('a2', 'C', 'C')
    # Disconnected graph.
    self.g_d = MeuGrafo(['A', 'B', 'C', 'D'])
    self.g_d.adicionaAresta('asd', 'A', 'B')
    # Larger undirected graph.
    self.g_nd = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
    self.g_nd.adicionaAresta('a1', 'A', 'B')
    self.g_nd.adicionaAresta('a2', 'A', 'G')
    self.g_nd.adicionaAresta('a3', 'A', 'J')
    self.g_nd.adicionaAresta('a4', 'G', 'K')
    self.g_nd.adicionaAresta('a5', 'J', 'K')
    self.g_nd.adicionaAresta('a6', 'G', 'J')
    self.g_nd.adicionaAresta('a7', 'I', 'J')
    self.g_nd.adicionaAresta('a8', 'G', 'I')
    self.g_nd.adicionaAresta('a9', 'G', 'H')
    self.g_nd.adicionaAresta('a10', 'F', 'H')
    self.g_nd.adicionaAresta('a11', 'B', 'F')
    self.g_nd.adicionaAresta('a12', 'B', 'G')
    self.g_nd.adicionaAresta('a13', 'B', 'C')
    self.g_nd.adicionaAresta('a14', 'C', 'D')
    self.g_nd.adicionaAresta('a15', 'D', 'E')
    self.g_nd.adicionaAresta('a16', 'B', 'D')
    self.g_nd.adicionaAresta('a17', 'B', 'E')
def test_adiciona_aresta(self):
    """Valid edges are accepted; bad vertices, malformed labels and duplicate labels raise.

    Fix: the original wrapped two of the calls inside ``assertRaises`` in an
    extra ``assertTrue`` — inside the context manager the return value is
    never evaluated (the raise is the check), so the wrapper was dead code.
    """
    # A well-formed edge between existing vertices must be accepted.
    self.assertTrue(self.g_p.adicionaAresta('a10', 'J', 'C'))
    # Empty vertex name is invalid.
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('b1', '', 'C')
    # 'A' is not a vertex of g_p.
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('b1', 'A', 'C')
    # Label-only calls with missing/malformed vertices are invalid.
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('')
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('aa-bb')
    # 'V' is not a vertex of g_p.
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('x', 'J', 'V')
    # Re-using an existing edge label must be rejected.
    with self.assertRaises(ArestaInvalidaException):
        self.g_p.adicionaAresta('a1', 'J', 'C')
def test_vertices_nao_adjacentes(self):
    """vertices_nao_adjacentes() lists every unordered vertex pair with no edge, as 'X-Y' strings."""
    # Parallel edges do not affect adjacency: g_p and g_p_sem_paralelas agree.
    self.assertEqual(self.g_p.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T', 'J-Z',
                                                          'C-Z', 'E-P', 'E-M', 'E-T', 'E-Z', 'P-M',
                                                          'P-T', 'P-Z', 'M-Z'])
    self.assertEqual(self.g_p_sem_paralelas.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T',
                                                                        'J-Z', 'C-Z', 'E-P', 'E-M',
                                                                        'E-T', 'E-Z', 'P-M', 'P-T',
                                                                        'P-Z', 'M-Z'])
    # A complete graph has no non-adjacent pairs.
    self.assertEqual(self.g_c.vertices_nao_adjacentes(), [])
    # Loops do not make a vertex adjacent to the others.
    self.assertEqual(self.g_l1.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
    self.assertEqual(self.g_l2.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
    self.assertEqual(self.g_l3.vertices_nao_adjacentes(), ['A-B', 'A-D', 'B-C', 'B-D', 'C-D'])
    self.assertEqual(self.g_nd.vertices_nao_adjacentes(), ['A-C', 'A-D', 'A-E', 'A-F', 'A-H', 'A-I',
                                                           'A-K', 'B-H', 'B-I', 'B-J', 'B-K', 'C-E',
                                                           'C-F', 'C-G', 'C-H', 'C-I', 'C-J', 'C-K',
                                                           'D-F', 'D-G', 'D-H', 'D-I', 'D-J', 'D-K',
                                                           'E-F', 'E-G', 'E-H', 'E-I', 'E-J', 'E-K',
                                                           'F-G', 'F-I', 'F-J', 'F-K', 'H-I', 'H-J',
                                                           'H-K', 'I-K'])
def test_ha_laco(self):
    """ha_laco() detects self-loop edges (an edge from a vertex to itself)."""
    # Loop-free fixtures.
    self.assertFalse(self.g_p.ha_laco())
    self.assertFalse(self.g_p_sem_paralelas.ha_laco())
    self.assertFalse(self.g_c2.ha_laco())
    # Every g_l* fixture contains at least one loop.
    self.assertTrue(self.g_l1.ha_laco())
    self.assertTrue(self.g_l2.ha_laco())
    self.assertTrue(self.g_l3.ha_laco())
    self.assertTrue(self.g_l4.ha_laco())
    self.assertTrue(self.g_l5.ha_laco())
def test_grau(self):
    """grau(v) returns the degree of vertex v; unknown vertices raise VerticeInvalidoException."""
    # Paraíba graph (g_p).
    self.assertEqual(self.g_p.grau('J'), 1)
    self.assertEqual(self.g_p.grau('C'), 7)
    self.assertEqual(self.g_p.grau('E'), 2)
    self.assertEqual(self.g_p.grau('P'), 2)
    self.assertEqual(self.g_p.grau('M'), 2)
    self.assertEqual(self.g_p.grau('T'), 3)
    self.assertEqual(self.g_p.grau('Z'), 1)
    # 'G' is not a vertex of g_p.
    with self.assertRaises(VerticeInvalidoException):
        self.assertEqual(self.g_p.grau('G'), 5)
    # Disconnected graph: isolated vertices have degree 0.
    self.assertEqual(self.g_d.grau('A'), 1)
    self.assertEqual(self.g_d.grau('C'), 0)
    self.assertNotEqual(self.g_d.grau('D'), 2)
    # Complete graphs (K4): every vertex has degree 3.
    self.assertEqual(self.g_c.grau('J'), 3)
    self.assertEqual(self.g_c.grau('C'), 3)
    self.assertEqual(self.g_c.grau('E'), 3)
    self.assertEqual(self.g_c.grau('P'), 3)
    # Graphs with loops.  (Original note said "each loop counts only once per
    # vertex", but g_l4 — a single loop on D — expects degree 2, which implies
    # a loop contributes both endpoints; confirm against MeuGrafo.grau.)
    self.assertEqual(self.g_l1.grau('A'), 5)
    self.assertEqual(self.g_l2.grau('B'), 4)
    self.assertEqual(self.g_l4.grau('D'), 2)
def test_ha_paralelas(self):
    """ha_paralelas() detects parallel edges (two edges sharing both endpoints)."""
    self.assertTrue(self.g_p.ha_paralelas())
    self.assertFalse(self.g_p_sem_paralelas.ha_paralelas())
    self.assertFalse(self.g_c.ha_paralelas())
    self.assertFalse(self.g_c2.ha_paralelas())
    self.assertFalse(self.g_c3.ha_paralelas())
    # g_l1 has two loops on A (a1 and a3), which count as parallel edges.
    self.assertTrue(self.g_l1.ha_paralelas())
def test_arestas_sobre_vertice(self):
    """arestas_sobre_vertice(v) returns the labels of all edges incident to v (order-independent)."""
    self.assertEqual(set(self.g_p.arestas_sobre_vertice('J')), set(['a1']))
    self.assertEqual(set(self.g_p.arestas_sobre_vertice('C')), set(['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7']))
    self.assertEqual(set(self.g_p.arestas_sobre_vertice('M')), set(['a7', 'a8']))
    # Loops on B are included once by label.
    self.assertEqual(set(self.g_l2.arestas_sobre_vertice('B')), set(['a1', 'a2', 'a3']))
    # An isolated vertex has no incident edges.
    self.assertEqual(set(self.g_d.arestas_sobre_vertice('C')), set())
    self.assertEqual(set(self.g_d.arestas_sobre_vertice('A')), set(['asd']))
    # Unknown vertex raises.
    with self.assertRaises(VerticeInvalidoException):
        self.g_p.arestas_sobre_vertice('A')
def test_eh_completo(self):
    """eh_completo() is true only for complete graphs; loops/missing pairs disqualify."""
    # Graphs with non-adjacent vertex pairs are not complete.
    self.assertFalse(self.g_p.eh_completo())
    self.assertFalse(self.g_p_sem_paralelas.eh_completo())
    # Complete graphs, including the two-vertex and trivial one-vertex cases.
    self.assertTrue(self.g_c.eh_completo())
    self.assertTrue(self.g_c2.eh_completo())
    self.assertTrue(self.g_c3.eh_completo())
    # Loop-bearing fixtures are never complete.
    self.assertFalse(self.g_l1.eh_completo())
    self.assertFalse(self.g_l2.eh_completo())
    self.assertFalse(self.g_l3.eh_completo())
    self.assertFalse(self.g_l4.eh_completo())
    self.assertFalse(self.g_l5.eh_completo())
if __name__ == '__main__':
unittest.main() | 0.354433 | 0.302468 |
__author__ = 'yangjian'
"""
"""
import copy
import time
import pandas as pd
from hypernets.conf import configure, Configurable, String, Int as cfg_int
from hypernets.core import TrialHistory, Trial, EarlyStoppingError
from hypernets.core.ops import Identity, HyperInput
from hypernets.core.search_space import HyperSpace, ParameterSpace
from hypernets.searchers import EvolutionSearcher, RandomSearcher, MCTSSearcher, GridSearcher, get_searcher_cls, \
Searcher
from hypernets.utils import logging
from hypernets.utils.common import isnotebook
from IPython.display import clear_output, display, update_display
logger = logging.get_logger(__name__)
@configure()
class ParamSearchCfg(Configurable):
    # Hypernets-configurable settings for parameter-search runs.
    work_dir = String(help='storage directory path to store running data.').tag(config=True)
    # Upper bound on re-sampling attempts when the searcher keeps producing duplicates.
    trial_retry_limit = cfg_int(1000, min=1, help='maximum retry number to run trial.').tag(config=True)
def func_space(func):
    """Build a HyperSpace whose parameters mirror *func*'s ParameterSpace defaults.

    NOTE(review): pairing ``co_varnames`` with ``__defaults__`` assumes every
    argument of *func* carries a default value — confirm with callers.
    """
    search_space = HyperSpace()
    with search_space.as_default():
        # Keep only the default values that are ParameterSpace instances,
        # copying each so the function's own defaults stay untouched.
        hyper_params = {}
        for arg_name, default in zip(func.__code__.co_varnames, func.__defaults__):
            if isinstance(default, ParameterSpace):
                hyper_params[arg_name] = copy.copy(default)
        for param in hyper_params.values():
            param.attach_to_space(search_space, param.name)
        space_input = HyperInput()
        out_node = Identity(**hyper_params)(space_input)
        search_space.set_outputs([out_node])
    return search_space
def build_searcher(cls, func, optimize_direction='min'):
    """Instantiate a searcher of type *cls* (name or class) over func_space(func).

    Fixes: the original chain had three identical branches (Random, Grid and
    the fallback) and constructed ``MCTSSearcher`` by its literal name rather
    than the resolved ``cls``; collapsed to the two special cases plus one
    default constructor call.
    """
    cls = get_searcher_cls(cls)
    search_space_fn = lambda: func_space(func)
    if cls == EvolutionSearcher:
        s = cls(search_space_fn, optimize_direction=optimize_direction,
                population_size=30, sample_size=10, candidates_size=10,
                regularized=True, use_meta_learner=True)
    elif cls == MCTSSearcher:
        s = cls(search_space_fn, optimize_direction=optimize_direction, max_node_space=10)
    else:
        # RandomSearcher, GridSearcher and any other searcher class all take
        # the same two-argument constructor.
        s = cls(search_space_fn, optimize_direction=optimize_direction)
    return s
def search_params(func, searcher='Grid', max_trials=100, optimize_direction='min', clear_logs=False, **func_kwargs):
    """Tune the ParameterSpace-valued defaults of *func* with a hypernets searcher.

    Runs up to *max_trials* trials, calling ``func`` once per sampled
    configuration, and returns the resulting ``TrialHistory``.  Extra
    ``func_kwargs`` are forwarded to every call (sampled values override them).
    """
    if not isinstance(searcher, Searcher):
        # Accept a searcher name or class and build a concrete instance.
        searcher = build_searcher(searcher, func, optimize_direction)
    retry_limit = ParamSearchCfg.trial_retry_limit
    trial_no = 1
    retry_counter = 0
    history = TrialHistory(optimize_direction)
    # IPython display handles so notebook output is updated in place.
    current_trial_display_id = None
    trials_display_id = None
    while trial_no <= max_trials:
        try:
            space_sample = searcher.sample()
            if isnotebook():
                if clear_logs:
                    clear_output()
                    # Displays were cleared, so the old handles are stale.
                    current_trial_display_id = None
                    trials_display_id = None
                if current_trial_display_id is None:
                    display({'text/markdown': '#### Current Trial:'}, raw=True, include=['text/markdown'])
                    handle = display(space_sample, display_id=True)
                    if handle is not None:
                        current_trial_display_id = handle.display_id
                else:
                    update_display(space_sample, display_id=current_trial_display_id)
                # Leaderboard of the best 100 trials so far.
                df_best_trials = pd.DataFrame([
                    (t.trial_no, t.reward, t.elapsed, t.space_sample.vectors) for t in history.get_top(100)],
                    columns=['Trial No.', 'Reward', 'Elapsed', 'Space Vector'])
                if trials_display_id is None:
                    display({'text/markdown': '#### Top trials:'}, raw=True, include=['text/markdown'])
                    handle = display(df_best_trials, display_id=True)
                    if handle is not None:
                        trials_display_id = handle.display_id
                else:
                    update_display(df_best_trials, display_id=trials_display_id)
            if history.is_existed(space_sample):
                # Duplicate sample: skip it.
                # NOTE(review): the ``finally`` below also runs on ``continue``,
                # so retry_counter is reset every iteration and this limit can
                # never actually trigger; duplicates also consume trial numbers.
                if retry_counter >= retry_limit:
                    logger.info(f'Unable to take valid sample and exceed the retry limit {retry_limit}.')
                    break
                retry_counter += 1
                continue
            # Map parameter aliases (last dotted segment) to sampled values.
            ps = space_sample.get_assigned_params()
            params = {p.alias.split('.')[-1]: p.value for p in ps}
            func_params = func_kwargs.copy()
            func_params.update(params)
            trial_start = time.time()
            last_reward = func(**func_params)
            elapsed = time.time() - trial_start
            trial = Trial(space_sample, trial_no, last_reward, elapsed)
            # NOTE(review): a reward of exactly 0 is treated as failure and
            # silently dropped from history — confirm this is intended.
            if last_reward != 0:  # success
                improved = history.append(trial)
            if logger.is_info_enabled():
                best = history.get_best()
                msg = f'Trial {trial_no} done, reward: {trial.reward}, ' \
                      f'best_trial_no:{best.trial_no}, best_reward:{best.reward}\n'
                logger.info(msg)
        except EarlyStoppingError:
            break
        # TODO: early stopping
        except Exception as e:
            import sys
            import traceback
            msg = f'{">" * 20} Trial {trial_no} failed! {"<" * 20}\n' \
                  + f'{e.__class__.__name__}: {e}\n' \
                  + traceback.format_exc() \
                  + '*' * 50
            logger.error(msg)
        finally:
            # Runs on success, failure AND ``continue``.
            trial_no += 1
            retry_counter = 0
return history | hypernets/utils/param_tuning.py | __author__ = 'yangjian'
"""
"""
import copy
import time
import pandas as pd
from hypernets.conf import configure, Configurable, String, Int as cfg_int
from hypernets.core import TrialHistory, Trial, EarlyStoppingError
from hypernets.core.ops import Identity, HyperInput
from hypernets.core.search_space import HyperSpace, ParameterSpace
from hypernets.searchers import EvolutionSearcher, RandomSearcher, MCTSSearcher, GridSearcher, get_searcher_cls, \
Searcher
from hypernets.utils import logging
from hypernets.utils.common import isnotebook
from IPython.display import clear_output, display, update_display
logger = logging.get_logger(__name__)
@configure()
class ParamSearchCfg(Configurable):
work_dir = String(help='storage directory path to store running data.').tag(config=True)
trial_retry_limit = cfg_int(1000, min=1, help='maximum retry number to run trial.').tag(config=True)
def func_space(func):
space = HyperSpace()
with space.as_default():
params = {name: copy.copy(v) for name, v in zip(func.__code__.co_varnames, func.__defaults__) if
isinstance(v, ParameterSpace)}
for _, v in params.items():
v.attach_to_space(space, v.name)
input = HyperInput()
id1 = Identity(**params)(input)
space.set_outputs([id1])
return space
def build_searcher(cls, func, optimize_direction='min'):
    """Instantiate a searcher of type *cls* (name or class) over func_space(func).

    Fixes: the original chain had three identical branches (Random, Grid and
    the fallback) and constructed ``MCTSSearcher`` by its literal name rather
    than the resolved ``cls``; collapsed to the two special cases plus one
    default constructor call.
    """
    cls = get_searcher_cls(cls)
    search_space_fn = lambda: func_space(func)
    if cls == EvolutionSearcher:
        s = cls(search_space_fn, optimize_direction=optimize_direction,
                population_size=30, sample_size=10, candidates_size=10,
                regularized=True, use_meta_learner=True)
    elif cls == MCTSSearcher:
        s = cls(search_space_fn, optimize_direction=optimize_direction, max_node_space=10)
    else:
        # RandomSearcher, GridSearcher and any other searcher class all take
        # the same two-argument constructor.
        s = cls(search_space_fn, optimize_direction=optimize_direction)
    return s
def search_params(func, searcher='Grid', max_trials=100, optimize_direction='min', clear_logs=False, **func_kwargs):
if not isinstance(searcher, Searcher):
searcher = build_searcher(searcher, func, optimize_direction)
retry_limit = ParamSearchCfg.trial_retry_limit
trial_no = 1
retry_counter = 0
history = TrialHistory(optimize_direction)
current_trial_display_id = None
trials_display_id = None
while trial_no <= max_trials:
try:
space_sample = searcher.sample()
if isnotebook():
if clear_logs:
clear_output()
current_trial_display_id = None
trials_display_id = None
if current_trial_display_id is None:
display({'text/markdown': '#### Current Trial:'}, raw=True, include=['text/markdown'])
handle = display(space_sample, display_id=True)
if handle is not None:
current_trial_display_id = handle.display_id
else:
update_display(space_sample, display_id=current_trial_display_id)
df_best_trials = pd.DataFrame([
(t.trial_no, t.reward, t.elapsed, t.space_sample.vectors) for t in history.get_top(100)],
columns=['Trial No.', 'Reward', 'Elapsed', 'Space Vector'])
if trials_display_id is None:
display({'text/markdown': '#### Top trials:'}, raw=True, include=['text/markdown'])
handle = display(df_best_trials, display_id=True)
if handle is not None:
trials_display_id = handle.display_id
else:
update_display(df_best_trials, display_id=trials_display_id)
if history.is_existed(space_sample):
if retry_counter >= retry_limit:
logger.info(f'Unable to take valid sample and exceed the retry limit {retry_limit}.')
break
retry_counter += 1
continue
ps = space_sample.get_assigned_params()
params = {p.alias.split('.')[-1]: p.value for p in ps}
func_params = func_kwargs.copy()
func_params.update(params)
trial_start = time.time()
last_reward = func(**func_params)
elapsed = time.time() - trial_start
trial = Trial(space_sample, trial_no, last_reward, elapsed)
if last_reward != 0: # success
improved = history.append(trial)
if logger.is_info_enabled():
best = history.get_best()
msg = f'Trial {trial_no} done, reward: {trial.reward}, ' \
f'best_trial_no:{best.trial_no}, best_reward:{best.reward}\n'
logger.info(msg)
except EarlyStoppingError:
break
# TODO: early stopping
except Exception as e:
import sys
import traceback
msg = f'{">" * 20} Trial {trial_no} failed! {"<" * 20}\n' \
+ f'{e.__class__.__name__}: {e}\n' \
+ traceback.format_exc() \
+ '*' * 50
logger.error(msg)
finally:
trial_no += 1
retry_counter = 0
return history | 0.512205 | 0.145176 |
import os
from typing import Any, Dict, Optional
from flyteidl.core import literals_pb2 as _literals_pb2
from flytekit.clients.helpers import iterate_node_executions as _iterate_node_executions
from flytekit.common import utils as _common_utils
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.mixins import artifact as _artifact_mixin
from flytekit.core.context_manager import FlyteContextManager
from flytekit.core.type_engine import TypeEngine
from flytekit.engines.flyte import engine as _flyte_engine
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.models import literals as _literal_models
from flytekit.models.admin import task_execution as _task_execution_model
from flytekit.models.core import execution as _execution_models
class FlyteTaskExecution(_task_execution_model.TaskExecution, _artifact_mixin.ExecutionArtifact):
def __init__(self, *args, **kwargs):
    super(FlyteTaskExecution, self).__init__(*args, **kwargs)
    # Lazily-populated caches for the converted input/output literal maps.
    self._inputs = None
    self._outputs = None
@property
def is_complete(self) -> bool:
    """Whether or not the execution has reached a terminal phase."""
    terminal_phases = (
        _execution_models.TaskExecutionPhase.ABORTED,
        _execution_models.TaskExecutionPhase.FAILED,
        _execution_models.TaskExecutionPhase.SUCCEEDED,
    )
    return self.closure.phase in terminal_phases
@property
def inputs(self) -> Dict[str, Any]:
    """
    Returns the inputs of the task execution in the standard Python format that is produced by
    the type engine.  The result is fetched once and cached on the instance.
    """
    from flytekit.remote.remote import FlyteRemote
    if self._inputs is None:
        client = _flyte_engine.get_client()
        remote = FlyteRemote()
        execution_data = client.get_task_execution_data(self.id)
        # Inputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
        input_map = _literal_models.LiteralMap({})
        if bool(execution_data.full_inputs.literals):
            input_map = execution_data.full_inputs
        elif execution_data.inputs.bytes > 0:
            # Download the serialized LiteralMap blob and parse it from disk.
            with _common_utils.AutoDeletingTempDir() as tmp_dir:
                tmp_name = os.path.join(tmp_dir.name, "inputs.pb")
                _data_proxy.Data.get_data(execution_data.inputs.url, tmp_name)
                input_map = _literal_models.LiteralMap.from_flyte_idl(
                    _common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
                )
        # The task's interface supplies the Python types used for conversion.
        task = remote.fetch_task(
            self.id.task_id.project, self.id.task_id.domain, self.id.task_id.name, self.id.task_id.version
        )
        self._inputs = TypeEngine.literal_map_to_kwargs(
            ctx=FlyteContextManager.current_context(),
            lm=input_map,
            python_types=TypeEngine.guess_python_types(task.interface.inputs),
        )
    return self._inputs
@property
def outputs(self) -> Dict[str, Any]:
    """
    Returns the outputs of the task execution, if available, in the standard Python format that is produced by
    the type engine.  The result is fetched once and cached on the instance.
    :raises: ``FlyteAssertion`` error if execution is in progress or execution ended in error.
    """
    from flytekit.remote.remote import FlyteRemote
    if not self.is_complete:
        # Fixed typo in the user-facing message ("what" -> "wait").
        raise _user_exceptions.FlyteAssertion(
            "Please wait until the task execution has completed before requesting the outputs."
        )
    if self.error:
        raise _user_exceptions.FlyteAssertion("Outputs could not be found because the execution ended in failure.")
    if self._outputs is None:
        client = _flyte_engine.get_client()
        remote = FlyteRemote()
        execution_data = client.get_task_execution_data(self.id)
        # Outputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
        output_map = _literal_models.LiteralMap({})
        if bool(execution_data.full_outputs.literals):
            output_map = execution_data.full_outputs
        elif execution_data.outputs.bytes > 0:
            # Download the serialized LiteralMap blob and parse it from disk.
            with _common_utils.AutoDeletingTempDir() as t:
                tmp_name = os.path.join(t.name, "outputs.pb")
                _data_proxy.Data.get_data(execution_data.outputs.url, tmp_name)
                output_map = _literal_models.LiteralMap.from_flyte_idl(
                    _common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
                )
        # The task's interface supplies the Python types used for conversion.
        task = remote.fetch_task(
            self.id.task_id.project, self.id.task_id.domain, self.id.task_id.name, self.id.task_id.version
        )
        self._outputs = TypeEngine.literal_map_to_kwargs(
            ctx=FlyteContextManager.current_context(),
            lm=output_map,
            python_types=TypeEngine.guess_python_types(task.interface.outputs),
        )
    return self._outputs
@property
def error(self) -> Optional[_execution_models.ExecutionError]:
    """
    If execution is in progress, raise an exception. Otherwise, return None if no error was present upon
    reaching completion.
    """
    if not self.is_complete:
        # Fixed typo in the user-facing message ("what" -> "wait").
        raise _user_exceptions.FlyteAssertion(
            "Please wait until the task execution has completed before requesting error information."
        )
    return self.closure.error
def get_child_executions(self, filters=None):
    """Return this parent execution's node executions, keyed by node id.

    :param filters: optional admin-API filters forwarded to the listing call.
    :raises FlyteAssertion: if this execution is not marked ``is_parent``.
    """
    from flytekit.remote import nodes as _nodes
    if not self.is_parent:
        raise _user_exceptions.FlyteAssertion("Only task executions marked with 'is_parent' have child executions.")
    client = _flyte_engine.get_client()
    models = {
        v.id.node_id: v
        for v in _iterate_node_executions(client, task_execution_identifier=self.id, filters=filters)
    }
    return {k: _nodes.FlyteNodeExecution.promote_from_model(v) for k, v in models.items()}
@classmethod
def promote_from_model(cls, base_model: _task_execution_model.TaskExecution) -> "FlyteTaskExecution":
    """Wrap a plain admin TaskExecution model in this richer remote type."""
    init_kwargs = dict(
        closure=base_model.closure,
        id=base_model.id,
        input_uri=base_model.input_uri,
        is_parent=base_model.is_parent,
    )
    return cls(**init_kwargs)
def sync(self):
    """
    Syncs the state of the underlying execution artifact with the state observed by the platform.
    """
    self._sync_closure()

def _sync_closure(self):
    """
    Refetches this execution's closure from the admin client, replacing the cached one.
    """
self._closure = _flyte_engine.get_client().get_task_execution(self.id).closure | flytekit/remote/tasks/executions.py | import os
from typing import Any, Dict, Optional
from flyteidl.core import literals_pb2 as _literals_pb2
from flytekit.clients.helpers import iterate_node_executions as _iterate_node_executions
from flytekit.common import utils as _common_utils
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.mixins import artifact as _artifact_mixin
from flytekit.core.context_manager import FlyteContextManager
from flytekit.core.type_engine import TypeEngine
from flytekit.engines.flyte import engine as _flyte_engine
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.models import literals as _literal_models
from flytekit.models.admin import task_execution as _task_execution_model
from flytekit.models.core import execution as _execution_models
class FlyteTaskExecution(_task_execution_model.TaskExecution, _artifact_mixin.ExecutionArtifact):
def __init__(self, *args, **kwargs):
super(FlyteTaskExecution, self).__init__(*args, **kwargs)
self._inputs = None
self._outputs = None
@property
def is_complete(self) -> bool:
"""Whether or not the execution is complete."""
return self.closure.phase in {
_execution_models.TaskExecutionPhase.ABORTED,
_execution_models.TaskExecutionPhase.FAILED,
_execution_models.TaskExecutionPhase.SUCCEEDED,
}
@property
def inputs(self) -> Dict[str, Any]:
"""
Returns the inputs of the task execution in the standard Python format that is produced by
the type engine.
"""
from flytekit.remote.remote import FlyteRemote
if self._inputs is None:
client = _flyte_engine.get_client()
remote = FlyteRemote()
execution_data = client.get_task_execution_data(self.id)
# Inputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
input_map = _literal_models.LiteralMap({})
if bool(execution_data.full_inputs.literals):
input_map = execution_data.full_inputs
elif execution_data.inputs.bytes > 0:
with _common_utils.AutoDeletingTempDir() as tmp_dir:
tmp_name = os.path.join(tmp_dir.name, "inputs.pb")
_data_proxy.Data.get_data(execution_data.inputs.url, tmp_name)
input_map = _literal_models.LiteralMap.from_flyte_idl(
_common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
)
task = remote.fetch_task(
self.id.task_id.project, self.id.task_id.domain, self.id.task_id.name, self.id.task_id.version
)
self._inputs = TypeEngine.literal_map_to_kwargs(
ctx=FlyteContextManager.current_context(),
lm=input_map,
python_types=TypeEngine.guess_python_types(task.interface.inputs),
)
return self._inputs
@property
def outputs(self) -> Dict[str, Any]:
    """
    Returns the outputs of the task execution, if available, in the standard Python format that is produced by
    the type engine.  The result is fetched once and cached on the instance.
    :raises: ``FlyteAssertion`` error if execution is in progress or execution ended in error.
    """
    from flytekit.remote.remote import FlyteRemote
    if not self.is_complete:
        # Fixed typo in the user-facing message ("what" -> "wait").
        raise _user_exceptions.FlyteAssertion(
            "Please wait until the task execution has completed before requesting the outputs."
        )
    if self.error:
        raise _user_exceptions.FlyteAssertion("Outputs could not be found because the execution ended in failure.")
    if self._outputs is None:
        client = _flyte_engine.get_client()
        remote = FlyteRemote()
        execution_data = client.get_task_execution_data(self.id)
        # Outputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
        output_map = _literal_models.LiteralMap({})
        if bool(execution_data.full_outputs.literals):
            output_map = execution_data.full_outputs
        elif execution_data.outputs.bytes > 0:
            # Download the serialized LiteralMap blob and parse it from disk.
            with _common_utils.AutoDeletingTempDir() as t:
                tmp_name = os.path.join(t.name, "outputs.pb")
                _data_proxy.Data.get_data(execution_data.outputs.url, tmp_name)
                output_map = _literal_models.LiteralMap.from_flyte_idl(
                    _common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
                )
        # The task's interface supplies the Python types used for conversion.
        task = remote.fetch_task(
            self.id.task_id.project, self.id.task_id.domain, self.id.task_id.name, self.id.task_id.version
        )
        self._outputs = TypeEngine.literal_map_to_kwargs(
            ctx=FlyteContextManager.current_context(),
            lm=output_map,
            python_types=TypeEngine.guess_python_types(task.interface.outputs),
        )
    return self._outputs
@property
def error(self) -> Optional[_execution_models.ExecutionError]:
    """
    If execution is in progress, raise an exception. Otherwise, return None if no error was present upon
    reaching completion.
    """
    if not self.is_complete:
        # Fixed typo in the user-facing message ("what" -> "wait").
        raise _user_exceptions.FlyteAssertion(
            "Please wait until the task execution has completed before requesting error information."
        )
    return self.closure.error
def get_child_executions(self, filters=None):
from flytekit.remote import nodes as _nodes
if not self.is_parent:
raise _user_exceptions.FlyteAssertion("Only task executions marked with 'is_parent' have child executions.")
client = _flyte_engine.get_client()
models = {
v.id.node_id: v
for v in _iterate_node_executions(client, task_execution_identifier=self.id, filters=filters)
}
return {k: _nodes.FlyteNodeExecution.promote_from_model(v) for k, v in models.items()}
@classmethod
def promote_from_model(cls, base_model: _task_execution_model.TaskExecution) -> "FlyteTaskExecution":
return cls(
closure=base_model.closure,
id=base_model.id,
input_uri=base_model.input_uri,
is_parent=base_model.is_parent,
)
def sync(self):
"""
Syncs the state of the underlying execution artifact with the state observed by the platform.
"""
self._sync_closure()
def _sync_closure(self):
"""
Syncs the closure of the underlying execution artifact with the state observed by the platform.
"""
self._closure = _flyte_engine.get_client().get_task_execution(self.id).closure | 0.779741 | 0.119974 |
import pickle
import numpy as np
from typing import TextIO
from tight_binding_base_connection import DataBaseConnection
class DataManager(object):
def __init__(self, helpers, settings):
    # helpers: project utility object (logging, id keys, disk search).
    # settings: dict-like configuration ("data_source", "db table", ...).
    self.__helpers = helpers
    self.__settings = settings
    self.__dbconnection = DataBaseConnection(settings, helpers)
def __save_results_into_database(self, data):
    """Build the parameterized INSERT query (and its data tuple) for one result row.

    Fix: the column names contain spaces and a hyphen ("Lattice
    identification", "Slater-Koster matrix", ...), which is a SQL syntax
    error when unquoted — backtick-quote them (the ``%s`` placeholders
    indicate a MySQL-style driver).
    """
    table = self.__settings["db table"]
    data_identification = self.__settings["data_identification"]
    save_query = """INSERT INTO {} (`Lattice identification`,
                                    `Eigen energy`,
                                    `Wave function`,
                                    `Slater-Koster matrix`,
                                    `DOS`,
                                    `Projected DOS`,
                                    `Configuration`)
                    VALUES ({}, %s, %s, %s, %s, %s, %s) """.format(table, data_identification)
    # The six %s placeholders are filled from *data* by the DB driver.
    query = (save_query, data)
    return query
def __download_required_data(self):
    """Build the SELECT query string for fetching stored results.

    NOTE(review): ``self.__helpers["identifier"]`` subscripts the helpers
    object, while every other use calls attributes/methods on it — confirm
    the helpers API supports ``__getitem__``.  The query is assembled with
    ``format`` from settings values, so those values must be trusted
    (SQL-injection risk otherwise).
    """
    select = self.__settings["select"]
    table = self.__settings["db table"]
    identifier = self.__helpers["identifier"]
    if identifier is None:
        # No filter clause: fetch every row.
        load_query = """SELECT {} from {}""".format(select, table)
    else:
        load_query = """SELECT {} from {} WHERE {}""".format(select, table, identifier)
    return load_query
@staticmethod
def __save_as_pickle(matrix):
    """Serialize *matrix* to the local file ``interaction_matrix`` and return the pickled bytes.

    Fix: ``pickle.dump`` returns ``None``, so the original always returned
    ``None`` — which was then handed to the database layer as the payload.
    Serialize once with ``pickle.dumps`` and write those bytes to disk.
    """
    pickled_matrix = pickle.dumps(matrix, pickle.HIGHEST_PROTOCOL)
    with open("interaction_matrix", 'wb') as outfile:
        outfile.write(pickled_matrix)
    return pickled_matrix
@staticmethod
def __load_as_pickle(pickled_matrix):
    """Deserialize and return the object stored in the pickle file *pickled_matrix*."""
    with open(pickled_matrix, 'rb') as source:
        return pickle.load(source)
def __save_numerical_results(self, title, eigen_energies: np.ndarray) -> TextIO:
    """
    Write one numeric value per line into ``<id key>_eigenvalues.txt``.
    Args:
        title: base name passed to the helpers' id-key generator.
        eigen_energies: iterable of numeric results (eigen energies, DOS, ...).
    Returns: the (already closed) file object that was written.

    NOTE(review): ``self.__helpers.__directory`` is name-mangled inside this
    class to ``self.__helpers._DataManager__directory`` and will raise
    AttributeError unless the helpers object defines exactly that name — a
    public ``directory`` attribute is probably intended; confirm.
    """
    saving_key = self.__helpers.generate_id_key(title)
    with open(self.__helpers.__directory + "/" + saving_key + '_eigenvalues.txt', "w") as file:
        for eigen_energy in eigen_energies:
            file.write(str(eigen_energy) + "\n")
    return file
def __save_data_locally(self, energies, wave_functions, interaction_matrix, density_of_states,
                        projected_density_of_states, configuration):
    """Write every result artifact to disk and collect the pieces for a DB row.

    Returns a 6-tuple ordered to match the INSERT statement's placeholders.
    NOTE(review): confirm what payload types the database layer expects here —
    the text helpers return closed file objects, not file contents.
    """
    energy_file = self.__save_numerical_results("eigen_energies", energies)
    wave_functions_file = self.__save_as_pickle(wave_functions)
    interaction_matrix_file = self.__save_as_pickle(interaction_matrix)
    dos_file = self.__save_numerical_results("DOS", density_of_states)
    p_dos_file = self.__save_numerical_results("PDOS", projected_density_of_states)
    configuration_file = self.__helpers.save_all_params_to_file("parametrization_file", configuration)
    data_to_save = (energy_file, wave_functions_file,
                    interaction_matrix_file, dos_file, p_dos_file, configuration_file)
    return data_to_save
def save_data(self, energies, wave_functions, interaction_matrix,
              density_of_states, projected_density_of_states, configuration):
    """Persist all results to disk, and additionally to the database when configured."""
    data_source = self.__settings["data_source"]
    self.__helpers.save_log('[INFO]: Saving results locally \n')
    local_artifacts = self.__save_data_locally(energies,
                                               wave_functions,
                                               interaction_matrix,
                                               density_of_states,
                                               projected_density_of_states,
                                               configuration)
    # The database insert is opt-in via the "data_source" setting.
    if data_source == "data base":
        self.__helpers.save_log('[INFO]: Saving results into data base\n')
        insert_query = self.__save_results_into_database(local_artifacts)
        self.__dbconnection.execute_query(insert_query, "save")
    return
def load_data(self):
    """Load previously stored results, from the DB or from local files.

    The source is chosen by the "data_source" setting; anything other than
    "data base" falls back to searching the disk by the configured
    identification key.
    """
    self.__helpers.save_log('[INFO]: Loading results from data base\n')
    data_source = self.__settings["data_source"]
    if data_source == "data base":
        data = self.__download_required_data()
        required_data = self.__dbconnection.execute_query(data, "load")
    else:
        seeking_file = self.__settings["data_identification"]
        required_data = self.__helpers.search_data_on_disc(seeking_file)
return required_data | tightbinding_code/data_manager.py | import pickle
import numpy as np
from typing import TextIO
from tight_binding_base_connection import DataBaseConnection
class DataManager(object):
def __init__(self, helpers, settings):
self.__helpers = helpers
self.__settings = settings
self.__dbconnection = DataBaseConnection(settings, helpers)
def __save_results_into_database(self, data):
    """Build the parameterized INSERT query (and its data tuple) for one result row.

    Fix: the column names contain spaces and a hyphen ("Lattice
    identification", "Slater-Koster matrix", ...), which is a SQL syntax
    error when unquoted — backtick-quote them (the ``%s`` placeholders
    indicate a MySQL-style driver).
    """
    table = self.__settings["db table"]
    data_identification = self.__settings["data_identification"]
    save_query = """INSERT INTO {} (`Lattice identification`,
                                    `Eigen energy`,
                                    `Wave function`,
                                    `Slater-Koster matrix`,
                                    `DOS`,
                                    `Projected DOS`,
                                    `Configuration`)
                    VALUES ({}, %s, %s, %s, %s, %s, %s) """.format(table, data_identification)
    # The six %s placeholders are filled from *data* by the DB driver.
    query = (save_query, data)
    return query
def __download_required_data(self):
select = self.__settings["select"]
table = self.__settings["db table"]
identifier = self.__helpers["identifier"]
if identifier is None:
load_query = """SELECT {} from {}""".format(select, table)
else:
load_query = """SELECT {} from {} WHERE {}""".format(select, table, identifier)
return load_query
@staticmethod
def __save_as_pickle(matrix):
    """Serialize *matrix* to the local file ``interaction_matrix`` and return the pickled bytes.

    Fix: ``pickle.dump`` returns ``None``, so the original always returned
    ``None`` — which was then handed to the database layer as the payload.
    Serialize once with ``pickle.dumps`` and write those bytes to disk.
    """
    pickled_matrix = pickle.dumps(matrix, pickle.HIGHEST_PROTOCOL)
    with open("interaction_matrix", 'wb') as outfile:
        outfile.write(pickled_matrix)
    return pickled_matrix
@staticmethod
def __load_as_pickle(pickled_matrix):
with open(pickled_matrix, 'rb') as infile:
matrix = pickle.load(infile)
return matrix
def __save_numerical_results(self, title, eigen_energies: np.array) -> TextIO:
"""
Method saves numerical results - eigen energies - into txt file
Args:
title: name of file
eigen_energies: array of eigen energies calculated by diagonalization of interaction matrix.
Returns: None
"""
saving_key = self.__helpers.generate_id_key(title)
with open(self.__helpers.__directory + "/" + saving_key + '_eigenvalues.txt', "w") as file:
for eigen_energy in eigen_energies:
file.write(str(eigen_energy) + "\n")
return file
def __save_data_locally(self, energies, wave_functions, interaction_matrix, density_of_states,
projected_density_of_states, configuration):
energy_file = self.__save_numerical_results("eigen_energies", energies)
wave_functions_file = self.__save_as_pickle(wave_functions)
interaction_matrix_file = self.__save_as_pickle(interaction_matrix)
dos_file = self.__save_numerical_results("DOS", density_of_states)
p_dos_file = self.__save_numerical_results("PDOS", projected_density_of_states)
configuration_file = self.__helpers.save_all_params_to_file("parametrization_file", configuration)
data_to_save = (energy_file, wave_functions_file,
interaction_matrix_file, dos_file, p_dos_file, configuration_file)
return data_to_save
def save_data(self, energies, wave_functions, interaction_matrix,
density_of_states, projected_density_of_states, configuration):
data_source = self.__settings["data_source"]
self.__helpers.save_log('[INFO]: Saving results locally \n')
data_to_save = self.__save_data_locally(energies,
wave_functions,
interaction_matrix,
density_of_states,
projected_density_of_states,
configuration)
if data_source == "data base":
self.__helpers.save_log('[INFO]: Saving results into data base\n')
save_data_query = self.__save_results_into_database(data_to_save)
self.__dbconnection.execute_query(save_data_query, "save")
else:
pass
return
def load_data(self):
self.__helpers.save_log('[INFO]: Loading results from data base\n')
data_source = self.__settings["data_source"]
if data_source == "data base":
data = self.__download_required_data()
required_data = self.__dbconnection.execute_query(data, "load")
else:
seeking_file = self.__settings["data_identification"]
required_data = self.__helpers.search_data_on_disc(seeking_file)
return required_data | 0.702122 | 0.158174 |
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.contrib.auth.models import User
from geoposition import Geoposition
from time import sleep
from seshdash.models import Alert_Rule, Sesh_Site, Sesh_User, Sesh_Organisation
from seshdash.data.db.influx import Influx
class TestSettings(TestCase):
    """
    Tests for the alert-rule and user-notification options exposed by the
    settings pages: creating, editing and deleting alert rules, toggling
    user notifications and deleting sites.
    """

    def setUp(self):
        """
        Create the influx test database and the model fixtures: one
        organisation with two sites, one alert rule and a superuser.
        """
        # Adding an influx database used by the quick_status_icons functions used on the pages to render
        self.influx_db_name = 'test_db'
        self.i = Influx(database=self.influx_db_name)
        try:
            self.i.create_database(self.influx_db_name)
        except Exception:
            # Database already exists: drop it and recreate it so every run
            # starts from a clean slate.  (Narrowed from a bare ``except:``,
            # which would also swallow SystemExit/KeyboardInterrupt; the
            # dead trailing ``pass`` was removed.)
            self.i.delete_database(self.influx_db_name)
            sleep(1)
            self.i.create_database(self.influx_db_name)
        self.client = Client()
        self.organisation = Sesh_Organisation.objects.create(
            name="test_org",
            slack_token="<PASSWORD>"
        )
        self.site = Sesh_Site.objects.create(
            site_name='test_site1',
            organisation=self.organisation,
            comission_date=timezone.now(),
            location_city='Kigali',
            location_country='Rwanda',
            position=Geoposition(12,1),
            installed_kw=25,
            system_voltage=45,
            number_of_panels=45,
            battery_bank_capacity=450,
        )
        self.site2 = Sesh_Site.objects.create(
            site_name='test_site2',
            organisation=self.organisation,
            comission_date=timezone.now(),
            location_city='Kigali',
            location_country='Rwanda',
            position=Geoposition(12,1),
            installed_kw=25,
            system_voltage=45,
            number_of_panels=45,
            battery_bank_capacity=450,
        )
        self.alert_rule = Alert_Rule.objects.create(
            site=self.site,
            check_field='battery_voltage',
            operator='lt',
            value='0',
        )
        self.sesh_user = Sesh_User.objects.create_superuser(username='test_user',
                                                            email='<EMAIL>',
                                                            password='<PASSWORD>',
                                                            on_call=False,
                                                            send_sms=False,
                                                            send_mail=False)

    def test_settings_alert_rules_page(self):
        """
        The alert-rules settings page renders correctly
        for a logged-in user.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.get(reverse('manage_alert_rules'))
        self.assertEqual(response.status_code, 200)

    def test_add_alert_rule(self):
        """
        Posting a new rule to ``site_alert_rules`` creates it.
        """
        data = {
            'check_field': 'battery_voltage',
            'operator': 'lt',
            'value': '20',
        }
        self.client.login(username='test_user', password='<PASSWORD>')
        # A successful creation redirects and leaves two rules for the site
        # (the fixture rule from setUp plus the one just posted).
        response = self.client.post(reverse('site_alert_rules', args=[self.site.id]), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Alert_Rule.objects.filter(site=self.site).count(), 2)

    def test_edit_alert_rule(self):
        """
        Posting changed values to ``edit_alert_rule`` updates the rule.
        """
        data = {
            'check_field': 'battery_voltage',
            'operator': 'gt',
            'value': '100',
        }
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.post(reverse('edit_alert_rule', args=[self.alert_rule.id]), data)
        # Testing the success of editing an alert rule
        self.assertEqual(response.status_code, 302)
        alert_rule = Alert_Rule.objects.filter(id=self.alert_rule.id).first() # Need to query the database again to get updated version
        self.assertEqual(alert_rule.operator, 'gt')
        self.assertEqual(alert_rule.value, 100)

    def test_delete_alert_rule(self):
        """
        ``delete_alert_rule`` removes the rule and redirects.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.get(reverse('delete_alert_rule', args=[self.alert_rule.id]))
        # Testing the success of deleting an alert rule
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Alert_Rule.objects.all().count(), 0)

    def test_user_notifications(self):
        """
        Posting the notification formset turns on the selected
        flags for the user.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        data = {
            u'form-MAX_NUM_FORMS': [u'1000'],
            u'form-INITIAL_FORMS': [u'3'],
            u'form-TOTAL_FORMS': [u'1'],
            u'form-0-send_sms': [u'on'],
            u'form-0-on_call': [u'on'],
            u'form-0-id': [u'%s' % self.sesh_user.id],
            u'form-MIN_NUM_FORMS': [u'0']
        }
        response = self.client.post(reverse('user_notifications'), data)
        # Asserting the correctness of the response and the result of the post
        self.assertEqual(response.status_code, 302)
        user = Sesh_User.objects.filter(id=self.sesh_user.id).first()
        self.assertEqual(user.on_call, True)
        self.assertEqual(user.send_sms, True)

    def test_delete_site(self):
        """
        Only organisation admins may delete a site.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        # Users that are not admin of the organisation must not be allowed to delete a site
        response = self.client.get(reverse('delete_site', args=[self.site.id]))
        self.assertEqual(response.status_code, 403)
        # testing if the site is deleted when the user is admin of the organisation
        self.sesh_user.organisation = self.organisation
        self.sesh_user.is_org_admin = True
        self.sesh_user.save()
        response = self.client.get(reverse('delete_site', args=[self.site2.id]))
        self.assertRedirects(response, reverse('index'))
        sites = Sesh_Site.objects.all()
        self.assertEqual(sites.first().site_name,'test_site1' )
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.contrib.auth.models import User
from geoposition import Geoposition
from time import sleep
from seshdash.models import Alert_Rule, Sesh_Site, Sesh_User, Sesh_Organisation
from seshdash.data.db.influx import Influx
class TestSettings(TestCase):
    """
    Tests for the alert-rule and user-notification options exposed by the
    settings pages: creating, editing and deleting alert rules, toggling
    user notifications and deleting sites.
    """

    def setUp(self):
        """
        Create the influx test database and the model fixtures: one
        organisation with two sites, one alert rule and a superuser.
        """
        # Adding an influx database used by the quick_status_icons functions used on the pages to render
        self.influx_db_name = 'test_db'
        self.i = Influx(database=self.influx_db_name)
        try:
            self.i.create_database(self.influx_db_name)
        except Exception:
            # Database already exists: drop it and recreate it so every run
            # starts from a clean slate.  (Narrowed from a bare ``except:``,
            # which would also swallow SystemExit/KeyboardInterrupt; the
            # dead trailing ``pass`` was removed.)
            self.i.delete_database(self.influx_db_name)
            sleep(1)
            self.i.create_database(self.influx_db_name)
        self.client = Client()
        self.organisation = Sesh_Organisation.objects.create(
            name="test_org",
            slack_token="<PASSWORD>"
        )
        self.site = Sesh_Site.objects.create(
            site_name='test_site1',
            organisation=self.organisation,
            comission_date=timezone.now(),
            location_city='Kigali',
            location_country='Rwanda',
            position=Geoposition(12,1),
            installed_kw=25,
            system_voltage=45,
            number_of_panels=45,
            battery_bank_capacity=450,
        )
        self.site2 = Sesh_Site.objects.create(
            site_name='test_site2',
            organisation=self.organisation,
            comission_date=timezone.now(),
            location_city='Kigali',
            location_country='Rwanda',
            position=Geoposition(12,1),
            installed_kw=25,
            system_voltage=45,
            number_of_panels=45,
            battery_bank_capacity=450,
        )
        self.alert_rule = Alert_Rule.objects.create(
            site=self.site,
            check_field='battery_voltage',
            operator='lt',
            value='0',
        )
        self.sesh_user = Sesh_User.objects.create_superuser(username='test_user',
                                                            email='<EMAIL>',
                                                            password='<PASSWORD>',
                                                            on_call=False,
                                                            send_sms=False,
                                                            send_mail=False)

    def test_settings_alert_rules_page(self):
        """
        The alert-rules settings page renders correctly
        for a logged-in user.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.get(reverse('manage_alert_rules'))
        self.assertEqual(response.status_code, 200)

    def test_add_alert_rule(self):
        """
        Posting a new rule to ``site_alert_rules`` creates it.
        """
        data = {
            'check_field': 'battery_voltage',
            'operator': 'lt',
            'value': '20',
        }
        self.client.login(username='test_user', password='<PASSWORD>')
        # A successful creation redirects and leaves two rules for the site
        # (the fixture rule from setUp plus the one just posted).
        response = self.client.post(reverse('site_alert_rules', args=[self.site.id]), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Alert_Rule.objects.filter(site=self.site).count(), 2)

    def test_edit_alert_rule(self):
        """
        Posting changed values to ``edit_alert_rule`` updates the rule.
        """
        data = {
            'check_field': 'battery_voltage',
            'operator': 'gt',
            'value': '100',
        }
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.post(reverse('edit_alert_rule', args=[self.alert_rule.id]), data)
        # Testing the success of editing an alert rule
        self.assertEqual(response.status_code, 302)
        alert_rule = Alert_Rule.objects.filter(id=self.alert_rule.id).first() # Need to query the database again to get updated version
        self.assertEqual(alert_rule.operator, 'gt')
        self.assertEqual(alert_rule.value, 100)

    def test_delete_alert_rule(self):
        """
        ``delete_alert_rule`` removes the rule and redirects.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        response = self.client.get(reverse('delete_alert_rule', args=[self.alert_rule.id]))
        # Testing the success of deleting an alert rule
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Alert_Rule.objects.all().count(), 0)

    def test_user_notifications(self):
        """
        Posting the notification formset turns on the selected
        flags for the user.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        data = {
            u'form-MAX_NUM_FORMS': [u'1000'],
            u'form-INITIAL_FORMS': [u'3'],
            u'form-TOTAL_FORMS': [u'1'],
            u'form-0-send_sms': [u'on'],
            u'form-0-on_call': [u'on'],
            u'form-0-id': [u'%s' % self.sesh_user.id],
            u'form-MIN_NUM_FORMS': [u'0']
        }
        response = self.client.post(reverse('user_notifications'), data)
        # Asserting the correctness of the response and the result of the post
        self.assertEqual(response.status_code, 302)
        user = Sesh_User.objects.filter(id=self.sesh_user.id).first()
        self.assertEqual(user.on_call, True)
        self.assertEqual(user.send_sms, True)

    def test_delete_site(self):
        """
        Only organisation admins may delete a site.
        """
        self.client.login(username='test_user', password='<PASSWORD>')
        # Users that are not admin of the organisation must not be allowed to delete a site
        response = self.client.get(reverse('delete_site', args=[self.site.id]))
        self.assertEqual(response.status_code, 403)
        # testing if the site is deleted when the user is admin of the organisation
        self.sesh_user.organisation = self.organisation
        self.sesh_user.is_org_admin = True
        self.sesh_user.save()
        response = self.client.get(reverse('delete_site', args=[self.site2.id]))
        self.assertRedirects(response, reverse('index'))
        sites = Sesh_Site.objects.all()
        self.assertEqual(sites.first().site_name,'test_site1' )
from django.apps import AppConfig
from baserow.core.registries import (
plugin_registry,
application_type_registry,
)
from baserow.core.trash.registries import trash_item_type_registry
from baserow.ws.registries import page_registry
class DatabaseConfig(AppConfig):
    """Django AppConfig for the Baserow database contrib app.

    ``ready`` wires every database component (field types, field
    converters, view types, view filters, exporters, trash types, formula
    functions, websocket pages) into the Baserow registries.
    """

    name = "baserow.contrib.database"

    def prevent_generated_model_for_registering(self):
        """
        A nasty hack that prevents a generated table model and related auto created
        models from being registered to the apps. When a model class is defined it
        will be registered to the apps, but we do not always want that to happen
        because models with the same class name can differ. They are also meant to be
        temporary. Removing the model from the cache does not work because if there
        are multiple requests at the same, it is not removed from the cache on time
        which could result in hard failures. It is also hard to extend the
        django.apps.registry.apps so this hack extends the original `register_model`
        method and it will only call the original `register_model` method if the
        model is not a generated table model.
        If anyone has a better way to prevent the models from being registered then I
        am happy to hear about it! :)
        """
        original_register_model = self.apps.register_model

        def register_model(app_label, model):
            # Skip models flagged as generated table models (and the
            # auto-created through models derived from them); register
            # everything else normally.
            if not hasattr(model, "_generated_table_model") and not hasattr(
                model._meta.auto_created, "_generated_table_model"
            ):
                original_register_model(app_label, model)
            else:
                # Trigger the pending operations because the original register_model
                # method also triggers them. Not triggering them can cause a memory
                # leak because everytime a table model is generated, it will register
                # new pending operations.
                self.apps.do_pending_operations(model)
                self.apps.clear_cache()

        # Monkey-patch the apps registry with the filtering wrapper above.
        self.apps.register_model = register_model

    def ready(self):
        """Populate the Baserow registries with all database components.

        Imports are done locally because the registries and component
        classes must not be loaded before Django app setup is complete.
        """
        self.prevent_generated_model_for_registering()
        from .views.registries import view_type_registry, view_filter_type_registry
        from .fields.registries import field_type_registry, field_converter_registry
        from .export.registries import table_exporter_registry
        from .formula.registries import (
            formula_function_registry,
        )
        from .plugins import DatabasePlugin
        plugin_registry.register(DatabasePlugin())
        # Field types.
        from .fields.field_types import (
            TextFieldType,
            LongTextFieldType,
            URLFieldType,
            NumberFieldType,
            BooleanFieldType,
            DateFieldType,
            LastModifiedFieldType,
            CreatedOnFieldType,
            LinkRowFieldType,
            EmailFieldType,
            FileFieldType,
            SingleSelectFieldType,
            MultipleSelectFieldType,
            PhoneNumberFieldType,
            FormulaFieldType,
        )
        field_type_registry.register(TextFieldType())
        field_type_registry.register(LongTextFieldType())
        field_type_registry.register(URLFieldType())
        field_type_registry.register(EmailFieldType())
        field_type_registry.register(NumberFieldType())
        field_type_registry.register(BooleanFieldType())
        field_type_registry.register(DateFieldType())
        field_type_registry.register(LastModifiedFieldType())
        field_type_registry.register(CreatedOnFieldType())
        field_type_registry.register(LinkRowFieldType())
        field_type_registry.register(FileFieldType())
        field_type_registry.register(SingleSelectFieldType())
        field_type_registry.register(MultipleSelectFieldType())
        field_type_registry.register(PhoneNumberFieldType())
        field_type_registry.register(FormulaFieldType())
        # Field converters (used when changing a field from one type to another).
        from .fields.field_converters import (
            LinkRowFieldConverter,
            FileFieldConverter,
            TextFieldToMultipleSelectFieldConverter,
            MultipleSelectFieldToTextFieldConverter,
            MultipleSelectFieldToSingleSelectFieldConverter,
            SingleSelectFieldToMultipleSelectFieldConverter,
            FormulaFieldConverter,
        )
        field_converter_registry.register(LinkRowFieldConverter())
        field_converter_registry.register(FileFieldConverter())
        field_converter_registry.register(TextFieldToMultipleSelectFieldConverter())
        field_converter_registry.register(MultipleSelectFieldToTextFieldConverter())
        field_converter_registry.register(
            MultipleSelectFieldToSingleSelectFieldConverter()
        )
        field_converter_registry.register(
            SingleSelectFieldToMultipleSelectFieldConverter()
        )
        field_converter_registry.register(FormulaFieldConverter())
        # View types and view filters.
        from .views.view_types import GridViewType, FormViewType
        view_type_registry.register(GridViewType())
        view_type_registry.register(FormViewType())
        from .views.view_filters import (
            EqualViewFilterType,
            NotEqualViewFilterType,
            EmptyViewFilterType,
            NotEmptyViewFilterType,
            DateEqualViewFilterType,
            DateBeforeViewFilterType,
            DateAfterViewFilterType,
            DateNotEqualViewFilterType,
            DateEqualsTodayViewFilterType,
            DateEqualsCurrentMonthViewFilterType,
            DateEqualsCurrentYearViewFilterType,
            HigherThanViewFilterType,
            LowerThanViewFilterType,
            ContainsViewFilterType,
            FilenameContainsViewFilterType,
            HasFileTypeViewFilterType,
            ContainsNotViewFilterType,
            BooleanViewFilterType,
            SingleSelectEqualViewFilterType,
            SingleSelectNotEqualViewFilterType,
            LinkRowHasViewFilterType,
            LinkRowHasNotViewFilterType,
            MultipleSelectHasViewFilterType,
            MultipleSelectHasNotViewFilterType,
        )
        view_filter_type_registry.register(EqualViewFilterType())
        view_filter_type_registry.register(NotEqualViewFilterType())
        view_filter_type_registry.register(FilenameContainsViewFilterType())
        view_filter_type_registry.register(HasFileTypeViewFilterType())
        view_filter_type_registry.register(ContainsViewFilterType())
        view_filter_type_registry.register(ContainsNotViewFilterType())
        view_filter_type_registry.register(HigherThanViewFilterType())
        view_filter_type_registry.register(LowerThanViewFilterType())
        view_filter_type_registry.register(DateEqualViewFilterType())
        view_filter_type_registry.register(DateBeforeViewFilterType())
        view_filter_type_registry.register(DateAfterViewFilterType())
        view_filter_type_registry.register(DateNotEqualViewFilterType())
        view_filter_type_registry.register(DateEqualsTodayViewFilterType())
        view_filter_type_registry.register(DateEqualsCurrentMonthViewFilterType())
        view_filter_type_registry.register(DateEqualsCurrentYearViewFilterType())
        view_filter_type_registry.register(SingleSelectEqualViewFilterType())
        view_filter_type_registry.register(SingleSelectNotEqualViewFilterType())
        view_filter_type_registry.register(LinkRowHasViewFilterType())
        view_filter_type_registry.register(LinkRowHasNotViewFilterType())
        view_filter_type_registry.register(BooleanViewFilterType())
        view_filter_type_registry.register(EmptyViewFilterType())
        view_filter_type_registry.register(NotEmptyViewFilterType())
        view_filter_type_registry.register(MultipleSelectHasViewFilterType())
        view_filter_type_registry.register(MultipleSelectHasNotViewFilterType())
        # Application type, websocket page, exporters, trash types, formulas.
        from .application_types import DatabaseApplicationType
        application_type_registry.register(DatabaseApplicationType())
        from .ws.pages import TablePageType
        page_registry.register(TablePageType())
        from .export.table_exporters.csv_table_exporter import CsvTableExporter
        table_exporter_registry.register(CsvTableExporter())
        from .trash.trash_types import (
            TableTrashableItemType,
            RowTrashableItemType,
            FieldTrashableItemType,
        )
        trash_item_type_registry.register(TableTrashableItemType())
        trash_item_type_registry.register(FieldTrashableItemType())
        trash_item_type_registry.register(RowTrashableItemType())
        from .formula.ast.function_defs import register_formula_functions
        register_formula_functions(formula_function_registry)
        # The signals must always be imported last because they use the registries
        # which need to be filled first.
        import baserow.contrib.database.ws.signals  # noqa: F403, F401
from baserow.core.registries import (
plugin_registry,
application_type_registry,
)
from baserow.core.trash.registries import trash_item_type_registry
from baserow.ws.registries import page_registry
class DatabaseConfig(AppConfig):
    """Django AppConfig for the Baserow database contrib app.

    ``ready`` wires every database component (field types, field
    converters, view types, view filters, exporters, trash types, formula
    functions, websocket pages) into the Baserow registries.
    """

    name = "baserow.contrib.database"

    def prevent_generated_model_for_registering(self):
        """
        A nasty hack that prevents a generated table model and related auto created
        models from being registered to the apps. When a model class is defined it
        will be registered to the apps, but we do not always want that to happen
        because models with the same class name can differ. They are also meant to be
        temporary. Removing the model from the cache does not work because if there
        are multiple requests at the same, it is not removed from the cache on time
        which could result in hard failures. It is also hard to extend the
        django.apps.registry.apps so this hack extends the original `register_model`
        method and it will only call the original `register_model` method if the
        model is not a generated table model.
        If anyone has a better way to prevent the models from being registered then I
        am happy to hear about it! :)
        """
        original_register_model = self.apps.register_model

        def register_model(app_label, model):
            # Skip models flagged as generated table models (and the
            # auto-created through models derived from them); register
            # everything else normally.
            if not hasattr(model, "_generated_table_model") and not hasattr(
                model._meta.auto_created, "_generated_table_model"
            ):
                original_register_model(app_label, model)
            else:
                # Trigger the pending operations because the original register_model
                # method also triggers them. Not triggering them can cause a memory
                # leak because everytime a table model is generated, it will register
                # new pending operations.
                self.apps.do_pending_operations(model)
                self.apps.clear_cache()

        # Monkey-patch the apps registry with the filtering wrapper above.
        self.apps.register_model = register_model

    def ready(self):
        """Populate the Baserow registries with all database components.

        Imports are done locally because the registries and component
        classes must not be loaded before Django app setup is complete.
        """
        self.prevent_generated_model_for_registering()
        from .views.registries import view_type_registry, view_filter_type_registry
        from .fields.registries import field_type_registry, field_converter_registry
        from .export.registries import table_exporter_registry
        from .formula.registries import (
            formula_function_registry,
        )
        from .plugins import DatabasePlugin
        plugin_registry.register(DatabasePlugin())
        # Field types.
        from .fields.field_types import (
            TextFieldType,
            LongTextFieldType,
            URLFieldType,
            NumberFieldType,
            BooleanFieldType,
            DateFieldType,
            LastModifiedFieldType,
            CreatedOnFieldType,
            LinkRowFieldType,
            EmailFieldType,
            FileFieldType,
            SingleSelectFieldType,
            MultipleSelectFieldType,
            PhoneNumberFieldType,
            FormulaFieldType,
        )
        field_type_registry.register(TextFieldType())
        field_type_registry.register(LongTextFieldType())
        field_type_registry.register(URLFieldType())
        field_type_registry.register(EmailFieldType())
        field_type_registry.register(NumberFieldType())
        field_type_registry.register(BooleanFieldType())
        field_type_registry.register(DateFieldType())
        field_type_registry.register(LastModifiedFieldType())
        field_type_registry.register(CreatedOnFieldType())
        field_type_registry.register(LinkRowFieldType())
        field_type_registry.register(FileFieldType())
        field_type_registry.register(SingleSelectFieldType())
        field_type_registry.register(MultipleSelectFieldType())
        field_type_registry.register(PhoneNumberFieldType())
        field_type_registry.register(FormulaFieldType())
        # Field converters (used when changing a field from one type to another).
        from .fields.field_converters import (
            LinkRowFieldConverter,
            FileFieldConverter,
            TextFieldToMultipleSelectFieldConverter,
            MultipleSelectFieldToTextFieldConverter,
            MultipleSelectFieldToSingleSelectFieldConverter,
            SingleSelectFieldToMultipleSelectFieldConverter,
            FormulaFieldConverter,
        )
        field_converter_registry.register(LinkRowFieldConverter())
        field_converter_registry.register(FileFieldConverter())
        field_converter_registry.register(TextFieldToMultipleSelectFieldConverter())
        field_converter_registry.register(MultipleSelectFieldToTextFieldConverter())
        field_converter_registry.register(
            MultipleSelectFieldToSingleSelectFieldConverter()
        )
        field_converter_registry.register(
            SingleSelectFieldToMultipleSelectFieldConverter()
        )
        field_converter_registry.register(FormulaFieldConverter())
        # View types and view filters.
        from .views.view_types import GridViewType, FormViewType
        view_type_registry.register(GridViewType())
        view_type_registry.register(FormViewType())
        from .views.view_filters import (
            EqualViewFilterType,
            NotEqualViewFilterType,
            EmptyViewFilterType,
            NotEmptyViewFilterType,
            DateEqualViewFilterType,
            DateBeforeViewFilterType,
            DateAfterViewFilterType,
            DateNotEqualViewFilterType,
            DateEqualsTodayViewFilterType,
            DateEqualsCurrentMonthViewFilterType,
            DateEqualsCurrentYearViewFilterType,
            HigherThanViewFilterType,
            LowerThanViewFilterType,
            ContainsViewFilterType,
            FilenameContainsViewFilterType,
            HasFileTypeViewFilterType,
            ContainsNotViewFilterType,
            BooleanViewFilterType,
            SingleSelectEqualViewFilterType,
            SingleSelectNotEqualViewFilterType,
            LinkRowHasViewFilterType,
            LinkRowHasNotViewFilterType,
            MultipleSelectHasViewFilterType,
            MultipleSelectHasNotViewFilterType,
        )
        view_filter_type_registry.register(EqualViewFilterType())
        view_filter_type_registry.register(NotEqualViewFilterType())
        view_filter_type_registry.register(FilenameContainsViewFilterType())
        view_filter_type_registry.register(HasFileTypeViewFilterType())
        view_filter_type_registry.register(ContainsViewFilterType())
        view_filter_type_registry.register(ContainsNotViewFilterType())
        view_filter_type_registry.register(HigherThanViewFilterType())
        view_filter_type_registry.register(LowerThanViewFilterType())
        view_filter_type_registry.register(DateEqualViewFilterType())
        view_filter_type_registry.register(DateBeforeViewFilterType())
        view_filter_type_registry.register(DateAfterViewFilterType())
        view_filter_type_registry.register(DateNotEqualViewFilterType())
        view_filter_type_registry.register(DateEqualsTodayViewFilterType())
        view_filter_type_registry.register(DateEqualsCurrentMonthViewFilterType())
        view_filter_type_registry.register(DateEqualsCurrentYearViewFilterType())
        view_filter_type_registry.register(SingleSelectEqualViewFilterType())
        view_filter_type_registry.register(SingleSelectNotEqualViewFilterType())
        view_filter_type_registry.register(LinkRowHasViewFilterType())
        view_filter_type_registry.register(LinkRowHasNotViewFilterType())
        view_filter_type_registry.register(BooleanViewFilterType())
        view_filter_type_registry.register(EmptyViewFilterType())
        view_filter_type_registry.register(NotEmptyViewFilterType())
        view_filter_type_registry.register(MultipleSelectHasViewFilterType())
        view_filter_type_registry.register(MultipleSelectHasNotViewFilterType())
        # Application type, websocket page, exporters, trash types, formulas.
        from .application_types import DatabaseApplicationType
        application_type_registry.register(DatabaseApplicationType())
        from .ws.pages import TablePageType
        page_registry.register(TablePageType())
        from .export.table_exporters.csv_table_exporter import CsvTableExporter
        table_exporter_registry.register(CsvTableExporter())
        from .trash.trash_types import (
            TableTrashableItemType,
            RowTrashableItemType,
            FieldTrashableItemType,
        )
        trash_item_type_registry.register(TableTrashableItemType())
        trash_item_type_registry.register(FieldTrashableItemType())
        trash_item_type_registry.register(RowTrashableItemType())
        from .formula.ast.function_defs import register_formula_functions
        register_formula_functions(formula_function_registry)
        # The signals must always be imported last because they use the registries
        # which need to be filled first.
        import baserow.contrib.database.ws.signals  # noqa: F403, F401
import numpy as np
import open3d as o3d
import time
import os
import sys
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)
import open3d_example as o3dex
def problem0():
    """ARAP deformation problem on a subdivided unit plane.

    A ring of boundary vertices is pinned in place and a single interior
    vertex (id 4) serves as a handle, lifted 0.4 along +z.

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    mesh = o3dex.get_plane_mesh(height=1, width=1)
    mesh = mesh.subdivide_midpoint(3)
    vertices = np.asarray(mesh.vertices)
    static_ids = [
        1, 46, 47, 48, 16, 51, 49, 50, 6, 31, 33, 32, 11, 26, 27, 25, 0, 64, 65,
        20, 66, 68, 67, 7, 69, 71, 70, 22, 72, 74, 73, 3, 15, 44, 43, 45, 5, 41,
        40, 42, 13, 39, 37, 38, 2, 56, 55, 19, 61, 60, 59, 8, 76, 75, 77, 23
    ]
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [4]
    handle_positions = [vertices[4] + np.array((0, 0, 0.4))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem1():
    """ARAP deformation problem on a subdivided unit plane (smaller pin set).

    A subset of boundary vertices is pinned and vertex 21 serves as the
    handle, lifted 0.4 along +z.

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    mesh = o3dex.get_plane_mesh(height=1, width=1)
    mesh = mesh.subdivide_midpoint(3)
    vertices = np.asarray(mesh.vertices)
    static_ids = [
        1, 46, 15, 43, 5, 40, 13, 38, 2, 56, 37, 39, 42, 41, 45, 44, 48, 47
    ]
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [21]
    handle_positions = [vertices[21] + np.array((0, 0, 0.4))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem2():
    """ARAP deformation problem on the Armadillo mesh.

    All vertices with y < -30 (the feet) are pinned and vertex 2490 serves
    as the handle, displaced by (-40, -40, -40).

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    armadillo_data = o3d.data.ArmadilloMesh()
    mesh = o3d.io.read_triangle_mesh(armadillo_data.path)
    vertices = np.asarray(mesh.vertices)
    # `list(...)` replaces the redundant identity comprehension.
    static_ids = list(np.where(vertices[:, 1] < -30)[0])
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [2490]
    handle_positions = [vertices[2490] + np.array((-40, -40, -40))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
if __name__ == "__main__":
    # Debug verbosity makes the solver's per-iteration output visible.
    o3d.utility.set_verbosity_level(o3d.utility.Debug)
    for mesh, constraint_ids, constraint_pos in [
            problem0(), problem1(), problem2()
    ]:
        constraint_ids = np.array(constraint_ids, dtype=np.int32)
        constraint_pos = o3d.utility.Vector3dVector(constraint_pos)
        tic = time.time()
        # As-rigid-as-possible deformation: the listed vertices are moved to
        # the given target positions while the rest of the mesh follows.
        mesh_prime = mesh.deform_as_rigid_as_possible(
            o3d.utility.IntVector(constraint_ids), constraint_pos, max_iter=50)
        print("deform took {}[s]".format(time.time() - tic))
        mesh_prime.compute_vertex_normals()
        # Visualize: original mesh in red, deformed copy with normals, and
        # the constraint targets as a green point cloud.
        mesh.paint_uniform_color((1, 0, 0))
        handles = o3d.geometry.PointCloud()
        handles.points = constraint_pos
        handles.paint_uniform_color((0, 1, 0))
        o3d.visualization.draw_geometries([mesh, mesh_prime, handles])
    o3d.utility.set_verbosity_level(o3d.utility.Info)
import numpy as np
import open3d as o3d
import time
import os
import sys
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)
import open3d_example as o3dex
def problem0():
    """ARAP deformation problem on a subdivided unit plane.

    A ring of boundary vertices is pinned in place and a single interior
    vertex (id 4) serves as a handle, lifted 0.4 along +z.

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    mesh = o3dex.get_plane_mesh(height=1, width=1)
    mesh = mesh.subdivide_midpoint(3)
    vertices = np.asarray(mesh.vertices)
    static_ids = [
        1, 46, 47, 48, 16, 51, 49, 50, 6, 31, 33, 32, 11, 26, 27, 25, 0, 64, 65,
        20, 66, 68, 67, 7, 69, 71, 70, 22, 72, 74, 73, 3, 15, 44, 43, 45, 5, 41,
        40, 42, 13, 39, 37, 38, 2, 56, 55, 19, 61, 60, 59, 8, 76, 75, 77, 23
    ]
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [4]
    handle_positions = [vertices[4] + np.array((0, 0, 0.4))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem1():
    """ARAP deformation problem on a subdivided unit plane (smaller pin set).

    A subset of boundary vertices is pinned and vertex 21 serves as the
    handle, lifted 0.4 along +z.

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    mesh = o3dex.get_plane_mesh(height=1, width=1)
    mesh = mesh.subdivide_midpoint(3)
    vertices = np.asarray(mesh.vertices)
    static_ids = [
        1, 46, 15, 43, 5, 40, 13, 38, 2, 56, 37, 39, 42, 41, 45, 44, 48, 47
    ]
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [21]
    handle_positions = [vertices[21] + np.array((0, 0, 0.4))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem2():
    """ARAP deformation problem on the Armadillo mesh.

    All vertices with y < -30 (the feet) are pinned and vertex 2490 serves
    as the handle, displaced by (-40, -40, -40).

    Returns:
        Tuple ``(mesh, constraint_ids, constraint_positions)`` suitable
        for ``TriangleMesh.deform_as_rigid_as_possible``.
    """
    armadillo_data = o3d.data.ArmadilloMesh()
    mesh = o3d.io.read_triangle_mesh(armadillo_data.path)
    vertices = np.asarray(mesh.vertices)
    # `list(...)` replaces the redundant identity comprehension.
    static_ids = list(np.where(vertices[:, 1] < -30)[0])
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `id` used by the original loop variable.
    static_positions = [vertices[vid] for vid in static_ids]
    handle_ids = [2490]
    handle_positions = [vertices[2490] + np.array((-40, -40, -40))]
    return mesh, static_ids + handle_ids, static_positions + handle_positions
if __name__ == "__main__":
    # Debug verbosity makes the solver's per-iteration output visible.
    o3d.utility.set_verbosity_level(o3d.utility.Debug)
    for mesh, constraint_ids, constraint_pos in [
            problem0(), problem1(), problem2()
    ]:
        constraint_ids = np.array(constraint_ids, dtype=np.int32)
        constraint_pos = o3d.utility.Vector3dVector(constraint_pos)
        tic = time.time()
        # As-rigid-as-possible deformation: the listed vertices are moved to
        # the given target positions while the rest of the mesh follows.
        mesh_prime = mesh.deform_as_rigid_as_possible(
            o3d.utility.IntVector(constraint_ids), constraint_pos, max_iter=50)
        print("deform took {}[s]".format(time.time() - tic))
        mesh_prime.compute_vertex_normals()
        # Visualize: original mesh in red, deformed copy with normals, and
        # the constraint targets as a green point cloud.
        mesh.paint_uniform_color((1, 0, 0))
        handles = o3d.geometry.PointCloud()
        handles.points = constraint_pos
        handles.paint_uniform_color((0, 1, 0))
        o3d.visualization.draw_geometries([mesh, mesh_prime, handles])
    o3d.utility.set_verbosity_level(o3d.utility.Info)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .channel_attention import *
class UNet_up_block(nn.Module):
    """U-Net decoder stage.

    Optionally upsamples the input by 2x (bilinear), optionally
    concatenates an encoder skip feature map, then applies three
    3x3 Conv -> GroupNorm(8 groups) -> ReLU layers.

    Args:
        prev_channel: channels of the encoder skip feature map.
        input_channel: channels of the incoming (decoder) tensor.
        output_channel: channels produced by this stage; must be divisible
            by 8 for the GroupNorm layers.
        up_sample: if True, bilinearly upsample the input by 2x first.
        use_skip: if True, the first conv consumes the concatenation of the
            upsampled features and the skip map (prev + input channels).
    """

    def __init__(self, prev_channel, input_channel, output_channel, up_sample=True, use_skip=True):
        super().__init__()
        self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        if use_skip:
            self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
        else:
            self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)
        # Was `torch.nn.ReLU()`; use the `nn` alias consistently with the
        # rest of the module definitions in this file.
        self.relu = nn.ReLU()
        self.up_sample = up_sample

    def forward(self, x, prev_feature_map=None):
        """Run the decoder stage; `prev_feature_map` is the optional skip."""
        if self.up_sample:
            x = self.up_sampling(x)
        if prev_feature_map is not None:
            x = torch.cat((x, prev_feature_map), dim=1)
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.relu(self.bn3(self.conv3(x)))
        return x
class UNet_down_block(nn.Module):
    """U-Net encoder stage.

    Applies three 3x3 Conv -> GroupNorm(8 groups) -> ReLU layers and,
    when `down_size` is True, finishes with a 2x2 max-pool that halves
    the spatial resolution.

    Args:
        input_channel: channels of the incoming tensor.
        output_channel: channels produced by this stage; must be divisible
            by 8 for the GroupNorm layers.
        down_size: whether to max-pool at the end of the stage.
    """

    def __init__(self, input_channel, output_channel, down_size=True):
        super().__init__()
        self.down_size = down_size
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)

    def forward(self, x):
        """Encode one stage; halves H and W when `down_size` is True."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.relu(self.bn3(self.conv3(out)))
        return self.max_pool(out) if self.down_size else out
class UNet(nn.Module):
    """U-Net with a configurable number of down/up stages.

    The encoder starts at 16 channels and doubles per stage, so the
    bottleneck has 2**(4 + downsample) channels.  Input spatial dimensions
    must be divisible by 2**downsample.

    Args:
        downsample: number of down-/up-sampling stages.
        in_channels: channels of the input image.
        out_channels: channels of the produced output map.
        patch_size: stored but currently unused in forward (a commented-out
            interpolate once consumed it) -- kept for interface
            compatibility; confirm before removing.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3, patch_size=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.patch_size = patch_size
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)]
        )
        bottleneck = 2**(4 + downsample)
        self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, bottleneck)
        self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, bottleneck)
        # Was `torch.nn.Conv2d`; use the `nn` alias consistently.
        self.mid_conv3 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, bottleneck)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)]
        )
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Encode, transform at the bottleneck, then decode with skips."""
        x = self.down1(x)
        skips = [x]
        for down_block in self.down_blocks:
            x = down_block(x)
            skips.append(x)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        # Decode from the deepest stage back up, consuming skip feature
        # maps in reverse order of their creation.
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](x, skips[i])
        x = self.relu(self.last_bn(self.last_conv1(x)))
        return self.last_conv2(x)
class UNetV2(nn.Module):
def __init__(self, in_channels=3, out_channels=3, patch_size=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.patch_size = patch_size
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList([
UNet_down_block(16, 32, True),
UNet_down_block(32, 64, True),
UNet_down_block(64, 256, True),
UNet_down_block(256, 256, True),
UNet_down_block(256, 512, True),
UNet_down_block(512, 1024, True),
])
bottleneck = 1024
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList([
UNet_up_block(512, 1024, 512),
UNet_up_block(256, 512, 256),
UNet_up_block(256, 256, 256),
UNet_up_block(64, 256, 64),
UNet_up_block(32, 64, 32),
UNet_up_block(16, 32, 16),
])
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.InstanceNorm2d(16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.attention = ECALayer(out_channels, k_size=7)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for down_block in self.down_blocks:
x = down_block(x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for up_block, xval in zip(self.up_blocks, xvals[::-1][1:len(self.up_blocks)+1]):
x = up_block(x, xval)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
x = self.attention(x)
x = F.interpolate(x, scale_factor=(1/self.patch_size, 1/self.patch_size), mode='bilinear', align_corners=False)
return x | paper_code/models/unet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .channel_attention import *
class UNet_up_block(nn.Module):
def __init__(self, prev_channel, input_channel, output_channel, up_sample=True, use_skip=True):
super().__init__()
self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
if use_skip:
self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
else:
self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.relu = torch.nn.ReLU()
self.up_sample = up_sample
def forward(self, x, prev_feature_map=None):
if self.up_sample:
x = self.up_sampling(x)
if prev_feature_map is not None:
x = torch.cat((x, prev_feature_map), dim=1)
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
return x
class UNet_down_block(nn.Module):
def __init__(self, input_channel, output_channel, down_size=True):
super().__init__()
self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.max_pool = nn.MaxPool2d(2, 2)
self.relu = nn.ReLU()
self.down_size = down_size
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
if self.down_size:
x = self.max_pool(x)
return x
class UNet(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3, patch_size=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.downsample = downsample
self.patch_size = patch_size
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)]
)
bottleneck = 2**(4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](x, xvals[i])
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
# x = F.interpolate(x, scale_factor=(1/self.patch_size, 1/self.patch_size), mode='bilinear', align_corners=False)
return x
class UNetV2(nn.Module):
def __init__(self, in_channels=3, out_channels=3, patch_size=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.patch_size = patch_size
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList([
UNet_down_block(16, 32, True),
UNet_down_block(32, 64, True),
UNet_down_block(64, 256, True),
UNet_down_block(256, 256, True),
UNet_down_block(256, 512, True),
UNet_down_block(512, 1024, True),
])
bottleneck = 1024
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList([
UNet_up_block(512, 1024, 512),
UNet_up_block(256, 512, 256),
UNet_up_block(256, 256, 256),
UNet_up_block(64, 256, 64),
UNet_up_block(32, 64, 32),
UNet_up_block(16, 32, 16),
])
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.InstanceNorm2d(16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.attention = ECALayer(out_channels, k_size=7)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for down_block in self.down_blocks:
x = down_block(x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for up_block, xval in zip(self.up_blocks, xvals[::-1][1:len(self.up_blocks)+1]):
x = up_block(x, xval)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
x = self.attention(x)
x = F.interpolate(x, scale_factor=(1/self.patch_size, 1/self.patch_size), mode='bilinear', align_corners=False)
return x | 0.951425 | 0.315248 |
import numpy as np
import xgboost as xgb
import sys
import pytest
sys.path.append("tests/python")
import testing as tm
def dmatrix_from_cupy(input_type, DMatrixT, missing=np.NAN):
'''Test constructing DMatrix from cupy'''
import cupy as cp
kRows = 80
kCols = 3
np_X = np.random.randn(kRows, kCols).astype(dtype=input_type)
X = cp.array(np_X)
X[5, 0] = missing
X[3, 1] = missing
y = cp.random.randn(kRows).astype(dtype=input_type)
dtrain = DMatrixT(X, missing=missing, label=y)
assert dtrain.num_col() == kCols
assert dtrain.num_row() == kRows
return dtrain
def _test_from_cupy(DMatrixT):
'''Test constructing DMatrix from cupy'''
import cupy as cp
dmatrix_from_cupy(np.float32, DMatrixT, np.NAN)
dmatrix_from_cupy(np.float64, DMatrixT, np.NAN)
dmatrix_from_cupy(np.uint8, DMatrixT, 2)
dmatrix_from_cupy(np.uint32, DMatrixT, 3)
dmatrix_from_cupy(np.uint64, DMatrixT, 4)
dmatrix_from_cupy(np.int8, DMatrixT, 2)
dmatrix_from_cupy(np.int32, DMatrixT, -2)
dmatrix_from_cupy(np.int64, DMatrixT, -3)
with pytest.raises(Exception):
X = cp.random.randn(2, 2, dtype="float32")
dtrain = DMatrixT(X, label=X)
def _test_cupy_training(DMatrixT):
import cupy as cp
np.random.seed(1)
cp.random.seed(1)
X = cp.random.randn(50, 10, dtype="float32")
y = cp.random.randn(50, dtype="float32")
weights = np.random.random(50) + 1
cupy_weights = cp.array(weights)
base_margin = np.random.random(50)
cupy_base_margin = cp.array(base_margin)
evals_result_cupy = {}
dtrain_cp = DMatrixT(X, y, weight=cupy_weights, base_margin=cupy_base_margin)
params = {'gpu_id': 0, 'nthread': 1, 'tree_method': 'gpu_hist'}
xgb.train(params, dtrain_cp, evals=[(dtrain_cp, "train")],
evals_result=evals_result_cupy)
evals_result_np = {}
dtrain_np = xgb.DMatrix(cp.asnumpy(X), cp.asnumpy(y), weight=weights,
base_margin=base_margin)
xgb.train(params, dtrain_np, evals=[(dtrain_np, "train")],
evals_result=evals_result_np)
assert np.array_equal(evals_result_cupy["train"]["rmse"], evals_result_np["train"]["rmse"])
def _test_cupy_metainfo(DMatrixT):
import cupy as cp
n = 100
X = np.random.random((n, 2))
dmat_cupy = DMatrixT(cp.array(X))
dmat = xgb.DMatrix(X)
floats = np.random.random(n)
uints = np.array([4, 2, 8]).astype("uint32")
cupy_floats = cp.array(floats)
cupy_uints = cp.array(uints)
dmat.set_float_info('weight', floats)
dmat.set_float_info('label', floats)
dmat.set_float_info('base_margin', floats)
dmat.set_uint_info('group', uints)
dmat_cupy.set_interface_info('weight', cupy_floats)
dmat_cupy.set_interface_info('label', cupy_floats)
dmat_cupy.set_interface_info('base_margin', cupy_floats)
dmat_cupy.set_interface_info('group', cupy_uints)
# Test setting info with cupy
assert np.array_equal(dmat.get_float_info('weight'), dmat_cupy.get_float_info('weight'))
assert np.array_equal(dmat.get_float_info('label'), dmat_cupy.get_float_info('label'))
assert np.array_equal(dmat.get_float_info('base_margin'),
dmat_cupy.get_float_info('base_margin'))
assert np.array_equal(dmat.get_uint_info('group_ptr'), dmat_cupy.get_uint_info('group_ptr'))
class TestFromCupy:
'''Tests for constructing DMatrix from data structure conforming Apache
Arrow specification.'''
@pytest.mark.skipif(**tm.no_cupy())
def test_simple_dmat_from_cupy(self):
_test_from_cupy(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_device_dmat_from_cupy(self):
_test_from_cupy(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_device_dmat(self):
_test_cupy_training(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_simple_dmat(self):
_test_cupy_training(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_simple_dmat(self):
_test_cupy_metainfo(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_device_dmat(self):
_test_cupy_metainfo(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_simple_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
xgb.DMatrix(X.toDlpack())
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_device_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
xgb.DeviceQuantileDMatrix(X.toDlpack())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_specified_device(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
dtrain = dmatrix_from_cupy(
np.float32, xgb.DeviceQuantileDMatrix, np.nan)
with pytest.raises(xgb.core.XGBoostError):
xgb.train({'tree_method': 'gpu_hist', 'gpu_id': 1},
dtrain, num_boost_round=10) | tests/python-gpu/test_from_cupy.py | import numpy as np
import xgboost as xgb
import sys
import pytest
sys.path.append("tests/python")
import testing as tm
def dmatrix_from_cupy(input_type, DMatrixT, missing=np.NAN):
'''Test constructing DMatrix from cupy'''
import cupy as cp
kRows = 80
kCols = 3
np_X = np.random.randn(kRows, kCols).astype(dtype=input_type)
X = cp.array(np_X)
X[5, 0] = missing
X[3, 1] = missing
y = cp.random.randn(kRows).astype(dtype=input_type)
dtrain = DMatrixT(X, missing=missing, label=y)
assert dtrain.num_col() == kCols
assert dtrain.num_row() == kRows
return dtrain
def _test_from_cupy(DMatrixT):
'''Test constructing DMatrix from cupy'''
import cupy as cp
dmatrix_from_cupy(np.float32, DMatrixT, np.NAN)
dmatrix_from_cupy(np.float64, DMatrixT, np.NAN)
dmatrix_from_cupy(np.uint8, DMatrixT, 2)
dmatrix_from_cupy(np.uint32, DMatrixT, 3)
dmatrix_from_cupy(np.uint64, DMatrixT, 4)
dmatrix_from_cupy(np.int8, DMatrixT, 2)
dmatrix_from_cupy(np.int32, DMatrixT, -2)
dmatrix_from_cupy(np.int64, DMatrixT, -3)
with pytest.raises(Exception):
X = cp.random.randn(2, 2, dtype="float32")
dtrain = DMatrixT(X, label=X)
def _test_cupy_training(DMatrixT):
import cupy as cp
np.random.seed(1)
cp.random.seed(1)
X = cp.random.randn(50, 10, dtype="float32")
y = cp.random.randn(50, dtype="float32")
weights = np.random.random(50) + 1
cupy_weights = cp.array(weights)
base_margin = np.random.random(50)
cupy_base_margin = cp.array(base_margin)
evals_result_cupy = {}
dtrain_cp = DMatrixT(X, y, weight=cupy_weights, base_margin=cupy_base_margin)
params = {'gpu_id': 0, 'nthread': 1, 'tree_method': 'gpu_hist'}
xgb.train(params, dtrain_cp, evals=[(dtrain_cp, "train")],
evals_result=evals_result_cupy)
evals_result_np = {}
dtrain_np = xgb.DMatrix(cp.asnumpy(X), cp.asnumpy(y), weight=weights,
base_margin=base_margin)
xgb.train(params, dtrain_np, evals=[(dtrain_np, "train")],
evals_result=evals_result_np)
assert np.array_equal(evals_result_cupy["train"]["rmse"], evals_result_np["train"]["rmse"])
def _test_cupy_metainfo(DMatrixT):
import cupy as cp
n = 100
X = np.random.random((n, 2))
dmat_cupy = DMatrixT(cp.array(X))
dmat = xgb.DMatrix(X)
floats = np.random.random(n)
uints = np.array([4, 2, 8]).astype("uint32")
cupy_floats = cp.array(floats)
cupy_uints = cp.array(uints)
dmat.set_float_info('weight', floats)
dmat.set_float_info('label', floats)
dmat.set_float_info('base_margin', floats)
dmat.set_uint_info('group', uints)
dmat_cupy.set_interface_info('weight', cupy_floats)
dmat_cupy.set_interface_info('label', cupy_floats)
dmat_cupy.set_interface_info('base_margin', cupy_floats)
dmat_cupy.set_interface_info('group', cupy_uints)
# Test setting info with cupy
assert np.array_equal(dmat.get_float_info('weight'), dmat_cupy.get_float_info('weight'))
assert np.array_equal(dmat.get_float_info('label'), dmat_cupy.get_float_info('label'))
assert np.array_equal(dmat.get_float_info('base_margin'),
dmat_cupy.get_float_info('base_margin'))
assert np.array_equal(dmat.get_uint_info('group_ptr'), dmat_cupy.get_uint_info('group_ptr'))
class TestFromCupy:
'''Tests for constructing DMatrix from data structure conforming Apache
Arrow specification.'''
@pytest.mark.skipif(**tm.no_cupy())
def test_simple_dmat_from_cupy(self):
_test_from_cupy(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_device_dmat_from_cupy(self):
_test_from_cupy(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_device_dmat(self):
_test_cupy_training(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_training_simple_dmat(self):
_test_cupy_training(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_simple_dmat(self):
_test_cupy_metainfo(xgb.DMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_cupy_metainfo_device_dmat(self):
_test_cupy_metainfo(xgb.DeviceQuantileDMatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_simple_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
xgb.DMatrix(X.toDlpack())
@pytest.mark.skipif(**tm.no_cupy())
def test_dlpack_device_dmat(self):
import cupy as cp
n = 100
X = cp.random.random((n, 2))
xgb.DeviceQuantileDMatrix(X.toDlpack())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_specified_device(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
dtrain = dmatrix_from_cupy(
np.float32, xgb.DeviceQuantileDMatrix, np.nan)
with pytest.raises(xgb.core.XGBoostError):
xgb.train({'tree_method': 'gpu_hist', 'gpu_id': 1},
dtrain, num_boost_round=10) | 0.405096 | 0.438725 |
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown import Extension
from markdown.util import etree
import re
from copy import copy
GROUP_RE = r'(^(@\{(?P<lightbox>.+)\})(?P<description>.*))'
HIDDEN_RE = r'(^(!)(?P<description>.*))'
class LightboxImagesTreeprocessor(Treeprocessor):
""" Lightbox Images Treeprocessor """
def __init__(self, md, group=True):
Treeprocessor.__init__(self, md)
self.group_re = re.compile(GROUP_RE)
self.hidden_re = re.compile(HIDDEN_RE)
self.group = group
def run(self, root):
parent_map = {c: p for p in root.iter() for c in p}
i = 0
images = root.iter("img")
for image in images:
desc = image.attrib["alt"]
h = self.hidden_re.match(desc)
if h:
desc = h.group("description")
hidden = True
else:
hidden = False
m = self.group_re.match(desc)
if m:
lb = m.group("lightbox")
desc = m.group("description")
elif self.group:
lb = "all_images"
else:
lb = "image" + str(i)
image.set("alt", desc)
parent = parent_map[image]
ix = list(parent).index(image)
new_node = etree.Element('a')
new_node.set("href", image.attrib["src"])
new_node.set("data-lightbox", lb)
new_node.set("data-title", desc)
new_node.tail = copy(image.tail)
parent.insert(ix, new_node)
parent.remove(image)
image.tail = markdown.util.AtomicString("")
if not hidden:
new_node.append(image)
i += 1
class LightboxImagesExtension(Extension):
"""
LightboxImagesExtension
Extension class for markdown
"""
def __init__(self, **kwargs):
self.config = {'group': [True, "group all images into same lightbox"]}
super(LightboxImagesExtension, self).__init__(**kwargs)
def extendMarkdown(self, md, md_globals):
lightbox_images = LightboxImagesTreeprocessor(
md, self.getConfig('group'))
md.treeprocessors.add("lightbox", lightbox_images, "_end")
md.registerExtension(self)
def makeExtension(**kwargs):
return LightboxImagesExtension(**kwargs) | mdx_lightbox.py | import markdown
from markdown.treeprocessors import Treeprocessor
from markdown import Extension
from markdown.util import etree
import re
from copy import copy
GROUP_RE = r'(^(@\{(?P<lightbox>.+)\})(?P<description>.*))'
HIDDEN_RE = r'(^(!)(?P<description>.*))'
class LightboxImagesTreeprocessor(Treeprocessor):
""" Lightbox Images Treeprocessor """
def __init__(self, md, group=True):
Treeprocessor.__init__(self, md)
self.group_re = re.compile(GROUP_RE)
self.hidden_re = re.compile(HIDDEN_RE)
self.group = group
def run(self, root):
parent_map = {c: p for p in root.iter() for c in p}
i = 0
images = root.iter("img")
for image in images:
desc = image.attrib["alt"]
h = self.hidden_re.match(desc)
if h:
desc = h.group("description")
hidden = True
else:
hidden = False
m = self.group_re.match(desc)
if m:
lb = m.group("lightbox")
desc = m.group("description")
elif self.group:
lb = "all_images"
else:
lb = "image" + str(i)
image.set("alt", desc)
parent = parent_map[image]
ix = list(parent).index(image)
new_node = etree.Element('a')
new_node.set("href", image.attrib["src"])
new_node.set("data-lightbox", lb)
new_node.set("data-title", desc)
new_node.tail = copy(image.tail)
parent.insert(ix, new_node)
parent.remove(image)
image.tail = markdown.util.AtomicString("")
if not hidden:
new_node.append(image)
i += 1
class LightboxImagesExtension(Extension):
"""
LightboxImagesExtension
Extension class for markdown
"""
def __init__(self, **kwargs):
self.config = {'group': [True, "group all images into same lightbox"]}
super(LightboxImagesExtension, self).__init__(**kwargs)
def extendMarkdown(self, md, md_globals):
lightbox_images = LightboxImagesTreeprocessor(
md, self.getConfig('group'))
md.treeprocessors.add("lightbox", lightbox_images, "_end")
md.registerExtension(self)
def makeExtension(**kwargs):
return LightboxImagesExtension(**kwargs) | 0.417746 | 0.14814 |
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.provider_out import ProviderOut # noqa: E501
from openapi_client.rest import ApiException
class TestProviderOut(unittest.TestCase):
"""ProviderOut unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ProviderOut
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.provider_out.ProviderOut() # noqa: E501
if include_optional :
return ProviderOut(
name = 'My Company AWS production',
type = 'AWS',
uuid = '57e60f90-8c0c-4bd1-87a0-2143759aae1c',
authentication = null,
billing_source = null,
customer = null,
created_by = null,
stats = {"Mon Dec 31 2018 19:00:00 GMT-0500 (Eastern Standard Time)":[{"assembly_id":"f0d262ff-cc93-449c-a834-74c4d958d45f","billing_period_start":"2019-01-01T00:00:00.000Z","files_processed":"1/1","last_process_start_date":"2019-01-07T21:50:58.000Z","last_process_complete_date":"2019-01-07T21:51:01.000Z","summary_data_creation_datetime":"2019-01-07T21:51:32.000Z","summary_data_updated_datetime":"2019-01-07T21:51:32.000Z"}]},
infrastructure = 'AWS',
active = True,
cost_models = [
openapi_client.models.provider_out_all_of_cost_models.ProviderOut_allOf_cost_models(
uuid = 'd823a725-dc10-496a-af08-12533e4f8fe4',
name = 'My Great Cost Model', )
]
)
else :
return ProviderOut(
name = 'My Company AWS production',
type = 'AWS',
uuid = '57e60f90-8c0c-4bd1-87a0-2143759aae1c',
authentication = null,
billing_source = null,
customer = null,
created_by = null,
)
def testProviderOut(self):
"""Test ProviderOut"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main() | test/test_provider_out.py | from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.provider_out import ProviderOut # noqa: E501
from openapi_client.rest import ApiException
class TestProviderOut(unittest.TestCase):
"""ProviderOut unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ProviderOut
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.provider_out.ProviderOut() # noqa: E501
if include_optional :
return ProviderOut(
name = 'My Company AWS production',
type = 'AWS',
uuid = '57e60f90-8c0c-4bd1-87a0-2143759aae1c',
authentication = null,
billing_source = null,
customer = null,
created_by = null,
stats = {"Mon Dec 31 2018 19:00:00 GMT-0500 (Eastern Standard Time)":[{"assembly_id":"f0d262ff-cc93-449c-a834-74c4d958d45f","billing_period_start":"2019-01-01T00:00:00.000Z","files_processed":"1/1","last_process_start_date":"2019-01-07T21:50:58.000Z","last_process_complete_date":"2019-01-07T21:51:01.000Z","summary_data_creation_datetime":"2019-01-07T21:51:32.000Z","summary_data_updated_datetime":"2019-01-07T21:51:32.000Z"}]},
infrastructure = 'AWS',
active = True,
cost_models = [
openapi_client.models.provider_out_all_of_cost_models.ProviderOut_allOf_cost_models(
uuid = 'd823a725-dc10-496a-af08-12533e4f8fe4',
name = 'My Great Cost Model', )
]
)
else :
return ProviderOut(
name = 'My Company AWS production',
type = 'AWS',
uuid = '57e60f90-8c0c-4bd1-87a0-2143759aae1c',
authentication = null,
billing_source = null,
customer = null,
created_by = null,
)
def testProviderOut(self):
"""Test ProviderOut"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main() | 0.553264 | 0.100746 |
import argparse
import pywinusb.hid as hid
def main():
# Get Device, catch exception if not found
try:
device = hid.HidDeviceFilter(vendor_id = 0x04D8, product_id = 0xF372).get_devices()[0]
except:
print 'Device Error'
raise SystemExit
# Setup argument parser
parser = initArgParser()
args = parser.parse_args()
# Determine which action
if args.action == 'color':
setColor(device, args)
elif args.action == 'fade':
setFade(device, args)
elif args.action == 'strobe':
setStrobe(device, args)
elif args.action == 'wave':
setWave(device, args)
elif args.action == 'pattern':
setPattern(device, args)
def setPattern(device, args):
# Check for arguments & set values if needed
if not args.t:
args.t = 5
if not args.p or args.p > 8:
args.p = 1
# Open device
device.open()
# Retrieve Data
reports = device.find_output_reports()
# Set Data
values = [0,6,args.p,args.t,0,0,0,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setWave(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.w or args.w > 5:
args.w = 1
if not args.s:
args.s = 30
if not args.t:
args.t = 5
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
# Set Data
values = [0,4,args.w,args.r,args.g,args.b,0,args.t,args.s]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setStrobe(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
if not args.s:
args.s = 30
if not args.t:
args.t = 5
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,3,args.l,args.r,args.g,args.b,args.s,0,args.t]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setFade(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
if not args.s:
args.s = 30
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,2,args.l,args.r,args.g,args.b,args.s,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setColor(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,1,args.l,args.r,args.g,args.b,0,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def initArgParser():
# Setup argument parser
parser = argparse.ArgumentParser(description='Luxafor Arguments')
parser.add_argument('action', help='Action', choices=["color", "fade", "wave", "strobe", "pattern"])
parser.add_argument('-l', help='LED', type=int)
parser.add_argument('-b', help='Blue Value', type=int)
parser.add_argument('-r', help='Red Value', type=int)
parser.add_argument('-g', help='Green Value', type=int)
parser.add_argument('-s', help='Speed Value', type=int)
parser.add_argument('-t', help='Repeat Value', type=int)
parser.add_argument('-w', help='Wave Value', type=int)
parser.add_argument('-p', help='Pattern Value', type=int)
return parser
if __name__ == '__main__':
main() | luxafor-win.py | import argparse
import pywinusb.hid as hid
def main():
# Get Device, catch exception if not found
try:
device = hid.HidDeviceFilter(vendor_id = 0x04D8, product_id = 0xF372).get_devices()[0]
except:
print 'Device Error'
raise SystemExit
# Setup argument parser
parser = initArgParser()
args = parser.parse_args()
# Determine which action
if args.action == 'color':
setColor(device, args)
elif args.action == 'fade':
setFade(device, args)
elif args.action == 'strobe':
setStrobe(device, args)
elif args.action == 'wave':
setWave(device, args)
elif args.action == 'pattern':
setPattern(device, args)
def setPattern(device, args):
# Check for arguments & set values if needed
if not args.t:
args.t = 5
if not args.p or args.p > 8:
args.p = 1
# Open device
device.open()
# Retrieve Data
reports = device.find_output_reports()
# Set Data
values = [0,6,args.p,args.t,0,0,0,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setWave(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.w or args.w > 5:
args.w = 1
if not args.s:
args.s = 30
if not args.t:
args.t = 5
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
# Set Data
values = [0,4,args.w,args.r,args.g,args.b,0,args.t,args.s]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setStrobe(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
if not args.s:
args.s = 30
if not args.t:
args.t = 5
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,3,args.l,args.r,args.g,args.b,args.s,0,args.t]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setFade(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
if not args.s:
args.s = 30
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,2,args.l,args.r,args.g,args.b,args.s,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def setColor(device, args):
# Check for arguments & set values if needed
if not args.r:
args.r = 0
if not args.g:
args.g = 0
if not args.b:
args.b = 0
if not args.l:
args.l = 255
# Open Device
device.open()
# Retrieve Data
reports = device.find_output_reports()
values = [0,1,args.l,args.r,args.g,args.b,0,0,0]
reports[0].set_raw_data(values)
reports[0].send()
# Close Device
device.close()
def initArgParser():
# Setup argument parser
parser = argparse.ArgumentParser(description='Luxafor Arguments')
parser.add_argument('action', help='Action', choices=["color", "fade", "wave", "strobe", "pattern"])
parser.add_argument('-l', help='LED', type=int)
parser.add_argument('-b', help='Blue Value', type=int)
parser.add_argument('-r', help='Red Value', type=int)
parser.add_argument('-g', help='Green Value', type=int)
parser.add_argument('-s', help='Speed Value', type=int)
parser.add_argument('-t', help='Repeat Value', type=int)
parser.add_argument('-w', help='Wave Value', type=int)
parser.add_argument('-p', help='Pattern Value', type=int)
return parser
if __name__ == '__main__':
main() | 0.311636 | 0.167797 |
import RPi.GPIO as GPIO
import MFRC522
import signal
import time
import textwrap
import sys
import NFCHelper
import argparse
# Loop flag for the card-polling loop below; cleared by the SIGINT handler.
continue_reading = True
# NOTE(review): apparently unused — the parsed CLI flag is assigned to the
# lower-case name ``is_test`` further down, never to this variable.
is_Test = False
# Capture SIGINT for cleanup when the script is aborted
def end_read(signum, frame):
    """SIGINT handler: stop the read loop and release the GPIO pins.

    Parameters follow the ``signal.signal`` handler contract (signal
    number, current stack frame); neither is used here.  The first
    parameter is renamed from ``signal`` to ``signum`` so it no longer
    shadows the imported ``signal`` module — the handler is invoked
    positionally, so the rename is interface-safe.
    """
    global continue_reading
    print("Ctrl+C captured, ending read.")
    continue_reading = False
    # Release all GPIO channels claimed by the reader before exiting.
    GPIO.cleanup()
# Hook the SIGINT so Ctrl+C triggers a clean shutdown via end_read.
signal.signal(signal.SIGINT, end_read)


def _str_to_bool(value):
    """Parse a command-line string into a bool.

    argparse's ``type=bool`` is a documented trap: ``bool("False")`` is
    True, because any non-empty string is truthy.  This converter treats
    the usual affirmative spellings as True and everything else (including
    "False") as False, while keeping the ``-test <value>`` CLI shape.
    """
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')


parser = argparse.ArgumentParser(description='Write NFC tags for sonos.')
parser.add_argument('-test', type=_str_to_bool, default=False, help='Just test the reset, but dont perform the action on the card')
#parser.add_argument('-nfcKey', type=str, default='FF:FF:FF:FF:FF:FF', help='The hex code of the nfc key to writ the content default: FF:FF:FF:FF:FF:FF')
args = parser.parse_args()
is_test = args.test

# Create an object of the class MFRC522 (talks to the reader over SPI).
MIFAREReader = MFRC522.MFRC522()

print("Add NFC Tag ...")

# Program start.
# This loop keeps checking for chips. If one is near it will get the UID,
# authenticate, and wipe the writable blocks.
while continue_reading:
    # Scan for cards
    (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)

    # If a card is found
    if status == MIFAREReader.MI_OK:
        print("Card detected")

        # Get the (4-byte) UID of the card via anti-collision
        (status, uid) = MIFAREReader.MFRC522_Anticoll()

        # If we have the UID, continue
        if status == MIFAREReader.MI_OK:
            print("Card UID: %s:%s:%s:%s" % (uid[0], uid[1], uid[2], uid[3]))

            # Select the scanned tag
            MIFAREReader.MFRC522_SelectTag(uid)

            # Wipe the data blocks.  MIFARE Classic 1K presents 64 blocks;
            # every 4th block (3, 7, 11, ...) is a sector trailer holding
            # keys/access bits and must not be cleared, and block 0
            # (manufacturer data) is skipped by starting the range at 1.
            # (The NFCHelper API calls these "sectors" — TODO confirm.)
            print("cleaning Sector [%s-%s] need to be written with data." % (1, 64))
            for block_index in range(1, 64):
                if block_index % 4 != 3:
                    if not is_test:
                        NFCHelper.clear_Sector(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT, block_index)
                    # Read back (and in test mode, merely inspect) the block.
                    NFCHelper.read_Sector(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT, block_index)

            if not is_test:
                NFCHelper.clear_Metadata(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT)
            nfcDataSize = NFCHelper.read_Metadata(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT)

            # Stop the crypto unit so the next card can be read
            MIFAREReader.MFRC522_StopCrypto1()
            print("Card UID: %s:%s:%s:%s reset." % (uid[0], uid[1], uid[2], uid[3]))
            time.sleep(3)
import RPi.GPIO as GPIO
import MFRC522
import signal
import time
import textwrap
import sys
import NFCHelper
import argparse
continue_reading = True

def end_read(sig, frame):
    """SIGINT handler: stop the polling loop and release the GPIO pins."""
    # Renamed the first parameter from `signal` so it no longer shadows
    # the `signal` module.
    global continue_reading
    print("Ctrl+C captured, ending read.")
    continue_reading = False
    GPIO.cleanup()

# Hook SIGINT so Ctrl+C cleans up instead of leaving GPIO pins claimed.
signal.signal(signal.SIGINT, end_read)

parser = argparse.ArgumentParser(description='Write NFC tags for sonos.')
# BUG FIX: argparse's `type=bool` is a known trap -- bool('False') is True,
# so ANY non-empty value used to enable test mode.  Parse the string
# explicitly instead: '-test True' keeps working, '-test False' now really
# disables test mode.
parser.add_argument('-test',
                    type=lambda v: str(v).strip().lower() in ('1', 'true', 'yes'),
                    default=False,
                    help='Just test the reset, but dont perform the action on the card')
#parser.add_argument('-nfcKey', type=str, default='FF:FF:FF:FF:FF:FF', help='The hex code of the nfc key to writ the content default: FF:FF:FF:FF:FF:FF')
args = parser.parse_args()
# (The unused module-level `is_Test = False` has been removed; only the
# lowercase `is_test` was ever read.)
is_test = args.test

# Create an object of the class MFRC522
MIFAREReader = MFRC522.MFRC522()
print("Add NFC Tag ...")

# Main loop: poll for cards; when one is present, authenticate and reset it.
while continue_reading:
    # Scan for cards
    (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
    # If a card is found
    if status == MIFAREReader.MI_OK:
        print("Card detected")
        # Get the UID of the card
        (status, uid) = MIFAREReader.MFRC522_Anticoll()
        # If we have the UID, continue
        if status == MIFAREReader.MI_OK:
            print("Card UID: %s:%s:%s:%s" % (uid[0], uid[1], uid[2], uid[3]))
            # Select the scanned tag
            MIFAREReader.MFRC522_SelectTag(uid)
            sectorCount = 1
            print("cleaning Sector [%s-%s] need to be written with data." % (sectorCount, 64))
            while sectorCount < 64:
                # Skip every 4th block (presumably the sector trailer that
                # holds keys/access bits -- TODO confirm against the MIFARE
                # Classic memory layout).
                if sectorCount != 0 and sectorCount % 4 != 3:
                    if not is_test:
                        NFCHelper.clear_Sector(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT, sectorCount)
                    NFCHelper.read_Sector(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT, sectorCount)
                sectorCount = sectorCount + 1
            if not is_test:
                NFCHelper.clear_Metadata(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT)
            # Read back for verification; the returned size is not used.
            NFCHelper.read_Metadata(MIFAREReader, uid, NFCHelper.AUTH_KEY_DEFAULT)
            # Stop encryption on the card before the next poll.
            MIFAREReader.MFRC522_StopCrypto1()
            print("Card UID: %s:%s:%s:%s reset." % (uid[0], uid[1], uid[2], uid[3]))
time.sleep(3) | 0.203945 | 0.166303 |
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click.argument("dir", default="env")
@click.option(
    "-n", "--name", metavar="NAME",
    help="Environment name (default is env parent directory name).")
@click.option(
    "-p", "--python", metavar="VERSION",
    help="Version of Python to use for the environment.")
@click.option(
    "-g", "--guild", metavar="VERSION_OR_PATH",
    help=(
        "Version of Guild AI to use for the environment. "
        "By default, the active version of Guild is installed. This "
        "value may alternatively be a path to a Guild wheel distribution.")
)
@click.option(
    "-r", "--requirement", metavar="REQ", multiple=True,
    help=(
        "Install required package or packages defined in a file. May be "
        "used multiple times."))
@click.option(
    "-P", "--path", metavar="DIR", multiple=True,
    help="Include DIR as a Python path in the environment.")
@click.option(
    "--no-reqs", is_flag=True,
    help=(
        "Don't install from requirements.txt or guild.yml in environment "
        "parent directory."))
@click.option(
    "--tensorflow", metavar="PACKAGE",
    help=(
        "Install PACKAGE for TensorFlow. By default installs the package "
        "suitable for the system based on GPU support."))
@click.option(
    "--skip-tensorflow", is_flag=True,
    help="Don't install TensorFlow.")
@click.option(
    "-l", "--local-resource-cache", is_flag=True,
    help="Use a local cache when initializing an environment.")
@click.option(
    "-y", "--yes", is_flag=True,
    help="Initialize a Guild environment without prompting.")
@click.option(
    "--no-progress", is_flag=True,
    help="Don't show progress when installing environment packages.")
@click_util.use_args
def init(args):
    """Initialize a Guild environment.
    `init` initializes a Guild environment in `DIR`, which is the
    `env` subdirectory of the current directory by default.
    `init` creates a virtual environment in `DIR` using `virtualenv`.
    Use `--python` to specify the Python interpreter to use within the
    generated virtual environment. By default, the default Python
    interpreter for `virtualenv` is used unless `python` is explicitly
    listed as a requirement. If `no-venv` is specified, `--python` is
    ignored.
    ### Requirements
    By default, any required packages listed under packages.requires
    in `guild.yml` in the environment parent directory are installed
    into the environment. Use `--no-reqs` to suppress this behavior.
    Additionally, packages defined in `requirements.txt` in the
    environment parent directory will be installed. Use `--no-reqs` to
    suppress this behavior.
    Note that packages defined in `guild.yml` use Guild package names
    while packages defined in `requirements.txt` use PyPI package
    names.
    For information in requirements files, see:
    https://pip.readthedocs.io/en/1.1/requirements.html
    You may explicitly specify requirements file using `-r` or
    `--requirement`. If `-r, --requirement` is specified, Guild will
    not automatically install packages in `requirements.txt` -- that
    file must be specified explicitly in the command.
    ### Guild AI
    By default `init` installs the active version of Guild AI in the
    initialized environment. To install a different version, or to
    install a Guild wheel distribution file use the `--guild` option.
    ### TensorFlow
    TensorFlow is installed to the environment unless
    `--skip-tensorflow` is specified. The TensorFlow package to
    install can be specified using `--tensorflow`. By default, Guild
    installs the TensorFlow package suited for the system:
    ``tensorflow-gpu`` if a GPU is available, otherwise
    ``tensorflow``.
    ### Resource cache
    By default resources are cached and shared at the user level in
    `~/.guild/cache/resources` so that resources downloaded from one
    environment are available to other environments. You can modify
    this behavior to have all resources downloaded local to the
    environment by specifying `--local-resource-cache`.
    """
    # Imported lazily so loading the CLI module stays cheap; the heavy
    # implementation is only pulled in when the command actually runs.
    from . import init_impl
init_impl.main(args) | guild/commands/init.py |
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click.argument("dir", default="env")
@click.option(
    "-n", "--name", metavar="NAME",
    help="Environment name (default is env parent directory name).")
@click.option(
    "-p", "--python", metavar="VERSION",
    help="Version of Python to use for the environment.")
@click.option(
    "-g", "--guild", metavar="VERSION_OR_PATH",
    help=(
        "Version of Guild AI to use for the environment. "
        "By default, the active version of Guild is installed. This "
        "value may alternatively be a path to a Guild wheel distribution.")
)
@click.option(
    "-r", "--requirement", metavar="REQ", multiple=True,
    help=(
        "Install required package or packages defined in a file. May be "
        "used multiple times."))
@click.option(
    "-P", "--path", metavar="DIR", multiple=True,
    help="Include DIR as a Python path in the environment.")
@click.option(
    "--no-reqs", is_flag=True,
    help=(
        "Don't install from requirements.txt or guild.yml in environment "
        "parent directory."))
@click.option(
    "--tensorflow", metavar="PACKAGE",
    help=(
        "Install PACKAGE for TensorFlow. By default installs the package "
        "suitable for the system based on GPU support."))
@click.option(
    "--skip-tensorflow", is_flag=True,
    help="Don't install TensorFlow.")
@click.option(
    "-l", "--local-resource-cache", is_flag=True,
    help="Use a local cache when initializing an environment.")
@click.option(
    "-y", "--yes", is_flag=True,
    help="Initialize a Guild environment without prompting.")
@click.option(
    "--no-progress", is_flag=True,
    help="Don't show progress when installing environment packages.")
@click_util.use_args
def init(args):
    """Initialize a Guild environment.
    `init` initializes a Guild environment in `DIR`, which is the
    `env` subdirectory of the current directory by default.
    `init` creates a virtual environment in `DIR` using `virtualenv`.
    Use `--python` to specify the Python interpreter to use within the
    generated virtual environment. By default, the default Python
    interpreter for `virtualenv` is used unless `python` is explicitly
    listed as a requirement. If `no-venv` is specified, `--python` is
    ignored.
    ### Requirements
    By default, any required packages listed under packages.requires
    in `guild.yml` in the environment parent directory are installed
    into the environment. Use `--no-reqs` to suppress this behavior.
    Additionally, packages defined in `requirements.txt` in the
    environment parent directory will be installed. Use `--no-reqs` to
    suppress this behavior.
    Note that packages defined in `guild.yml` use Guild package names
    while packages defined in `requirements.txt` use PyPI package
    names.
    For information in requirements files, see:
    https://pip.readthedocs.io/en/1.1/requirements.html
    You may explicitly specify requirements file using `-r` or
    `--requirement`. If `-r, --requirement` is specified, Guild will
    not automatically install packages in `requirements.txt` -- that
    file must be specified explicitly in the command.
    ### Guild AI
    By default `init` installs the active version of Guild AI in the
    initialized environment. To install a different version, or to
    install a Guild wheel distribution file use the `--guild` option.
    ### TensorFlow
    TensorFlow is installed to the environment unless
    `--skip-tensorflow` is specified. The TensorFlow package to
    install can be specified using `--tensorflow`. By default, Guild
    installs the TensorFlow package suited for the system:
    ``tensorflow-gpu`` if a GPU is available, otherwise
    ``tensorflow``.
    ### Resource cache
    By default resources are cached and shared at the user level in
    `~/.guild/cache/resources` so that resources downloaded from one
    environment are available to other environments. You can modify
    this behavior to have all resources downloaded local to the
    environment by specifying `--local-resource-cache`.
    """
    # Imported lazily so loading the CLI module stays cheap; the heavy
    # implementation is only pulled in when the command actually runs.
    from . import init_impl
init_impl.main(args) | 0.834508 | 0.216798 |
import gzip
import os
import random
import tempfile
import uuid
from io import BytesIO
from unittest import TestCase
import corehq.blobs.util as mod
from corehq.blobs.exceptions import GzipStreamError
class TestRandomUrlId(TestCase):
    """Sanity checks for mod.random_url_id: length bounds and uniqueness."""
    sample_size = 100
    def setUp(self):
        # Generate a fresh batch of ids for every test.
        self.ids = [mod.random_url_id(8) for _ in range(self.sample_size)]
    def test_random_id_length(self):
        lengths = [len(url_id) for url_id in self.ids]
        self.assertGreater(min(lengths), 0, self.ids)
        self.assertEqual(max(lengths), 11, self.ids)
    def test_random_id_randomness(self):
        # Every generated id should be distinct.
        self.assertEqual(len(set(self.ids)), self.sample_size, self.ids)
class TestGzipStream(TestCase):
    """Tests for ``mod.GzipStream``.

    GzipStream appears to wrap a file-like object and gzip-compress it as
    it is read, exposing the *uncompressed* size via ``content_length``
    once the stream has been fully consumed; earlier access raises
    GzipStreamError.  (Contract inferred from these tests -- confirm
    against corehq.blobs.util.)
    """
    def test_compression(self):
        # Build content a few times larger than the stream's chunk size so
        # multiple read cycles are exercised.
        desired_size = mod.GzipStream.CHUNK_SIZE * 4
        content = uuid.uuid4().bytes * 4
        while len(content) < desired_size:
            content += uuid.uuid4().bytes * 4
        compress_stream = mod.GzipStream(BytesIO(content))
        with tempfile.NamedTemporaryFile() as compressed_f:
            compressed_f.write(compress_stream.read())
            compressed_f.flush()
            with gzip.open(compressed_f.name, 'r') as reader:
                actual = reader.read()
            file_size = os.stat(compressed_f.name).st_size
        # Output is smaller than the input, round-trips losslessly, and
        # content_length reports the uncompressed size.
        self.assertGreater(len(content), file_size)
        self.assertEqual(content, actual)
        self.assertEqual(len(content), compress_stream.content_length)
    def test_content_length_access(self):
        with tempfile.NamedTemporaryFile() as f:
            f.write(b"x" * 11)
            f.seek(0)
            compress_stream = mod.GzipStream(f)
            # Try to read content_length without reading the stream
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length # noqa
            # Try to read content_length after partially reading the stream
            content_length = len(compress_stream.read(5))
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length # noqa
            # Read content_length after completely reading the stream and check
            # that it's correct.  Note: content_length is the uncompressed
            # size (11), not the number of compressed bytes read above.
            content_length += len(compress_stream.read())
            self.assertNotEqual(compress_stream.content_length, content_length)
            self.assertEqual(compress_stream.content_length, 11)
    def test_content_length_0(self):
        # NOTE invariant based on GzipFile implementation
        # Empty source: content_length is still gated on a full read.
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(10)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 0)
    def test_content_length_1(self):
        # NOTE invariant based on GzipFile implementation
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read(10)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 1)
    def test_content_length_10x_chunk(self):
        # NOTE invariant based on GzipFile implementation
        # Seed the RNG so the incompressible payload (and therefore the
        # partial-read boundary below) is reproducible.
        self.addCleanup(random.seed)
        random.seed(42)
        size = mod.GzipStream.CHUNK_SIZE * 10
        data = bytes(random.getrandbits(8) for _ in range(size))
        zipper = mod.GzipStream(BytesIO(data))
        zipper.read(16405)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, size, "bad content length")
    def test_content_length_after_partial_read_and_close(self):
        # NOTE invariant based on GzipFile implementation
        # Closing after only a partial read must NOT make content_length
        # available -- the full uncompressed size was never observed.
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(1)
        assert zipper._buf.size, f"invariant failed ({zipper._buf.size})"
        zipper.close()
        with self.assertRaises(GzipStreamError):
            zipper.content_length
    def test_content_length_after_full_read_and_close(self):
        # A fully consumed stream keeps its content_length after close().
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read()
        zipper.close()
        self.assertEqual(zipper.content_length, 1) | corehq/blobs/tests/test_util.py | import gzip
import os
import random
import tempfile
import uuid
from io import BytesIO
from unittest import TestCase
import corehq.blobs.util as mod
from corehq.blobs.exceptions import GzipStreamError
class TestRandomUrlId(TestCase):
    """Sanity checks for mod.random_url_id: length bounds and uniqueness."""
    sample_size = 100
    def setUp(self):
        # Generate a fresh batch of ids for every test.
        self.ids = [mod.random_url_id(8) for _ in range(self.sample_size)]
    def test_random_id_length(self):
        lengths = [len(url_id) for url_id in self.ids]
        self.assertGreater(min(lengths), 0, self.ids)
        self.assertEqual(max(lengths), 11, self.ids)
    def test_random_id_randomness(self):
        # Every generated id should be distinct.
        self.assertEqual(len(set(self.ids)), self.sample_size, self.ids)
class TestGzipStream(TestCase):
    """Tests for ``mod.GzipStream``.

    GzipStream appears to wrap a file-like object and gzip-compress it as
    it is read, exposing the *uncompressed* size via ``content_length``
    once the stream has been fully consumed; earlier access raises
    GzipStreamError.  (Contract inferred from these tests -- confirm
    against corehq.blobs.util.)
    """
    def test_compression(self):
        # Build content a few times larger than the stream's chunk size so
        # multiple read cycles are exercised.
        desired_size = mod.GzipStream.CHUNK_SIZE * 4
        content = uuid.uuid4().bytes * 4
        while len(content) < desired_size:
            content += uuid.uuid4().bytes * 4
        compress_stream = mod.GzipStream(BytesIO(content))
        with tempfile.NamedTemporaryFile() as compressed_f:
            compressed_f.write(compress_stream.read())
            compressed_f.flush()
            with gzip.open(compressed_f.name, 'r') as reader:
                actual = reader.read()
            file_size = os.stat(compressed_f.name).st_size
        # Output is smaller than the input, round-trips losslessly, and
        # content_length reports the uncompressed size.
        self.assertGreater(len(content), file_size)
        self.assertEqual(content, actual)
        self.assertEqual(len(content), compress_stream.content_length)
    def test_content_length_access(self):
        with tempfile.NamedTemporaryFile() as f:
            f.write(b"x" * 11)
            f.seek(0)
            compress_stream = mod.GzipStream(f)
            # Try to read content_length without reading the stream
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length # noqa
            # Try to read content_length after partially reading the stream
            content_length = len(compress_stream.read(5))
            with self.assertRaises(GzipStreamError):
                compress_stream.content_length # noqa
            # Read content_length after completely reading the stream and check
            # that it's correct.  Note: content_length is the uncompressed
            # size (11), not the number of compressed bytes read above.
            content_length += len(compress_stream.read())
            self.assertNotEqual(compress_stream.content_length, content_length)
            self.assertEqual(compress_stream.content_length, 11)
    def test_content_length_0(self):
        # NOTE invariant based on GzipFile implementation
        # Empty source: content_length is still gated on a full read.
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(10)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 0)
    def test_content_length_1(self):
        # NOTE invariant based on GzipFile implementation
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read(10)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, 1)
    def test_content_length_10x_chunk(self):
        # NOTE invariant based on GzipFile implementation
        # Seed the RNG so the incompressible payload (and therefore the
        # partial-read boundary below) is reproducible.
        self.addCleanup(random.seed)
        random.seed(42)
        size = mod.GzipStream.CHUNK_SIZE * 10
        data = bytes(random.getrandbits(8) for _ in range(size))
        zipper = mod.GzipStream(BytesIO(data))
        zipper.read(16405)
        assert zipper._buf.size == 0, f"invariant failed ({zipper._buf.size})"
        with self.assertRaises(GzipStreamError):
            zipper.content_length
        zipper.read()
        self.assertEqual(zipper.content_length, size, "bad content length")
    def test_content_length_after_partial_read_and_close(self):
        # NOTE invariant based on GzipFile implementation
        # Closing after only a partial read must NOT make content_length
        # available -- the full uncompressed size was never observed.
        zipper = mod.GzipStream(BytesIO(b""))
        zipper.read(1)
        assert zipper._buf.size, f"invariant failed ({zipper._buf.size})"
        zipper.close()
        with self.assertRaises(GzipStreamError):
            zipper.content_length
    def test_content_length_after_full_read_and_close(self):
        # A fully consumed stream keeps its content_length after close().
        zipper = mod.GzipStream(BytesIO(b"x"))
        zipper.read()
        zipper.close()
        self.assertEqual(zipper.content_length, 1) | 0.495606 | 0.402568 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder_kinematic, build_decoder
from modeling.backbone import build_backbone
from modeling.kinematic_graph import build_kinematic_graph
class DeepLab(nn.Module):
    """DeepLab v3+ segmentation network with an optional kinematic-graph head.

    Pipeline: backbone -> ASPP -> decoder (-> kinematic graph when
    args.use_kinematic) -> bilinear upsample back to the input resolution.
    """

    def __init__(self, args, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        self.args = args
        if backbone == 'drn':
            # The DRN backbone is only built for an output stride of 8.
            output_stride = 8
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d
        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        if self.args.use_kinematic:
            self.decoder = build_decoder_kinematic(backbone, BatchNorm)
            self.kinematic_layer = build_kinematic_graph(BatchNorm)
        else:
            self.decoder = build_decoder(num_classes, backbone, BatchNorm)
        # BUG FIX: this bool attribute used to shadow a `freeze_bn()` method,
        # which made the method uncallable (TypeError: 'bool' object is not
        # callable).  The freezing logic now lives in `freeze_bn_layers()`
        # (the old method never worked, so no caller could depend on it) and
        # is applied automatically when requested.
        self.freeze_bn = freeze_bn
        if freeze_bn:
            self.freeze_bn_layers()

    def forward(self, input):
        """Run a forward pass; returns logits at the input's spatial size."""
        x, low_level_feat = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        if self.args.use_kinematic:
            x = self.kinematic_layer(x)
        # Both configurations upsample back to the input resolution.
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn_layers(self):
        """Put every batch-norm layer into eval mode (freeze running stats)."""
        for m in self.modules():
            if isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.eval()

    def _trainable_params(self, modules):
        """Yield trainable parameters of conv (and, unless batch norm is
        frozen, batch-norm) layers found inside *modules*."""
        if self.freeze_bn:
            wanted = (nn.Conv2d,)
        else:
            wanted = (nn.Conv2d, SynchronizedBatchNorm2d, nn.BatchNorm2d)
        for module in modules:
            for _, m in module.named_modules():
                if isinstance(m, wanted):
                    for p in m.parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Parameters trained at the base learning rate (the backbone)."""
        return self._trainable_params([self.backbone])

    def get_10x_lr_params(self):
        """Parameters trained at 10x the base learning rate (the heads)."""
        if self.args.use_kinematic:
            modules = [self.aspp, self.decoder, self.kinematic_layer]
        else:
            modules = [self.aspp, self.decoder]
        return self._trainable_params(modules)
if __name__ == "__main__":
    # Smoke test: build the model, run a dummy forward pass, and export the
    # computation graph to TensorBoard.  Requires the project-local `args`
    # module and the third-party tensorboardX package; the log directory
    # below is machine-specific.
    from args import Args_occ5000
    from tensorboardX import SummaryWriter
    writer = SummaryWriter('/home/kidd/Documents/graph1')
    args = Args_occ5000()
    model = DeepLab(args=args, backbone='resnet', output_stride=16)
    model.eval()
    # 513x513 is the canonical DeepLab crop size.
    input = torch.rand(1, 3, 513, 513)
    output = model(input)
    writer.add_graph(model, input)
    writer.close()
    print(output.size()) | modeling/deeplab.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder_kinematic, build_decoder
from modeling.backbone import build_backbone
from modeling.kinematic_graph import build_kinematic_graph
class DeepLab(nn.Module):
    """DeepLab v3+ segmentation network with an optional kinematic-graph head.

    Pipeline: backbone -> ASPP -> decoder (-> kinematic graph when
    args.use_kinematic) -> bilinear upsample back to the input resolution.
    """

    def __init__(self, args, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        self.args = args
        if backbone == 'drn':
            # The DRN backbone is only built for an output stride of 8.
            output_stride = 8
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d
        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        if self.args.use_kinematic:
            self.decoder = build_decoder_kinematic(backbone, BatchNorm)
            self.kinematic_layer = build_kinematic_graph(BatchNorm)
        else:
            self.decoder = build_decoder(num_classes, backbone, BatchNorm)
        # BUG FIX: this bool attribute used to shadow a `freeze_bn()` method,
        # which made the method uncallable (TypeError: 'bool' object is not
        # callable).  The freezing logic now lives in `freeze_bn_layers()`
        # (the old method never worked, so no caller could depend on it) and
        # is applied automatically when requested.
        self.freeze_bn = freeze_bn
        if freeze_bn:
            self.freeze_bn_layers()

    def forward(self, input):
        """Run a forward pass; returns logits at the input's spatial size."""
        x, low_level_feat = self.backbone(input)
        x = self.aspp(x)
        x = self.decoder(x, low_level_feat)
        if self.args.use_kinematic:
            x = self.kinematic_layer(x)
        # Both configurations upsample back to the input resolution.
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn_layers(self):
        """Put every batch-norm layer into eval mode (freeze running stats)."""
        for m in self.modules():
            if isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.eval()

    def _trainable_params(self, modules):
        """Yield trainable parameters of conv (and, unless batch norm is
        frozen, batch-norm) layers found inside *modules*."""
        if self.freeze_bn:
            wanted = (nn.Conv2d,)
        else:
            wanted = (nn.Conv2d, SynchronizedBatchNorm2d, nn.BatchNorm2d)
        for module in modules:
            for _, m in module.named_modules():
                if isinstance(m, wanted):
                    for p in m.parameters():
                        if p.requires_grad:
                            yield p

    def get_1x_lr_params(self):
        """Parameters trained at the base learning rate (the backbone)."""
        return self._trainable_params([self.backbone])

    def get_10x_lr_params(self):
        """Parameters trained at 10x the base learning rate (the heads)."""
        if self.args.use_kinematic:
            modules = [self.aspp, self.decoder, self.kinematic_layer]
        else:
            modules = [self.aspp, self.decoder]
        return self._trainable_params(modules)
if __name__ == "__main__":
    # Smoke test: build the model, run a dummy forward pass, and export the
    # computation graph to TensorBoard.  Requires the project-local `args`
    # module and the third-party tensorboardX package; the log directory
    # below is machine-specific.
    from args import Args_occ5000
    from tensorboardX import SummaryWriter
    writer = SummaryWriter('/home/kidd/Documents/graph1')
    args = Args_occ5000()
    model = DeepLab(args=args, backbone='resnet', output_stride=16)
    model.eval()
    # 513x513 is the canonical DeepLab crop size.
    input = torch.rand(1, 3, 513, 513)
    output = model(input)
    writer.add_graph(model, input)
    writer.close()
    print(output.size()) | 0.889511 | 0.316369 |
from ._util import process_service_request, force_default, handle_single_argument
from ._payload import malquery_fuzzy_payload, generic_payload_list
from ._payload import malquery_exact_search_payload, malquery_hunt_payload
from ._service_class import ServiceClass
from ._endpoint._malquery import _malquery_endpoints as Endpoints
class MalQuery(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""
def get_quotas(self: object) -> dict:
"""Get information about search and download quotas in your environment
This method does not accept arguments or keywords.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryQuotasV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryQuotasV1"
)
@force_default(defaults=["body"], default_types=["dict"])
def fuzzy_search(self: object, body: dict = None, **kwargs) -> dict:
"""Search Falcon MalQuery quickly, but with more potential for false positives.
Search for a combination of hex patterns and strings in order to identify
samples based upon file content at byte level granularity.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"options": {
"filter_meta": [
"string"
],
"limit": 0
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}
filter_meta -- List of strings.
limit -- Integer representing maximum number of matches to return.
patterns -- List of dictionaries containing patterns to match.
{
"type": "string",
"value": "string
}
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/PostMalQueryFuzzySearchV1
"""
if not body:
body = malquery_fuzzy_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="PostMalQueryFuzzySearchV1",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_download(self: object, *args, parameters: dict = None, **kwargs) -> object:
"""Download a file indexed by MalQuery.
Specify the file using its SHA256.
Only one file is supported at this time.
Keyword arguments:
ids -- List of SHA256s to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: binary object on SUCCESS, dict object containing API response on FAILURE.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryDownloadV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryDownloadV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_metadata(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Retrieve indexed files metadata by their hash
Keyword arguments:
ids -- List of SHA256s to retrieve metadata for. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryMetadataV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryMetadataV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_request(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Check the status and results of an asynchronous request, such as hunt or exact-search.
Supports a single request id at this time.
Keyword arguments:
ids -- List of MalQuery identifiers to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryRequestV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryRequestV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_samples(self: object, *args, parameters: dict = None, **kwargs) -> object:
"""Fetch a zip archive with password 'infected' containing the samples.
Call this once the samples-multidownload request has finished processing
Keyword arguments:
ids -- Multi-download job ID. String.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: binary object on SUCCESS, dict object containing API response on FAILURE.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/GetMalQueryEntitiesSamplesFetchV1
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetMalQueryEntitiesSamplesFetchV1",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def samples_multidownload(self: object, *args, body: dict = None, **kwargs) -> dict:
"""Schedule samples for download. Use the result id with the /request endpoint to check
if the download is ready after which you can call get_samples to get the zip.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"samples": [
"string"
]
}
samples -- SHA256(s) of the samples to retrieve. String or list of strings.
Arguments: When not specified, the first argument to this method is assumed to be
'samples'. All others are ignored.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/malquery/PostMalQueryEntitiesSamplesMultidownloadV1
"""
if not body:
body = generic_payload_list(submitted_arguments=args,
submitted_keywords=kwargs,
payload_value="samples"
)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="PostMalQueryEntitiesSamplesMultidownloadV1",
body=body
)
@force_default(defaults=["body"], default_types=["dict"])
def exact_search(self: object, body: dict = None, **kwargs) -> dict:
    """Schedule an exact MalQuery search of file content at byte-level granularity.

    Searches for a combination of hex patterns and strings; results can be
    filtered on criteria such as file type, file size and first-seen date.
    Returns a request ID which can be used with get_request.

    Keyword arguments:
    body -- full body payload; not required when other keywords are provided.
            {
                "options": {
                    "filter_filetypes": ["string"],
                    "filter_meta": ["string"],
                    "limit": 0,
                    "max_date": "string",
                    "max_size": "string",
                    "min_date": "string",
                    "min_size": "string"
                },
                "patterns": [{"type": "string", "value": "string"}]
            }
    filter_filetypes -- File types to filter on. List of strings.
    filter_meta -- File metadata to filter on. List of strings.
    limit -- Maximum number of matches to return. Integer.
    max_date -- Maximum date to match. UTC formatted string.
    min_date -- Minimum date to match. UTC formatted string.
    max_size -- Maximum size in bytes to match. String.
    min_size -- Minimum size in bytes to match. String.
    patterns -- Patterns to match. List of {"type": ..., "value": ...} dictionaries.

    This method only supports keywords for providing arguments.

    Returns: dict containing the API response.
    HTTP Method: POST
    Operation: PostMalQueryExactSearchV1
    """
    search_body = body if body else malquery_exact_search_payload(passed_keywords=kwargs)
    return process_service_request(
        calling_object=self,
        endpoints=Endpoints,
        operation_id="PostMalQueryExactSearchV1",
        body=search_body
        )
@force_default(defaults=["body"], default_types=["dict"])
def hunt(self: object, body: dict = None, **kwargs) -> dict:
    """Schedule a YARA-based search for execution.

    Returns a request ID which can be used with get_request.

    Keyword arguments:
    body -- full body payload; not required when other keywords are provided.
            {
                "options": {
                    "filter_filetypes": ["string"],
                    "filter_meta": ["string"],
                    "limit": 0,
                    "max_date": "string",
                    "max_size": "string",
                    "min_date": "string",
                    "min_size": "string"
                },
                "yara_rule": "string"
            }
    filter_filetypes -- File types to filter on. List of strings.
    filter_meta -- File metadata to filter on. List of strings.
    limit -- Maximum number of matches to return. Integer.
    max_date -- Maximum date to match. UTC formatted string.
    min_date -- Minimum date to match. UTC formatted string.
    max_size -- Maximum size in bytes to match. String.
    min_size -- Minimum size in bytes to match. String.
    yara_rule -- YARA rule to use for matching. String.

    This method only supports keywords for providing arguments.

    Returns: dict containing the API response.
    HTTP Method: POST
    Operation: PostMalQueryHuntV1
    """
    hunt_body = body if body else malquery_hunt_payload(passed_keywords=kwargs)
    return process_service_request(
        calling_object=self,
        endpoints=Endpoints,
        operation_id="PostMalQueryHuntV1",
        body=hunt_body
        )
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8. They are defined here as
# aliases of the snake_case methods above for backwards
# compatibility / ease of use purposes.
GetMalQueryQuotasV1 = get_quotas
PostMalQueryFuzzySearchV1 = fuzzy_search
GetMalQueryDownloadV1 = get_download
GetMalQueryMetadataV1 = get_metadata
GetMalQueryRequestV1 = get_request
GetMalQueryEntitiesSamplesFetchV1 = get_samples
PostMalQueryEntitiesSamplesMultidownloadV1 = samples_multidownload
PostMalQueryExactSearchV1 = exact_search
PostMalQueryHuntV1 = hunt | src/falconpy/malquery.py | from ._util import process_service_request, force_default, handle_single_argument
from ._payload import malquery_fuzzy_payload, generic_payload_list
from ._payload import malquery_exact_search_payload, malquery_hunt_payload
from ._service_class import ServiceClass
from ._endpoint._malquery import _malquery_endpoints as Endpoints
class MalQuery(ServiceClass):
    """Interface to the CrowdStrike Falcon MalQuery API.

    The only requirement to instantiate an instance of this class is one of:
      - a valid client_id and client_secret provided as keywords.
      - a credential dictionary containing valid API credentials:
            {"client_id": "CLIENT_ID_HERE", "client_secret": "CLIENT_SECRET_HERE"}
      - a previously-authenticated instance of the authentication service class (oauth2.py)
      - a valid token provided by the authentication service class (OAuth2.token())
    """

    def get_quotas(self: object) -> dict:
        """Return search and download quota information for your environment.

        This method does not accept arguments or keywords.

        Returns: dict containing the API response.
        HTTP Method: GET
        Operation: GetMalQueryQuotasV1
        """
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetMalQueryQuotasV1"
            )

    @force_default(defaults=["body"], default_types=["dict"])
    def fuzzy_search(self: object, body: dict = None, **kwargs) -> dict:
        """Search Falcon MalQuery quickly, but with more potential for false positives.

        Searches for a combination of hex patterns and strings in order to
        identify samples based upon file content at byte-level granularity.

        Keyword arguments:
        body -- full body payload; not required when other keywords are provided.
                {
                    "options": {"filter_meta": ["string"], "limit": 0},
                    "patterns": [{"type": "string", "value": "string"}]
                }
        filter_meta -- File metadata to filter on. List of strings.
        limit -- Maximum number of matches to return. Integer.
        patterns -- Patterns to match. List of {"type": ..., "value": ...} dictionaries.

        This method only supports keywords for providing arguments.

        Returns: dict containing the API response.
        HTTP Method: POST
        Operation: PostMalQueryFuzzySearchV1
        """
        search_body = body if body else malquery_fuzzy_payload(passed_keywords=kwargs)
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="PostMalQueryFuzzySearchV1",
            body=search_body
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_download(self: object, *args, parameters: dict = None, **kwargs) -> object:
        """Download a file indexed by MalQuery, specified by its SHA256.

        Only one file is supported at this time.

        Keyword arguments:
        ids -- SHA256 of the file to retrieve. String or list of strings.
        parameters -- full parameters payload; not required if ids is provided as a keyword.

        When no keywords are given, the first positional argument is treated as 'ids'.

        Returns: binary object on success, dict containing the API response on failure.
        HTTP Method: GET
        Operation: GetMalQueryDownloadV1
        """
        query_params = handle_single_argument(args, parameters, "ids")
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetMalQueryDownloadV1",
            keywords=kwargs,
            params=query_params
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_metadata(self: object, *args, parameters: dict = None, **kwargs) -> dict:
        """Retrieve metadata for indexed files by their hashes.

        Keyword arguments:
        ids -- SHA256s to retrieve metadata for. String or list of strings.
        parameters -- full parameters payload; not required if ids is provided as a keyword.

        When no keywords are given, the first positional argument is treated as 'ids'.

        Returns: dict containing the API response.
        HTTP Method: GET
        Operation: GetMalQueryMetadataV1
        """
        query_params = handle_single_argument(args, parameters, "ids")
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetMalQueryMetadataV1",
            keywords=kwargs,
            params=query_params
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_request(self: object, *args, parameters: dict = None, **kwargs) -> dict:
        """Check the status and results of an asynchronous request (hunt, exact-search, ...).

        Supports a single request ID at this time.

        Keyword arguments:
        ids -- MalQuery request identifiers to retrieve. String or list of strings.
        parameters -- full parameters payload; not required if ids is provided as a keyword.

        When no keywords are given, the first positional argument is treated as 'ids'.

        Returns: dict containing the API response.
        HTTP Method: GET
        Operation: GetMalQueryRequestV1
        """
        query_params = handle_single_argument(args, parameters, "ids")
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetMalQueryRequestV1",
            keywords=kwargs,
            params=query_params
            )

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_samples(self: object, *args, parameters: dict = None, **kwargs) -> object:
        """Fetch the password-protected ('infected') zip archive of requested samples.

        Call this once the samples-multidownload request has finished processing.

        Keyword arguments:
        ids -- Multi-download job ID. String.
        parameters -- full parameters payload; not required if ids is provided as a keyword.

        When no keywords are given, the first positional argument is treated as 'ids'.

        Returns: binary object on success, dict containing the API response on failure.
        HTTP Method: GET
        Operation: GetMalQueryEntitiesSamplesFetchV1
        """
        query_params = handle_single_argument(args, parameters, "ids")
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="GetMalQueryEntitiesSamplesFetchV1",
            keywords=kwargs,
            params=query_params
            )

    @force_default(defaults=["body"], default_types=["dict"])
    def samples_multidownload(self: object, *args, body: dict = None, **kwargs) -> dict:
        """Schedule samples for download.

        Poll the returned job ID with get_request; once processing is
        complete, retrieve the archive with get_samples.

        Keyword arguments:
        body -- full body payload; not required when the samples keyword is provided.
                {"samples": ["string"]}
        samples -- SHA256(s) of the samples to retrieve. String or list of strings.

        When no keywords are given, the first positional argument is treated as 'samples'.

        Returns: dict containing the API response.
        HTTP Method: POST
        Operation: PostMalQueryEntitiesSamplesMultidownloadV1
        """
        if not body:
            # Build the {"samples": [...]} payload from positional / keyword input.
            body = generic_payload_list(
                submitted_arguments=args,
                submitted_keywords=kwargs,
                payload_value="samples"
                )
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="PostMalQueryEntitiesSamplesMultidownloadV1",
            body=body
            )

    @force_default(defaults=["body"], default_types=["dict"])
    def exact_search(self: object, body: dict = None, **kwargs) -> dict:
        """Schedule an exact MalQuery search of file content at byte-level granularity.

        Searches for a combination of hex patterns and strings; results can
        be filtered on criteria such as file type, file size and first-seen
        date. Returns a request ID which can be used with get_request.

        Keyword arguments:
        body -- full body payload; not required when other keywords are provided.
                {
                    "options": {
                        "filter_filetypes": ["string"],
                        "filter_meta": ["string"],
                        "limit": 0,
                        "max_date": "string",
                        "max_size": "string",
                        "min_date": "string",
                        "min_size": "string"
                    },
                    "patterns": [{"type": "string", "value": "string"}]
                }
        filter_filetypes -- File types to filter on. List of strings.
        filter_meta -- File metadata to filter on. List of strings.
        limit -- Maximum number of matches to return. Integer.
        max_date -- Maximum date to match. UTC formatted string.
        min_date -- Minimum date to match. UTC formatted string.
        max_size -- Maximum size in bytes to match. String.
        min_size -- Minimum size in bytes to match. String.
        patterns -- Patterns to match. List of {"type": ..., "value": ...} dictionaries.

        This method only supports keywords for providing arguments.

        Returns: dict containing the API response.
        HTTP Method: POST
        Operation: PostMalQueryExactSearchV1
        """
        search_body = body if body else malquery_exact_search_payload(passed_keywords=kwargs)
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="PostMalQueryExactSearchV1",
            body=search_body
            )

    @force_default(defaults=["body"], default_types=["dict"])
    def hunt(self: object, body: dict = None, **kwargs) -> dict:
        """Schedule a YARA-based search for execution.

        Returns a request ID which can be used with get_request.

        Keyword arguments:
        body -- full body payload; not required when other keywords are provided.
                {
                    "options": {
                        "filter_filetypes": ["string"],
                        "filter_meta": ["string"],
                        "limit": 0,
                        "max_date": "string",
                        "max_size": "string",
                        "min_date": "string",
                        "min_size": "string"
                    },
                    "yara_rule": "string"
                }
        filter_filetypes -- File types to filter on. List of strings.
        filter_meta -- File metadata to filter on. List of strings.
        limit -- Maximum number of matches to return. Integer.
        max_date -- Maximum date to match. UTC formatted string.
        min_date -- Minimum date to match. UTC formatted string.
        max_size -- Maximum size in bytes to match. String.
        min_size -- Minimum size in bytes to match. String.
        yara_rule -- YARA rule to use for matching. String.

        This method only supports keywords for providing arguments.

        Returns: dict containing the API response.
        HTTP Method: POST
        Operation: PostMalQueryHuntV1
        """
        hunt_body = body if body else malquery_hunt_payload(passed_keywords=kwargs)
        return process_service_request(
            calling_object=self,
            endpoints=Endpoints,
            operation_id="PostMalQueryHuntV1",
            body=hunt_body
            )

    # Aliases matching the API operation IDs (not PEP8 snake_case), kept for
    # backwards compatibility / ease of use.
    GetMalQueryQuotasV1 = get_quotas
    PostMalQueryFuzzySearchV1 = fuzzy_search
    GetMalQueryDownloadV1 = get_download
    GetMalQueryMetadataV1 = get_metadata
    GetMalQueryRequestV1 = get_request
    GetMalQueryEntitiesSamplesFetchV1 = get_samples
    PostMalQueryEntitiesSamplesMultidownloadV1 = samples_multidownload
    PostMalQueryExactSearchV1 = exact_search
    PostMalQueryHuntV1 = hunt
from pathlib import Path
import pytest
import scrapli_asyncssh
from scrapli.exceptions import KeyVerificationFailed
from scrapli_asyncssh.transport import Transport
# Path to the repo's tests/test_data directory, resolved relative to the
# installed scrapli_asyncssh package location (repo root / tests / test_data).
TEST_DATA_DIR = f"{Path(scrapli_asyncssh.__file__).parents[1]}/tests/test_data"
class DummyClass:
    # Bare attribute container; tests bolt mock methods onto its instances.
    pass
def test_creation():
    """A freshly constructed Transport keeps the host and default SSH port."""
    transport = Transport("localhost")
    assert transport.host == "localhost"
    assert transport.port == 22
@pytest.mark.parametrize(
    "test_host",
    [
        ("1.2.3.4", ["carl", "~/.ssh/mysshkey", 1234]),
        ("5.6.7.8", ["somebodyelse", "~/.ssh/lastresortkey", 22]),
        ("scrapli", ["scrapli", "~/.ssh/lastresortkey", 22]),
    ],
    ids=["host_1.2.3.4", "catch_all", "specific_user_catch_all_key"],
)
def test__process_ssh_config(test_host):
    """Resolved username/key/port should honour the _ssh_config fixture rules."""
    host, (expected_user, expected_key, expected_port) = test_host
    conn = Transport(host, ssh_config_file=f"{TEST_DATA_DIR}/files/_ssh_config")
    assert conn.host == host
    assert conn.auth_username == expected_user
    assert conn.auth_private_key == str(Path(expected_key).expanduser())
    assert conn.port == expected_port
def test__verify_key_valid():
    """_verify_key_value should not raise when the server key matches known_hosts."""
    conn = Transport("172.18.0.11")
    conn.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"

    host_key = DummyClass()
    host_key.export_public_key = lambda: (
        b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+9q0c7+tuKT0+xS5JqMhlSoZ5gMuePUwMj1ELoij2vjoPj1Vk/+MvubDTr"
        b"/VGn6FwomQS9Ge3jNswk1mJN0SIcJuthg3OBN5LsQ/zEbh4RgrDnxaBjYkypabkTtOL3xTTd1mZBsa7+OvfGEb"
        b"+/qfv53wNT7Oy6K7fLhxaSm5bd5CioIV5i9SyOpzxy7ss2wPKX6pGaRx8GERfyfF2FnqyM/rLAYdiKHuuyJPwjFDxe2dRbOzpqmH"
        b"+RDd9lvggKaVzaL0XooXAhpDpz7BdD5efefwq6TysdLGtRvXEH0V/YhqodOCqntcjXTpRPX+Mi3fa8VS9FMS4qY5YKiLvRcil\n "
    )
    conn.session = DummyClass()
    conn.session.get_server_host_key = lambda: host_key

    # Matching key: no exception expected.
    conn._verify_key_value()
def test__verify_key_invalid():
    """A mismatched public key for a known host must raise KeyVerificationFailed."""
    conn = Transport("172.18.0.11")
    conn.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"

    host_key = DummyClass()
    host_key.export_public_key = lambda: b"ssh-rsa blah\n "
    conn.session = DummyClass()
    conn.session.get_server_host_key = lambda: host_key

    with pytest.raises(KeyVerificationFailed) as exc:
        conn._verify_key_value()
    assert str(exc.value) == "172.18.0.11 in known_hosts but public key does not match!"
def test__verify_key_not_found():
    """A host absent from known_hosts must raise KeyVerificationFailed."""
    conn = Transport("1.1.1.1")
    conn.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"

    host_key = DummyClass()
    host_key.export_public_key = lambda: b"ssh-rsa blah\n "
    conn.session = DummyClass()
    conn.session.get_server_host_key = lambda: host_key

    with pytest.raises(KeyVerificationFailed) as exc:
        conn._verify_key()
    assert str(exc.value) == "1.1.1.1 not in known_hosts!"
@pytest.mark.asyncio
async def test_open_verify_key():
    """open() with auth_strict_key=True verifies the host key then authenticates."""
    conn = Transport("172.18.0.11", auth_strict_key=True)
    conn.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"

    host_key = DummyClass()
    # NOTE(review): the trailing segment of this key reads "<KEY>" and looks
    # truncated/redacted -- confirm against the known_hosts fixture.
    host_key.export_public_key = lambda: (
        b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+9q0c7+tuKT0+xS5JqMhlSoZ5gMuePUwMj1ELoij2vjoPj1Vk/+MvubDTr"
        b"/VGn6FwomQS9Ge3jNswk1mJN0SIcJuthg3OBN5LsQ/zEbh4RgrDnxaBjYkypabkTtOL3xTTd1mZBsa7+OvfGEb"
        b"+/qfv53wNT7Oy6K7fLhxaSm5bd5CioIV5i9SyOpzxy7ss2wPKX6pGaRx8GERfyfF2FnqyM/rLAYdiKHuuyJPwjFDxe2dRbOzpqmH"
        b"+RDd9lvggKaVzaL<KEY> "
    )

    async def fake_authenticate():
        return True

    async def fake_open_session(**kwargs):
        return 1, 2, 3

    conn.session = DummyClass()
    conn.session.get_server_host_key = lambda: host_key
    conn.session.open_session = fake_open_session
    conn._authenticate = fake_authenticate
    await conn.open()
def test_set_timeout():
    """set_timeout overrides the default transport timeout."""
    conn = Transport("172.18.0.11")
    assert conn.timeout_transport == 30
    conn.set_timeout(999)
    assert conn.timeout_transport == 999
import pytest
import scrapli_asyncssh
from scrapli.exceptions import KeyVerificationFailed
from scrapli_asyncssh.transport import Transport
# Path to the repo's tests/test_data directory, resolved relative to the
# installed scrapli_asyncssh package location (repo root / tests / test_data).
TEST_DATA_DIR = f"{Path(scrapli_asyncssh.__file__).parents[1]}/tests/test_data"
class DummyClass:
    # Bare attribute container; tests bolt mock methods onto its instances.
    pass
def test_creation():
    """Constructing a Transport stores the host and keeps the default port 22."""
    transport = Transport("localhost")
    assert transport.host == "localhost"
    assert transport.port == 22
@pytest.mark.parametrize(
    "test_host",
    [
        ("1.2.3.4", ["carl", "~/.ssh/mysshkey", 1234]),
        ("5.6.7.8", ["somebodyelse", "~/.ssh/lastresortkey", 22]),
        ("scrapli", ["scrapli", "~/.ssh/lastresortkey", 22]),
    ],
    ids=["host_1.2.3.4", "catch_all", "specific_user_catch_all_key"],
)
def test__process_ssh_config(test_host):
    """Resolved user/key/port should follow the _ssh_config fixture rules."""
    hostname, expected = test_host
    transport = Transport(hostname, ssh_config_file=f"{TEST_DATA_DIR}/files/_ssh_config")
    assert transport.host == hostname
    assert transport.auth_username == expected[0]
    assert transport.auth_private_key == str(Path(expected[1]).expanduser())
    assert transport.port == expected[2]
def test__verify_key_valid():
    """_verify_key_value passes silently when the server key matches known_hosts."""
    transport = Transport("172.18.0.11")
    transport.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"
    transport.session = DummyClass()

    def fake_export_public_key():
        return (
            b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+9q0c7+tuKT0+xS5JqMhlSoZ5gMuePUwMj1ELoij2vjoPj1Vk/+MvubDTr"
            b"/VGn6FwomQS9Ge3jNswk1mJN0SIcJuthg3OBN5LsQ/zEbh4RgrDnxaBjYkypabkTtOL3xTTd1mZBsa7+OvfGEb"
            b"+/qfv53wNT7Oy6K7fLhxaSm5bd5CioIV5i9SyOpzxy7ss2wPKX6pGaRx8GERfyfF2FnqyM/rLAYdiKHuuyJPwjFDxe2dRbOzpqmH"
            b"+RDd9lvggKaVzaL0XooXAhpDpz7BdD5efefwq6TysdLGtRvXEH0V/YhqodOCqntcjXTpRPX+Mi3fa8VS9FMS4qY5YKiLvRcil\n "
        )

    def fake_get_server_host_key():
        key_obj = DummyClass()
        key_obj.export_public_key = fake_export_public_key
        return key_obj

    transport.session.get_server_host_key = fake_get_server_host_key
    transport._verify_key_value()
def test__verify_key_invalid():
    """Known host but mismatched public key raises KeyVerificationFailed."""
    transport = Transport("172.18.0.11")
    transport.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"
    transport.session = DummyClass()

    def fake_get_server_host_key():
        key_obj = DummyClass()
        key_obj.export_public_key = lambda: b"ssh-rsa blah\n "
        return key_obj

    transport.session.get_server_host_key = fake_get_server_host_key
    with pytest.raises(KeyVerificationFailed) as exc:
        transport._verify_key_value()
    assert str(exc.value) == "172.18.0.11 in known_hosts but public key does not match!"
def test__verify_key_not_found():
    """A host missing from known_hosts raises KeyVerificationFailed."""
    transport = Transport("1.1.1.1")
    transport.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"
    transport.session = DummyClass()

    def fake_get_server_host_key():
        key_obj = DummyClass()
        key_obj.export_public_key = lambda: b"ssh-rsa blah\n "
        return key_obj

    transport.session.get_server_host_key = fake_get_server_host_key
    with pytest.raises(KeyVerificationFailed) as exc:
        transport._verify_key()
    assert str(exc.value) == "1.1.1.1 not in known_hosts!"
@pytest.mark.asyncio
async def test_open_verify_key():
    """open() with strict key checking verifies the host key, then authenticates."""
    transport = Transport("172.18.0.11", auth_strict_key=True)
    transport.ssh_known_hosts_file = f"{TEST_DATA_DIR}/files/_ssh_known_hosts"
    transport.session = DummyClass()

    def fake_export_public_key():
        # NOTE(review): "<KEY>" looks like a truncated/redacted tail -- confirm
        # against the known_hosts fixture.
        return (
            b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+9q0c7+tuKT0+xS5JqMhlSoZ5gMuePUwMj1ELoij2vjoPj1Vk/+MvubDTr"
            b"/VGn6FwomQS9Ge3jNswk1mJN0SIcJuthg3OBN5LsQ/zEbh4RgrDnxaBjYkypabkTtOL3xTTd1mZBsa7+OvfGEb"
            b"+/qfv53wNT7Oy6K7fLhxaSm5bd5CioIV5i9SyOpzxy7ss2wPKX6pGaRx8GERfyfF2FnqyM/rLAYdiKHuuyJPwjFDxe2dRbOzpqmH"
            b"+RDd9lvggKaVzaL<KEY> "
        )

    def fake_get_server_host_key():
        key_obj = DummyClass()
        key_obj.export_public_key = fake_export_public_key
        return key_obj

    async def fake_authenticate():
        return True

    async def fake_open_session(**kwargs):
        return 1, 2, 3

    transport._authenticate = fake_authenticate
    transport.session.get_server_host_key = fake_get_server_host_key
    transport.session.open_session = fake_open_session
    await transport.open()
def test_set_timeout():
    """set_timeout replaces the default transport timeout value."""
    transport = Transport("172.18.0.11")
    assert transport.timeout_transport == 30
    transport.set_timeout(999)
    assert transport.timeout_transport == 999
import abc
import time
import os, shutil
import torch
import torch.nn as nn
from .base import BaseTraining, AverageMeter
from tqdm import tqdm
import warnings
def default_add_penalty(loss, data):
    """Identity penalty hook: return *loss* unchanged, ignoring *data*."""
    return loss
class AdversarialTraining(BaseTraining):
    """Adversarial training loop for tasks where both data and label are ready.

    The measure function only receives two arguments (logits, labels).

    If you want to redirect the logging information to files using `tee`,
    please use `python -u yourscript.py | tee yourfile` to get the expected
    results. (Refer to https://github.com/tqdm/tqdm/issues/706 answered by f0k)

    Overload _initialize (or populate `other_config`) to register the
    perturbation method under the 'perturb' key. An optional penalty
    function may be registered under 'penalty', and the Lipschitz penalty
    weight under 'lipschitz'.
    """

    def _initialize(self):
        """Read adversarial-specific settings from self._other_config.

        Raises:
            KeyError: if no 'perturb' callable is registered.
        """
        # The perturbation callable maps (data, label) -> adversarial data.
        self._perturb = self._other_config.get('perturb', None)
        if self._perturb is None:
            raise KeyError("Perturbation method is required in other_config")
        # Optional penalty hook; defaults to the identity (no penalty).
        self._add_penalty = self._other_config.get('penalty', default_add_penalty)
        # Weight of the Lipschitz penalty. Input gradients are only enabled
        # during training when this weight is positive.
        self._lipschitz_c = self._other_config.get('lipschitz', 0)
        if 'lipschitz' not in self._other_config:
            # Fix: previously this warning fired unconditionally, even when the
            # caller had explicitly configured the weight. Warn only when the
            # key is absent.
            warnings.warn("If you add Lipschitz penalty, please set `lipschitz` argument to nonzero.")
        # Last perturbed batch and its logits, cached by _val for inspection.
        self._perturbed_data = None
        self._perturbed_logits = None

    def _sub_init_logger(self):
        """Record the adversarial-specific configuration in both log streams."""
        # Fix: corrected "advaced"/"pertub_method" typos in the log messages.
        self._train_logger.info("%s advanced setting: perturb_method=%s, penalty_weight=%.5f",
                                self.__class__.__name__, self._perturb.__class__.__name__, self._lipschitz_c)
        self._val_logger.info("%s advanced setting: perturb_method=%s, penalty_weight=%.5f",
                              self.__class__.__name__, self._perturb.__class__.__name__, self._lipschitz_c)

    def _train(self, epoch):
        """Run one training epoch on adversarially perturbed inputs."""
        avg_loss = AverageMeter()
        avg_measure = AverageMeter()
        data_stream = tqdm(enumerate(self._train_loader, 1))
        for i, (data, label) in data_stream:
            self._net.train()
            # Global iteration counter across epochs.
            num_batches = len(self._train_loader)
            current_iter = (epoch - 1) * num_batches + i
            data, label = data.to(self._device), label.to(self._device)
            # Input perturbation.
            data = self._perturb(data, label)
            # Input gradients are only needed while a Lipschitz penalty is active.
            if self._lipschitz_c > 0:
                data.requires_grad_(True)
            else:
                data.requires_grad_(False)
            self._optimizer.zero_grad()
            self._net.zero_grad()
            if self._forward_op is not None:
                y = self._forward_op(data)
            else:
                y = data
            logits = self._net(y)
            current_loss = self._loss_fun(logits, label)
            current_loss = self._add_penalty(current_loss, data)
            if self._lipschitz_c > 0:
                # Stop tracking input gradients before the backward pass.
                data.requires_grad_(False)
            current_loss.backward()
            self._optimizer.step()
            if self._scheduler is not None:
                self._scheduler.step()
                # NOTE(review): get_lr() is deprecated in newer PyTorch in favour
                # of get_last_lr(); confirm against the installed torch version.
                lr = self._scheduler.get_lr()[0]
            else:
                lr = self._optimizer.defaults['lr']
            avg_loss.update(current_loss.item(), data.size(0))
            current_measure = self._measure(logits.detach(), label.detach())
            avg_measure.update(current_measure, data.size(0))
            # Update the progress bar.
            data_stream.set_description((
                'Training Epoch: [{epoch}/{epochs}] | '
                'Iteration: {iters} | '
                'progress: [{trained}/{total}] ({percent:.0f}%) | '
                'loss: {loss.val: .4f} (Avg {loss.avg:.4f}) | '
                'measure: {mvalue.val: .2f} (Avg {mvalue.avg: .4f})'
            ).format(
                epoch=epoch,
                epochs=self._num_epochs,
                iters=current_iter,
                trained=i,
                total=num_batches,
                percent=(100.*i/num_batches),
                loss=avg_loss,
                mvalue=avg_measure,
            )
            )
            if (current_iter-1) % self._log_freq_train == 0:
                self._train_logger.info('%d \t %d \t %.5f \t %.4f \t %.4f \t %.4f \t %.4f',
                                        epoch, current_iter, lr, avg_loss.val, avg_loss.avg,
                                        avg_measure.val, avg_measure.avg)

    def _val(self, epoch):
        """Run one validation epoch.

        When evaluating (self._train_flag is False) inputs are perturbed and
        the last batch's perturbed data/logits are cached on the instance.

        Returns:
            (avg_loss, avg_measure) AverageMeter pair, or (None, None) when
            no validation loader is configured.
        """
        if self._val_loader is None:
            return None, None
        avg_loss = AverageMeter()
        avg_measure = AverageMeter()
        data_stream = tqdm(enumerate(self._val_loader, 1))
        with torch.no_grad():
            for i, (data, label) in data_stream:
                self._net.eval()
                # Global iteration counter across epochs.
                num_batches = len(self._val_loader)
                current_iter = (epoch - 1) * num_batches + i
                data, label = data.to(self._device), label.to(self._device)
                if not self._train_flag:
                    # Evaluation-time adversarial perturbation; keep the last
                    # batch around for later inspection.
                    data = self._perturb(data, label)
                    self._perturbed_data = data
                if self._forward_op is not None:
                    y = self._forward_op(data)
                else:
                    y = data
                logits = self._net(y)
                self._perturbed_logits = logits
                current_loss = self._loss_fun(logits, label)
                avg_loss.update(current_loss.item(), data.size(0))
                current_measure = self._measure(logits.detach(), label.detach())
                avg_measure.update(current_measure, data.size(0))
                # Update the progress bar.
                data_stream.set_description((
                    'Validating Epoch: [{epoch}/{epochs}] | '
                    'Iteration: {iters} | '
                    'progress: [{trained}/{total}] ({percent:.0f}%) | '
                    'loss: {loss.val: .4f} (Avg {loss.avg:.4f}) | '
                    'measure: {mvalue.val: .2f} (Avg {mvalue.avg: .4f})'
                ).format(
                    epoch=epoch,
                    epochs=self._num_epochs,
                    iters=current_iter,
                    trained=i,
                    total=num_batches,
                    percent=(100.*i/num_batches),
                    loss=avg_loss,
                    mvalue=avg_measure,
                )
                )
                if self._train_flag:
                    if self._scheduler is not None:
                        lr = self._scheduler.get_lr()[0]
                    else:
                        lr = self._optimizer.defaults['lr']
                    if (current_iter-1) % self._log_freq_val == 0:
                        self._val_logger.info('%d \t %d \t %.5f \t %.4f \t %.4f \t %.4f \t %.4f',
                                              epoch, current_iter, lr, avg_loss.val, avg_loss.avg,
                                              avg_measure.val, avg_measure.avg)
        return avg_loss, avg_measure
import time
import os, shutil
import torch
import torch.nn as nn
from .base import BaseTraining, AverageMeter
from tqdm import tqdm
import warnings
def default_add_penalty(loss, data):
    """Default no-op penalty: hand *loss* straight back; *data* is unused."""
    return loss
class AdversarialTraining(BaseTraining):
    """Adversarial training loop for tasks where both data and label are ready.

    The measure function only receives two arguments (logits, labels).

    If you want to redirect the logging information to files using `tee`,
    please use `python -u yourscript.py | tee yourfile` to get the expected
    results. (Refer to https://github.com/tqdm/tqdm/issues/706 answered by f0k)

    Overload _initialize (or populate `other_config`) to register the
    perturbation method under the 'perturb' key. An optional penalty
    function may be registered under 'penalty', and the Lipschitz penalty
    weight under 'lipschitz'.
    """

    def _initialize(self):
        """Read adversarial-specific settings from self._other_config.

        Raises:
            KeyError: if no 'perturb' callable is registered.
        """
        # The perturbation callable maps (data, label) -> adversarial data.
        self._perturb = self._other_config.get('perturb', None)
        if self._perturb is None:
            raise KeyError("Perturbation method is required in other_config")
        # Optional penalty hook; defaults to the identity (no penalty).
        self._add_penalty = self._other_config.get('penalty', default_add_penalty)
        # Weight of the Lipschitz penalty. Input gradients are only enabled
        # during training when this weight is positive.
        self._lipschitz_c = self._other_config.get('lipschitz', 0)
        if 'lipschitz' not in self._other_config:
            # Fix: previously this warning fired unconditionally, even when the
            # caller had explicitly configured the weight. Warn only when the
            # key is absent.
            warnings.warn("If you add Lipschitz penalty, please set `lipschitz` argument to nonzero.")
        # Last perturbed batch and its logits, cached by _val for inspection.
        self._perturbed_data = None
        self._perturbed_logits = None

    def _sub_init_logger(self):
        """Record the adversarial-specific configuration in both log streams."""
        # Fix: corrected "advaced"/"pertub_method" typos in the log messages.
        self._train_logger.info("%s advanced setting: perturb_method=%s, penalty_weight=%.5f",
                                self.__class__.__name__, self._perturb.__class__.__name__, self._lipschitz_c)
        self._val_logger.info("%s advanced setting: perturb_method=%s, penalty_weight=%.5f",
                              self.__class__.__name__, self._perturb.__class__.__name__, self._lipschitz_c)

    def _train(self, epoch):
        """Run one training epoch on adversarially perturbed inputs."""
        avg_loss = AverageMeter()
        avg_measure = AverageMeter()
        data_stream = tqdm(enumerate(self._train_loader, 1))
        for i, (data, label) in data_stream:
            self._net.train()
            # Global iteration counter across epochs.
            num_batches = len(self._train_loader)
            current_iter = (epoch - 1) * num_batches + i
            data, label = data.to(self._device), label.to(self._device)
            # Input perturbation.
            data = self._perturb(data, label)
            # Input gradients are only needed while a Lipschitz penalty is active.
            if self._lipschitz_c > 0:
                data.requires_grad_(True)
            else:
                data.requires_grad_(False)
            self._optimizer.zero_grad()
            self._net.zero_grad()
            if self._forward_op is not None:
                y = self._forward_op(data)
            else:
                y = data
            logits = self._net(y)
            current_loss = self._loss_fun(logits, label)
            current_loss = self._add_penalty(current_loss, data)
            if self._lipschitz_c > 0:
                # Stop tracking input gradients before the backward pass.
                data.requires_grad_(False)
            current_loss.backward()
            self._optimizer.step()
            if self._scheduler is not None:
                self._scheduler.step()
                # NOTE(review): get_lr() is deprecated in newer PyTorch in favour
                # of get_last_lr(); confirm against the installed torch version.
                lr = self._scheduler.get_lr()[0]
            else:
                lr = self._optimizer.defaults['lr']
            avg_loss.update(current_loss.item(), data.size(0))
            current_measure = self._measure(logits.detach(), label.detach())
            avg_measure.update(current_measure, data.size(0))
            # Update the progress bar.
            data_stream.set_description((
                'Training Epoch: [{epoch}/{epochs}] | '
                'Iteration: {iters} | '
                'progress: [{trained}/{total}] ({percent:.0f}%) | '
                'loss: {loss.val: .4f} (Avg {loss.avg:.4f}) | '
                'measure: {mvalue.val: .2f} (Avg {mvalue.avg: .4f})'
            ).format(
                epoch=epoch,
                epochs=self._num_epochs,
                iters=current_iter,
                trained=i,
                total=num_batches,
                percent=(100.*i/num_batches),
                loss=avg_loss,
                mvalue=avg_measure,
            )
            )
            if (current_iter-1) % self._log_freq_train == 0:
                self._train_logger.info('%d \t %d \t %.5f \t %.4f \t %.4f \t %.4f \t %.4f',
                                        epoch, current_iter, lr, avg_loss.val, avg_loss.avg,
                                        avg_measure.val, avg_measure.avg)

    def _val(self, epoch):
        """Run one validation epoch.

        When evaluating (self._train_flag is False) inputs are perturbed and
        the last batch's perturbed data/logits are cached on the instance.

        Returns:
            (avg_loss, avg_measure) AverageMeter pair, or (None, None) when
            no validation loader is configured.
        """
        if self._val_loader is None:
            return None, None
        avg_loss = AverageMeter()
        avg_measure = AverageMeter()
        data_stream = tqdm(enumerate(self._val_loader, 1))
        with torch.no_grad():
            for i, (data, label) in data_stream:
                self._net.eval()
                # Global iteration counter across epochs.
                num_batches = len(self._val_loader)
                current_iter = (epoch - 1) * num_batches + i
                data, label = data.to(self._device), label.to(self._device)
                if not self._train_flag:
                    # Evaluation-time adversarial perturbation; keep the last
                    # batch around for later inspection.
                    data = self._perturb(data, label)
                    self._perturbed_data = data
                if self._forward_op is not None:
                    y = self._forward_op(data)
                else:
                    y = data
                logits = self._net(y)
                self._perturbed_logits = logits
                current_loss = self._loss_fun(logits, label)
                avg_loss.update(current_loss.item(), data.size(0))
                current_measure = self._measure(logits.detach(), label.detach())
                avg_measure.update(current_measure, data.size(0))
                # Update the progress bar.
                data_stream.set_description((
                    'Validating Epoch: [{epoch}/{epochs}] | '
                    'Iteration: {iters} | '
                    'progress: [{trained}/{total}] ({percent:.0f}%) | '
                    'loss: {loss.val: .4f} (Avg {loss.avg:.4f}) | '
                    'measure: {mvalue.val: .2f} (Avg {mvalue.avg: .4f})'
                ).format(
                    epoch=epoch,
                    epochs=self._num_epochs,
                    iters=current_iter,
                    trained=i,
                    total=num_batches,
                    percent=(100.*i/num_batches),
                    loss=avg_loss,
                    mvalue=avg_measure,
                )
                )
                if self._train_flag:
                    if self._scheduler is not None:
                        lr = self._scheduler.get_lr()[0]
                    else:
                        lr = self._optimizer.defaults['lr']
                    if (current_iter-1) % self._log_freq_val == 0:
                        self._val_logger.info('%d \t %d \t %.5f \t %.4f \t %.4f \t %.4f \t %.4f',
                                              epoch, current_iter, lr, avg_loss.val, avg_loss.avg,
                                              avg_measure.val, avg_measure.avg)
        return avg_loss, avg_measure
import time
import uuid
import pytest
from click.testing import CliRunner
from mock import patch
from airflow_monitor.common.base_component import BaseMonitorComponent
from airflow_monitor.common.config_data import AirflowServerConfig
from airflow_monitor.config import AirflowMonitorConfig
from airflow_monitor.multiserver.cmd_liveness_probe import airflow_monitor_v2_alive
from airflow_monitor.multiserver.monitor_component_manager import KNOWN_COMPONENTS
from airflow_monitor.multiserver.multiserver import MultiServerMonitor
from test_dbnd_airflow_monitor.airflow_utils import TestConnectionError
@pytest.fixture
def airflow_monitor_config():
    """Monitor configuration pointing at a throw-away DAG folder.

    The dummy ``local_dag_folder`` yields a fresh config object that does
    not clash with the monitor-v1 global configuration.
    """
    config = AirflowMonitorConfig(local_dag_folder="/tmp")
    return config
@pytest.fixture
def multi_server(
    mock_server_config_service,
    mock_data_fetcher,
    mock_tracking_service,
    airflow_monitor_config,
):
    """MultiServerMonitor whose collaborators are all replaced by mocks."""
    # Map each patch target onto the mock it should resolve to.
    targets = {
        "airflow_monitor.multiserver.monitor_component_manager.get_data_fetcher": mock_data_fetcher,
        "airflow_monitor.multiserver.multiserver.get_tracking_service": mock_tracking_service,
        "airflow_monitor.common.base_component.get_data_fetcher": mock_data_fetcher,
        "airflow_monitor.common.base_component.get_tracking_service": mock_tracking_service,
    }
    patchers = [patch(target, return_value=mock) for target, mock in targets.items()]
    for patcher in patchers:
        patcher.start()
    try:
        yield MultiServerMonitor(mock_server_config_service, airflow_monitor_config)
    finally:
        # Undo in reverse order, mirroring nested context managers.
        for patcher in reversed(patchers):
            patcher.stop()
@pytest.fixture
def mock_syncer_factory(mock_data_fetcher, mock_tracking_service):
    """Zero-argument factory producing MockSyncer instances wired to the shared mocks."""

    def make_syncer():
        return MockSyncer(
            config=mock_tracking_service.get_airflow_server_configuration(),
            data_fetcher=mock_data_fetcher,
            tracking_service=mock_tracking_service,
        )

    yield make_syncer
@pytest.fixture
def mock_syncer(mock_syncer_factory):
    """A single ready-made MockSyncer instance."""
    syncer = mock_syncer_factory()
    yield syncer
def count_logged_exceptions(caplog):
    """Return the number of captured log records that carry exception info."""
    return sum(1 for record in caplog.records if record.exc_info)
class MockSyncer(BaseMonitorComponent):
    """Syncer stub that counts invocations and can be told to blow up."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Number of successful _sync_once calls so far.
        self.sync_count = 0
        # When True, the next sync raises instead of counting.
        self.should_fail = False

    def _sync_once(self):
        if self.should_fail:
            raise Exception("Mock - should fail")
        self.sync_count = self.sync_count + 1

    def emulate_start_syncer(self, *args, **kwargs):
        # Stands in for the component factory: "starting" hands back this
        # very instance so the test keeps a handle on it.
        return self
class TestMultiServer(object):
    """End-to-end tests for ``MultiServerMonitor.run_once`` orchestration."""

    def test_01_no_servers(self, multi_server):
        """With no configured servers nothing gets monitored."""
        multi_server.run_once()
        # no servers - should stay empty
        assert not multi_server.active_monitors

    def test_02_config_service_not_available(
        self, multi_server, mock_server_config_service
    ):
        """A dead config service propagates its connection error."""
        mock_server_config_service.alive = False
        with pytest.raises(TestConnectionError):
            multi_server.run_once()

    def test_03_empty_config(self, multi_server, mock_server_config_service, caplog):
        """Servers with all components disabled are tracked but stay idle."""
        # server config is empty (all components disabled) - nothing should run
        mock_server_config_service.mock_servers = [
            AirflowServerConfig(uuid.uuid4()),
            AirflowServerConfig(uuid.uuid4()),
        ]
        multi_server.run_once()
        assert len(multi_server.active_monitors) == 2
        for monitor in multi_server.active_monitors.values():
            assert not monitor.active_components
        assert not count_logged_exceptions(caplog)

    def test_04_single_server_single_component(
        self, multi_server, mock_server_config_service, mock_syncer, caplog
    ):
        """One enabled component syncs once per run and is removed cleanly."""
        with patch.dict(
            KNOWN_COMPONENTS, {"state_sync": mock_syncer.emulate_start_syncer}
        ):
            mock_server_config_service.mock_servers = [
                AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
            ]
            multi_server.run_once()
            # should start mock_server, should do 1 iteration
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 1
            multi_server.run_once()
            # should not start additional servers, should do 1 more iteration
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 2
            mock_server_config_service.mock_servers = []
            multi_server.run_once()
            # should remove the server, don't do the additional iteration
            assert len(multi_server.active_monitors) == 0
            assert mock_syncer.sync_count == 2
            assert not count_logged_exceptions(caplog)

    def test_05_failing_syncer(
        self,
        multi_server,
        mock_server_config_service,
        mock_syncer_factory,
        mock_tracking_service,
        caplog,
    ):
        """A crashing component is reported, evicted, and then restarted."""
        mock_syncer1 = mock_syncer_factory()
        mock_syncer2 = mock_syncer_factory()
        mock_server_config_service.mock_servers = [
            AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
        ]
        with patch.dict(
            KNOWN_COMPONENTS, {"state_sync": mock_syncer1.emulate_start_syncer}
        ):
            multi_server.run_once()
            # should start mock_server, should do 1 iteration
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer1.sync_count == 1
            assert (
                mock_tracking_service.current_monitor_state.monitor_status == "Running"
            )
        with patch.dict(
            KNOWN_COMPONENTS, {"state_sync": mock_syncer2.emulate_start_syncer}
        ):
            # ensure it's not restarted (just because we've change component definition)
            multi_server.run_once()
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer1.sync_count == 2
            assert mock_syncer2.sync_count == 0
            assert not count_logged_exceptions(caplog)
            assert not mock_tracking_service.current_monitor_state.monitor_error_message
            mock_syncer1.should_fail = True
            multi_server.run_once()
            # should not start additional servers, no new iteration
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer1.sync_count == 2
            assert mock_syncer2.sync_count == 0
            # we should expect here log message that syncer failed
            assert count_logged_exceptions(caplog)
            # we should expect error reported to webserver
            assert (
                "Traceback"
                in mock_tracking_service.current_monitor_state.monitor_error_message
            )
            multi_server.run_once()
            # should restart the server
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer1.sync_count == 2
            assert mock_syncer2.sync_count == 1
            # should clean the error
            assert not mock_tracking_service.current_monitor_state.monitor_error_message
            assert count_logged_exceptions(caplog) < 2

    def test_06_airflow_not_responsive(
        self,
        multi_server,
        mock_data_fetcher,
        mock_server_config_service,
        mock_syncer,
        caplog,
    ):
        """Components only start/restart while the data fetcher is alive."""
        mock_server_config_service.mock_servers = [
            AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
        ]
        with patch.dict(
            KNOWN_COMPONENTS, {"state_sync": mock_syncer.emulate_start_syncer}
        ):
            mock_data_fetcher.alive = False
            multi_server.run_once()
            # should not start mock_server - airflow is not responding
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 0
            mock_data_fetcher.alive = True
            multi_server.run_once()
            # should start now since it's alive
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 1
            mock_data_fetcher.alive = False
            multi_server.run_once()
            # shouldn't actively kill the syncer, despite data_fetcher not responsive
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 2
            for monitor in multi_server.active_monitors.values():
                assert monitor.active_components
            mock_syncer.should_fail = True
            multi_server.run_once()
            # now only if syncer fails (as a result of failing data_fetcher), it will be evicted
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 2
            for monitor in multi_server.active_monitors.values():
                assert not monitor.active_components
            multi_server.run_once()
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 2
            for monitor in multi_server.active_monitors.values():
                assert not monitor.active_components
            mock_syncer.should_fail = False
            mock_data_fetcher.alive = True
            # now if everything is ok - should be back
            multi_server.run_once()
            assert len(multi_server.active_monitors) == 1
            assert mock_syncer.sync_count == 3  # due to test - it should be 3 and not 1
            for monitor in multi_server.active_monitors.values():
                assert monitor.active_components
            # we should have only one exception (from failed syncer)
            assert count_logged_exceptions(caplog) < 2

    def test_07_test_error_cleanup(
        self,
        multi_server,
        mock_server_config_service,
        mock_data_fetcher,
        mock_tracking_service,
        caplog,
    ):
        """A reported error is stable across runs and cleared on recovery."""
        mock_server_config_service.mock_servers = [
            AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
        ]
        multi_server.run_once()
        # should start mock_server, should do 1 iteration
        assert len(multi_server.active_monitors) == 1
        assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
        assert not mock_tracking_service.current_monitor_state.monitor_error_message
        mock_data_fetcher.alive = False
        multi_server.run_once()
        # still alive
        assert len(multi_server.active_monitors) == 1
        assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
        assert mock_tracking_service.current_monitor_state.monitor_error_message
        first_error_lines = mock_tracking_service.current_monitor_state.monitor_error_message.split(
            "\n"
        )
        multi_server.run_once()
        assert mock_tracking_service.current_monitor_state.monitor_error_message
        new_error_lines = mock_tracking_service.current_monitor_state.monitor_error_message.split(
            "\n"
        )
        # should be same message except for last (Timestamp) line
        assert first_error_lines[:-1] == new_error_lines[:-1]
        mock_data_fetcher.alive = True
        multi_server.run_once()
        assert len(multi_server.active_monitors) == 1
        assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
        assert not mock_tracking_service.current_monitor_state.monitor_error_message

    def test_08_liveness_prove(self, multi_server, mock_server_config_service, caplog):
        """run_once refreshes the liveness file checked by the CLI probe."""
        runner = CliRunner()
        multi_server.run_once()
        result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
        assert result.exit_code == 0
        time.sleep(6)
        result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
        assert result.exit_code != 0
        multi_server.run_once()
        result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
        assert result.exit_code == 0
import uuid
import pytest
from click.testing import CliRunner
from mock import patch
from airflow_monitor.common.base_component import BaseMonitorComponent
from airflow_monitor.common.config_data import AirflowServerConfig
from airflow_monitor.config import AirflowMonitorConfig
from airflow_monitor.multiserver.cmd_liveness_probe import airflow_monitor_v2_alive
from airflow_monitor.multiserver.monitor_component_manager import KNOWN_COMPONENTS
from airflow_monitor.multiserver.multiserver import MultiServerMonitor
from test_dbnd_airflow_monitor.airflow_utils import TestConnectionError
@pytest.fixture
def airflow_monitor_config():
# use dummy local_dag_folder to generate "new" config object
# (that won't conflict with monitor v1 global configuration)
return AirflowMonitorConfig(local_dag_folder="/tmp")
@pytest.fixture
def multi_server(
mock_server_config_service,
mock_data_fetcher,
mock_tracking_service,
airflow_monitor_config,
):
with patch(
"airflow_monitor.multiserver.monitor_component_manager.get_data_fetcher",
return_value=mock_data_fetcher,
), patch(
"airflow_monitor.multiserver.multiserver.get_tracking_service",
return_value=mock_tracking_service,
), patch(
"airflow_monitor.common.base_component.get_data_fetcher",
return_value=mock_data_fetcher,
), patch(
"airflow_monitor.common.base_component.get_tracking_service",
return_value=mock_tracking_service,
):
yield MultiServerMonitor(mock_server_config_service, airflow_monitor_config)
@pytest.fixture
def mock_syncer_factory(mock_data_fetcher, mock_tracking_service):
yield lambda: MockSyncer(
config=mock_tracking_service.get_airflow_server_configuration(),
data_fetcher=mock_data_fetcher,
tracking_service=mock_tracking_service,
)
@pytest.fixture
def mock_syncer(mock_syncer_factory):
yield mock_syncer_factory()
def count_logged_exceptions(caplog):
logged_exceptions = [record for record in caplog.records if record.exc_info]
return len(logged_exceptions)
class MockSyncer(BaseMonitorComponent):
def __init__(self, *args, **kwargs):
super(MockSyncer, self).__init__(*args, **kwargs)
self.sync_count = 0
self.should_fail = False
def _sync_once(self):
if self.should_fail:
raise Exception("Mock - should fail")
self.sync_count += 1
def emulate_start_syncer(self, *args, **kwargs):
return self
class TestMultiServer(object):
def test_01_no_servers(self, multi_server):
multi_server.run_once()
# no servers - should stay empty
assert not multi_server.active_monitors
def test_02_config_service_not_available(
self, multi_server, mock_server_config_service
):
mock_server_config_service.alive = False
with pytest.raises(TestConnectionError):
multi_server.run_once()
def test_03_empty_config(self, multi_server, mock_server_config_service, caplog):
# server config is empty (all components disabled) - nothing should run
mock_server_config_service.mock_servers = [
AirflowServerConfig(uuid.uuid4()),
AirflowServerConfig(uuid.uuid4()),
]
multi_server.run_once()
assert len(multi_server.active_monitors) == 2
for monitor in multi_server.active_monitors.values():
assert not monitor.active_components
assert not count_logged_exceptions(caplog)
def test_04_single_server_single_component(
self, multi_server, mock_server_config_service, mock_syncer, caplog
):
with patch.dict(
KNOWN_COMPONENTS, {"state_sync": mock_syncer.emulate_start_syncer}
):
mock_server_config_service.mock_servers = [
AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
]
multi_server.run_once()
# should start mock_server, should do 1 iteration
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 1
multi_server.run_once()
# should not start additional servers, should do 1 more iteration
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 2
mock_server_config_service.mock_servers = []
multi_server.run_once()
# should remove the server, don't do the additional iteration
assert len(multi_server.active_monitors) == 0
assert mock_syncer.sync_count == 2
assert not count_logged_exceptions(caplog)
def test_05_failing_syncer(
self,
multi_server,
mock_server_config_service,
mock_syncer_factory,
mock_tracking_service,
caplog,
):
mock_syncer1 = mock_syncer_factory()
mock_syncer2 = mock_syncer_factory()
mock_server_config_service.mock_servers = [
AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
]
with patch.dict(
KNOWN_COMPONENTS, {"state_sync": mock_syncer1.emulate_start_syncer}
):
multi_server.run_once()
# should start mock_server, should do 1 iteration
assert len(multi_server.active_monitors) == 1
assert mock_syncer1.sync_count == 1
assert (
mock_tracking_service.current_monitor_state.monitor_status == "Running"
)
with patch.dict(
KNOWN_COMPONENTS, {"state_sync": mock_syncer2.emulate_start_syncer}
):
# ensure it's not restarted (just because we've change component definition)
multi_server.run_once()
assert len(multi_server.active_monitors) == 1
assert mock_syncer1.sync_count == 2
assert mock_syncer2.sync_count == 0
assert not count_logged_exceptions(caplog)
assert not mock_tracking_service.current_monitor_state.monitor_error_message
mock_syncer1.should_fail = True
multi_server.run_once()
# should not start additional servers, no new iteration
assert len(multi_server.active_monitors) == 1
assert mock_syncer1.sync_count == 2
assert mock_syncer2.sync_count == 0
# we should expect here log message that syncer failed
assert count_logged_exceptions(caplog)
# we should expect error reported to webserver
assert (
"Traceback"
in mock_tracking_service.current_monitor_state.monitor_error_message
)
multi_server.run_once()
# should restart the server
assert len(multi_server.active_monitors) == 1
assert mock_syncer1.sync_count == 2
assert mock_syncer2.sync_count == 1
# should clean the error
assert not mock_tracking_service.current_monitor_state.monitor_error_message
assert count_logged_exceptions(caplog) < 2
def test_06_airflow_not_responsive(
self,
multi_server,
mock_data_fetcher,
mock_server_config_service,
mock_syncer,
caplog,
):
mock_server_config_service.mock_servers = [
AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
]
with patch.dict(
KNOWN_COMPONENTS, {"state_sync": mock_syncer.emulate_start_syncer}
):
mock_data_fetcher.alive = False
multi_server.run_once()
# should not start mock_server - airflow is not responding
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 0
mock_data_fetcher.alive = True
multi_server.run_once()
# should start now since it's alive
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 1
mock_data_fetcher.alive = False
multi_server.run_once()
# shouldn't actively kill the syncer, despite data_fetcher not responsive
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 2
for monitor in multi_server.active_monitors.values():
assert monitor.active_components
mock_syncer.should_fail = True
multi_server.run_once()
# now only if syncer fails (as a result of failing data_fetcher), it will be evicted
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 2
for monitor in multi_server.active_monitors.values():
assert not monitor.active_components
multi_server.run_once()
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 2
for monitor in multi_server.active_monitors.values():
assert not monitor.active_components
mock_syncer.should_fail = False
mock_data_fetcher.alive = True
# now if everything is ok - should be back
multi_server.run_once()
assert len(multi_server.active_monitors) == 1
assert mock_syncer.sync_count == 3 # due to test - it should be 3 and not 1
for monitor in multi_server.active_monitors.values():
assert monitor.active_components
# we should have only one exception (from failed syncer)
assert count_logged_exceptions(caplog) < 2
def test_07_test_error_cleanup(
self,
multi_server,
mock_server_config_service,
mock_data_fetcher,
mock_tracking_service,
caplog,
):
mock_server_config_service.mock_servers = [
AirflowServerConfig(uuid.uuid4(), state_sync_enabled=True)
]
multi_server.run_once()
# should start mock_server, should do 1 iteration
assert len(multi_server.active_monitors) == 1
assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
assert not mock_tracking_service.current_monitor_state.monitor_error_message
mock_data_fetcher.alive = False
multi_server.run_once()
# still alive
assert len(multi_server.active_monitors) == 1
assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
assert mock_tracking_service.current_monitor_state.monitor_error_message
first_error_lines = mock_tracking_service.current_monitor_state.monitor_error_message.split(
"\n"
)
multi_server.run_once()
assert mock_tracking_service.current_monitor_state.monitor_error_message
new_error_lines = mock_tracking_service.current_monitor_state.monitor_error_message.split(
"\n"
)
# should be same message except for last (Timestamp) line
assert first_error_lines[:-1] == new_error_lines[:-1]
mock_data_fetcher.alive = True
multi_server.run_once()
assert len(multi_server.active_monitors) == 1
assert mock_tracking_service.current_monitor_state.monitor_status == "Running"
assert not mock_tracking_service.current_monitor_state.monitor_error_message
def test_08_liveness_prove(self, multi_server, mock_server_config_service, caplog):
runner = CliRunner()
multi_server.run_once()
result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
assert result.exit_code == 0
time.sleep(6)
result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
assert result.exit_code != 0
multi_server.run_once()
result = runner.invoke(airflow_monitor_v2_alive, ["--max-time-diff", "5"])
assert result.exit_code == 0 | 0.55435 | 0.221624 |
import torch
from torch import nn, Tensor
from kbcr.smart import BaseSmartModel
from kbcr.reformulators import BaseReformulator
from profilehooks import profile
from typing import Tuple, Optional, List
import logging
logger = logging.getLogger(__name__)
class SimpleHoppy(BaseSmartModel):
def __init__(self,
             model: BaseSmartModel,
             entity_embeddings: nn.Embedding,
             hops_lst: List[Tuple[BaseReformulator, bool]]):
    """Wrap *model* with multi-hop query reformulation.

    Args:
        model: base smart model used to answer single-hop queries.
        entity_embeddings: embedding table for entities.
        hops_lst: ``(reformulator, is_reversed)`` pairs; each reformulator
            rewrites a relation into a sequence of hop relations.
    """
    super().__init__()
    self.model = model
    self.entity_embeddings = entity_embeddings
    self.hops_lst = hops_lst
    # Register the reformulators in a ModuleList so their parameters are
    # tracked by PyTorch.
    self._hops_lst = nn.ModuleList([reformulator for reformulator, _ in hops_lst])
def hop(self,
        rel: Tensor,
        arg1: Optional[Tensor],
        arg2: Optional[Tensor],
        mask_indices: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
    """Answer a single hop with the base model.

    Exactly one of *arg1*/*arg2* must be given; the other side of the
    triple is the one being queried. Returns the base model's ``(scores,
    substitute embeddings)`` pair for the open slot, shaped ``[B, K]`` and
    ``[B, K, E]``.
    """
    assert (arg1 is None) ^ (arg2 is None)
    batch_size, embedding_size = rel.shape[0], rel.shape[1]
    res_sp, res_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
    # Pick the result for whichever slot was left open.
    if arg2 is None:
        res = res_sp
    else:
        res = res_po
    scores, subs = res
    # Sanity-check the [B, K] / [B, K, E] contract.
    assert scores.shape[0] == subs.shape[0] == batch_size
    assert scores.shape[1] == subs.shape[1]
    assert subs.shape[2] == embedding_size
    return res
@profile(immediate=True)
def score(self,
          rel: Tensor,
          arg1: Tensor,
          arg2: Tensor,
          mask_indices: Optional[Tensor] = None,
          *args, **kwargs) -> Tensor:
    """Score (arg1, rel, arg2) triples via multi-hop reformulation.

    For every (reformulator, is_reversed) pair, the relation is rewritten
    into a chain of hop relations; intermediate hops branch out over the
    top-K substitute entities, path scores are combined with element-wise
    min along the chain, max over branches, and finally max over
    reformulators. Falls back to the base model's direct score when a
    reformulator yields an empty chain.

    NOTE(review): the @profile decorator (profilehooks) looks like a
    left-in profiling aid — confirm before shipping.
    """
    batch_size, embedding_size = rel.shape[0], rel.shape[1]
    global_res = None
    for hops, is_reversed in self.hops_lst:
        # Start from the subject; scores accumulates the path score.
        sources, scores = arg1, None
        # XXX
        prior = hops.prior(rel)
        if prior is not None:
            scores = prior
        # scores = hops.prior(rel)
        hop_rel_lst = hops(rel)
        nb_hops = len(hop_rel_lst)
        for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
            # [B * S, E]
            sources_2d = sources.view(-1, embedding_size)
            nb_sources = sources_2d.shape[0]
            # S = number of branches kept so far per batch element.
            nb_branches = nb_sources // batch_size
            # Tile the hop relation so each branch gets its own copy.
            hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
            hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
            if hop_idx < nb_hops:
                # Intermediate hop: expand each source into its top-K
                # substitute entities.
                # [B * S, K], [B * S, K, E]
                if is_reversed:
                    z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                else:
                    z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                k = z_emb.shape[1]
                # [B * S * K]
                z_scores_1d = z_scores.view(-1)
                # [B * S * K, E]
                z_emb_2d = z_emb.view(-1, embedding_size)
                # [B * S * K, E]
                sources = z_emb_2d
                # Path score = min over the hops walked so far.
                # [B * S * K]
                if scores is None:
                    scores = z_scores_1d
                else:
                    scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
            else:
                # Last hop: score directly against the target arg2.
                # [B, S, E]
                arg2_3d = arg2.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                # [B * S, E]
                arg2_2d = arg2_3d.view(-1, embedding_size)
                # [B * S]
                if is_reversed:
                    z_scores_1d = self.model.score(hop_rel_2d, arg2_2d, sources_2d, mask_indices=mask_indices)
                else:
                    z_scores_1d = self.model.score(hop_rel_2d, sources_2d, arg2_2d, mask_indices=mask_indices)
                scores = z_scores_1d if scores is None else torch.min(z_scores_1d, scores)
        if scores is not None:
            # Best branch per batch element.
            scores_2d = scores.view(batch_size, -1)
            res, _ = torch.max(scores_2d, dim=1)
        else:
            # Empty hop chain: fall back to the direct score.
            res = self.model.score(rel, arg1, arg2, mask_indices=mask_indices)
        # Best result across reformulators.
        global_res = res if global_res is None else torch.max(global_res, res)
    return global_res
@profile(immediate=True)
def forward(self,
            rel: Tensor,
            arg1: Optional[Tensor],
            arg2: Optional[Tensor],
            mask_indices: Optional[Tensor] = None,
            *args, **kwargs) -> Tuple[Optional[Tensor], Optional[Tensor]]:
    """Score all candidate objects (sp) and/or subjects (po) via multi-hop.

    Mirrors :meth:`score`, but the last hop ranks every candidate entity
    (the base model's forward) instead of scoring a given target. When
    *arg2* is given, the hop chain is walked in reverse with each hop's
    direction flipped. Per-hop scores combine with min, branches with max,
    reformulators with max. Returns ``(scores_sp, scores_po)``; an
    argument that is None yields the corresponding side as computed by the
    base model fallback (or None — depends on self.model.forward's
    contract, not visible here).
    """
    batch_size, embedding_size = rel.shape[0], rel.shape[1]
    scores_sp = scores_po = None
    global_scores_sp = global_scores_po = None
    for hops, is_reversed in self.hops_lst:
        hop_rel_lst = hops(rel)
        nb_hops = len(hop_rel_lst)
        if arg1 is not None:
            # Subject given: walk the chain forward towards the object.
            sources, scores = arg1, None
            # XXX
            prior = hops.prior(rel)
            if prior is not None:
                scores = prior
            # scores = hops.prior(rel)
            for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Intermediate hop: branch over top-K substitutes.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # Path score = min over hops so far.
                    # [B * S * K]
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Last hop: rank all candidate objects.
                    # [B * S, K]
                    if is_reversed:
                        _, res_sp = self.model.forward(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        res_sp, _ = self.model.forward(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    scores_sp, subs_sp = res_sp
                    k = scores_sp.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_sp = torch.min(scores, scores_sp)
                    # Max over branches.
                    # [B, S, K]
                    scores_sp = scores_sp.view(batch_size, -1, k)
                    # [B, K]
                    scores_sp, _ = torch.max(scores_sp, dim=1)
        if arg2 is not None:
            # Object given: walk the chain in reverse towards the subject.
            sources, scores = arg2, None
            # XXX
            # NOTE(review): unlike the arg1 branch, the prior is assigned
            # without the None check — equivalent in effect, but the
            # inconsistency is worth confirming as intentional.
            scores = hops.prior(rel)
            for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Direction is flipped relative to the arg1 branch.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # [B * S * K]
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Last hop: rank all candidate subjects.
                    # [B * S, K]
                    if is_reversed:
                        res_po, _ = self.model.forward(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        _, res_po = self.model.forward(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    scores_po, subs_po = res_po
                    k = scores_po.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_po = torch.min(scores, scores_po)
                    # [B, S, K]
                    scores_po = scores_po.view(batch_size, -1, k)
                    # [B, K]
                    scores_po, _ = torch.max(scores_po, dim=1)
        if scores_sp is None and scores_po is None:
            # Empty hop chain: direct base-model forward.
            scores_sp, scores_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
        # Best across reformulators.
        global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
        global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
    if global_scores_sp is None and global_scores_po is None:
        # No reformulators at all: plain base-model forward.
        global_scores_sp, global_scores_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
    return global_scores_sp, global_scores_po
@profile(immediate=True)
def forward_(self,
             rel: Tensor,
             arg1: Optional[Tensor],
             arg2: Optional[Tensor],
             mask_indices: Optional[Tensor] = None,
             *args, **kwargs) -> Tuple[Optional[Tuple[Tensor, Optional[Tensor]]],
                                       Optional[Tuple[Tensor, Optional[Tensor]]]]:
    """Variant of :meth:`forward` delegating to ``self.model.forward_``.

    Same multi-hop min/max combination as :meth:`forward`, but without the
    reformulator prior, and each side of the result is wrapped as a
    ``(scores, substitutes)`` pair — the substitute slot of the final
    return is always None here.
    """
    batch_size, embedding_size = rel.shape[0], rel.shape[1]
    scores_sp = scores_po = None
    global_scores_sp = global_scores_po = None
    for hops, is_reversed in self.hops_lst:
        hop_rel_lst = hops(rel)
        nb_hops = len(hop_rel_lst)
        if arg1 is not None:
            # Subject given: walk the chain forward towards the object.
            sources, scores = arg1, None
            for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Intermediate hop: branch over top-K substitutes.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # Path score = min over hops so far.
                    # [B * S * K]
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Last hop: rank all candidate objects.
                    # [B * S, K]
                    if is_reversed:
                        _, res_sp = self.model.forward_(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        res_sp, _ = self.model.forward_(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    scores_sp, subs_sp = res_sp
                    k = scores_sp.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_sp = torch.min(scores, scores_sp)
                    # Max over branches.
                    # [B, S, K]
                    scores_sp = scores_sp.view(batch_size, -1, k)
                    # [B, K]
                    scores_sp, _ = torch.max(scores_sp, dim=1)
        if arg2 is not None:
            # Object given: walk the chain in reverse towards the subject.
            sources, scores = arg2, None
            for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Direction is flipped relative to the arg1 branch.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # [B * S * K]
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Last hop: rank all candidate subjects.
                    # [B * S, K]
                    if is_reversed:
                        res_po, _ = self.model.forward_(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        _, res_po = self.model.forward_(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    scores_po, subs_po = res_po
                    k = scores_po.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_po = torch.min(scores, scores_po)
                    # [B, S, K]
                    scores_po = scores_po.view(batch_size, -1, k)
                    # [B, K]
                    scores_po, _ = torch.max(scores_po, dim=1)
        if scores_sp is None and scores_po is None:
            # Empty hop chain: direct base-model forward_.
            (scores_sp, _), (scores_po, _) = self.model.forward_(rel, arg1, arg2, mask_indices=mask_indices)
        # Best across reformulators.
        global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
        global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
    if global_scores_sp is None and global_scores_po is None:
        # No reformulators at all: plain base-model forward_.
        (global_scores_sp, _), (global_scores_po, _) = self.model.forward_(rel, arg1, arg2, mask_indices=mask_indices)
    return (global_scores_sp, None), (global_scores_po, None)
@profile(immediate=True)
def forward__(self,
              rel: Tensor,
              arg1: Optional[Tensor],
              arg2: Optional[Tensor],
              mask_indices: Optional[Tensor] = None,
              *args, **kwargs) -> Tuple[Optional[Tuple[Tensor, Optional[Tensor]]],
                                        Optional[Tuple[Tensor, Optional[Tensor]]]]:
    """Multi-hop scoring of (s, p, ?) and (?, p, o) queries.

    For each ``(hops, is_reversed)`` pair in ``self.hops_lst`` the relation
    ``rel`` is rewritten into a list of hop relations.  Scores along a hop
    path are combined with ``torch.min`` and alternative branches are
    aggregated with ``torch.max``; results across reformulators are also
    combined with ``torch.max``.  Returns ``((scores_sp, None),
    (scores_po, None))``; a fallback call to ``self.model.forward__`` is
    used when no hop produced scores.
    """
    batch_size, embedding_size = rel.shape[0], rel.shape[1]
    scores_sp = scores_po = None
    global_scores_sp = global_scores_po = None
    for hops, is_reversed in self.hops_lst:
        # Rewrite the relation embedding into a sequence of hop relations.
        hop_rel_lst = hops(rel)
        nb_hops = len(hop_rel_lst)
        if arg1 is not None:
            # Predict the object: follow the hops forward from the subject.
            sources, scores = arg1, None
            for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                # S = number of alternative branches kept so far per batch item.
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Intermediate hop: expand each source into K candidates.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # [B * S * K] -- path score is the min of its hop scores.
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Final hop: score every candidate object in one call.
                    # [B * S, K]
                    if is_reversed:
                        _, res_sp = self.model.forward__(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        res_sp, _ = self.model.forward__(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    scores_sp, subs_sp = res_sp
                    k = scores_sp.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_sp = torch.min(scores, scores_sp)
                    # [B, S, K]
                    scores_sp = scores_sp.view(batch_size, -1, k)
                    # [B, K] -- keep the best branch per candidate.
                    scores_sp, _ = torch.max(scores_sp, dim=1)
        if arg2 is not None:
            # Predict the subject: follow the hops backwards from the object.
            sources, scores = arg2, None
            for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # [B * S, K], [B * S, K, E]
                    # Note: argument order is mirrored w.r.t. the arg1 branch.
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # [B * S * K]
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # [B * S, K]
                    if is_reversed:
                        res_po, _ = self.model.forward__(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    else:
                        _, res_po = self.model.forward__(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    scores_po, subs_po = res_po
                    k = scores_po.shape[1]
                    if scores is not None:
                        scores = scores.view(-1, 1).repeat(1, k)
                        scores_po = torch.min(scores, scores_po)
                    # [B, S, K]
                    scores_po = scores_po.view(batch_size, -1, k)
                    # [B, K]
                    scores_po, _ = torch.max(scores_po, dim=1)
        if scores_sp is None and scores_po is None:
            # No hops available for this reformulator: delegate to the model.
            (scores_sp, _), (scores_po, _) = self.model.forward__(rel, arg1, arg2, mask_indices=mask_indices)
        global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
        global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
    if global_scores_sp is None and global_scores_po is None:
        # Empty hops_lst: fall back entirely to the wrapped model.
        (global_scores_sp, _), (global_scores_po, _) = self.model.forward__(rel, arg1, arg2,
                                                                            mask_indices=mask_indices)
    return (global_scores_sp, None), (global_scores_po, None)
def factor(self,
           embedding_vector: Tensor) -> Tensor:
    """Delegate factor computation for an embedding to the wrapped model."""
    # NOTE(review): dataset-export residue ("| kbcr/smart/simple.py |") was
    # fused to this return line, making it a syntax error; removed.
    return self.model.factor(embedding_vector)
import torch
from torch import nn, Tensor
from kbcr.smart import BaseSmartModel
from kbcr.reformulators import BaseReformulator
from profilehooks import profile
from typing import Tuple, Optional, List
import logging
logger = logging.getLogger(__name__)
class SimpleHoppy(BaseSmartModel):
    """Multi-hop reasoning wrapper around a base link-prediction model.

    Each ``(reformulator, is_reversed)`` pair in ``hops_lst`` rewrites a
    relation embedding into a list of hop-relation embeddings.  Scores
    along a single hop path are combined with an element-wise ``min``
    (path conjunction) and alternative branches/reformulators are
    aggregated with ``max`` (disjunction).

    Parameters
    ----------
    model : BaseSmartModel
        Underlying scoring model used for each individual hop.
    entity_embeddings : nn.Embedding
        Embeddings of all entities.
    hops_lst : List[Tuple[BaseReformulator, bool]]
        Relation reformulators, each flagged with whether its hop
        sequence is applied in reverse.
    """

    def __init__(self,
                 model: BaseSmartModel,
                 entity_embeddings: nn.Embedding,
                 hops_lst: List[Tuple[BaseReformulator, bool]]):
        super().__init__()
        self.model = model
        self.entity_embeddings = entity_embeddings
        self.hops_lst = hops_lst
        # Register the reformulators as sub-modules so their parameters
        # are tracked by PyTorch (optimizer, .to(device), state_dict).
        self._hops_lst = nn.ModuleList([hops for hops, _ in hops_lst])

    def hop(self,
            rel: Tensor,
            arg1: Optional[Tensor],
            arg2: Optional[Tensor],
            mask_indices: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        """Score a single hop and return the top-K (scores, embeddings).

        Exactly one of ``arg1``/``arg2`` must be given; the missing slot
        is the one being predicted.  Returns a pair of tensors with
        shapes [B, K] and [B, K, E].
        """
        assert (arg1 is None) ^ (arg2 is None)
        batch_size, embedding_size = rel.shape[0], rel.shape[1]
        res_sp, res_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
        res = res_sp if arg2 is None else res_po
        # [B, K], [B, K, E]
        scores, subs = res
        assert scores.shape[0] == subs.shape[0]
        assert scores.shape[1] == subs.shape[1]
        assert scores.shape[0] == batch_size
        assert subs.shape[2] == embedding_size
        # [B, K], [B, K, E]
        return res

    @profile(immediate=True)
    def score(self,
              rel: Tensor,
              arg1: Tensor,
              arg2: Tensor,
              mask_indices: Optional[Tensor] = None,
              *args, **kwargs) -> Tensor:
        """Score (arg1, rel, arg2) triples, maximizing over all hop paths."""
        batch_size, embedding_size = rel.shape[0], rel.shape[1]
        global_res = None
        for hops, is_reversed in self.hops_lst:
            sources, scores = arg1, None
            # Optional prior score supplied by the reformulator.
            prior = hops.prior(rel)
            if prior is not None:
                scores = prior
            hop_rel_lst = hops(rel)
            nb_hops = len(hop_rel_lst)
            for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                # [B * S, E]
                sources_2d = sources.view(-1, embedding_size)
                nb_sources = sources_2d.shape[0]
                # S = number of alternative branches kept so far per batch item.
                nb_branches = nb_sources // batch_size
                hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                if hop_idx < nb_hops:
                    # Intermediate hop: expand each source into K candidates.
                    # [B * S, K], [B * S, K, E]
                    if is_reversed:
                        z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                    else:
                        z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                    k = z_emb.shape[1]
                    # [B * S * K]
                    z_scores_1d = z_scores.view(-1)
                    # [B * S * K, E]
                    z_emb_2d = z_emb.view(-1, embedding_size)
                    # [B * S * K, E]
                    sources = z_emb_2d
                    # [B * S * K] -- path score is the min of its hop scores.
                    if scores is None:
                        scores = z_scores_1d
                    else:
                        scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                else:
                    # Final hop: score directly against the given target arg2.
                    # [B, S, E]
                    arg2_3d = arg2.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    # [B * S, E]
                    arg2_2d = arg2_3d.view(-1, embedding_size)
                    # [B * S]
                    if is_reversed:
                        z_scores_1d = self.model.score(hop_rel_2d, arg2_2d, sources_2d, mask_indices=mask_indices)
                    else:
                        z_scores_1d = self.model.score(hop_rel_2d, sources_2d, arg2_2d, mask_indices=mask_indices)
                    scores = z_scores_1d if scores is None else torch.min(z_scores_1d, scores)
            if scores is not None:
                scores_2d = scores.view(batch_size, -1)
                # Disjunction over branches: keep the best-scoring path.
                res, _ = torch.max(scores_2d, dim=1)
            else:
                res = self.model.score(rel, arg1, arg2, mask_indices=mask_indices)
            global_res = res if global_res is None else torch.max(global_res, res)
        return global_res

    @profile(immediate=True)
    def forward(self,
                rel: Tensor,
                arg1: Optional[Tensor],
                arg2: Optional[Tensor],
                mask_indices: Optional[Tensor] = None,
                *args, **kwargs) -> Tuple[Optional[Tensor], Optional[Tensor]]:
        """Multi-hop scoring of (s, p, ?) and (?, p, o) queries.

        Returns ``(scores_sp, scores_po)``; each side is computed only
        when the corresponding argument is given, and falls back to the
        wrapped model when no hops produced scores.
        """
        batch_size, embedding_size = rel.shape[0], rel.shape[1]
        scores_sp = scores_po = None
        global_scores_sp = global_scores_po = None
        for hops, is_reversed in self.hops_lst:
            hop_rel_lst = hops(rel)
            nb_hops = len(hop_rel_lst)
            if arg1 is not None:
                # Predict the object: follow the hops forward from the subject.
                sources, scores = arg1, None
                prior = hops.prior(rel)
                if prior is not None:
                    scores = prior
                for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            _, res_sp = self.model.forward(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            res_sp, _ = self.model.forward(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        scores_sp, subs_sp = res_sp
                        k = scores_sp.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_sp = torch.min(scores, scores_sp)
                        # [B, S, K]
                        scores_sp = scores_sp.view(batch_size, -1, k)
                        # [B, K]
                        scores_sp, _ = torch.max(scores_sp, dim=1)
            if arg2 is not None:
                # Predict the subject: follow the hops backwards from the object.
                sources, scores = arg2, None
                # Consistency fix: use the same None-guard as the arg1 branch
                # (behaviorally equivalent to the previous direct assignment).
                prior = hops.prior(rel)
                if prior is not None:
                    scores = prior
                for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            res_po, _ = self.model.forward(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            _, res_po = self.model.forward(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        scores_po, subs_po = res_po
                        k = scores_po.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_po = torch.min(scores, scores_po)
                        # [B, S, K]
                        scores_po = scores_po.view(batch_size, -1, k)
                        # [B, K]
                        scores_po, _ = torch.max(scores_po, dim=1)
            if scores_sp is None and scores_po is None:
                scores_sp, scores_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
            global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
            global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
        if global_scores_sp is None and global_scores_po is None:
            global_scores_sp, global_scores_po = self.model.forward(rel, arg1, arg2, mask_indices=mask_indices)
        return global_scores_sp, global_scores_po

    @profile(immediate=True)
    def forward_(self,
                 rel: Tensor,
                 arg1: Optional[Tensor],
                 arg2: Optional[Tensor],
                 mask_indices: Optional[Tensor] = None,
                 *args, **kwargs) -> Tuple[Optional[Tuple[Tensor, Optional[Tensor]]],
                                           Optional[Tuple[Tensor, Optional[Tensor]]]]:
        """Variant of :meth:`forward` that recurses via ``model.forward_``
        and returns ``((scores_sp, None), (scores_po, None))``."""
        batch_size, embedding_size = rel.shape[0], rel.shape[1]
        scores_sp = scores_po = None
        global_scores_sp = global_scores_po = None
        for hops, is_reversed in self.hops_lst:
            hop_rel_lst = hops(rel)
            nb_hops = len(hop_rel_lst)
            if arg1 is not None:
                sources, scores = arg1, None
                for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            _, res_sp = self.model.forward_(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            res_sp, _ = self.model.forward_(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        scores_sp, subs_sp = res_sp
                        k = scores_sp.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_sp = torch.min(scores, scores_sp)
                        # [B, S, K]
                        scores_sp = scores_sp.view(batch_size, -1, k)
                        # [B, K]
                        scores_sp, _ = torch.max(scores_sp, dim=1)
            if arg2 is not None:
                sources, scores = arg2, None
                for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            res_po, _ = self.model.forward_(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            _, res_po = self.model.forward_(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        scores_po, subs_po = res_po
                        k = scores_po.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_po = torch.min(scores, scores_po)
                        # [B, S, K]
                        scores_po = scores_po.view(batch_size, -1, k)
                        # [B, K]
                        scores_po, _ = torch.max(scores_po, dim=1)
            if scores_sp is None and scores_po is None:
                (scores_sp, _), (scores_po, _) = self.model.forward_(rel, arg1, arg2, mask_indices=mask_indices)
            global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
            global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
        if global_scores_sp is None and global_scores_po is None:
            (global_scores_sp, _), (global_scores_po, _) = self.model.forward_(rel, arg1, arg2, mask_indices=mask_indices)
        return (global_scores_sp, None), (global_scores_po, None)

    @profile(immediate=True)
    def forward__(self,
                  rel: Tensor,
                  arg1: Optional[Tensor],
                  arg2: Optional[Tensor],
                  mask_indices: Optional[Tensor] = None,
                  *args, **kwargs) -> Tuple[Optional[Tuple[Tensor, Optional[Tensor]]],
                                            Optional[Tuple[Tensor, Optional[Tensor]]]]:
        """Variant of :meth:`forward_` that recurses via ``model.forward__``."""
        batch_size, embedding_size = rel.shape[0], rel.shape[1]
        scores_sp = scores_po = None
        global_scores_sp = global_scores_po = None
        for hops, is_reversed in self.hops_lst:
            hop_rel_lst = hops(rel)
            nb_hops = len(hop_rel_lst)
            if arg1 is not None:
                sources, scores = arg1, None
                for hop_idx, hop_rel in enumerate(hop_rel_lst, start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            _, res_sp = self.model.forward__(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        else:
                            res_sp, _ = self.model.forward__(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        scores_sp, subs_sp = res_sp
                        k = scores_sp.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_sp = torch.min(scores, scores_sp)
                        # [B, S, K]
                        scores_sp = scores_sp.view(batch_size, -1, k)
                        # [B, K]
                        scores_sp, _ = torch.max(scores_sp, dim=1)
            if arg2 is not None:
                sources, scores = arg2, None
                for hop_idx, hop_rel in enumerate(reversed([h for h in hop_rel_lst]), start=1):
                    # [B * S, E]
                    sources_2d = sources.view(-1, embedding_size)
                    nb_sources = sources_2d.shape[0]
                    nb_branches = nb_sources // batch_size
                    hop_rel_3d = hop_rel.view(-1, 1, embedding_size).repeat(1, nb_branches, 1)
                    hop_rel_2d = hop_rel_3d.view(-1, embedding_size)
                    if hop_idx < nb_hops:
                        # [B * S, K], [B * S, K, E]
                        if is_reversed:
                            z_scores, z_emb = self.hop(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            z_scores, z_emb = self.hop(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        k = z_emb.shape[1]
                        # [B * S * K]
                        z_scores_1d = z_scores.view(-1)
                        # [B * S * K, E]
                        z_emb_2d = z_emb.view(-1, embedding_size)
                        # [B * S * K, E]
                        sources = z_emb_2d
                        # [B * S * K]
                        if scores is None:
                            scores = z_scores_1d
                        else:
                            scores = torch.min(z_scores_1d, scores.view(-1, 1).repeat(1, k).view(-1))
                    else:
                        # [B * S, K]
                        if is_reversed:
                            res_po, _ = self.model.forward__(hop_rel_2d, sources_2d, None, mask_indices=mask_indices)
                        else:
                            _, res_po = self.model.forward__(hop_rel_2d, None, sources_2d, mask_indices=mask_indices)
                        scores_po, subs_po = res_po
                        k = scores_po.shape[1]
                        if scores is not None:
                            scores = scores.view(-1, 1).repeat(1, k)
                            scores_po = torch.min(scores, scores_po)
                        # [B, S, K]
                        scores_po = scores_po.view(batch_size, -1, k)
                        # [B, K]
                        scores_po, _ = torch.max(scores_po, dim=1)
            if scores_sp is None and scores_po is None:
                (scores_sp, _), (scores_po, _) = self.model.forward__(rel, arg1, arg2, mask_indices=mask_indices)
            global_scores_sp = scores_sp if global_scores_sp is None else torch.max(global_scores_sp, scores_sp)
            global_scores_po = scores_po if global_scores_po is None else torch.max(global_scores_po, scores_po)
        if global_scores_sp is None and global_scores_po is None:
            (global_scores_sp, _), (global_scores_po, _) = self.model.forward__(rel, arg1, arg2,
                                                                                mask_indices=mask_indices)
        return (global_scores_sp, None), (global_scores_po, None)

    def factor(self,
               embedding_vector: Tensor) -> Tensor:
        """Delegate factor computation for an embedding to the wrapped model."""
        # NOTE(review): dataset-export residue ("| 0.888429 | 0.412234 |") was
        # fused to this return line, making it a syntax error; removed.
        return self.model.factor(embedding_vector)
"""
"""
import thermosteam as tmo
from .units_of_measure import AbsoluteUnitsOfMeasure
from . import utils
from .exceptions import UndefinedChemical
from .base import PhaseHandle
from ._phase import Phase, LockedPhase, NoPhase, PhaseIndexer, phase_tuple, check_phase
from free_properties import PropertyFactory, property_array
from collections.abc import Iterable
import numpy as np
# Public names exported by this module.
__all__ = (
    'SplitIndexer',
    'ChemicalIndexer',
    'MaterialIndexer',
    'ChemicalMolarFlowIndexer',
    'MolarFlowIndexer',
    'ChemicalMassFlowIndexer',
    'MassFlowIndexer',
    'ChemicalVolumetricFlowIndexer',
    'VolumetricFlowIndexer',
    'MassFlowProperty',
    'VolumetricFlowProperty'
)

# %% Utilities

# Fast instance allocation that bypasses __init__ (used by ``blank``/``from_data``).
_new = object.__new__
def raise_material_indexer_index_error():
    """Raise an IndexError describing valid MaterialIndexer index syntax."""
    # Fixed typo in the user-facing message: "ellipisis" -> "ellipsis".
    raise IndexError("index by [phase, IDs] where phase is a "
                     "(str, ellipsis, or missing), and IDs is a "
                     "(str, tuple(str), ellipsis, or missing)")
def find_main_phase(indexers, default):
    """Return the phase shared by all indexers, or ``default``.

    ``default`` is returned when any indexer reports a different phase,
    or when an indexer has no ``phase`` attribute at all (e.g. a
    multi-phase indexer).
    """
    main_indexer, *indexers = indexers
    try:
        phase = main_indexer.phase
        for i in indexers:
            if phase != i.phase: return default
    except AttributeError:
        # Narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit and hid real bugs); only a missing
        # ``phase`` attribute means "no single main phase".
        return default
    return phase
def nonzeros(IDs, data):
    """Return the IDs and values at the nonzero positions of ``data``."""
    nonzero_index = np.flatnonzero(data)
    selected_IDs = [IDs[i] for i in nonzero_index]
    return selected_IDs, data[nonzero_index]
# %% Abstract indexer
class Indexer:
    """Base class providing shared fast-indexing behavior."""
    __slots__ = ('_data',)

    # Units of measure; ``None`` means the indexer is unitless.
    units = None

    def empty(self):
        """Zero all stored values in place."""
        self._data[:] = 0

    def isempty(self):
        """Return whether every stored value is zero."""
        return (self._data == 0.).all()

    def copy(self):
        """Return a new indexer with the same metadata and copied data."""
        clone = self._copy_without_data()
        clone._data = self._data.copy()
        return clone
    __copy__ = copy

    def get_conversion_factor(self, units):
        """Return the factor converting internal units to ``units``."""
        if not self.units:
            raise TypeError(f"{type(self).__name__} object is unitless; "
                            f"cannot get conversion factor for {units}")
        return self.units.conversion_factor(units)

    def get_data(self, units, *index):
        """Return the (optionally indexed) data converted to ``units``."""
        factor = self.get_conversion_factor(units)
        if not index:
            return factor * self._data
        if len(index) == 1:
            return factor * self[index[0]]
        return factor * self[index]

    def set_data(self, data, units, *index):
        """Set the (optionally indexed) data given in ``units``."""
        scaled = np.asarray(data, dtype=float) / self.get_conversion_factor(units)
        if not index:
            self._data[:] = scaled
        elif len(index) == 1:
            self[index[0]] = scaled
        else:
            self[index] = scaled

    @property
    def data(self):
        """Underlying data array."""
        return self._data
# %% Phase data
@utils.chemicals_user
class SplitIndexer(Indexer):
    """
    Create a SplitIndexer that can index a 1d-array given
    chemical IDs.

    Parameters
    ----------
    chemicals : Chemicals
        Required to define the chemicals that are present.
    **ID_data : float
        ID-value pairs

    """
    __slots__ = ('_chemicals',)

    def __new__(cls, chemicals=None, **ID_data):
        # Start from a zeroed array, then fill in the given ID-value pairs.
        self = cls.blank(chemicals)
        if ID_data:
            IDs = tuple(ID_data)
            values = list(ID_data.values())
            self[IDs] = values
        return self

    def __reduce__(self):
        # Pickle support; skip data validation on reconstruction.
        return self.from_data, (self._data, self._chemicals, False)

    def reset_chemicals(self, chemicals, container=None):
        """Re-map stored split values onto a new ``chemicals`` set.

        Values of chemicals absent from the new set are dropped.  If
        ``container`` is given, it is zeroed and reused as the new data
        array.  Returns the old data array.
        """
        old_data = self._data
        if container is None:
            self._data = data = np.zeros(chemicals.size, float)
        else:
            self._data = data = container
            data[:] = 0.
        # Carry over splits for chemicals that still exist, matched by CAS.
        for CAS, split in zip(self._chemicals.CASs, old_data):
            if CAS in chemicals: data[chemicals.index(CAS)] = split
        self._load_chemicals(chemicals)
        return old_data

    @classmethod
    def blank(cls, chemicals=None):
        """Return a new indexer with all splits set to zero."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._data = np.zeros(self._chemicals.size, float)
        return self

    @classmethod
    def from_data(cls, data, chemicals=None, check_data=True):
        """Wrap an existing 1d array of split fractions (no copy)."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        if check_data:
            assert data.ndim == 1, 'data must be a 1d numpy array'
            assert data.size == self._chemicals.size, ('size of data must be equal to '
                                                       'size of chemicals')
            # Split fractions cannot exceed 1.
            assert (data <= 1.).all(), 'data must be less or equal to one'
        self._data = data
        return self

    def __getitem__(self, key):
        # ``kind`` encodes how the key resolved: 0/1 -> plain index,
        # 2 -> nested (grouped) indices.  TODO confirm against
        # Chemicals._get_index_and_kind.
        chemicals = self._chemicals
        index, kind = chemicals._get_index_and_kind(key)
        if kind == 0 or kind == 1:
            return self._data[index]
        elif kind == 2:
            data = self._data
            return np.array([data[i] for i in index], dtype=object)
        else:
            raise IndexError('unknown error')

    def __setitem__(self, key, data):
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0 or kind == 1:
            self._data[index] = data
        elif kind == 2:
            local_data = self._data
            isa = isinstance
            if isa(data, Iterable):
                # One value per (possibly nested) index.
                for i, x in zip(index, data): local_data[i] = x
            else:
                # Broadcast a scalar over all indices.
                for i in index: local_data[i] = data
        else:
            raise IndexError('unknown error')

    def __format__(self, tabs=""):
        # ``tabs`` controls the indentation level of the repr; non-zero
        # values place each ID-value pair on its own line.
        if not tabs: tabs = 1
        tabs = int(tabs)
        tab = tabs*4*" "
        if tab:
            dlim = ",\n" + tab
        else:
            dlim = ", "
        ID_data = utils.repr_IDs_data(self._chemicals.IDs, self._data, dlim, start='')
        return f"{type(self).__name__}({ID_data})"

    def __repr__(self):
        return self.__format__()

    def _info(self, N):
        """Return string with all specifications."""
        IDs = self.chemicals.IDs
        data = self.data
        # Only display nonzero entries.
        IDs, data = nonzeros(IDs, data)
        N_IDs = len(IDs)
        if N_IDs == 0:
            return f"{type(self).__name__}: (all zeros)"
        else:
            basic_info = f"{type(self).__name__}:\n "
            new_line = '\n '
            data_info = ''
            lengths = [len(i) for i in IDs]
            # Pad IDs so values line up in a column.
            maxlen = max(lengths) + 1
            # Fall back to the global stream display setting for the cap.
            N_max = N or tmo.Stream.display_units.N
            too_many_chemicals = N_IDs > N_max
            N = N_max if too_many_chemicals else N_IDs
            for i in range(N):
                spaces = ' ' * (maxlen - lengths[i])
                if i != 0:
                    data_info += new_line
                data_info += IDs[i] + spaces + f' {data[i]:.3g}'
            if too_many_chemicals:
                data_info += new_line + '...'
            return (basic_info
                    + data_info)

    def show(self, N=None):
        """Print all specifications.

        Parameters
        ----------
        N: int, optional
            Number of compounds to display.

        """
        print(self._info(N))
    _ipython_display_ = show
@utils.chemicals_user
class ChemicalIndexer(Indexer):
    """
    Create a ChemicalIndexer that can index a single-phase, 1d-array given
    chemical IDs.

    Parameters
    ----------
    phase : [str or PhaseContainer] {'s', 'l', 'g', 'S', 'L', 'G'}
        Phase of data.
    units : str
        Units of measure of input data.
    chemicals : Chemicals
        Required to define the chemicals that are present.
    **ID_data : float
        ID-value pairs

    Notes
    -----
    A ChemicalIndexer does not have any units defined. To use units of
    measure, use the `ChemicalMolarIndexer`, `ChemicalMassIndexer`, or
    `ChemicalVolumetricIndexer`.

    """
    __slots__ = ('_chemicals', '_phase', '_data_cache')

    def __new__(cls, phase=NoPhase, units=None, chemicals=None, **ID_data):
        self = cls.blank(phase, chemicals)
        if ID_data:
            IDs = tuple(ID_data)
            values = list(ID_data.values())
            self[IDs] = values
            # Interpret the given values as being in ``units`` and convert
            # them to internal units in place.
            if units: self.set_data(self._data, units)
        return self

    def reset_chemicals(self, chemicals, container=None):
        """Re-map stored values onto a new ``chemicals`` set.

        If ``container`` is given, it is a (data, data_cache) pair reused
        for the new storage.  Returns the old (data, data_cache) pair.
        """
        old_data = self._data
        old_container = (old_data, self._data_cache)
        if container is None:
            self._data = data = np.zeros(chemicals.size, float)
            self._data_cache = {}
        else:
            data, self._data_cache = container
            self._data = data
            data[:] = 0.
        # Carry over nonzero values, matched by CAS number.
        # NOTE(review): unlike SplitIndexer, missing CASs are not skipped;
        # presumably nonzero chemicals are expected to exist in the new set.
        for CAS, value in zip(self._chemicals.CASs, old_data):
            if value: data[chemicals.index(CAS)] = value
        self._load_chemicals(chemicals)
        return old_container

    def __reduce__(self):
        # Pickle support; skip data validation on reconstruction.
        return self.from_data, (self._data, self._phase, self._chemicals, False)

    def __getitem__(self, key):
        # ``kind``: 0 -> plain index, 1 -> chemical group (sum its members),
        # 2 -> mixed list of indices and groups.
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0:
            return self._data[index]
        elif kind == 1:
            return self._data[index].sum()
        elif kind == 2:
            arr = np.zeros(len(index))
            data = self._data
            isa = isinstance
            for d, s in enumerate(index):
                if isa(s, list):
                    # Group entry: report the total over its members.
                    arr[d] = data[s].sum()
                else:
                    arr[d] = data[s]
            return arr
        else:
            raise IndexError('unknown index error')

    def __setitem__(self, key, data):
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0:
            self._data[index] = data
        elif kind == 1:
            # Group entry: distribute the total by the group's composition.
            composition = self.group_compositions[key]
            self._data[index] = data * composition
        elif kind == 2:
            local_data = self._data
            isa = isinstance
            group_compositions = self.group_compositions
            for n in range(len(index)):
                i = index[n]
                local_data[i] = data[n] * group_compositions[key[n]] if isa(i, list) else data[n]
        else:
            raise IndexError('unknown error')

    def sum_across_phases(self):
        # Single phase: the data itself is already the phase total.
        return self._data

    @property
    def get_index(self):
        return self._chemicals.get_index

    def mix_from(self, others):
        """Set this indexer to the sum of ``others`` (in place)."""
        # Adopt the common phase of the sources, if there is one.
        self.phase = find_main_phase(others, self.phase)
        chemicals = self._chemicals
        data = self._data
        # Snapshot first: ``self`` may be among ``others`` and is zeroed below.
        chemicals_data = [(i._chemicals, i._data.copy() if i is self else i.sum_across_phases())
                          for i in others]
        data[:] = 0.
        for ichemicals, idata in chemicals_data:
            if chemicals is ichemicals:
                data[:] += idata
            else:
                # Different chemical sets: map nonzero entries by CAS.
                other_index, = np.where(idata)
                CASs = ichemicals.CASs
                self_index = chemicals.indices([CASs[i] for i in other_index])
                data[self_index] += idata[other_index]

    def separate_out(self, other):
        """Subtract ``other``'s material from this indexer (in place)."""
        if self._chemicals is other._chemicals:
            self._data[:] -= other.sum_across_phases()
        else:
            idata = other._data
            other_index, = np.where(idata)
            IDs = other._chemicals.IDs
            self_index = self._chemicals.indices([IDs[i] for i in other_index])
            self._data[self_index] -= idata[other_index]

    def to_material_indexer(self, phases):
        """Return a multi-phase indexer with this data in ``self.phase``."""
        # NOTE(review): ``_MaterialIndexer`` is assigned elsewhere in the
        # module (class wiring not visible here).
        material_array = self._MaterialIndexer.blank(phases, self._chemicals)
        material_array[self.phase] = self._data
        return material_array

    def copy_like(self, other):
        """Copy ``other``'s data and phase into this indexer."""
        if self is other: return
        if self.chemicals is other.chemicals:
            self._data[:] = other._data
        else:
            self.empty()
            other_index, = np.where(other._data)
            CASs = other.chemicals.CASs
            self_index = self.chemicals.indices([CASs[i] for i in other_index])
            self._data[self_index] = other._data[other_index]
        self.phase = other.phase

    def _copy_without_data(self):
        # Duplicate metadata only; ``copy`` (base class) fills in the data.
        new = _new(self.__class__)
        new._chemicals = self._chemicals
        new._phase = self._phase.copy()
        new._data_cache = {}
        return new

    @classmethod
    def blank(cls, phase, chemicals=None):
        """Return a new indexer with all values set to zero."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._data = np.zeros(self._chemicals.size, float)
        self._phase = Phase.convert(phase)
        self._data_cache = {}
        return self

    @classmethod
    def from_data(cls, data, phase=NoPhase, chemicals=None, check_data=True):
        """Wrap an existing 1d data array (no copy)."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._phase = Phase.convert(phase)
        if check_data:
            assert data.ndim == 1, 'material data must be a 1d numpy array'
            assert data.size == self._chemicals.size, ('size of material data must be equal to '
                                                       'size of chemicals')
        self._data = data
        self._data_cache = {}
        return self

    @property
    def phase(self):
        # Delegate to the Phase container holding the actual phase字符? --
        # (the underlying ``_phase`` object stores the character).
        return self._phase._phase
    @phase.setter
    def phase(self, phase):
        self._phase.phase = phase

    def get_phase_and_composition(self):
        """Return phase and composition."""
        data = self._data
        # Normalize to fractions; assumes a nonzero total -- TODO confirm.
        return self.phase, data / data.sum()

    def __format__(self, tabs=""):
        if not tabs: tabs = 1
        tabs = int(tabs)
        tab = tabs*4*" "
        phase = f"phase={repr(self.phase)}"
        if tab:
            dlim = ",\n" + tab
            phase = '\n' + tab + phase
        else:
            dlim = ", "
        ID_data = utils.repr_IDs_data(self._chemicals.IDs, self._data, dlim)
        return f"{type(self).__name__}({phase}{ID_data})"

    def __repr__(self):
        return self.__format__()

    def _info(self, N):
        """Return string with all specifications."""
        IDs = self.chemicals.IDs
        data = self.data
        # Only display nonzero entries.
        IDs, data = nonzeros(IDs, data)
        N_IDs = len(IDs)
        if N_IDs == 0:
            return f"{type(self).__name__}: (empty)"
        elif self.units:
            basic_info = f"{type(self).__name__} ({self.units}):\n"
        else:
            basic_info = f"{type(self).__name__}:\n"
        beginning = f' ({self.phase}) ' if self.phase else " "
        new_line = '\n' + len(beginning) * ' '
        data_info = ''
        lengths = [len(i) for i in IDs]
        maxlen = max(lengths) + 1
        # Fall back to the global stream display setting for the cap.
        N_max = N or tmo.Stream.display_units.N
        too_many_chemicals = N_IDs > N_max
        N = N_max if too_many_chemicals else N_IDs
        for i in range(N):
            spaces = ' ' * (maxlen - lengths[i])
            if i != 0:
                data_info += new_line
            data_info += IDs[i] + spaces + f' {data[i]:.3g}'
        if too_many_chemicals:
            data_info += new_line + '...'
        return (basic_info
                + beginning
                + data_info)
    _ipython_display_ = show = SplitIndexer.show
@utils.chemicals_user
class MaterialIndexer(Indexer):
    """
    Create a MaterialIndexer that can index a multi-phase, 2d-array given
    the phase and chemical IDs.
    Parameters
    ----------
    phases : tuple['s', 'l', 'g', 'S', 'L', 'G']
        Phases of data rows.
    units : str
        Units of measure of input data.
    chemicals : Chemicals
        Required to define the chemicals that are present.
    **phase_data : tuple[str, float]
        phase-(ID, value) pairs
    Notes
    -----
    A MaterialIndexer does not have any units defined. To use units of measure, use the
    `MolarIndexer`, `MassIndexer`, or `VolumetricIndexer`.
    """
    __slots__ = ('_chemicals', '_phases', '_phase_indexer',
                 '_index_cache', '_data_cache')
    # Index caches shared across instances, keyed by (phases, chemicals).
    _index_caches = {}
    _ChemicalIndexer = ChemicalIndexer
    def __new__(cls, phases=None, units=None, chemicals=None, **phase_data):
        self = cls.blank(phases or phase_data, chemicals)
        if phase_data:
            for phase, ID_data in phase_data.items():
                IDs, data = zip(*ID_data)
                self[phase, IDs] = data
            # Convert the whole data array from the given units; previously
            # this passed `data` (only the last phase's values) by mistake.
            if units: self.set_data(self._data, units)
        return self
    def reset_chemicals(self, chemicals, container=None):
        """Reset chemicals, remapping data by CAS number, and return the
        old (data, cache) pair."""
        old_data = self._data
        old_data_cache = self._data_cache
        shape = N_phases, N_chemicals = (len(self._phases), chemicals.size)
        if container is None:
            self._data = data = np.zeros(shape, float)
            self._data_cache = {}
        else:
            # Adopt the given (data, cache) container; previously neither was
            # assigned to self, leaving the indexer pointing at the old array.
            data, cache = container
            self._data = data
            self._data_cache = cache
            data[:] = 0.
        old_chemicals = self._chemicals
        old_index = range(old_chemicals.size)
        CASs = old_chemicals.CASs
        for i in range(N_phases):
            for j in old_index:
                value = old_data[i, j]
                if value: data[i, chemicals.index(CASs[j])] = value
        self._load_chemicals(chemicals)
        self._set_cache()
        return (old_data, old_data_cache)
    def __reduce__(self):
        # Pickle support; skip data checks on reconstruction.
        return self.from_data, (self._data, self._phases, self._chemicals, False)
    def phases_are_empty(self, phases):
        """Return True if none of the given phases hold any material."""
        get_phase_index = self.get_phase_index
        data = self._data
        for phase in set(self._phases).intersection(phases):
            if data[get_phase_index(phase)].any(): return False
        return True
    def sum_across_phases(self):
        """Return a 1d-array of chemical totals summed over all phases."""
        return self._data.sum(0)
    def copy_like(self, other):
        """Copy data from another chemical or material indexer in place."""
        if self is other: return
        if isinstance(other, ChemicalIndexer):
            self.empty()
            other_data = other._data
            phase_index = self.get_phase_index(other.phase)
            if self.chemicals is other.chemicals:
                self._data[phase_index, :] = other_data
            else:
                # Map values through chemical IDs when chemicals differ.
                other_index, = np.where(other_data)
                IDs = other.chemicals.IDs
                self_index = self.chemicals.indices([IDs[i] for i in other_index])
                self._data[phase_index, self_index] += other._data[other_index]
        else:
            if self.chemicals is other.chemicals:
                self._data[:] = other._data
            else:
                self.empty()
                other_data = other._data
                other_index, = np.where(other_data.any(0))
                IDs = other.chemicals.IDs
                self_index = self.chemicals.indices([IDs[i] for i in other_index])
                self._data[:, self_index] = other_data[:, other_index]
    def mix_from(self, others):
        """Set data to the sum of all `others`, mapping phases and
        chemicals as needed."""
        isa = isinstance
        data = self._data
        get_phase_index = self.get_phase_index
        chemicals = self._chemicals
        phases = self._phases
        # Copy own data first since `data` is zeroed before accumulation.
        indexer_data = [(i, i._data.copy() if i is self else i._data) for i in others]
        data[:] = 0.
        for i, idata in indexer_data:
            if isa(i, MaterialIndexer):
                if phases == i.phases:
                    if chemicals is i.chemicals:
                        data[:] += idata
                    else:
                        idata = i._data
                        other_index, = np.where(idata.any(0))
                        IDs = i.chemicals.IDs
                        self_index = chemicals.indices([IDs[i] for i in other_index])
                        data[:, self_index] += idata[:, other_index]
                else:
                    if chemicals is i.chemicals:
                        for phase, idata in zip(i.phases, idata):
                            if not idata.any(): continue
                            data[get_phase_index(phase), :] += idata
                    else:
                        for phase, idata in zip(i.phases, idata):
                            if not idata.any(): continue
                            other_index, = np.where(idata)
                            IDs = i.chemicals.IDs
                            self_index = chemicals.indices([IDs[i] for i in other_index])
                            data[get_phase_index(phase), self_index] += idata[other_index]
            elif isa(i, ChemicalIndexer):
                if chemicals is i.chemicals:
                    data[get_phase_index(i.phase), :] += idata
                else:
                    other_index, = np.where(idata != 0.)
                    IDs = i.chemicals.IDs
                    self_index = chemicals.indices([IDs[i] for i in other_index])
                    data[get_phase_index(i.phase), self_index] += idata[other_index]
            else:
                raise ValueError("can only mix from chemical or material indexers")
    def separate_out(self, other):
        """Subtract another indexer's data from this one in place."""
        isa = isinstance
        data = self._data
        get_phase_index = self.get_phase_index
        chemicals = self._chemicals
        phases = self._phases
        idata = other._data
        if isa(other, MaterialIndexer):
            if phases == other.phases:
                if chemicals is other.chemicals:
                    data[:] -= idata
                else:
                    idata = other._data
                    other_index, = np.where(idata.any(0))
                    IDs = other.chemicals.IDs
                    self_index = chemicals.indices([IDs[i] for i in other_index])
                    data[:, self_index] -= idata[:, other_index]
            else:
                if chemicals is other.chemicals:
                    for phase, idata in zip(other.phases, idata):
                        if not idata.any(): continue
                        data[get_phase_index(phase), :] -= idata
                else:
                    for phase, idata in zip(other.phases, idata):
                        if not idata.any(): continue
                        other_index, = np.where(idata)
                        IDs = other.chemicals.IDs
                        self_index = chemicals.indices([IDs[i] for i in other_index])
                        data[get_phase_index(phase), self_index] -= idata[other_index]
        elif isa(other, ChemicalIndexer):
            if chemicals is other.chemicals:
                data[get_phase_index(other.phase), :] -= idata
            else:
                other_index, = np.where(idata != 0.)
                IDs = other.chemicals.IDs
                self_index = chemicals.indices([IDs[i] for i in other_index])
                data[get_phase_index(other.phase), self_index] -= idata[other_index]
        else:
            raise ValueError("can only separate out from chemical or material indexers")
    def _set_phases(self, phases):
        # Normalize phases and build the phase -> row index mapping.
        self._phases = phases = phase_tuple(phases)
        self._phase_indexer = PhaseIndexer(phases)
    def _set_cache(self):
        # All indexers with the same (phases, chemicals) share one index cache.
        caches = self._index_caches
        key = self._phases, self._chemicals
        try:
            self._index_cache = caches[key]
        except KeyError:
            self._index_cache = caches[key] = {}
    def _copy_without_data(self):
        # Shallow copy sharing immutable metadata but not the data array.
        new = _new(self.__class__)
        new._phases = self._phases
        new._chemicals = self._chemicals
        new._phase_indexer = self._phase_indexer
        new._index_cache = self._index_cache
        new._data_cache = {}
        return new
    @classmethod
    def blank(cls, phases, chemicals=None):
        """Create a MaterialIndexer with all data set to zero."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._set_phases(phases)
        self._set_cache()
        shape = (len(self._phases), self._chemicals.size)
        self._data = np.zeros(shape, float)
        self._data_cache = {}
        return self
    @classmethod
    def from_data(cls, data, phases, chemicals=None, check_data=True):
        """Create a MaterialIndexer that wraps the given 2d-array."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._set_phases(phases)
        self._set_cache()
        if check_data:
            assert data.ndim == 2, ('material data must be an 2d numpy array')
            M_phases = len(self._phases)
            N_chemicals = self._chemicals.size
            M, N = data.shape
            assert M == M_phases, ('number of phases must be equal to '
                                   'the number of material data rows')
            assert N == N_chemicals, ('size of chemicals '
                                      'must be equal to '
                                      'number of material data columns')
        self._data = data
        self._data_cache = {}
        return self
    @property
    def phases(self):
        """tuple[str] All material phases."""
        return self._phases
    @property
    def get_phase_index(self):
        """Callable mapping a phase to its data row index."""
        return self._phase_indexer
    def to_chemical_indexer(self, phase=NoPhase):
        """Return a single-phase ChemicalIndexer with data summed across phases."""
        return self._ChemicalIndexer.from_data(self._data.sum(0), phase, self._chemicals, False)
    def to_material_indexer(self, phases):
        """Return a new MaterialIndexer with the given phases."""
        material_indexer = self.__class__.blank(phases, self._chemicals)
        for phase, data in self:
            if data.any(): material_indexer[phase] = data
        return material_indexer
    def get_phase(self, phase):
        """Return a ChemicalIndexer view of one phase's row (shares memory)."""
        return self._ChemicalIndexer.from_data(self._data[self.get_phase_index(phase)],
                                               LockedPhase(phase), self._chemicals, False)
    def __getitem__(self, key):
        """Index data by [phase, IDs]; without a phase key, sum across phases."""
        index, kind, sum_across_phases = self._get_index_data(key)
        if sum_across_phases:
            if kind == 0: # Normal
                values = self._data[:, index].sum(0)
            elif kind == 1: # Chemical group
                values = self._data[:, index].sum()
            elif kind == 2: # Nested chemical group
                data = self._data
                values = np.array([data[:, i].sum() for i in index], dtype=float)
        else:
            if kind == 0: # Normal
                return self._data[index]
            elif kind == 1: # Chemical group
                phase, index = index
                if phase == slice(None):
                    values = self._data[phase, index].sum(1)
                else:
                    values = self._data[phase, index].sum()
            elif kind == 2: # Nested chemical group
                data = self._data
                isa = isinstance
                phase, index = index
                if phase == slice(None):
                    values = np.zeros([len(self.phases), len(index)])
                    for d, s in enumerate(index):
                        if isa(s, list):
                            values[:, d] = data[phase, s].sum(1)
                        else:
                            values[:, d] = data[phase, s]
                else:
                    values = np.zeros(len(index))
                    for d, s in enumerate(index):
                        if isa(s, list):
                            values[d] = data[phase, s].sum()
                        else:
                            values[d] = data[phase, s]
        return values
    def __setitem__(self, key, data):
        """Set data by [phase, IDs]; group values are split by composition."""
        index, kind, sum_across_phases = self._get_index_data(key)
        if sum_across_phases:
            raise IndexError("multiple phases present; must include phase key "
                             "to set chemical data")
        if kind == 0:
            self._data[index] = data
        elif kind == 1: # Chemical group
            phase, index = index
            _, key = key
            composition = self.group_compositions[key]
            self._data[phase, index] = data * composition
        elif kind == 2: # Nested chemical group
            phase, index = index
            # Drop the phase so key[n] is the n-th ID/group name; previously
            # key[n] indexed the raw (phase, IDs) tuple.
            _, key = key
            local_data = self._data
            group_compositions = self.group_compositions
            isa = isinstance
            for n in range(len(index)):
                i = index[n]
                local_data[phase, i] = data[n] * group_compositions[key[n]] if isa(i, list) else data[n]
        else:
            raise IndexError('unknown error')
    def _get_index_data(self, key):
        """Return the cached (index, kind, sum_across_phases) for a key."""
        cache = self._index_cache
        try:
            index_data = cache[key]
        except KeyError:
            try:
                # A plain chemical key means "sum this chemical across phases".
                index, kind = self._chemicals._get_index_and_kind(key)
            except UndefinedChemical as error:
                index, kind = self._get_index_and_kind(key, error)
                sum_across_phases = False
            else:
                sum_across_phases = True
            cache[key] = index_data = (index, kind, sum_across_phases)
            utils.trim_cache(cache)
        except TypeError:
            raise TypeError("only strings, tuples, and ellipsis are valid index keys")
        return index_data
    def _get_index_and_kind(self, phase_IDs, undefined_chemical_error):
        """Resolve a [phase, IDs] key into an (index, kind) pair."""
        isa = isinstance
        if isa(phase_IDs, str):
            if len(phase_IDs) == 1:
                index = self.get_phase_index(phase_IDs)
                kind = 0
            else:
                raise undefined_chemical_error
        elif phase_IDs is ...:
            index = slice(None)
            kind = 0
        else:
            phase = phase_IDs[0]
            if isa(phase, str):
                if len(phase) == 1:
                    phase_index = self.get_phase_index(phase)
                else:
                    raise undefined_chemical_error
            elif phase is ...:
                phase_index = slice(None)
            else:
                raise_material_indexer_index_error()
            try:
                phase, IDs = phase_IDs
            except (TypeError, ValueError):
                # Key is not a (phase, IDs) pair.
                raise_material_indexer_index_error()
            chemical_index, kind = self._chemicals._get_index_and_kind(IDs)
            index = (phase_index, chemical_index)
        return index, kind
    def __iter__(self):
        """Iterate over phase-data pairs."""
        return zip(self._phases, self._data)
    def iter_composition(self):
        """Iterate over phase-composition pairs."""
        array = self._data
        total = array.sum() or 1.
        return zip(self._phases, array/total)
    def __format__(self, tabs="1"):
        IDs = self._chemicals.IDs
        phase_data = []
        for phase, data in self:
            ID_data = utils.repr_couples(", ", IDs, data)
            if ID_data:
                phase_data.append(f"{phase}=[{ID_data}]")
        tabs = int(tabs) if tabs else 1
        if tabs:
            tab = tabs*4*" "
            dlim = ",\n" + tab
        else:
            tab = "" # Guard: `tab` is also used below when tabs == 0.
            dlim = ", "
        phase_data = dlim.join(phase_data)
        if self.data.sum(1).all():
            phases = ""
            if phase_data:
                phase_data = "\n" + tab + phase_data
        else:
            phases = f'phases={self._phases}'
            if phase_data:
                phase_data = dlim + phase_data
        return f"{type(self).__name__}({phases}{phase_data})"
    def __repr__(self):
        return self.__format__("1")
    def _info(self, N):
        """Return string with all specifications."""
        from thermosteam import Stream
        N_max = N or Stream.display_units.N
        IDs = self.chemicals.IDs
        index, = np.where(self.data.sum(0) != 0)
        len_ = len(index)
        if len_ == 0:
            return f"{type(self).__name__}: (empty)"
        elif self.units:
            basic_info = f"{type(self).__name__} ({self.units}):\n"
        else:
            basic_info = f"{type(self).__name__}:\n"
        all_IDs = tuple([IDs[i] for i in index])
        # Length of chemical column
        all_lengths = [len(i) for i in IDs]
        maxlen = max(all_lengths + [8])
        # Set up chemical data for all phases
        phases_data_info = ''
        for phase in self._phases:
            phase_data = self[phase, all_IDs]
            IDs, data = nonzeros(all_IDs, phase_data)
            if not IDs: continue
            # Get basic structure for phase data
            beginning = f' ({phase}) '
            new_line = '\n' + len(beginning) * ' '
            # Set chemical data
            data_info = ''
            N_IDs = len(data)
            too_many_chemicals = N_IDs > N_max
            N = N_max if too_many_chemicals else N_IDs
            lengths = [len(i) for i in IDs]
            for i in range(N):
                spaces = ' ' * (maxlen - lengths[i])
                if i: data_info += new_line
                data_info += f'{IDs[i]} ' + spaces + f' {data[i]:.3g}'
            # Append the truncation marker to the text; appending to `data`
            # (a float array) would raise a TypeError.
            if too_many_chemicals: data_info += new_line + '...'
            # Put it together
            phases_data_info += beginning + data_info + '\n'
        return basic_info + phases_data_info.rstrip('\n')
    _ipython_display_ = show = ChemicalIndexer.show
def _replace_indexer_doc(Indexer, Parent):
    """Copy Parent's docstring (up to its Notes section) onto Indexer,
    substituting the class name."""
    parent_doc = Parent.__doc__
    trimmed = parent_doc[:parent_doc.index("Notes")]
    Indexer.__doc__ = trimmed.replace(Parent.__name__, Indexer.__name__)
def _new_Indexer(name, units, f_group_composition):
    """Create and wire together paired Chemical/Material indexer subclasses
    that carry units of measure and a group-composition handler."""
    namespace = {'group_compositions': f_group_composition}
    chemical_cls = type('Chemical' + name + 'Indexer', (ChemicalIndexer,), namespace)
    material_cls = type(name + 'Indexer', (MaterialIndexer,), namespace)
    chemical_cls.__slots__ = ()
    material_cls.__slots__ = ()
    # Both subclasses share one units-of-measure object.
    uom = AbsoluteUnitsOfMeasure(units)
    chemical_cls.units = uom
    material_cls.units = uom
    material_cls._ChemicalIndexer = chemical_cls
    chemical_cls._MaterialIndexer = material_cls
    _replace_indexer_doc(chemical_cls, ChemicalIndexer)
    _replace_indexer_doc(material_cls, MaterialIndexer)
    return chemical_cls, material_cls
# Inverse reference so single-phase indexers can expand to multi-phase.
ChemicalIndexer._MaterialIndexer = MaterialIndexer
@property
def group_wt_compositions(self):
    """Weight compositions of chemical groups, from the Chemicals object."""
    return self._chemicals._group_wt_compositions
@property
def group_mol_compositions(self):
    """Molar compositions of chemical groups, from the Chemicals object."""
    return self._chemicals._group_mol_compositions
@property
def group_vol_composition(self):
    """Unsupported: chemical groups cannot be set by volumetric flow."""
    raise AttributeError('cannot set groups by volumetric flow')
# Concrete indexer subclasses with units of measure and group handlers.
ChemicalMolarFlowIndexer, MolarFlowIndexer = _new_Indexer('MolarFlow', 'kmol/hr', group_mol_compositions)
ChemicalMassFlowIndexer, MassFlowIndexer = _new_Indexer('MassFlow', 'kg/hr', group_wt_compositions)
ChemicalVolumetricFlowIndexer, VolumetricFlowIndexer = _new_Indexer('VolumetricFlow', 'm^3/hr', group_vol_composition)
# %% Mass flow properties
@PropertyFactory(slots=('name', 'mol', 'index', 'MW'),
                 units='kg/hr')
def MassFlowProperty(self):
    """Mass flow (kg/hr)."""
    return self.mol[self.index] * self.MW
@MassFlowProperty.setter
def MassFlowProperty(self, value):
    # Setting mass updates the underlying molar data in place.
    self.mol[self.index] = value/self.MW
def by_mass(self):
    """Return a ChemicalMassFlowIndexer that references this object's molar data."""
    try:
        mass = self._data_cache['mass']
    except KeyError:
        # Not cached yet: build a lazy property array that converts
        # mol -> mass on access.
        chemicals = self.chemicals
        mol = self.data
        mass = np.zeros_like(mol, dtype=object)
        for i, chem in enumerate(chemicals):
            mass[i] = MassFlowProperty(chem.ID, mol, i, chem.MW)
        self._data_cache['mass'] = mass = ChemicalMassFlowIndexer.from_data(
            property_array(mass),
            self._phase, chemicals,
            False)
    return mass
ChemicalMolarFlowIndexer.by_mass = by_mass
def by_mass(self):
    """Return a MassFlowIndexer that references this object's molar data."""
    try:
        mass = self._data_cache['mass']
    except KeyError:
        # Not cached yet: build a lazy property array that converts
        # mol -> mass on access, one property per (phase, chemical).
        phases = self.phases
        chemicals = self.chemicals
        mol = self.data
        mass = np.zeros_like(mol, dtype=object)
        for i, phase in enumerate(phases):
            for j, chem in enumerate(chemicals):
                index = (i, j)
                mass[index] = MassFlowProperty(chem.ID, mol, index, chem.MW)
        self._data_cache['mass'] = mass = MassFlowIndexer.from_data(
            property_array(mass),
            phases, chemicals,
            False)
    return mass
MolarFlowIndexer.by_mass = by_mass; del by_mass
# %% Volumetric flow properties
@PropertyFactory(slots=('name', 'mol', 'index', 'V',
                        'TP', 'phase', 'phase_container'),
                 units='m^3/hr')
def VolumetricFlowProperty(self):
    """Volumetric flow (m^3/hr)."""
    f_mol = self.mol[self.index]
    # Phase may be fixed (self.phase) or tracked by a phase container.
    phase = self.phase or self.phase_container.phase
    V = getattr(self.V, phase) if isinstance(self.V, PhaseHandle) else self.V
    # NOTE(review): the 1000. factor presumably converts the molar-volume
    # units to m^3/hr — confirm against the V model's units.
    return 1000. * f_mol * V(*self.TP) if f_mol else 0.
@VolumetricFlowProperty.setter
def VolumetricFlowProperty(self, value):
    if value:
        phase = self.phase or self.phase_container.phase
        V = getattr(self.V, phase) if isinstance(self.V, PhaseHandle) else self.V
        self.mol[self.index] = value / V(*self.TP) / 1000.
    else:
        self.mol[self.index] = 0.
def by_volume(self, TP):
    """Return a ChemicalVolumetricFlowIndexer that references this object's molar data.
    Parameters
    ----------
    TP : ThermalCondition
    """
    try:
        vol = self._data_cache['vol', TP]
    except KeyError:
        # Not cached for this thermal condition yet: build a lazy property
        # array that converts mol -> volume on access.
        chemicals = self.chemicals
        mol = self.data
        vol = np.zeros_like(mol, dtype=object)
        for i, chem in enumerate(chemicals):
            vol[i] = VolumetricFlowProperty(chem.ID, mol, i, chem.V,
                                            TP, None, self._phase)
        self._data_cache['vol', TP] = \
        vol = ChemicalVolumetricFlowIndexer.from_data(property_array(vol),
                                                      self._phase, chemicals,
                                                      False)
    return vol
ChemicalMolarFlowIndexer.by_volume = by_volume
def by_volume(self, TP):
    """Return a VolumetricFlowIndexer that references this object's molar data.
    Parameters
    ----------
    TP : ThermalCondition
    """
    try:
        vol = self._data_cache[TP]
    except KeyError:
        # Not cached for this thermal condition yet: build a lazy property
        # array that converts mol -> volume on access, per (phase, chemical).
        phases = self.phases
        chemicals = self.chemicals
        mol = self.data
        vol = np.zeros_like(mol, dtype=object)
        for i, phase in enumerate(phases):
            for j, chem in enumerate(chemicals):
                index = i, j
                phase_name = tmo.settings._phase_names[phase]
                vol[index] = VolumetricFlowProperty(f"{phase_name}{chem.ID}",
                                                    mol, index, chem.V, TP, phase)
        self._data_cache[TP] = \
        vol = VolumetricFlowIndexer.from_data(property_array(vol),
                                              phases, chemicals,
                                              False)
    return vol
MolarFlowIndexer.by_volume = by_volume; del by_volume
del PropertyFactory
import thermosteam as tmo
from .units_of_measure import AbsoluteUnitsOfMeasure
from . import utils
from .exceptions import UndefinedChemical
from .base import PhaseHandle
from ._phase import Phase, LockedPhase, NoPhase, PhaseIndexer, phase_tuple, check_phase
from free_properties import PropertyFactory, property_array
from collections.abc import Iterable
import numpy as np
# Public API of this module.
__all__ = (
    'SplitIndexer',
    'ChemicalIndexer',
    'MaterialIndexer',
    'ChemicalMolarFlowIndexer',
    'MolarFlowIndexer',
    'ChemicalMassFlowIndexer',
    'MassFlowIndexer',
    'ChemicalVolumetricFlowIndexer',
    'VolumetricFlowIndexer',
    'MassFlowProperty',
    'VolumetricFlowProperty'
)
# %% Utilities
# Shortcut to allocate instances without calling __init__.
_new = object.__new__
def raise_material_indexer_index_error():
    """Raise a descriptive IndexError for an invalid [phase, IDs] key."""
    # Fixed typo in the user-facing message ("ellipisis" -> "ellipsis").
    raise IndexError("index by [phase, IDs] where phase is a "
                     "(str, ellipsis, or missing), and IDs is a "
                     "(str, tuple(str), ellipsis, or missing)")
def find_main_phase(indexers, default):
    """Return the phase shared by all `indexers`, or `default` when they
    disagree or the first indexer has no `phase` attribute (multi-phase)."""
    main_indexer, *indexers = indexers
    try:
        phase = main_indexer.phase
        for i in indexers:
            if phase != i.phase: return default
    except AttributeError:
        # An indexer without a `phase` attribute (e.g. multi-phase);
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        return default
    return phase
def nonzeros(IDs, data):
    """Return the IDs and values of all nonzero entries in `data`."""
    mask = data != 0
    nz, = mask.nonzero()
    return [IDs[k] for k in nz], data[nz]
# %% Abstract indexer
class Indexer:
    """Abstract class for fast indexing."""
    __slots__ = ('_data',)
    # Subclasses with units of measure override this class attribute.
    units = None

    def empty(self):
        """Zero all data in place."""
        self._data[:] = 0

    def isempty(self):
        """Return whether every entry is zero."""
        return (self._data == 0.).all()

    def copy(self):
        """Return a copy with its own data array."""
        clone = self._copy_without_data()
        clone._data = self._data.copy()
        return clone
    __copy__ = copy

    def get_conversion_factor(self, units):
        """Return the factor converting stored units to `units`."""
        if not self.units:
            raise TypeError(f"{type(self).__name__} object is unitless; "
                            f"cannot get conversion factor for {units}")
        return self.units.conversion_factor(units)

    def get_data(self, units, *index):
        """Return (optionally indexed) data converted to `units`."""
        factor = self.get_conversion_factor(units)
        n_keys = len(index)
        if n_keys == 0:
            return factor * self._data
        if n_keys == 1:
            return factor * self[index[0]]
        return factor * self[index]

    def set_data(self, data, units, *index):
        """Set (optionally indexed) data given in `units`."""
        scaled_data = np.asarray(data, dtype=float) / self.get_conversion_factor(units)
        n_keys = len(index)
        if n_keys == 0:
            self._data[:] = scaled_data
        elif n_keys == 1:
            self[index[0]] = scaled_data
        else:
            self[index] = scaled_data

    @property
    def data(self):
        """The underlying data array."""
        return self._data
# %% Phase data
@utils.chemicals_user
class SplitIndexer(Indexer):
    """
    Create a SplitIndexer that can index a 1d-array given
    chemical IDs.
    Parameters
    ----------
    chemicals : Chemicals
        Required to define the chemicals that are present.
    **ID_data : float
        ID-value pairs
    """
    __slots__ = ('_chemicals',)
    def __new__(cls, chemicals=None, **ID_data):
        # Build a blank (all-zero) array, then fill in the given splits.
        self = cls.blank(chemicals)
        if ID_data:
            IDs = tuple(ID_data)
            values = list(ID_data.values())
            self[IDs] = values
        return self
    def __reduce__(self):
        # Pickle support; skip data checks on reconstruction.
        return self.from_data, (self._data, self._chemicals, False)
    def reset_chemicals(self, chemicals, container=None):
        """Reset chemicals, remapping split values by CAS number, and
        return the old data array."""
        old_data = self._data
        if container is None:
            self._data = data = np.zeros(chemicals.size, float)
        else:
            self._data = data = container
            data[:] = 0.
        for CAS, split in zip(self._chemicals.CASs, old_data):
            if CAS in chemicals: data[chemicals.index(CAS)] = split
        self._load_chemicals(chemicals)
        return old_data
    @classmethod
    def blank(cls, chemicals=None):
        """Create a SplitIndexer with all splits set to zero."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._data = np.zeros(self._chemicals.size, float)
        return self
    @classmethod
    def from_data(cls, data, chemicals=None, check_data=True):
        """Create a SplitIndexer that wraps the given 1d-array of splits."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        if check_data:
            assert data.ndim == 1, 'data must be a 1d numpy array'
            assert data.size == self._chemicals.size, ('size of data must be equal to '
                                                       'size of chemicals')
            assert (data <= 1.).all(), 'data must be less or equal to one'
        self._data = data
        return self
    def __getitem__(self, key):
        """Index split values by chemical ID(s) or group(s)."""
        chemicals = self._chemicals
        index, kind = chemicals._get_index_and_kind(key)
        if kind == 0 or kind == 1:
            return self._data[index]
        elif kind == 2:
            # Nested chemical groups: gather each sub-index separately.
            data = self._data
            return np.array([data[i] for i in index], dtype=object)
        else:
            raise IndexError('unknown error')
    def __setitem__(self, key, data):
        """Set split values by chemical ID(s) or group(s)."""
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0 or kind == 1:
            self._data[index] = data
        elif kind == 2:
            local_data = self._data
            isa = isinstance
            if isa(data, Iterable):
                # One value per (possibly grouped) index.
                for i, x in zip(index, data): local_data[i] = x
            else:
                # Broadcast a scalar to all indices.
                for i in index: local_data[i] = data
        else:
            raise IndexError('unknown error')
    def __format__(self, tabs=""):
        # `tabs` controls the indentation depth of the representation.
        if not tabs: tabs = 1
        tabs = int(tabs)
        tab = tabs*4*" "
        if tab:
            dlim = ",\n" + tab
        else:
            dlim = ", "
        ID_data = utils.repr_IDs_data(self._chemicals.IDs, self._data, dlim, start='')
        return f"{type(self).__name__}({ID_data})"
    def __repr__(self):
        return self.__format__()
    def _info(self, N):
        """Return string with all specifications."""
        IDs = self.chemicals.IDs
        data = self.data
        IDs, data = nonzeros(IDs, data)
        N_IDs = len(IDs)
        if N_IDs == 0:
            return f"{type(self).__name__}: (all zeros)"
        else:
            basic_info = f"{type(self).__name__}:\n "
            new_line = '\n '
            data_info = ''
            lengths = [len(i) for i in IDs]
            maxlen = max(lengths) + 1
            N_max = N or tmo.Stream.display_units.N
            too_many_chemicals = N_IDs > N_max
            N = N_max if too_many_chemicals else N_IDs
            for i in range(N):
                spaces = ' ' * (maxlen - lengths[i])
                if i != 0:
                    data_info += new_line
                data_info += IDs[i] + spaces + f' {data[i]:.3g}'
            if too_many_chemicals:
                data_info += new_line + '...'
            return (basic_info
                    + data_info)
    def show(self, N=None):
        """Print all specifications.
        Parameters
        ----------
        N: int, optional
            Number of compounds to display.
        """
        print(self._info(N))
    _ipython_display_ = show
@utils.chemicals_user
class ChemicalIndexer(Indexer):
    """
    Create a ChemicalIndexer that can index a single-phase, 1d-array given
    chemical IDs.
    Parameters
    ----------
    phase : [str or PhaseContainer] {'s', 'l', 'g', 'S', 'L', 'G'}
        Phase of data.
    units : str
        Units of measure of input data.
    chemicals : Chemicals
        Required to define the chemicals that are present.
    **ID_data : float
        ID-value pairs
    Notes
    -----
    A ChemicalIndexer does not have any units defined. To use units of
    measure, use the `ChemicalMolarIndexer`, `ChemicalMassIndexer`, or
    `ChemicalVolumetricIndexer`.
    """
    __slots__ = ('_chemicals', '_phase', '_data_cache')
    def __new__(cls, phase=NoPhase, units=None, chemicals=None, **ID_data):
        # Build a blank array, fill in the ID-value pairs, then convert
        # the data from the given units of measure (if any).
        self = cls.blank(phase, chemicals)
        if ID_data:
            IDs = tuple(ID_data)
            values = list(ID_data.values())
            self[IDs] = values
            if units: self.set_data(self._data, units)
        return self
    def reset_chemicals(self, chemicals, container=None):
        """Reset chemicals, remapping values by CAS number, and return the
        old (data, cache) pair."""
        old_data = self._data
        old_container = (old_data, self._data_cache)
        if container is None:
            self._data = data = np.zeros(chemicals.size, float)
            self._data_cache = {}
        else:
            data, self._data_cache = container
            self._data = data
            data[:] = 0.
        for CAS, value in zip(self._chemicals.CASs, old_data):
            if value: data[chemicals.index(CAS)] = value
        self._load_chemicals(chemicals)
        return old_container
    def __reduce__(self):
        # Pickle support; skip data checks on reconstruction.
        return self.from_data, (self._data, self._phase, self._chemicals, False)
    def __getitem__(self, key):
        """Index data by chemical ID(s) or group(s); groups are summed."""
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0:
            return self._data[index]
        elif kind == 1:
            return self._data[index].sum()
        elif kind == 2:
            # Nested chemical groups: sum list sub-indices, take scalars as-is.
            arr = np.zeros(len(index))
            data = self._data
            isa = isinstance
            for d, s in enumerate(index):
                if isa(s, list):
                    arr[d] = data[s].sum()
                else:
                    arr[d] = data[s]
            return arr
        else:
            raise IndexError('unknown index error')
    def __setitem__(self, key, data):
        """Set data by chemical ID(s); group values are split by composition."""
        index, kind = self._chemicals._get_index_and_kind(key)
        if kind == 0:
            self._data[index] = data
        elif kind == 1:
            composition = self.group_compositions[key]
            self._data[index] = data * composition
        elif kind == 2:
            local_data = self._data
            isa = isinstance
            group_compositions = self.group_compositions
            for n in range(len(index)):
                i = index[n]
                local_data[i] = data[n] * group_compositions[key[n]] if isa(i, list) else data[n]
        else:
            raise IndexError('unknown error')
    def sum_across_phases(self):
        # Single phase: the data itself is already the per-chemical total.
        return self._data
    @property
    def get_index(self):
        """Callable mapping chemical IDs to array indices."""
        return self._chemicals.get_index
    def mix_from(self, others):
        """Set data to the sum of all `others`, mapping chemicals as needed."""
        self.phase = find_main_phase(others, self.phase)
        chemicals = self._chemicals
        data = self._data
        # Copy own data first since `data` is zeroed before accumulation.
        chemicals_data = [(i._chemicals, i._data.copy() if i is self else i.sum_across_phases())
                          for i in others]
        data[:] = 0.
        for ichemicals, idata in chemicals_data:
            if chemicals is ichemicals:
                data[:] += idata
            else:
                # Map values through CAS numbers when chemicals differ.
                other_index, = np.where(idata)
                CASs = ichemicals.CASs
                self_index = chemicals.indices([CASs[i] for i in other_index])
                data[self_index] += idata[other_index]
    def separate_out(self, other):
        """Subtract another indexer's data (summed across phases) in place."""
        if self._chemicals is other._chemicals:
            self._data[:] -= other.sum_across_phases()
        else:
            # NOTE(review): assumes `other` holds 1d data when chemicals
            # differ — confirm callers never pass a multi-phase indexer here.
            idata = other._data
            other_index, = np.where(idata)
            IDs = other._chemicals.IDs
            self_index = self._chemicals.indices([IDs[i] for i in other_index])
            self._data[self_index] -= idata[other_index]
    def to_material_indexer(self, phases):
        """Return a multi-phase MaterialIndexer holding this data in its phase."""
        material_array = self._MaterialIndexer.blank(phases, self._chemicals)
        material_array[self.phase] = self._data
        return material_array
    def copy_like(self, other):
        """Copy data and phase from another single-phase indexer in place."""
        if self is other: return
        if self.chemicals is other.chemicals:
            self._data[:] = other._data
        else:
            self.empty()
            other_index, = np.where(other._data)
            CASs = other.chemicals.CASs
            self_index = self.chemicals.indices([CASs[i] for i in other_index])
            self._data[self_index] = other._data[other_index]
        self.phase = other.phase
    def _copy_without_data(self):
        # Shallow copy sharing chemicals but not data; the phase object is
        # copied since it is mutable.
        new = _new(self.__class__)
        new._chemicals = self._chemicals
        new._phase = self._phase.copy()
        new._data_cache = {}
        return new
    @classmethod
    def blank(cls, phase, chemicals=None):
        """Create a ChemicalIndexer with all data set to zero."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._data = np.zeros(self._chemicals.size, float)
        self._phase = Phase.convert(phase)
        self._data_cache = {}
        return self
    @classmethod
    def from_data(cls, data, phase=NoPhase, chemicals=None, check_data=True):
        """Create a ChemicalIndexer that wraps the given 1d-array."""
        self = _new(cls)
        self._load_chemicals(chemicals)
        self._phase = Phase.convert(phase)
        if check_data:
            assert data.ndim == 1, 'material data must be a 1d numpy array'
            assert data.size == self._chemicals.size, ('size of material data must be equal to '
                                                       'size of chemicals')
        self._data = data
        self._data_cache = {}
        return self
    @property
    def phase(self):
        """str Phase of the data."""
        return self._phase._phase
    @phase.setter
    def phase(self, phase):
        self._phase.phase = phase
    def get_phase_and_composition(self):
        """Return phase and composition."""
        data = self._data
        return self.phase, data / data.sum()
    def __format__(self, tabs=""):
        # `tabs` controls the indentation depth of the representation.
        if not tabs: tabs = 1
        tabs = int(tabs)
        tab = tabs*4*" "
        phase = f"phase={repr(self.phase)}"
        if tab:
            dlim = ",\n" + tab
            phase = '\n' + tab + phase
        else:
            dlim = ", "
        ID_data = utils.repr_IDs_data(self._chemicals.IDs, self._data, dlim)
        return f"{type(self).__name__}({phase}{ID_data})"
    def __repr__(self):
        return self.__format__()
    def _info(self, N):
        """Return string with all specifications."""
        IDs = self.chemicals.IDs
        data = self.data
        IDs, data = nonzeros(IDs, data)
        N_IDs = len(IDs)
        if N_IDs == 0:
            return f"{type(self).__name__}: (empty)"
        elif self.units:
            basic_info = f"{type(self).__name__} ({self.units}):\n"
        else:
            basic_info = f"{type(self).__name__}:\n"
        beginning = f' ({self.phase}) ' if self.phase else " "
        new_line = '\n' + len(beginning) * ' '
        data_info = ''
        lengths = [len(i) for i in IDs]
        maxlen = max(lengths) + 1
        N_max = N or tmo.Stream.display_units.N
        too_many_chemicals = N_IDs > N_max
        N = N_max if too_many_chemicals else N_IDs
        for i in range(N):
            spaces = ' ' * (maxlen - lengths[i])
            if i != 0:
                data_info += new_line
            data_info += IDs[i] + spaces + f' {data[i]:.3g}'
        if too_many_chemicals:
            data_info += new_line + '...'
        return (basic_info
                + beginning
                + data_info)
    _ipython_display_ = show = SplitIndexer.show
@utils.chemicals_user
class MaterialIndexer(Indexer):
"""
Create a MaterialIndexer that can index a multi-phase, 2d-array given
the phase and chemical IDs.
Parameters
----------
phases : tuple['s', 'l', 'g', 'S', 'L', 'G']
Phases of data rows.
units : str
Units of measure of input data.
chemicals : Chemicals
Required to define the chemicals that are present.
**phase_data : tuple[str, float]
phase-(ID, value) pairs
Notes
-----
A MaterialIndexer does not have any units defined. To use units of measure, use the
`MolarIndexer`, `MassIndexer`, or `VolumetricIndexer`.
"""
__slots__ = ('_chemicals', '_phases', '_phase_indexer',
'_index_cache', '_data_cache')
_index_caches = {}
_ChemicalIndexer = ChemicalIndexer
def __new__(cls, phases=None, units=None, chemicals=None, **phase_data):
self = cls.blank(phases or phase_data, chemicals)
if phase_data:
for phase, ID_data in phase_data.items():
IDs, data = zip(*ID_data)
self[phase, IDs] = data
if units: self.set_data(data, units)
return self
def reset_chemicals(self, chemicals, container=None):
old_data = self._data
old_data_cache = self._data_cache
shape = N_phases, N_chemicals = (len(self._phases), chemicals.size)
if container is None:
self._data = data = np.zeros(shape, float)
self._data_cache = {}
else:
data, cache = container
data[:] = 0.
old_chemicals = self._chemicals
old_index = range(old_chemicals.size)
CASs = old_chemicals.CASs
for i in range(N_phases):
for j in old_index:
value = old_data[i, j]
if value: data[i, chemicals.index(CASs[j])] = value
self._load_chemicals(chemicals)
self._set_cache()
return (old_data, old_data_cache)
    def __reduce__(self):
        # Pickle support; skip data checks on reconstruction.
        return self.from_data, (self._data, self._phases, self._chemicals, False)
def phases_are_empty(self, phases):
get_phase_index = self.get_phase_index
data = self._data
for phase in set(self._phases).intersection(phases):
if data[get_phase_index(phase)].any(): return False
return True
    def sum_across_phases(self):
        """Return a 1d-array of chemical totals summed over all phases."""
        return self._data.sum(0)
    def copy_like(self, other):
        """Copy data from another chemical or material indexer in place."""
        if self is other: return
        if isinstance(other, ChemicalIndexer):
            self.empty()
            other_data = other._data
            phase_index = self.get_phase_index(other.phase)
            if self.chemicals is other.chemicals:
                self._data[phase_index, :] = other_data
            else:
                # Map values through chemical IDs when chemicals differ.
                other_index, = np.where(other_data)
                IDs = other.chemicals.IDs
                self_index = self.chemicals.indices([IDs[i] for i in other_index])
                self._data[phase_index, self_index] += other._data[other_index]
        else:
            if self.chemicals is other.chemicals:
                self._data[:] = other._data
            else:
                self.empty()
                other_data = other._data
                other_index, = np.where(other_data.any(0))
                IDs = other.chemicals.IDs
                self_index = self.chemicals.indices([IDs[i] for i in other_index])
                self._data[:, self_index] = other_data[:, other_index]
    def mix_from(self, others):
        """Set data to the sum of all `others`, mapping phases and
        chemicals as needed."""
        isa = isinstance
        data = self._data
        get_phase_index = self.get_phase_index
        chemicals = self._chemicals
        phases = self._phases
        # Copy own data first since `data` is zeroed before accumulation.
        indexer_data = [(i, i._data.copy() if i is self else i._data) for i in others]
        data[:] = 0.
        for i, idata in indexer_data:
            if isa(i, MaterialIndexer):
                if phases == i.phases:
                    if chemicals is i.chemicals:
                        data[:] += idata
                    else:
                        idata = i._data
                        other_index, = np.where(idata.any(0))
                        IDs = i.chemicals.IDs
                        self_index = chemicals.indices([IDs[i] for i in other_index])
                        data[:, self_index] += idata[:, other_index]
                else:
                    # Phases differ: accumulate row by row.
                    if chemicals is i.chemicals:
                        for phase, idata in zip(i.phases, idata):
                            if not idata.any(): continue
                            data[get_phase_index(phase), :] += idata
                    else:
                        for phase, idata in zip(i.phases, idata):
                            if not idata.any(): continue
                            other_index, = np.where(idata)
                            IDs = i.chemicals.IDs
                            self_index = chemicals.indices([IDs[i] for i in other_index])
                            data[get_phase_index(phase), self_index] += idata[other_index]
            elif isa(i, ChemicalIndexer):
                if chemicals is i.chemicals:
                    data[get_phase_index(i.phase), :] += idata
                else:
                    other_index, = np.where(idata != 0.)
                    IDs = i.chemicals.IDs
                    self_index = chemicals.indices([IDs[i] for i in other_index])
                    data[get_phase_index(i.phase), self_index] += idata[other_index]
            else:
                raise ValueError("can only mix from chemical or material indexers")
    def separate_out(self, other):
        """Subtract another indexer's data from this one in place."""
        isa = isinstance
        data = self._data
        get_phase_index = self.get_phase_index
        chemicals = self._chemicals
        phases = self._phases
        idata = other._data
        if isa(other, MaterialIndexer):
            if phases == other.phases:
                if chemicals is other.chemicals:
                    data[:] -= idata
                else:
                    # Map values through chemical IDs when chemicals differ.
                    idata = other._data
                    other_index, = np.where(idata.any(0))
                    IDs = other.chemicals.IDs
                    self_index = chemicals.indices([IDs[i] for i in other_index])
                    data[:, self_index] -= idata[:, other_index]
            else:
                # Phases differ: subtract row by row.
                if chemicals is other.chemicals:
                    for phase, idata in zip(other.phases, idata):
                        if not idata.any(): continue
                        data[get_phase_index(phase), :] -= idata
                else:
                    for phase, idata in zip(other.phases, idata):
                        if not idata.any(): continue
                        other_index, = np.where(idata)
                        IDs = other.chemicals.IDs
                        self_index = chemicals.indices([IDs[i] for i in other_index])
                        data[get_phase_index(phase), self_index] -= idata[other_index]
        elif isa(other, ChemicalIndexer):
            if chemicals is other.chemicals:
                data[get_phase_index(other.phase), :] -= idata
            else:
                other_index, = np.where(idata != 0.)
                IDs = other.chemicals.IDs
                self_index = chemicals.indices([IDs[i] for i in other_index])
                data[get_phase_index(other.phase), self_index] -= idata[other_index]
        else:
            raise ValueError("can only separate out from chemical or material indexers")
def _set_phases(self, phases):
self._phases = phases = phase_tuple(phases)
self._phase_indexer = PhaseIndexer(phases)
def _set_cache(self):
caches = self._index_caches
key = self._phases, self._chemicals
try:
self._index_cache = caches[key]
except KeyError:
self._index_cache = caches[key] = {}
def _copy_without_data(self):
new = _new(self.__class__)
new._phases = self._phases
new._chemicals = self._chemicals
new._phase_indexer = self._phase_indexer
new._index_cache = self._index_cache
new._data_cache = {}
return new
@classmethod
def blank(cls, phases, chemicals=None):
self = _new(cls)
self._load_chemicals(chemicals)
self._set_phases(phases)
self._set_cache()
shape = (len(self._phases), self._chemicals.size)
self._data = np.zeros(shape, float)
self._data_cache = {}
return self
@classmethod
def from_data(cls, data, phases, chemicals=None, check_data=True):
self = _new(cls)
self._load_chemicals(chemicals)
self._set_phases(phases)
self._set_cache()
if check_data:
assert data.ndim == 2, ('material data must be an 2d numpy array')
M_phases = len(self._phases)
N_chemicals = self._chemicals.size
M, N = data.shape
assert M == M_phases, ('number of phases must be equal to '
'the number of material data rows')
assert N == N_chemicals, ('size of chemicals '
'must be equal to '
'number of material data columns')
self._data = data
self._data_cache = {}
return self
    @property
    def phases(self):
        """Phases of this indexer."""
        return self._phases
    @property
    def get_phase_index(self):
        """Callable mapping a phase to its row index in the data array."""
        return self._phase_indexer
    def to_chemical_indexer(self, phase=NoPhase):
        """Return a ChemicalIndexer holding this data summed across phases."""
        return self._ChemicalIndexer.from_data(self._data.sum(0), phase, self._chemicals, False)
def to_material_indexer(self, phases):
material_indexer = self.__class__.blank(phases, self._chemicals)
for phase, data in self:
if data.any(): material_indexer[phase] = data
return material_indexer
    def get_phase(self, phase):
        """Return a ChemicalIndexer over one phase row; the phase is locked."""
        return self._ChemicalIndexer.from_data(self._data[self.get_phase_index(phase)],
                                               LockedPhase(phase), self._chemicals, False)
    def __getitem__(self, key):
        """
        Retrieve data by phase and/or chemical key.

        The resolved `kind` selects the gathering strategy:
        0 = plain index, 1 = chemical group (summed), 2 = nested chemical
        groups (each group summed separately).  When the key carries no
        phase, values are summed across all phases.
        """
        index, kind, sum_across_phases = self._get_index_data(key)
        if sum_across_phases:
            if kind == 0: # Normal
                values = self._data[:, index].sum(0)
            elif kind == 1: # Chemical group
                values = self._data[:, index].sum()
            elif kind == 2: # Nested chemical group
                data = self._data
                values = np.array([data[:, i].sum() for i in index], dtype=float)
        else:
            if kind == 0: # Normal
                return self._data[index]
            elif kind == 1: # Chemical group
                phase, index = index
                if phase == slice(None):
                    # All phases requested: one group total per phase row.
                    values = self._data[phase, index].sum(1)
                else:
                    values = self._data[phase, index].sum()
            elif kind == 2: # Nested chemical group
                data = self._data
                isa = isinstance
                phase, index = index
                if phase == slice(None):
                    values = np.zeros([len(self.phases), len(index)])
                    for d, s in enumerate(index):
                        # A list entry denotes a chemical group: sum its members.
                        if isa(s, list):
                            values[:, d] = data[phase, s].sum(1)
                        else:
                            values[:, d] = data[phase, s]
                else:
                    values = np.zeros(len(index))
                    for d, s in enumerate(index):
                        if isa(s, list):
                            values[d] = data[phase, s].sum()
                        else:
                            values[d] = data[phase, s]
        return values
def __setitem__(self, key, data):
index, kind, sum_across_phases = self._get_index_data(key)
if sum_across_phases:
raise IndexError("multiple phases present; must include phase key "
"to set chemical data")
if kind == 0:
self._data[index] = data
elif kind == 1: # Chemical group
phase, index = index
_, key = key
composition = self.group_compositions[key]
self._data[phase, index] = data * composition
elif kind == 2: # Nested chemical group
phase, index = index
local_data = self._data
group_compositions = self.group_compositions
isa = isinstance
for n in range(len(index)):
i = index[n]
local_data[phase, i] = data[n] * group_compositions[key[n]] if isa(i, list) else data[n]
else:
raise IndexError('unknown error')
    def _get_index_data(self, key):
        """
        Resolve `key` into ``(index, kind, sum_across_phases)``, memoizing
        the result in the per-(phases, chemicals) index cache.
        """
        cache = self._index_cache
        try:
            index_data = cache[key]
        except KeyError:
            try:
                # First try the key as pure chemical IDs; if a chemical is
                # undefined, fall back to phase-aware parsing.
                index, kind = self._chemicals._get_index_and_kind(key)
            except UndefinedChemical as error:
                index, kind = self._get_index_and_kind(key, error)
                sum_across_phases = False
            else:
                sum_across_phases = True
            cache[key] = index_data = (index, kind, sum_across_phases)
            utils.trim_cache(cache)  # keep the memo bounded
        except TypeError:
            raise TypeError("only strings, tuples, and ellipsis are valid index keys")
        return index_data
    def _get_index_and_kind(self, phase_IDs, undefined_chemical_error):
        """
        Resolve a phase-qualified key into ``(index, kind)``.

        A lone 1-character string is a phase; ellipsis selects all phases;
        otherwise the key must be a (phase, IDs) pair.  Multi-character
        strings are assumed to be chemical IDs, so the original
        UndefinedChemical error is re-raised for them.
        """
        isa = isinstance
        if isa(phase_IDs, str):
            if len(phase_IDs) == 1:
                index = self.get_phase_index(phase_IDs)
                kind = 0
            else:
                raise undefined_chemical_error
        elif phase_IDs is ...:
            index = slice(None)
            kind = 0
        else:
            phase = phase_IDs[0]
            if isa(phase, str):
                if len(phase) == 1:
                    phase_index = self.get_phase_index(phase)
                else:
                    raise undefined_chemical_error
            elif phase is ...:
                phase_index = slice(None)
            else:
                raise_material_indexer_index_error()
            try:
                phase, IDs = phase_IDs
            except:  # NOTE(review): bare except — should be narrowed
                raise_material_indexer_index_error()
            chemical_index, kind = self._chemicals._get_index_and_kind(IDs)
            index = (phase_index, chemical_index)
        return index, kind
    def __iter__(self):
        """Iterate over (phase, data row) pairs."""
        return zip(self._phases, self._data)
def iter_composition(self):
"""Iterate over phase-composition pairs."""
array = self._data
total = array.sum() or 1.
return zip(self._phases, array/total)
def __format__(self, tabs="1"):
IDs = self._chemicals.IDs
phase_data = []
for phase, data in self:
ID_data = utils.repr_couples(", ", IDs, data)
if ID_data:
phase_data.append(f"{phase}=[{ID_data}]")
tabs = int(tabs) if tabs else 1
if tabs:
tab = tabs*4*" "
dlim = ",\n" + tab
else:
dlim = ", "
phase_data = dlim.join(phase_data)
if self.data.sum(1).all():
phases = ""
if phase_data:
phase_data = "\n" + tab + phase_data
else:
phases = f'phases={self._phases}'
if phase_data:
phase_data = dlim + phase_data
return f"{type(self).__name__}({phases}{phase_data})"
    def __repr__(self):
        """Return the formatted representation with one indentation level."""
        return self.__format__("1")
def _info(self, N):
"""Return string with all specifications."""
from thermosteam import Stream
N_max = N or Stream.display_units.N
IDs = self.chemicals.IDs
index, = np.where(self.data.sum(0) != 0)
len_ = len(index)
if len_ == 0:
return f"{type(self).__name__}: (empty)"
elif self.units:
basic_info = f"{type(self).__name__} ({self.units}):\n"
else:
basic_info = f"{type(self).__name__}:\n"
all_IDs = tuple([IDs[i] for i in index])
# Length of chemical column
all_lengths = [len(i) for i in IDs]
maxlen = max(all_lengths + [8])
# Set up chemical data for all phases
phases_data_info = ''
for phase in self._phases:
phase_data = self[phase, all_IDs]
IDs, data = nonzeros(all_IDs, phase_data)
if not IDs: continue
# Get basic structure for phase data
beginning = f' ({phase}) '
new_line = '\n' + len(beginning) * ' '
# Set chemical data
data_info = ''
N_IDs = len(data)
too_many_chemicals = N_IDs > N_max
N = N_max if too_many_chemicals else N_IDs
lengths = [len(i) for i in IDs]
for i in range(N):
spaces = ' ' * (maxlen - lengths[i])
if i: data_info += new_line
data_info += f'{IDs[i]} ' + spaces + f' {data[i]:.3g}'
if too_many_chemicals: data += new_line + '...'
# Put it together
phases_data_info += beginning + data_info + '\n'
return basic_info + phases_data_info.rstrip('\n')
_ipython_display_ = show = ChemicalIndexer.show
def _replace_indexer_doc(Indexer, Parent):
doc = Parent.__doc__
doc = doc[:doc.index("Notes")]
Indexer.__doc__ = doc.replace(Parent.__name__, Indexer.__name__)
def _new_Indexer(name, units, f_group_composition):
    """
    Create and return paired ``Chemical<name>Indexer`` and ``<name>Indexer``
    subclasses carrying the given units and group-composition property.
    """
    dct = {'group_compositions': f_group_composition}
    ChemicalIndexerSubclass = type('Chemical' + name + 'Indexer', (ChemicalIndexer,), dct)
    MaterialIndexerSubclass = type(name + 'Indexer', (MaterialIndexer,), dct)
    # Empty __slots__: no per-instance __dict__ beyond the parents' slots.
    ChemicalIndexerSubclass.__slots__ = \
        MaterialIndexerSubclass.__slots__ = ()
    ChemicalIndexerSubclass.units = \
        MaterialIndexerSubclass.units = AbsoluteUnitsOfMeasure(units)
    # Cross-link the two variants so each can convert to the other.
    MaterialIndexerSubclass._ChemicalIndexer = ChemicalIndexerSubclass
    ChemicalIndexerSubclass._MaterialIndexer = MaterialIndexerSubclass
    _replace_indexer_doc(ChemicalIndexerSubclass, ChemicalIndexer)
    _replace_indexer_doc(MaterialIndexerSubclass, MaterialIndexer)
    return ChemicalIndexerSubclass, MaterialIndexerSubclass
ChemicalIndexer._MaterialIndexer = MaterialIndexer

# Module-level property objects; each is passed to _new_Indexer below and
# becomes the `group_compositions` class attribute of an indexer subclass.
@property
def group_wt_compositions(self):
    """Mass-basis group compositions from the chemicals object."""
    return self._chemicals._group_wt_compositions

@property
def group_mol_compositions(self):
    """Molar-basis group compositions from the chemicals object."""
    return self._chemicals._group_mol_compositions

@property
def group_vol_composition(self):
    """Setting chemical groups by volumetric flow is not supported."""
    raise AttributeError('cannot set groups by volumetric flow')

ChemicalMolarFlowIndexer, MolarFlowIndexer = _new_Indexer('MolarFlow', 'kmol/hr', group_mol_compositions)
ChemicalMassFlowIndexer, MassFlowIndexer = _new_Indexer('MassFlow', 'kg/hr', group_wt_compositions)
ChemicalVolumetricFlowIndexer, VolumetricFlowIndexer = _new_Indexer('VolumetricFlow', 'm^3/hr', group_vol_composition)
# %% Mass flow properties

# Property object exposing mass flow as a view over molar data.
@PropertyFactory(slots=('name', 'mol', 'index', 'MW'),
                 units='kg/hr')
def MassFlowProperty(self):
    """Mass flow (kg/hr)."""
    return self.mol[self.index] * self.MW

@MassFlowProperty.setter
def MassFlowProperty(self, value):
    # Store on the molar basis: mass divided by molecular weight.
    self.mol[self.index] = value/self.MW
def by_mass(self):
    """Return a ChemicalMassFlowIndexer that references this object's molar data."""
    try:
        mass = self._data_cache['mass']
    # BUG FIX: narrowed from a bare `except:` — only a cache miss
    # (KeyError) should trigger the rebuild, not arbitrary errors.
    except KeyError:
        chemicals = self.chemicals
        mol = self.data
        # Build one MassFlowProperty view per chemical, then cache the
        # resulting indexer so subsequent calls reuse it.
        mass = np.zeros_like(mol, dtype=object)
        for i, chem in enumerate(chemicals):
            mass[i] = MassFlowProperty(chem.ID, mol, i, chem.MW)
        self._data_cache['mass'] = mass = ChemicalMassFlowIndexer.from_data(
            property_array(mass),
            self._phase, chemicals,
            False)
    return mass
ChemicalMolarFlowIndexer.by_mass = by_mass
def by_mass(self):
    """Return a MassFlowIndexer that references this object's molar data."""
    try:
        mass = self._data_cache['mass']
    # BUG FIX: narrowed from a bare `except:` — only a cache miss
    # (KeyError) should trigger the rebuild, not arbitrary errors.
    except KeyError:
        phases = self.phases
        chemicals = self.chemicals
        mol = self.data
        # One MassFlowProperty view per (phase, chemical) cell.
        mass = np.zeros_like(mol, dtype=object)
        for i, phase in enumerate(phases):
            for j, chem in enumerate(chemicals):
                index = (i, j)
                mass[index] = MassFlowProperty(chem.ID, mol, index, chem.MW)
        self._data_cache['mass'] = mass = MassFlowIndexer.from_data(
            property_array(mass),
            phases, chemicals,
            False)
    return mass
MolarFlowIndexer.by_mass = by_mass; del by_mass
# %% Volumetric flow properties

# Property object exposing volumetric flow as a view over molar data.
@PropertyFactory(slots=('name', 'mol', 'index', 'V',
                        'TP', 'phase', 'phase_container'),
                 units='m^3/hr')
def VolumetricFlowProperty(self):
    """Volumetric flow (m^3/hr)."""
    f_mol = self.mol[self.index]
    # The phase is either fixed (self.phase) or read from a shared container.
    phase = self.phase or self.phase_container.phase
    V = getattr(self.V, phase) if isinstance(self.V, PhaseHandle) else self.V
    # NOTE(review): the 1000 factor looks like a m^3 <-> L (or kmol) unit
    # conversion on the molar volume — confirm against the V model's units.
    return 1000. * f_mol * V(*self.TP) if f_mol else 0.
@VolumetricFlowProperty.setter
def VolumetricFlowProperty(self, value):
    if value:
        phase = self.phase or self.phase_container.phase
        V = getattr(self.V, phase) if isinstance(self.V, PhaseHandle) else self.V
        # Inverse of the getter's conversion.
        self.mol[self.index] = value / V(*self.TP) / 1000.
    else:
        self.mol[self.index] = 0.
def by_volume(self, TP):
"""Return a ChemicalVolumetricFlowIndexer that references this object's molar data.
Parameters
----------
TP : ThermalCondition
"""
try:
vol = self._data_cache['vol', TP]
except:
chemicals = self.chemicals
mol = self.data
vol = np.zeros_like(mol, dtype=object)
for i, chem in enumerate(chemicals):
vol[i] = VolumetricFlowProperty(chem.ID, mol, i, chem.V,
TP, None, self._phase)
self._data_cache['vol', TP] = \
vol = ChemicalVolumetricFlowIndexer.from_data(property_array(vol),
self._phase, chemicals,
False)
return vol
ChemicalMolarFlowIndexer.by_volume = by_volume
def by_volume(self, TP):
    """Return a VolumetricFlowIndexer that references this object's molar
    data.

    Parameters
    ----------
    TP : ThermalCondition
    """
    try:
        # NOTE(review): the cache key here is bare TP, while the chemical
        # variant uses ('vol', TP) — confirm whether the asymmetry matters.
        vol = self._data_cache[TP]
    # BUG FIX: narrowed from a bare `except:` — only a cache miss
    # (KeyError) should trigger the rebuild.
    except KeyError:
        phases = self.phases
        chemicals = self.chemicals
        mol = self.data
        vol = np.zeros_like(mol, dtype=object)
        for i, phase in enumerate(phases):
            for j, chem in enumerate(chemicals):
                index = i, j
                phase_name = tmo.settings._phase_names[phase]
                vol[index] = VolumetricFlowProperty(f"{phase_name}{chem.ID}",
                                                    mol, index, chem.V, TP, phase)
        self._data_cache[TP] = vol = VolumetricFlowIndexer.from_data(
            property_array(vol),
            phases, chemicals,
            False)
    return vol
MolarFlowIndexer.by_volume = by_volume; del by_volume

del PropertyFactory
from __future__ import absolute_import, print_function
import copy
from sage.structure.sage_object import SageObject
from sage.rings.all import ComplexField, Integer
from sage.misc.all import verbose, sage_eval
import sage.interfaces.gp
class Dokchitser(SageObject):
r"""
Dokchitser's `L`-functions Calculator
Create a Dokchitser `L`-series with
Dokchitser(conductor, gammaV, weight, eps, poles, residues, init,
prec)
where
- ``conductor`` - integer, the conductor
- ``gammaV`` - list of Gamma-factor parameters, e.g. [0] for
Riemann zeta, [0,1] for ell.curves, (see examples).
- ``weight`` - positive real number, usually an integer e.g. 1 for
Riemann zeta, 2 for `H^1` of curves/`\QQ`
- ``eps`` - complex number; sign in functional equation
- ``poles`` - (default: []) list of points where `L^*(s)` has
(simple) poles; only poles with `Re(s)>weight/2` should be
included
- ``residues`` - vector of residues of `L^*(s)` in those poles or
set residues='automatic' (default value)
- ``prec`` - integer (default: 53) number of *bits* of precision
RIEMANN ZETA FUNCTION:
We compute with the Riemann Zeta function.
::
sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
sage: L
Dokchitser L-series of conductor 1 and weight 1
sage: L(1)
Traceback (most recent call last):
...
ArithmeticError
sage: L(2)
1.64493406684823
sage: L(2, 1.1)
1.64493406684823
sage: L.derivative(2)
-0.937548254315844
sage: h = RR('0.0000000000001')
sage: (zeta(2+h) - zeta(2.))/h
-0.937028232783632
sage: L.taylor_series(2, k=5)
1.64493406684823 - 0.937548254315844*z + 0.994640117149451*z^2 - 1.00002430047384*z^3 + 1.00006193307...*z^4 + O(z^5)
RANK 1 ELLIPTIC CURVE:
We compute with the `L`-series of a rank `1`
curve.
::
sage: E = EllipticCurve('37a')
sage: L = E.lseries().dokchitser(); L
Dokchitser L-function associated to Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
sage: L(1)
0.000000000000000
sage: L.derivative(1)
0.305999773834052
sage: L.derivative(1,2)
0.373095594536324
sage: L.num_coeffs()
48
sage: L.taylor_series(1,4)
0.000000000000000 + 0.305999773834052*z + 0.186547797268162*z^2 - 0.136791463097188*z^3 + O(z^4)
sage: L.check_functional_equation()
6.11218974700000e-18 # 32-bit
6.04442711160669e-18 # 64-bit
RANK 2 ELLIPTIC CURVE:
We compute the leading coefficient and Taylor expansion of the
`L`-series of a rank `2` curve.
::
sage: E = EllipticCurve('389a')
sage: L = E.lseries().dokchitser()
sage: L.num_coeffs ()
156
sage: L.derivative(1,E.rank())
1.51863300057685
sage: L.taylor_series(1,4)
-1.27685190980159e-23 + (7.23588070754027e-24)*z + 0.759316500288427*z^2 - 0.430302337583362*z^3 + O(z^4) # 32-bit
-2.72911738151096e-23 + (1.54658247036311e-23)*z + 0.759316500288427*z^2 - 0.430302337583362*z^3 + O(z^4) # 64-bit
RAMANUJAN DELTA L-FUNCTION:
The coefficients are given by Ramanujan's tau function::
sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
We redefine the default bound on the coefficients: Deligne's
estimate on tau(n) is better than the default
coefgrow(n)=`(4n)^{11/2}` (by a factor 1024), so
re-defining coefgrow() improves efficiency (slightly faster).
::
sage: L.num_coeffs()
12
sage: L.set_coeff_growth('2*n^(11/2)')
sage: L.num_coeffs()
11
Now we're ready to evaluate, etc.
::
sage: L(1)
0.0374412812685155
sage: L(1, 1.1)
0.0374412812685155
sage: L.taylor_series(1,3)
0.0374412812685155 + 0.0709221123619322*z + 0.0380744761270520*z^2 + O(z^3)
"""
def __init__(self, conductor, gammaV, weight, eps, \
poles=[], residues='automatic', prec=53,
init=None):
"""
Initialization of Dokchitser calculator EXAMPLES::
sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
sage: L.num_coeffs()
4
"""
self.conductor = conductor
self.gammaV = gammaV
self.weight = weight
self.eps = eps
self.poles = poles
self.residues = residues
self.prec = prec
self.__CC = ComplexField(self.prec)
self.__RR = self.__CC._real_field()
if not init is None:
self.init_coeffs(init)
self.__init = init
else:
self.__init = False
def __reduce__(self):
D = copy.copy(self.__dict__)
if '_Dokchitser__gp' in D:
del D['_Dokchitser__gp']
return reduce_load_dokchitser, (D, )
def _repr_(self):
z = "Dokchitser L-series of conductor %s and weight %s"%(
self.conductor, self.weight)
return z
    def __del__(self):
        # Quit the gp subprocess on garbage collection.
        # NOTE(review): self.gp() launches an interpreter if none exists yet,
        # only to quit it immediately — confirm this is intended.
        self.gp().quit()
    def gp(self):
        """
        Return the gp interpreter that is used to implement this Dokchitser
        L-function.  The interpreter is created lazily on first access and
        cached on the instance; creation loads ``computel.gp`` and pushes
        the functional-equation parameters into gp.

        EXAMPLES::
            sage: E = EllipticCurve('11a')
            sage: L = E.lseries().dokchitser()
            sage: L(2)
            0.546048036215014
            sage: L.gp()
            PARI/GP interpreter
        """
        try:
            return self.__gp
        except AttributeError:
            logfile = None
            # For debugging
            # NOTE(review): the assignment below unconditionally overrides
            # logfile, so every session is logged to DOT_SAGE — confirm this
            # debugging aid is meant to stay enabled.
            import os
            from sage.env import DOT_SAGE
            logfile = os.path.join(DOT_SAGE, 'dokchitser.log')
            g = sage.interfaces.gp.Gp(script_subdirectory='dokchitser', logfile=logfile)
            g.read('computel.gp')
            self.__gp = g
            # self.prec is in bits; gp's realprecision is in decimal digits
            # (~3.32 bits each), hence the //3 with a small safety margin.
            self._gp_eval('default(realprecision, %s)'%(self.prec//3 + 2))
            self._gp_eval('conductor = %s'%self.conductor)
            self._gp_eval('gammaV = %s'%self.gammaV)
            self._gp_eval('weight = %s'%self.weight)
            self._gp_eval('sgn = %s'%self.eps)
            self._gp_eval('Lpoles = %s'%self.poles)
            self._gp_eval('Lresidues = %s'%self.residues)
            g._dokchitser = True
            return g
def _gp_eval(self, s):
try:
t = self.gp().eval(s)
except (RuntimeError, TypeError):
raise RuntimeError("Unable to create L-series, due to precision or other limits in PARI.")
if '***' in t:
raise RuntimeError("Unable to create L-series, due to precision or other limits in PARI.")
return t
def __check_init(self):
if not self.__init:
raise ValueError("you must call init_coeffs on the L-function first")
    def num_coeffs(self, T=1):
        """
        Return number of coefficients `a_n` that are needed in
        order to perform most relevant `L`-function computations to
        the desired precision.

        EXAMPLES::
            sage: E = EllipticCurve('11a')
            sage: L = E.lseries().dokchitser()
            sage: L.num_coeffs()
            26
            sage: E = EllipticCurve('5077a')
            sage: L = E.lseries().dokchitser()
            sage: L.num_coeffs()
            568
            sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
            sage: L.num_coeffs()
            4
        """
        # Delegates to the computel.gp helper cflength(T).
        return Integer(self.gp().eval('cflength(%s)'%T))
    def init_coeffs(self, v, cutoff=1,
                    w=None,
                    pari_precode='',
                    max_imaginary_part=0,
                    max_asymp_coeffs=40):
        """
        Set the coefficients `a_n` of the `L`-series. If
        `L(s)` is not equal to its dual, pass the coefficients of
        the dual as the second optional argument.

        INPUT:
        - ``v`` - list of complex numbers or string (pari function of k)
        - ``cutoff`` - real number = 1 (default: 1)
        - ``w`` - list of complex numbers or string (pari function of k)
        - ``pari_precode`` - some code to execute in pari before calling
          initLdata
        - ``max_imaginary_part`` - (default: 0): redefine if you want to
          compute L(s) for s having large imaginary part
        - ``max_asymp_coeffs`` - (default: 40): at most this many terms are
          generated in asymptotic series for phi(t) and G(s,t)

        EXAMPLES::
            sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
            sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
            sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
            sage: L(14)
            0.998583063162746

        A list of complex numbers may also be given for v (:trac:`10937`)::
            sage: L2 = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
            sage: L2.init_coeffs(list(delta_qexp(1000))[1:])
            sage: L2(14)
            0.998583063162746
        """
        if isinstance(v, tuple) and w is None:
            # Re-initialization from pickled state: a previous call stored
            # the full argument tuple in self.__init (see __reduce__).
            v, cutoff, w, pari_precode, max_imaginary_part, max_asymp_coeffs = v
        self.__init = (v, cutoff, w, pari_precode, max_imaginary_part, max_asymp_coeffs)
        gp = self.gp()  # NOTE: result unused, but this starts the interpreter
        if pari_precode != '':
            self._gp_eval(pari_precode)
        RR = self.__CC._real_field()
        cutoff = RR(cutoff)
        if isinstance(v, str):
            # `v` is a pari expression in k producing the k-th coefficient.
            if w is None:
                self._gp_eval('initLdata("%s", %s)'%(v, cutoff))
                return
            self._gp_eval('initLdata("%s",%s,"%s")'%(v,cutoff,w))
            return
        if not isinstance(v, (list, tuple)):
            raise TypeError("v (=%s) must be a list, tuple, or string"%v)
        CC = self.__CC
        # Explicit coefficients: ship them to gp as vectors and index by k.
        v = ','.join([CC(a)._pari_init_() for a in v])
        self._gp_eval('Avec = [%s]'%v)
        if w is None:
            self._gp_eval('initLdata("Avec[k]", %s)'%cutoff)
            return
        w = ','.join([CC(a)._pari_init_() for a in w])
        self._gp_eval('Bvec = [%s]'%w)
        self._gp_eval('initLdata("Avec[k]",%s,"Bvec[k]")'%cutoff)
    def __to_CC(self, s):
        """Convert a gp output string to an element of the complex field."""
        # Normalize gp's exponent format ('.E') and strip spaces before parsing.
        s = s.replace('.E','.0E').replace(' ','')
        return self.__CC(sage_eval(s, locals={'I':self.__CC.gen(0)}))
def _clear_value_cache(self):
del self.__values
    def __call__(self, s, c=None):
        r"""
        Evaluate the L-function at the complex number ``s``.

        INPUT:
        - ``s`` - complex number

        .. note::
            Evaluation of the function takes a long time, so each
            evaluation is cached. Call ``self._clear_value_cache()`` to
            clear the evaluation cache.

        EXAMPLES::
            sage: E = EllipticCurve('5077a')
            sage: L = E.lseries().dokchitser(100)
            sage: L(1)
            0.00000000000000000000000000000
            sage: L(1+I)
            -1.3085436607849493358323930438 + 0.81298000036784359634835412129*I
        """
        self.__check_init()
        s = self.__CC(s)
        try:
            return self.__values[s]
        except AttributeError:
            # First evaluation ever: create the cache lazily.
            self.__values = {}
        except KeyError:
            # Not cached yet; fall through and compute it.
            pass
        z = self.gp().eval('L(%s)'%s)
        if 'pole' in z:
            print(z)
            raise ArithmeticError
        elif '***' in z:
            print(z)
            raise RuntimeError
        elif 'Warning' in z:
            # gp prints warnings before the value; log them and parse only
            # the last line as the result.
            i = z.rfind('\n')
            msg = z[:i].replace('digits','decimal digits')
            verbose(msg, level=-1)
            ans = self.__to_CC(z[i+1:])
            self.__values[s] = ans
            return ans
        ans = self.__to_CC(z)
        self.__values[s] = ans
        return ans
    def derivative(self, s, k=1):
        r"""
        Return the `k`-th derivative of the `L`-series at `s`.

        .. warning::
            If `k` is greater than the order of vanishing of
            `L` at `s` you may get nonsense.

        EXAMPLES::
            sage: E = EllipticCurve('389a')
            sage: L = E.lseries().dokchitser()
            sage: L.derivative(1,E.rank())
            1.51863300057685
        """
        self.__check_init()
        s = self.__CC(s)
        k = Integer(k)
        # gp's L(s,,k) computes the k-th derivative.
        z = self.gp().eval('L(%s,,%s)'%(s,k))
        if 'pole' in z:
            raise ArithmeticError(z)
        elif 'Warning' in z:
            i = z.rfind('\n')
            msg = z[:i].replace('digits','decimal digits')
            verbose(msg, level=-1)
            # NOTE(review): this slices z[i:] (keeping the '\n'), while
            # __call__ uses z[i+1:] — confirm the leading newline is harmless
            # to the complex-field parser.
            return self.__CC(z[i:])
        return self.__CC(z)
    def taylor_series(self, a=0, k=6, var='z'):
        r"""
        Return the first `k` terms of the Taylor series expansion
        of the `L`-series about `a`.

        This is returned as a series in ``var``, where you
        should view ``var`` as equal to `s-a`. Thus
        this function returns the formal power series whose coefficients
        are `L^{(n)}(a)/n!`.

        INPUT:
        - ``a`` - complex number (default: 0); point about which to expand
        - ``k`` - integer (default: 6), series is `O(``var``^k)`
        - ``var`` - string (default: 'z'), variable of power series

        EXAMPLES::
            sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
            sage: L.taylor_series(2, 3)
            1.64493406684823 - 0.937548254315844*z + 0.994640117149451*z^2 + O(z^3)
            sage: E = EllipticCurve('37a')
            sage: L = E.lseries().dokchitser()
            sage: L.taylor_series(1)
            0.000000000000000 + 0.305999773834052*z + 0.186547797268162*z^2 - 0.136791463097188*z^3 + 0.0161066468496401*z^4 + 0.0185955175398802*z^5 + O(z^6)
        """
        self.__check_init()
        a = self.__CC(a)
        k = Integer(k)
        try:
            # Lseries(a,,k-1) yields the expansion to order k-1; Vec()
            # extracts its coefficients as a gp vector.
            z = self.gp()('Vec(Lseries(%s,,%s))'%(a,k-1))
        except TypeError as msg:
            raise RuntimeError("%s\nUnable to compute Taylor expansion (try lowering the number of terms)"%msg)
        r = repr(z)
        if 'pole' in r:
            raise ArithmeticError(r)
        elif 'Warning' in r:
            i = r.rfind('\n')
            msg = r[:i].replace('digits','decimal digits')
            verbose(msg, level=-1)
        # Parse each coefficient into the complex field and wrap the result
        # in a power series ring over CC with precision len(v).
        v = list(z)
        K = self.__CC
        v = [K(repr(x)) for x in v]
        R = self.__CC[[var]]
        return R(v,len(v))
    def check_functional_equation(self, T=1.2):
        r"""
        Verifies how well numerically the functional equation is satisfied,
        and also determines the residues if ``self.poles != []`` and
        residues='automatic'.

        More specifically: for `T>1` (default 1.2),
        ``self.check_functional_equation(T)`` should ideally
        return 0 (to the current precision).

        - if what this function returns does not look like 0 at all,
          probably the functional equation is wrong (i.e. some of the
          parameters gammaV, conductor etc., or the coefficients are wrong),
        - if checkfeq(T) is to be used, more coefficients have to be
          generated (approximately T times more), e.g. call cflength(1.3),
          initLdata("a(k)",1.3), checkfeq(1.3)
        - T=1 always (!) returns 0, so T has to be away from 1
        - default value `T=1.2` seems to give a reasonable balance
        - if you don't have to verify the functional equation or the
          L-values, call num_coeffs(1) and initLdata("a(k)",1), you need
          slightly less coefficients.

        EXAMPLES::
            sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
            sage: L.check_functional_equation()
            -1.35525271600000e-20 # 32-bit
            -2.71050543121376e-20 # 64-bit
        """
        self.__check_init()
        # checkfeq returns a (hopefully tiny) real number; strip spaces from
        # gp's output before parsing it into the complex field.
        z = self.gp().eval('checkfeq(%s)'%T).replace(' ','')
        return self.__CC(z)
def set_coeff_growth(self, coefgrow):
r"""
You might have to redefine the coefficient growth function if the
`a_n` of the `L`-series are not given by the
following PARI function::
coefgrow(n) = if(length(Lpoles),
1.5*n^(vecmax(real(Lpoles))-1),
sqrt(4*n)^(weight-1));
INPUT:
- ``coefgrow`` - string that evaluates to a PARI
function of n that defines a coefgrow function.
EXAMPLE::
sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
sage: L.set_coeff_growth('2*n^(11/2)')
sage: L(1)
0.0374412812685155
"""
if not isinstance(coefgrow, str):
raise TypeError("coefgrow must be a string")
g = self.gp()
g.eval('coefgrow(n) = %s'%(coefgrow.replace('\n',' ')))
def reduce_load_dokchitser(D):
    """Unpickle helper: rebuild a Dokchitser from its instance dictionary."""
    X = Dokchitser(1,1,1,1)
    X.__dict__ = D
    # Re-run coefficient initialization from the saved argument tuple;
    # the gp session itself is never pickled (see __reduce__).
    X.init_coeffs(X._Dokchitser__init)
    return X
import copy
from sage.structure.sage_object import SageObject
from sage.rings.all import ComplexField, Integer
from sage.misc.all import verbose, sage_eval
import sage.interfaces.gp
class Dokchitser(SageObject):
r"""
Dokchitser's `L`-functions Calculator
Create a Dokchitser `L`-series with
Dokchitser(conductor, gammaV, weight, eps, poles, residues, init,
prec)
where
- ``conductor`` - integer, the conductor
- ``gammaV`` - list of Gamma-factor parameters, e.g. [0] for
Riemann zeta, [0,1] for ell.curves, (see examples).
- ``weight`` - positive real number, usually an integer e.g. 1 for
Riemann zeta, 2 for `H^1` of curves/`\QQ`
- ``eps`` - complex number; sign in functional equation
- ``poles`` - (default: []) list of points where `L^*(s)` has
(simple) poles; only poles with `Re(s)>weight/2` should be
included
- ``residues`` - vector of residues of `L^*(s)` in those poles or
set residues='automatic' (default value)
- ``prec`` - integer (default: 53) number of *bits* of precision
RIEMANN ZETA FUNCTION:
We compute with the Riemann Zeta function.
::
sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
sage: L
Dokchitser L-series of conductor 1 and weight 1
sage: L(1)
Traceback (most recent call last):
...
ArithmeticError
sage: L(2)
1.64493406684823
sage: L(2, 1.1)
1.64493406684823
sage: L.derivative(2)
-0.937548254315844
sage: h = RR('0.0000000000001')
sage: (zeta(2+h) - zeta(2.))/h
-0.937028232783632
sage: L.taylor_series(2, k=5)
1.64493406684823 - 0.937548254315844*z + 0.994640117149451*z^2 - 1.00002430047384*z^3 + 1.00006193307...*z^4 + O(z^5)
RANK 1 ELLIPTIC CURVE:
We compute with the `L`-series of a rank `1`
curve.
::
sage: E = EllipticCurve('37a')
sage: L = E.lseries().dokchitser(); L
Dokchitser L-function associated to Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
sage: L(1)
0.000000000000000
sage: L.derivative(1)
0.305999773834052
sage: L.derivative(1,2)
0.373095594536324
sage: L.num_coeffs()
48
sage: L.taylor_series(1,4)
0.000000000000000 + 0.305999773834052*z + 0.186547797268162*z^2 - 0.136791463097188*z^3 + O(z^4)
sage: L.check_functional_equation()
6.11218974700000e-18 # 32-bit
6.04442711160669e-18 # 64-bit
RANK 2 ELLIPTIC CURVE:
We compute the leading coefficient and Taylor expansion of the
`L`-series of a rank `2` curve.
::
sage: E = EllipticCurve('389a')
sage: L = E.lseries().dokchitser()
sage: L.num_coeffs ()
156
sage: L.derivative(1,E.rank())
1.51863300057685
sage: L.taylor_series(1,4)
-1.27685190980159e-23 + (7.23588070754027e-24)*z + 0.759316500288427*z^2 - 0.430302337583362*z^3 + O(z^4) # 32-bit
-2.72911738151096e-23 + (1.54658247036311e-23)*z + 0.759316500288427*z^2 - 0.430302337583362*z^3 + O(z^4) # 64-bit
RAMANUJAN DELTA L-FUNCTION:
The coefficients are given by Ramanujan's tau function::
sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
We redefine the default bound on the coefficients: Deligne's
estimate on tau(n) is better than the default
coefgrow(n)=`(4n)^{11/2}` (by a factor 1024), so
re-defining coefgrow() improves efficiency (slightly faster).
::
sage: L.num_coeffs()
12
sage: L.set_coeff_growth('2*n^(11/2)')
sage: L.num_coeffs()
11
Now we're ready to evaluate, etc.
::
sage: L(1)
0.0374412812685155
sage: L(1, 1.1)
0.0374412812685155
sage: L.taylor_series(1,3)
0.0374412812685155 + 0.0709221123619322*z + 0.0380744761270520*z^2 + O(z^3)
"""
def __init__(self, conductor, gammaV, weight, eps, \
poles=[], residues='automatic', prec=53,
init=None):
"""
Initialization of Dokchitser calculator EXAMPLES::
sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
sage: L.num_coeffs()
4
"""
self.conductor = conductor
self.gammaV = gammaV
self.weight = weight
self.eps = eps
self.poles = poles
self.residues = residues
self.prec = prec
self.__CC = ComplexField(self.prec)
self.__RR = self.__CC._real_field()
if not init is None:
self.init_coeffs(init)
self.__init = init
else:
self.__init = False
    def __reduce__(self):
        # The gp interpreter handle cannot be pickled; drop it from the
        # copied state before handing it to the unpickle helper.
        D = copy.copy(self.__dict__)
        if '_Dokchitser__gp' in D:
            del D['_Dokchitser__gp']
        return reduce_load_dokchitser, (D, )
    def _repr_(self):
        """Return the string representation of this L-series."""
        z = "Dokchitser L-series of conductor %s and weight %s"%(
            self.conductor, self.weight)
        return z
    def __del__(self):
        # Quit the gp subprocess on garbage collection.
        # NOTE(review): self.gp() launches an interpreter if none exists yet,
        # only to quit it immediately — confirm this is intended.
        self.gp().quit()
    def gp(self):
        """
        Return the gp interpreter that is used to implement this Dokchitser
        L-function.  Created lazily on first access and cached on the
        instance; creation loads ``computel.gp`` and pushes the
        functional-equation parameters into gp.

        EXAMPLES::
            sage: E = EllipticCurve('11a')
            sage: L = E.lseries().dokchitser()
            sage: L(2)
            0.546048036215014
            sage: L.gp()
            PARI/GP interpreter
        """
        try:
            return self.__gp
        except AttributeError:
            logfile = None
            # For debugging
            # NOTE(review): the assignment below unconditionally overrides
            # logfile, so every session is logged to DOT_SAGE — confirm this
            # debugging aid is meant to stay enabled.
            import os
            from sage.env import DOT_SAGE
            logfile = os.path.join(DOT_SAGE, 'dokchitser.log')
            g = sage.interfaces.gp.Gp(script_subdirectory='dokchitser', logfile=logfile)
            g.read('computel.gp')
            self.__gp = g
            # self.prec is in bits; gp's realprecision is in decimal digits
            # (~3.32 bits each), hence the //3 with a small safety margin.
            self._gp_eval('default(realprecision, %s)'%(self.prec//3 + 2))
            self._gp_eval('conductor = %s'%self.conductor)
            self._gp_eval('gammaV = %s'%self.gammaV)
            self._gp_eval('weight = %s'%self.weight)
            self._gp_eval('sgn = %s'%self.eps)
            self._gp_eval('Lpoles = %s'%self.poles)
            self._gp_eval('Lresidues = %s'%self.residues)
            g._dokchitser = True
            return g
def _gp_eval(self, s):
    """Evaluate the gp expression ``s`` in this object's interpreter and
    return gp's textual output.

    Raises ``RuntimeError`` when the interpreter call fails outright or
    when gp prints its ``***`` error marker.
    """
    err = "Unable to create L-series, due to precision or other limits in PARI."
    try:
        out = self.gp().eval(s)
    except (RuntimeError, TypeError):
        raise RuntimeError(err)
    if '***' in out:
        raise RuntimeError(err)
    return out
def __check_init(self):
    # Guard used by evaluation methods: self.__init stays False until
    # init_coeffs() has been called (it is then set to the init data).
    if not self.__init:
        raise ValueError("you must call init_coeffs on the L-function first")
def num_coeffs(self, T=1):
    """
    Return number of coefficients `a_n` that are needed in
    order to perform most relevant `L`-function computations to
    the desired precision.
    EXAMPLES::
        sage: E = EllipticCurve('11a')
        sage: L = E.lseries().dokchitser()
        sage: L.num_coeffs()
        26
        sage: E = EllipticCurve('5077a')
        sage: L = E.lseries().dokchitser()
        sage: L.num_coeffs()
        568
        sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
        sage: L.num_coeffs()
        4
    """
    # cflength() is provided by Dokchitser's computel.gp script.
    return Integer(self.gp().eval('cflength(%s)'%T))
def init_coeffs(self, v, cutoff=1,
                w=None,
                pari_precode='',
                max_imaginary_part=0,
                max_asymp_coeffs=40):
    """
    Set the coefficients `a_n` of the `L`-series. If
    `L(s)` is not equal to its dual, pass the coefficients of
    the dual as the second optional argument.
    INPUT:
    - ``v`` - list of complex numbers or string (pari
      function of k)
    - ``cutoff`` - real number = 1 (default: 1)
    - ``w`` - list of complex numbers or string (pari
      function of k)
    - ``pari_precode`` - some code to execute in pari
      before calling initLdata
    - ``max_imaginary_part`` - (default: 0): redefine if
      you want to compute L(s) for s having large imaginary part,
    - ``max_asymp_coeffs`` - (default: 40): at most this
      many terms are generated in asymptotic series for phi(t) and
      G(s,t).
    EXAMPLES::
        sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
        sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
        sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
    Evaluate the resulting L-function at a point, and compare with the answer that
    one gets "by definition" (of L-function attached to a modular form)::
        sage: L(14)
        0.998583063162746
        sage: a = delta_qexp(1000)
        sage: sum(a[n]/float(n)^14 for n in range(1,1000))
        0.9985830631627459
    Illustrate that one can give a list of complex numbers for v
    (see :trac:`10937`)::
        sage: L2 = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
        sage: L2.init_coeffs(list(delta_qexp(1000))[1:])
        sage: L2(14)
        0.998583063162746
    TESTS:
    Verify that setting the `w` parameter does not raise an error
    (see :trac:`10937`). Note that the meaning of `w` does not seem to
    be documented anywhere in Dokchitser's package yet, so there is
    no claim that the example below is meaningful! ::
        sage: L2 = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
        sage: L2.init_coeffs(list(delta_qexp(1000))[1:], w=[1..1000])
    """
    # A tuple argument is the record saved by a previous call (see
    # __reduce__/reduce_load_dokchitser); unpack it in place.
    if isinstance(v, tuple) and w is None:
        v, cutoff, w, pari_precode, max_imaginary_part, max_asymp_coeffs = v
    # Remember everything needed to replay this call when unpickling.
    # NOTE(review): max_imaginary_part and max_asymp_coeffs are accepted
    # and saved here but never forwarded to gp in this version -- confirm.
    self.__init = (v, cutoff, w, pari_precode, max_imaginary_part, max_asymp_coeffs)
    gp = self.gp()
    if pari_precode != '':
        self._gp_eval(pari_precode)
    RR = self.__CC._real_field()
    cutoff = RR(cutoff)
    if isinstance(v, str):
        # v is pari source for a(k); hand it to initLdata verbatim.
        if w is None:
            self._gp_eval('initLdata("%s", %s)'%(v, cutoff))
            return
        # Coefficients of the dual given as pari source as well.
        self._gp_eval('initLdata("%s",%s,"%s")'%(v,cutoff,w))
        return
    if not isinstance(v, (list, tuple)):
        raise TypeError("v (=%s) must be a list, tuple, or string"%v)
    # Explicit numeric coefficients: ship them to gp as vectors and let
    # initLdata index into them via "Avec[k]" / "Bvec[k]".
    CC = self.__CC
    v = ','.join([CC(a)._pari_init_() for a in v])
    self._gp_eval('Avec = [%s]'%v)
    if w is None:
        self._gp_eval('initLdata("Avec[k]", %s)'%cutoff)
        return
    w = ','.join([CC(a)._pari_init_() for a in w])
    self._gp_eval('Bvec = [%s]'%w)
    self._gp_eval('initLdata("Avec[k]",%s,"Bvec[k]")'%cutoff)
def __to_CC(self, s):
    # Convert gp's textual output to an element of self.__CC.  Patch the
    # bare '.E' exponent form gp can emit into '.0E' and strip blanks so
    # that sage_eval can parse it; 'I' is bound to the imaginary unit.
    s = s.replace('.E','.0E').replace(' ','')
    return self.__CC(sage_eval(s, locals={'I':self.__CC.gen(0)}))
def _clear_value_cache(self):
    # Drop the memoized L-values (the cache is rebuilt lazily by __call__).
    del self.__values
def __call__(self, s, c=None):
    r"""
    INPUT:
    - ``s`` - complex number
    .. note::
        Evaluation of the function takes a long time, so each
        evaluation is cached. Call ``self._clear_value_cache()`` to
        clear the evaluation cache.
    EXAMPLES::
        sage: E = EllipticCurve('5077a')
        sage: L = E.lseries().dokchitser(100)
        sage: L(1)
        0.00000000000000000000000000000
        sage: L(1+I)
        -1.3085436607849493358323930438 + 0.81298000036784359634835412129*I
    """
    self.__check_init()
    s = self.__CC(s)
    # Evaluations are expensive, so results are memoized in self.__values.
    try:
        return self.__values[s]
    except AttributeError:
        # First evaluation ever: create the cache.
        self.__values = {}
    except KeyError:
        # Not cached yet; fall through and compute.
        pass
    z = self.gp().eval('L(%s)'%s)
    if 'pole' in z:
        # gp reports evaluation at a pole of the L-function.
        print(z)
        raise ArithmeticError
    elif '***' in z:
        # Generic gp error marker.
        print(z)
        raise RuntimeError
    elif 'Warning' in z:
        # gp prints warning lines before the value; everything up to the
        # last newline is logged, the final line is the numeric answer.
        i = z.rfind('\n')
        msg = z[:i].replace('digits','decimal digits')
        verbose(msg, level=-1)
        ans = self.__to_CC(z[i+1:])
        self.__values[s] = ans
        return ans
    ans = self.__to_CC(z)
    self.__values[s] = ans
    return ans
def derivative(self, s, k=1):
    r"""
    Return the `k`-th derivative of the `L`-series at
    `s`.

    INPUT:
    - ``s`` - complex number
    - ``k`` - integer (default: 1), order of the derivative

    .. warning::
        If `k` is greater than the order of vanishing of
        `L` at `s` you may get nonsense.
    EXAMPLES::
        sage: E = EllipticCurve('389a')
        sage: L = E.lseries().dokchitser()
        sage: L.derivative(1,E.rank())
        1.51863300057685
    """
    self.__check_init()
    s = self.__CC(s)
    k = Integer(k)
    # L(s,,k) is computel's k-th derivative.
    z = self.gp().eval('L(%s,,%s)'%(s,k))
    if 'pole' in z:
        raise ArithmeticError(z)
    elif 'Warning' in z:
        # gp emits warning lines before the value; only the final line is
        # the number.  CONSISTENCY FIX: slice from i+1 (past the newline),
        # matching __call__ -- previously z[i:] kept a leading '\n' in the
        # string handed to the complex field.
        i = z.rfind('\n')
        msg = z[:i].replace('digits','decimal digits')
        verbose(msg, level=-1)
        return self.__CC(z[i+1:])
    return self.__CC(z)
def taylor_series(self, a=0, k=6, var='z'):
    r"""
    Return the first `k` terms of the Taylor series expansion
    of the `L`-series about `a`.
    This is returned as a series in ``var``, where you
    should view ``var`` as equal to `s-a`. Thus
    this function returns the formal power series whose coefficients
    are `L^{(n)}(a)/n!`.
    INPUT:
    - ``a`` - complex number (default: 0); point about
      which to expand
    - ``k`` - integer (default: 6), series is
      `O(``var``^k)`
    - ``var`` - string (default: 'z'), variable of power
      series
    EXAMPLES::
        sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
        sage: L.taylor_series(2, 3)
        1.64493406684823 - 0.937548254315844*z + 0.994640117149451*z^2 + O(z^3)
        sage: E = EllipticCurve('37a')
        sage: L = E.lseries().dokchitser()
        sage: L.taylor_series(1)
        0.000000000000000 + 0.305999773834052*z + 0.186547797268162*z^2 - 0.136791463097188*z^3 + 0.0161066468496401*z^4 + 0.0185955175398802*z^5 + O(z^6)
    We compute a Taylor series where each coefficient is to high
    precision.
    ::
        sage: E = EllipticCurve('389a')
        sage: L = E.lseries().dokchitser(200)
        sage: L.taylor_series(1,3)
        -9.094...e-82 + (5.1538...e-82)*z + 0.75931650028842677023019260789472201907809751649492435158581*z^2 + O(z^3)
    """
    self.__check_init()
    a = self.__CC(a)
    k = Integer(k)
    try:
        # Lseries(a,,k-1) produces the degree-(k-1) expansion; Vec() turns
        # it into a gp vector of coefficients.
        z = self.gp()('Vec(Lseries(%s,,%s))'%(a,k-1))
    except TypeError as msg:
        raise RuntimeError("%s\nUnable to compute Taylor expansion (try lowering the number of terms)"%msg)
    r = repr(z)
    if 'pole' in r:
        raise ArithmeticError(r)
    elif 'Warning' in r:
        # Log gp warning lines (everything before the final line) quietly.
        i = r.rfind('\n')
        msg = r[:i].replace('digits','decimal digits')
        verbose(msg, level=-1)
    # Convert the gp vector entries to our complex field and wrap them as
    # a power series with precision len(v) in the requested variable.
    v = list(z)
    K = self.__CC
    v = [K(repr(x)) for x in v]
    R = self.__CC[[var]]
    return R(v,len(v))
def check_functional_equation(self, T=1.2):
    r"""
    Verifies how well numerically the functional equation is satisfied,
    and also determines the residues if ``self.poles !=
    []`` and residues='automatic'.
    More specifically: for `T>1` (default 1.2),
    ``self.check_functional_equation(T)`` should ideally
    return 0 (to the current precision).
    - if what this function returns does not look like 0 at all,
      probably the functional equation is wrong (i.e. some of the
      parameters gammaV, conductor etc., or the coefficients are wrong),
    - if checkfeq(T) is to be used, more coefficients have to be
      generated (approximately T times more), e.g. call cflength(1.3),
      initLdata("a(k)",1.3), checkfeq(1.3)
    - T=1 always (!) returns 0, so T has to be away from 1
    - default value `T=1.2` seems to give a reasonable
      balance
    - if you don't have to verify the functional equation or the
      L-values, call num_coeffs(1) and initLdata("a(k)",1), you need
      slightly less coefficients.
    EXAMPLES::
        sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=1, poles=[1], residues=[-1], init='1')
        sage: L.check_functional_equation()
        -1.35525271600000e-20 # 32-bit
        -2.71050543121376e-20 # 64-bit
    If we choose the sign in functional equation for the
    `\zeta` function incorrectly, the functional equation
    doesn't check out.
    ::
        sage: L = Dokchitser(conductor=1, gammaV=[0], weight=1, eps=-11, poles=[1], residues=[-1], init='1')
        sage: L.check_functional_equation()
        -9.73967861488124
    """
    self.__check_init()
    # checkfeq() comes from computel.gp; strip blanks so the complex
    # field constructor can parse gp's output.
    z = self.gp().eval('checkfeq(%s)'%T).replace(' ','')
    return self.__CC(z)
def set_coeff_growth(self, coefgrow):
    r"""
    Redefine the coefficient growth function used by the gp session.

    You might have to do this if the `a_n` of the `L`-series are not
    bounded by the default PARI function::

        coefgrow(n) = if(length(Lpoles),
                      1.5*n^(vecmax(real(Lpoles))-1),
                      sqrt(4*n)^(weight-1));

    INPUT:
    - ``coefgrow`` - string that evaluates to a PARI
      function of n that defines a coefgrow function.
    EXAMPLE::
        sage: L = Dokchitser(conductor=1, gammaV=[0,1], weight=12, eps=1)
        sage: pari_precode = 'tau(n)=(5*sigma(n,3)+7*sigma(n,5))*n/12 - 35*sum(k=1,n-1,(6*k-4*(n-k))*sigma(k,3)*sigma(n-k,5))'
        sage: L.init_coeffs('tau(k)', pari_precode=pari_precode)
        sage: L.set_coeff_growth('2*n^(11/2)')
        sage: L(1)
        0.0374412812685155
    """
    if not isinstance(coefgrow, str):
        raise TypeError("coefgrow must be a string")
    # gp definitions must fit on a single line.
    body = coefgrow.replace('\n', ' ')
    self.gp().eval('coefgrow(n) = %s' % body)
def reduce_load_dokchitser(D):
    # Unpickling helper (see Dokchitser.__reduce__): build an object with
    # dummy parameters, restore the saved attribute dict, then replay
    # init_coeffs so the gp session state is rebuilt from the stored
    # (v, cutoff, w, pari_precode, ...) tuple.
    X = Dokchitser(1,1,1,1)
    X.__dict__ = D
    X.init_coeffs(X._Dokchitser__init)
    return X
import math
import numpy as np
import numba as nb
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule

# CUDA kernel: advance every particle by one sampled flight segment.
# Fixes relative to the previous revision:
#   * the thread index now includes the block offset, so more than one
#     block's worth of particles can be processed without duplication;
#   * all per-particle reads happen after the i < num_part bounds check;
#   * the distance to collision is sampled as -ln(xi)/Sigma_t (the old
#     form -ln(xi/Sigma_t) is not the exponential path-length sample).
mod = SourceModule("""
    __global__ void AdvanceCuda(float *p_pos_x, float *p_pos_y, float *p_pos_z,
                                float *p_dir_x, float *p_dir_y, float *p_dir_z,
                                int *p_mesh_cell, float *p_speed, float *p_time,
                                float *clever_in, float *mesh_total_xsec,
                                int *p_end_trans, float *rands,
                                float *mesh_dist_traveled, float *mesh_dist_traveled_squared,
                                int *num_dead)
    {
        // Scalar launch parameters packed in clever_in:
        // [L, dx, num_part, max_mesh_index].
        float dx = clever_in[1];
        float L = clever_in[0];
        const int num_part = clever_in[2];
        const int max_mesh_index = clever_in[3];
        // Global particle index (was threadIdx.x only, which limited the
        // kernel to the first block's worth of particles).
        const int i = blockIdx.x * blockDim.x + threadIdx.x;
        const float kicker = 1e-10;
        if (i < num_part){
            // Per-particle reads only after the bounds check.
            const int init_cell = p_mesh_cell[i];
            float p_dist_travled = 0.0;
            int cell_next;
            if (p_end_trans[i] == 0){
                if (p_pos_x[i] < 0){
                    p_end_trans[i] = 1;
                    atomicAdd(&num_dead[0], 1);
                }
                else if (p_pos_x[i] >= L){
                    p_end_trans[i] = 1;
                    atomicAdd(&num_dead[0], 1);
                }
                else{
                    // Exponential path length: dist = -ln(xi)/Sigma_t.
                    float dist = -log(rands[i])/mesh_total_xsec[p_mesh_cell[i]];
                    float x_loc = (p_dir_x[i] * dist) + p_pos_x[i];
                    float LB = p_mesh_cell[i] * dx;
                    float RB = LB + dx;
                    if (x_loc < LB){
                        // Stop at the left face; "kicker" nudges across it.
                        p_dist_travled = (LB - p_pos_x[i])/p_dir_x[i] + kicker;
                        cell_next = p_mesh_cell[i] - 1;
                    }
                    else if (x_loc > RB){
                        p_dist_travled = (RB - p_pos_x[i])/p_dir_x[i] + kicker;
                        cell_next = p_mesh_cell[i] + 1;
                    }
                    else{
                        // Collision inside the cell: this particle is done.
                        p_dist_travled = dist;
                        p_end_trans[i] = 1;
                        atomicAdd(&num_dead[0], 1);
                        cell_next = p_mesh_cell[i];
                    }
                    p_pos_x[i] += p_dir_x[i]*p_dist_travled;
                    p_pos_y[i] += p_dir_y[i]*p_dist_travled;
                    p_pos_z[i] += p_dir_z[i]*p_dist_travled;
                    // Track-length tallies in the cell the flight started in.
                    atomicAdd(&mesh_dist_traveled[init_cell], p_dist_travled);
                    atomicAdd(&mesh_dist_traveled_squared[init_cell], pow(p_dist_travled,2));
                    p_mesh_cell[i] = cell_next;
                    p_time[i] += p_dist_travled/p_speed[i];
                }
            }
        }
    }
    """)
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
            num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
    """
    Advance all particles on the GPU until every particle has terminated
    (collision or leakage), or a hard cap of 1000 kernel launches is hit.

    Fixes relative to the previous revision:
      * the termination counter is copied back from the device every
        cycle -- previously the host copy was never refreshed, so the
        loop always ran the full 1000 cycles;
      * direction buffers are passed in the (x, y, z) order the kernel
        declares -- previously y, z, x were bound to x, y, z;
      * launch configuration uses a proper 1-D grid of 1-D blocks --
        previously ``block=(threadsperblock, blockspergrid, 1)`` made
        every extra y-thread duplicate the same particles.

    Returns the updated particle arrays and mesh tallies (same order as
    the arguments).
    """
    p_end_trans = np.zeros(num_part, dtype=np.int32)  # 1 once a particle is done
    end_flag = 0
    max_mesh_index = len(mesh_total_xsec)-1
    cycle_count = 0

    # --- copy particle state to the device ---
    d_p_pos_x = drv.mem_alloc(p_pos_x.nbytes)
    d_p_pos_y = drv.mem_alloc(p_pos_y.nbytes)
    d_p_pos_z = drv.mem_alloc(p_pos_z.nbytes)
    drv.memcpy_htod(d_p_pos_x, p_pos_x)
    drv.memcpy_htod(d_p_pos_y, p_pos_y)
    drv.memcpy_htod(d_p_pos_z, p_pos_z)
    d_p_dir_x = drv.mem_alloc(p_dir_x.nbytes)
    d_p_dir_y = drv.mem_alloc(p_dir_y.nbytes)
    d_p_dir_z = drv.mem_alloc(p_dir_z.nbytes)
    drv.memcpy_htod(d_p_dir_x, p_dir_x)
    drv.memcpy_htod(d_p_dir_y, p_dir_y)
    drv.memcpy_htod(d_p_dir_z, p_dir_z)
    d_p_mesh_cell = drv.mem_alloc(p_mesh_cell.nbytes)
    d_p_speed = drv.mem_alloc(p_speed.nbytes)
    d_p_time = drv.mem_alloc(p_time.nbytes)
    drv.memcpy_htod(d_p_mesh_cell, p_mesh_cell)
    drv.memcpy_htod(d_p_speed, p_speed)
    drv.memcpy_htod(d_p_time, p_time)
    d_p_end_trans = drv.mem_alloc(p_end_trans.nbytes)
    d_mesh_total_xsec = drv.mem_alloc(mesh_total_xsec.nbytes)
    drv.memcpy_htod(d_p_end_trans, p_end_trans)
    drv.memcpy_htod(d_mesh_total_xsec, mesh_total_xsec)
    d_mesh_dist_traveled = drv.mem_alloc(mesh_dist_traveled.nbytes)
    d_mesh_dist_traveled_squared = drv.mem_alloc(mesh_dist_traveled_squared.nbytes)
    drv.memcpy_htod(d_mesh_dist_traveled, mesh_dist_traveled)
    drv.memcpy_htod(d_mesh_dist_traveled_squared, mesh_dist_traveled_squared)

    threadsperblock = 32
    blockspergrid = (num_part + (threadsperblock - 1)) // threadsperblock
    # Cumulative count of terminated particles (kernel atomicAdds into it).
    number_done = np.zeros(1, dtype=np.int32)
    d_number_done = drv.mem_alloc(number_done.nbytes)
    drv.memcpy_htod(d_number_done, number_done)
    AdvanceCuda = mod.get_function("AdvanceCuda")
    # Scalar parameters packed into one float array: [L, dx, num_part, max_mesh_index].
    clever_io = np.array([L, dx, num_part, max_mesh_index], np.float32)
    while end_flag == 0 and cycle_count < 1000:
        # Fresh random numbers for this flight segment.
        rands = np.random.random(num_part).astype(np.float32)
        AdvanceCuda(d_p_pos_x, d_p_pos_y, d_p_pos_z,
                    d_p_dir_x, d_p_dir_y, d_p_dir_z,
                    d_p_mesh_cell, d_p_speed, d_p_time,
                    drv.In(clever_io), d_mesh_total_xsec,
                    d_p_end_trans, drv.In(rands), d_mesh_dist_traveled, d_mesh_dist_traveled_squared, d_number_done,
                    block=(threadsperblock, 1, 1), grid=(blockspergrid, 1))
        # BUG FIX: refresh the host copy of the termination counter.
        drv.memcpy_dtoh(number_done, d_number_done)
        if number_done[0] == num_part:
            end_flag = 1
        cycle_count += 1
        print("Advance Complete:......{0}% ({1}/{2}) cycle: {3}".format(
            int(100*number_done[0]/num_part), int(number_done[0]), num_part, cycle_count), end="\r")
    print()

    # --- copy results back to the host ---
    drv.memcpy_dtoh(p_pos_x, d_p_pos_x)
    drv.memcpy_dtoh(p_pos_y, d_p_pos_y)
    drv.memcpy_dtoh(p_pos_z, d_p_pos_z)
    drv.memcpy_dtoh(p_dir_x, d_p_dir_x)
    drv.memcpy_dtoh(p_dir_y, d_p_dir_y)
    drv.memcpy_dtoh(p_dir_z, d_p_dir_z)
    drv.memcpy_dtoh(p_speed, d_p_speed)
    drv.memcpy_dtoh(p_time, d_p_time)
    drv.memcpy_dtoh(p_mesh_cell, d_p_mesh_cell)
    drv.memcpy_dtoh(mesh_dist_traveled, d_mesh_dist_traveled)
    drv.memcpy_dtoh(mesh_dist_traveled_squared, d_mesh_dist_traveled_squared)
    return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)
@nb.jit(nopython=True)
def StillIn(p_pos_x, surface_distances, p_alive, num_part):
    """Kill particles that have leaked out of either end of the slab.

    Returns the updated alive mask plus the number of left- and
    right-boundary leakages.
    """
    left_bound = surface_distances[0]
    right_bound = surface_distances[len(surface_distances)-1]
    tally_left = 0
    tally_right = 0
    for j in range(num_part):
        x = p_pos_x[j]
        if x <= left_bound:
            p_alive[j] = False
            tally_left += 1
        elif x >= right_bound:
            p_alive[j] = False
            tally_right += 1
    return(p_alive, tally_left, tally_right)
def test_Advance():
    # Smoke test for the GPU Advance kernel on a 4-cell slab of width 1.
    L: float = 1
    dx: float = .25
    N_m: int = 4
    num_part: int = 6
    # Particle 0 starts left of the slab and particle 5 right of it; both
    # should be flagged dead immediately and left where they are.
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1], np.float32)
    p_pos_y = 2.1*np.ones(num_part, np.float32)
    p_pos_z = 3.4*np.ones(num_part, np.float32)
    p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], np.int32)
    # Everybody flies in +x except particle 0 (-x).
    p_dir_x = np.ones(num_part, np.float32)
    p_dir_x[0] = -1
    p_dir_y = np.zeros(num_part, np.float32)
    p_dir_z = np.zeros(num_part, np.float32)
    p_speed = np.ones(num_part, np.float32)
    p_time = np.zeros(num_part, np.float32)
    p_alive = np.ones(num_part, np.int32)
    p_alive[5] = 0
    particle_speed = 1
    mesh_total_xsec = np.array([0.1,1,.1,100], np.float32)
    mesh_dist_traveled_squared = np.zeros(N_m, np.float32)
    mesh_dist_traveled = np.zeros(N_m, np.float32)
    [p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
    # Track-length tallies must have accumulated something.
    assert (np.sum(mesh_dist_traveled) > 0)
    assert (np.sum(mesh_dist_traveled_squared) > 0)
    # Out-of-slab particles must not have moved.
    assert (p_pos_x[0] == -.01)
    assert (p_pos_x[5] == 1.1)
    # NOTE(review): .all() > .75 compares a boolean to .75, which is True
    # for any nonempty array; (p_pos_x[1:4] > .75).all() was probably meant.
    assert (p_pos_x[1:4].all() > .75)
def test_StillIn():
    # CPU test: two particles past each boundary must be killed and
    # tallied; the interior ones stay alive.
    num_part = 7
    surface_distances = [0,.25,.75,1]
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
    p_alive = np.ones(num_part, bool)
    [p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
    assert(p_alive[0] == False)
    assert(p_alive[5] == False)
    assert(tally_left == 2)
    assert(tally_right == 2)
    assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
    # Run the GPU smoke test when executed directly (requires a CUDA device).
    test_Advance()
    #test_StillIn()
import numpy as np
import numba as nb
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
mod = SourceModule("""
__global__ void AdvanceCuda(float *p_pos_x, float *p_pos_y, float *p_pos_z,
float *p_dir_x, float *p_dir_y, float *p_dir_z,
int *p_mesh_cell, float *p_speed, float *p_time,
float *clever_in, float *mesh_total_xsec,
int *p_end_trans, float *rands,
float *mesh_dist_traveled, float *mesh_dist_traveled_squared,
int *num_dead)
{
float dx = clever_in[1];
float L = clever_in[0];
const int num_part = clever_in[2];
const int max_mesh_index = clever_in[3];
const int i = threadIdx.x;
const float kicker = 1e-10;
const int init_cell = p_mesh_cell[i];
float p_dist_travled = 0.0;
int cell_next;
if (i < num_part){
if (p_end_trans[i] == 0){
if (p_pos_x[i] < 0){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else if (p_pos_x[i] >= L){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else{
float dist = -log(rands[i]/mesh_total_xsec[p_mesh_cell[i]]);
float x_loc = (p_dir_x[i] * dist) + p_pos_x[i];
float LB = p_mesh_cell[i] * dx;
float RB = LB + dx;
if (x_loc < LB){
p_dist_travled = (LB - p_pos_x[i])/p_dir_x[i] + kicker; //29
cell_next = p_mesh_cell[i] - 1;
}
else if (x_loc > RB){
p_dist_travled = (RB - p_pos_x[i])/p_dir_x[i] + kicker;
cell_next = p_mesh_cell[i] + 1;
}
else{
p_dist_travled = dist;
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
cell_next = p_mesh_cell[i];
}
p_pos_x[i] += p_dir_x[i]*p_dist_travled;
p_pos_y[i] += p_dir_y[i]*p_dist_travled;
p_pos_z[i] += p_dir_z[i]*p_dist_travled;
atomicAdd(&mesh_dist_traveled[init_cell], p_dist_travled);
atomicAdd(&mesh_dist_traveled_squared[init_cell], pow(p_dist_travled,2));
p_mesh_cell[i] = cell_next;
p_time[i] += p_dist_travled/p_speed[i];
}
}
}
}
""")
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
            num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
    """
    Advance all particles on the GPU until every particle has terminated
    (collision or leakage), or a hard cap of 1000 kernel launches is hit.

    Fixes relative to the previous revision:
      * the termination counter is copied back from the device every
        cycle -- previously the host copy was never refreshed, so the
        loop always ran the full 1000 cycles;
      * direction buffers are passed in the (x, y, z) order the kernel
        declares -- previously y, z, x were bound to x, y, z;
      * launch configuration uses a proper 1-D grid of 1-D blocks --
        previously ``block=(threadsperblock, blockspergrid, 1)`` made
        every extra y-thread duplicate the same particles.

    Returns the updated particle arrays and mesh tallies (same order as
    the arguments).
    """
    p_end_trans = np.zeros(num_part, dtype=np.int32)  # 1 once a particle is done
    end_flag = 0
    max_mesh_index = len(mesh_total_xsec)-1
    cycle_count = 0

    # --- copy particle state to the device ---
    d_p_pos_x = drv.mem_alloc(p_pos_x.nbytes)
    d_p_pos_y = drv.mem_alloc(p_pos_y.nbytes)
    d_p_pos_z = drv.mem_alloc(p_pos_z.nbytes)
    drv.memcpy_htod(d_p_pos_x, p_pos_x)
    drv.memcpy_htod(d_p_pos_y, p_pos_y)
    drv.memcpy_htod(d_p_pos_z, p_pos_z)
    d_p_dir_x = drv.mem_alloc(p_dir_x.nbytes)
    d_p_dir_y = drv.mem_alloc(p_dir_y.nbytes)
    d_p_dir_z = drv.mem_alloc(p_dir_z.nbytes)
    drv.memcpy_htod(d_p_dir_x, p_dir_x)
    drv.memcpy_htod(d_p_dir_y, p_dir_y)
    drv.memcpy_htod(d_p_dir_z, p_dir_z)
    d_p_mesh_cell = drv.mem_alloc(p_mesh_cell.nbytes)
    d_p_speed = drv.mem_alloc(p_speed.nbytes)
    d_p_time = drv.mem_alloc(p_time.nbytes)
    drv.memcpy_htod(d_p_mesh_cell, p_mesh_cell)
    drv.memcpy_htod(d_p_speed, p_speed)
    drv.memcpy_htod(d_p_time, p_time)
    d_p_end_trans = drv.mem_alloc(p_end_trans.nbytes)
    d_mesh_total_xsec = drv.mem_alloc(mesh_total_xsec.nbytes)
    drv.memcpy_htod(d_p_end_trans, p_end_trans)
    drv.memcpy_htod(d_mesh_total_xsec, mesh_total_xsec)
    d_mesh_dist_traveled = drv.mem_alloc(mesh_dist_traveled.nbytes)
    d_mesh_dist_traveled_squared = drv.mem_alloc(mesh_dist_traveled_squared.nbytes)
    drv.memcpy_htod(d_mesh_dist_traveled, mesh_dist_traveled)
    drv.memcpy_htod(d_mesh_dist_traveled_squared, mesh_dist_traveled_squared)

    threadsperblock = 32
    blockspergrid = (num_part + (threadsperblock - 1)) // threadsperblock
    # Cumulative count of terminated particles (kernel atomicAdds into it).
    number_done = np.zeros(1, dtype=np.int32)
    d_number_done = drv.mem_alloc(number_done.nbytes)
    drv.memcpy_htod(d_number_done, number_done)
    AdvanceCuda = mod.get_function("AdvanceCuda")
    # Scalar parameters packed into one float array: [L, dx, num_part, max_mesh_index].
    clever_io = np.array([L, dx, num_part, max_mesh_index], np.float32)
    while end_flag == 0 and cycle_count < 1000:
        # Fresh random numbers for this flight segment.
        rands = np.random.random(num_part).astype(np.float32)
        AdvanceCuda(d_p_pos_x, d_p_pos_y, d_p_pos_z,
                    d_p_dir_x, d_p_dir_y, d_p_dir_z,
                    d_p_mesh_cell, d_p_speed, d_p_time,
                    drv.In(clever_io), d_mesh_total_xsec,
                    d_p_end_trans, drv.In(rands), d_mesh_dist_traveled, d_mesh_dist_traveled_squared, d_number_done,
                    block=(threadsperblock, 1, 1), grid=(blockspergrid, 1))
        # BUG FIX: refresh the host copy of the termination counter.
        drv.memcpy_dtoh(number_done, d_number_done)
        if number_done[0] == num_part:
            end_flag = 1
        cycle_count += 1
        print("Advance Complete:......{0}% ({1}/{2}) cycle: {3}".format(
            int(100*number_done[0]/num_part), int(number_done[0]), num_part, cycle_count), end="\r")
    print()

    # --- copy results back to the host ---
    drv.memcpy_dtoh(p_pos_x, d_p_pos_x)
    drv.memcpy_dtoh(p_pos_y, d_p_pos_y)
    drv.memcpy_dtoh(p_pos_z, d_p_pos_z)
    drv.memcpy_dtoh(p_dir_x, d_p_dir_x)
    drv.memcpy_dtoh(p_dir_y, d_p_dir_y)
    drv.memcpy_dtoh(p_dir_z, d_p_dir_z)
    drv.memcpy_dtoh(p_speed, d_p_speed)
    drv.memcpy_dtoh(p_time, d_p_time)
    drv.memcpy_dtoh(p_mesh_cell, d_p_mesh_cell)
    drv.memcpy_dtoh(mesh_dist_traveled, d_mesh_dist_traveled)
    drv.memcpy_dtoh(mesh_dist_traveled_squared, d_mesh_dist_traveled_squared)
    return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)
@nb.jit(nopython=True)
def StillIn(p_pos_x, surface_distances, p_alive, num_part):
    """Kill particles that have leaked out of either end of the slab.

    Returns the updated alive mask plus the number of left- and
    right-boundary leakages.
    """
    left_bound = surface_distances[0]
    right_bound = surface_distances[len(surface_distances)-1]
    tally_left = 0
    tally_right = 0
    for j in range(num_part):
        x = p_pos_x[j]
        if x <= left_bound:
            p_alive[j] = False
            tally_left += 1
        elif x >= right_bound:
            p_alive[j] = False
            tally_right += 1
    return(p_alive, tally_left, tally_right)
def test_Advance():
    # Smoke test for the GPU Advance kernel on a 4-cell slab of width 1.
    L: float = 1
    dx: float = .25
    N_m: int = 4
    num_part: int = 6
    # Particle 0 starts left of the slab and particle 5 right of it; both
    # should be flagged dead immediately and left where they are.
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1], np.float32)
    p_pos_y = 2.1*np.ones(num_part, np.float32)
    p_pos_z = 3.4*np.ones(num_part, np.float32)
    p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], np.int32)
    # Everybody flies in +x except particle 0 (-x).
    p_dir_x = np.ones(num_part, np.float32)
    p_dir_x[0] = -1
    p_dir_y = np.zeros(num_part, np.float32)
    p_dir_z = np.zeros(num_part, np.float32)
    p_speed = np.ones(num_part, np.float32)
    p_time = np.zeros(num_part, np.float32)
    p_alive = np.ones(num_part, np.int32)
    p_alive[5] = 0
    particle_speed = 1
    mesh_total_xsec = np.array([0.1,1,.1,100], np.float32)
    mesh_dist_traveled_squared = np.zeros(N_m, np.float32)
    mesh_dist_traveled = np.zeros(N_m, np.float32)
    [p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
    # Track-length tallies must have accumulated something.
    assert (np.sum(mesh_dist_traveled) > 0)
    assert (np.sum(mesh_dist_traveled_squared) > 0)
    # Out-of-slab particles must not have moved.
    assert (p_pos_x[0] == -.01)
    assert (p_pos_x[5] == 1.1)
    # NOTE(review): .all() > .75 compares a boolean to .75, which is True
    # for any nonempty array; (p_pos_x[1:4] > .75).all() was probably meant.
    assert (p_pos_x[1:4].all() > .75)
def test_StillIn():
    # CPU test: two particles past each boundary must be killed and
    # tallied; the interior ones stay alive.
    num_part = 7
    surface_distances = [0,.25,.75,1]
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
    p_alive = np.ones(num_part, bool)
    [p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
    assert(p_alive[0] == False)
    assert(p_alive[5] == False)
    assert(tally_left == 2)
    assert(tally_right == 2)
    assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
    # Run the GPU smoke test when executed directly (requires a CUDA device).
    test_Advance()
    #test_StillIn()
from qwt.graphic import QwtGraphic
from qwt.painter import QwtPainter
from qtpy.QtGui import (
QPainter,
QTransform,
QPixmap,
QPen,
QPolygonF,
QPainterPath,
QBrush,
)
from qtpy.QtCore import QSize, QRect, QPointF, QRectF, QSizeF, Qt, QPoint
from qtpy.QtSvg import QSvgRenderer
import numpy as np
class QwtTriangle(object):
    """Orientation constants for triangle symbols (C++ ``enum Type``)."""

    Left = 0
    Right = 1
    Up = 2
    Down = 3
def qwtPathGraphic(path, pen, brush):
    """Render *path* with *pen*/*brush* into a new :class:`QwtGraphic`."""
    graphic = QwtGraphic()
    # Keep pen widths fixed when the graphic is later scaled.
    graphic.setRenderHint(QwtGraphic.RenderPensUnscaled)
    painter = QPainter(graphic)
    painter.setPen(pen)
    painter.setBrush(brush)
    painter.drawPath(path)
    painter.end()
    return graphic
def qwtScaleBoundingRect(graphic, size):
    """Return *graphic*'s bounding rect scaled so that its control-point
    rect fits *size* (falling back to the graphic's default size when
    *size* is empty)."""
    scaledSize = QSize(size)
    if scaledSize.isEmpty():
        scaledSize = graphic.defaultSize()
    sz = graphic.controlPointRect().size()
    # Per-axis scale factors; guard against zero-sized control rects.
    sx = 1.0
    if sz.width() > 0.0:
        sx = scaledSize.width() / sz.width()
    sy = 1.0
    if sz.height() > 0.0:
        sy = scaledSize.height() / sz.height()
    return graphic.scaledBoundingRect(sx, sy)
def qwtDrawPixmapSymbols(painter, points, numPoints, symbol):
    """Draw *symbol*'s pixmap at each point, honoring the painter's
    scaling and the symbol's pin point."""
    size = symbol.size()
    if size.isEmpty():
        size = symbol.pixmap().size()
    transform = QTransform(painter.transform())
    if transform.isScaling():
        # Scale the target size with the painter so the pixmap is
        # rescaled once instead of being drawn through the transform.
        r = QRect(0, 0, size.width(), size.height())
        size = transform.mapRect(r).size()
    pm = QPixmap(symbol.pixmap())
    if pm.size() != size:
        pm = pm.scaled(size)
    # Pin point: the pixel anchored at the symbol position
    # (defaults to the pixmap center).
    pinPoint = QPointF(0.5 * size.width(), 0.5 * size.height())
    if symbol.isPinPointEnabled():
        pinPoint = symbol.pinPoint()
    # Positions are mapped manually below, so draw in device coordinates.
    painter.resetTransform()
    for pos in points:
        pos = QPointF(transform.map(pos)) - pinPoint
        QwtPainter.drawPixmap(painter, QRect(pos.toPoint(), pm.size()), pm)
def qwtDrawSvgSymbols(painter, points, numPoints, renderer, symbol):
    """Render an SVG document at each point, scaled to the symbol size
    and anchored at its pin point."""
    if renderer is None or not renderer.isValid():
        return
    viewBox = QRectF(renderer.viewBoxF())
    if viewBox.isEmpty():
        return
    sz = QSizeF(symbol.size())
    if not sz.isValid():
        # Fall back to the document's own size.
        sz = viewBox.size()
    # Scale factors from SVG viewbox units to target pixels.
    sx = sz.width() / viewBox.width()
    sy = sz.height() / viewBox.height()
    pinPoint = QPointF(viewBox.center())
    if symbol.isPinPointEnabled():
        pinPoint = symbol.pinPoint()
    # Offset of the pin point from the viewbox origin, in pixels.
    dx = sx * (pinPoint.x() - viewBox.left())
    dy = sy * (pinPoint.y() - viewBox.top())
    for pos in points:
        x = pos.x() - dx
        y = pos.y() - dy
        renderer.render(painter, QRectF(x, y, sz.width(), sz.height()))
def qwtDrawGraphicSymbols(painter, points, numPoint, graphic, symbol):
    """Render a :class:`QwtGraphic` at each point, scaled to the symbol
    size and anchored at its pin point."""
    pointRect = QRectF(graphic.controlPointRect())
    if pointRect.isEmpty():
        return
    sx = 1.0
    sy = 1.0
    sz = symbol.size()
    if sz.isValid():
        sx = sz.width() / pointRect.width()
        sy = sz.height() / pointRect.height()
    pinPoint = QPointF(pointRect.center())
    if symbol.isPinPointEnabled():
        pinPoint = symbol.pinPoint()
    transform = QTransform(painter.transform())
    for pos in points:
        # Compose: move to the point, scale to symbol size, anchor the
        # pin point at the origin.
        tr = QTransform(transform)
        tr.translate(pos.x(), pos.y())
        tr.scale(sx, sy)
        tr.translate(-pinPoint.x(), -pinPoint.y())
        painter.setTransform(tr)
        graphic.render(painter)
    # Restore the painter's original transform.
    painter.setTransform(transform)
def qwtDrawEllipseSymbols(painter, points, numPoints, symbol):
    """Draw ellipse symbols centered on each point, using the symbol's
    pen, brush and size."""
    painter.setBrush(symbol.brush())
    painter.setPen(symbol.pen())
    size = symbol.size()
    w = size.width()
    h = size.height()
    half_w = 0.5 * w
    half_h = 0.5 * h
    for center in points:
        bounding = QRectF(center.x() - half_w, center.y() - half_h, w, h)
        painter.drawEllipse(bounding)
def qwtDrawRectSymbols(painter, points, numPoints, symbol):
    """Draw rectangle symbols centered on each point; miter joins and
    disabled antialiasing keep the corners crisp."""
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    painter.setRenderHint(QPainter.Antialiasing, False)
    size = symbol.size()
    w = size.width()
    h = size.height()
    half_w = 0.5 * w
    half_h = 0.5 * h
    for center in points:
        rect = QRectF(center.x() - half_w, center.y() - half_h, w, h)
        painter.drawRect(rect)
def qwtDrawDiamondSymbols(painter, points, numPoints, symbol):
    """Draw diamond symbols (a square rotated 45 degrees) centered on
    each point."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    for pos in points:
        left = pos.x() - 0.5 * size.width()
        top = pos.y() - 0.5 * size.height()
        right = left + size.width()
        bottom = top + size.height()
        # Vertices: top, left, bottom, right.
        diamond = QPolygonF([
            QPointF(pos.x(), top),
            QPointF(left, pos.y()),
            QPointF(pos.x(), bottom),
            QPointF(right, pos.y()),
        ])
        painter.drawPolygon(diamond)
def qwtDrawTriangleSymbols(painter, type, points, numPoint, symbol):
    """Draw triangle symbols centered on each point; *type* is one of the
    :class:`QwtTriangle` orientation constants."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    half_w = 0.5 * size.width()
    half_h = 0.5 * size.height()
    for pos in points:
        cx = pos.x()
        cy = pos.y()
        left = cx - half_w
        right = left + size.width()
        top = cy - half_h
        bottom = top + size.height()
        if type == QwtTriangle.Left:
            vertices = [QPointF(right, top), QPointF(left, cy), QPointF(right, bottom)]
        elif type == QwtTriangle.Right:
            vertices = [QPointF(left, top), QPointF(right, cy), QPointF(left, bottom)]
        elif type == QwtTriangle.Up:
            vertices = [QPointF(left, bottom), QPointF(cx, top), QPointF(right, bottom)]
        elif type == QwtTriangle.Down:
            vertices = [QPointF(left, top), QPointF(cx, bottom), QPointF(right, top)]
        else:
            raise TypeError("Unknown triangle type %s" % type)
        painter.drawPolygon(QPolygonF(vertices))
def qwtDrawLineSymbols(painter, orientations, points, numPoints, symbol):
    """Draw horizontal and/or vertical line symbols centered on each
    point; *orientations* is a ``Qt.Orientations`` bitmask."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        # Flat caps keep wide lines at their nominal length.
        pen.setCapStyle(Qt.FlatCap)
    painter.setPen(pen)
    painter.setRenderHint(QPainter.Antialiasing, False)
    sw = size.width()
    sh = size.height()
    sw2 = 0.5 * size.width()
    sh2 = 0.5 * size.height()
    for pos in points:
        if orientations & Qt.Horizontal:
            # round() snaps the symbol to the pixel grid (AA is off).
            x = round(pos.x()) - sw2
            y = round(pos.y())
            painter.drawLine(x, y, x + sw, y)
        if orientations & Qt.Vertical:
            x = round(pos.x())
            y = round(pos.y()) - sh2
            painter.drawLine(x, y, x, y + sh)
def qwtDrawXCrossSymbols(painter, points, numPoints, symbol):
    """Draw diagonal-cross ('x') symbols centered on each point."""
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        pen.setCapStyle(Qt.FlatCap)
    painter.setPen(pen)
    size = symbol.size()
    w = size.width()
    h = size.height()
    for pos in points:
        left = pos.x() - 0.5 * w
        top = pos.y() - 0.5 * h
        right = left + w
        bottom = top + h
        painter.drawLine(left, top, right, bottom)
        painter.drawLine(right, top, left, bottom)
def qwtDrawStar1Symbols(painter, points, numPoints, symbol):
    """Draw star-1 symbols ('+' overlaid with 'x') centered on each point."""
    size = symbol.size()
    painter.setPen(symbol.pen())
    sqrt1_2 = np.sqrt(0.5)
    r = QRectF(0, 0, size.width(), size.height())
    for pos in points:
        r.moveCenter(pos.toPoint())
        c = QPointF(r.center())
        # Inset of the diagonal endpoints so all four rays have the same
        # length: (1 - 1/sqrt(2)) of the half-width.
        d1 = r.width() / 2.0 * (1.0 - sqrt1_2)
        painter.drawLine(r.left() + d1, r.top() + d1, r.right() - d1, r.bottom() - d1)
        painter.drawLine(r.left() + d1, r.bottom() - d1, r.right() - d1, r.top() + d1)
        painter.drawLine(c.x(), r.top(), c.x(), r.bottom())
        painter.drawLine(r.left(), c.y(), r.right(), c.y())
def qwtDrawStar2Symbols(painter, points, numPoints, symbol):
    """Draw filled six-pointed-star (hexagram) symbols centered on each
    point."""
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        pen.setCapStyle(Qt.FlatCap)
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    # Vertex grid for the hexagram: 7 columns spaced dx, 5 rows spaced dy.
    cos30 = np.cos(30 * np.pi / 180.0)
    dy = 0.25 * symbol.size().height()
    dx = 0.5 * symbol.size().width() * cos30 / 3.0
    for pos in points:
        x = pos.x()
        y = pos.y()
        # (x1, y1) is the top-left corner of the vertex grid.
        x1 = x - 3 * dx
        y1 = y - 2 * dy
        x2 = x1 + 1 * dx
        x3 = x1 + 2 * dx
        x4 = x1 + 3 * dx
        x5 = x1 + 4 * dx
        x6 = x1 + 5 * dx
        x7 = x1 + 6 * dx
        y2 = y1 + 1 * dy
        y3 = y1 + 2 * dy
        y4 = y1 + 3 * dy
        y5 = y1 + 4 * dy
        # The 12 outline vertices of the star, clockwise from the top tip.
        star = [
            QPointF(x4, y1),
            QPointF(x5, y2),
            QPointF(x7, y2),
            QPointF(x6, y3),
            QPointF(x7, y4),
            QPointF(x5, y4),
            QPointF(x4, y5),
            QPointF(x3, y4),
            QPointF(x1, y4),
            QPointF(x2, y3),
            QPointF(x1, y2),
            QPointF(x3, y2),
        ]
        painter.drawPolygon(QPolygonF(star))
def qwtDrawHexagonSymbols(painter, points, numPoints, symbol):
    """Draw hexagon symbols centered on each point."""
    painter.setBrush(symbol.brush())
    painter.setPen(symbol.pen())
    cos30 = np.cos(30 * np.pi / 180.0)
    # NOTE(review): dx subtracts the dimensionless cos30 from a pixel
    # width; compare qwtDrawStar2Symbols which *multiplies* by cos30 --
    # confirm against the original Qwt C++ source.
    dx = 0.5 * (symbol.size().width() - cos30)
    dy = 0.25 * symbol.size().height()
    for pos in points:
        x = pos.x()
        y = pos.y()
        # (x1, y1) is the top-left corner of the vertex grid:
        # 3 columns spaced dx, 5 rows spaced dy.
        x1 = x - dx
        y1 = y - 2 * dy
        x2 = x1 + 1 * dx
        x3 = x1 + 2 * dx
        y2 = y1 + 1 * dy
        y3 = y1 + 3 * dy
        y4 = y1 + 4 * dy
        # Six outline vertices, clockwise from the top.
        hexa = [
            QPointF(x2, y1),
            QPointF(x3, y2),
            QPointF(x3, y3),
            QPointF(x2, y4),
            QPointF(x1, y3),
            QPointF(x1, y2),
        ]
        painter.drawPolygon(QPolygonF(hexa))
class QwtSymbol_PrivateData(object):
    """Internal state of a `QwtSymbol`.

    :param int st: Symbol style
    :param QBrush br: Brush used to fill the symbol interior
    :param QPen pn: Pen used for the symbol outline
    :param QSize sz: Symbol size
    """

    def __init__(self, st, br, pn, sz):
        self.style = st
        self.size = sz
        self.brush = br
        self.pen = pn
        self.isPinPointEnabled = False
        self.pinPoint = QPointF()

        class Path(object):
            def __init__(self):
                self.path = QPainterPath()
                self.graphic = QwtGraphic()

        self.path = Path()

        class Pixmap(object):
            def __init__(self):
                # Deliberately not a QPixmap(): constructing a QPixmap requires
                # a running QGuiApplication, which may not exist yet when the
                # symbol is created. `QwtSymbol.setPixmap()` stores the actual
                # pixmap here.
                self.pixmap = None

        # BUGFIX: this attribute used to be initialized to None, which made
        # `QwtSymbol.setPixmap()` / `QwtSymbol.pixmap()` raise AttributeError
        # ("'NoneType' object has no attribute 'pixmap'"). Keep a real holder
        # object with a lazily-assigned inner pixmap instead.
        self.pixmap = Pixmap()

        class Graphic(object):
            def __init__(self):
                self.graphic = QwtGraphic()

        self.graphic = Graphic()

        class SVG(object):
            def __init__(self):
                self.renderer = QSvgRenderer()

        self.svg = SVG()

        class PaintCache(object):
            def __init__(self):
                self.policy = 0
                self.pixmap = None  # lazily-created QPixmap cache

        self.cache = PaintCache()
class QwtSymbol(object):
    """
    A class for drawing symbols

    Symbol styles:

      * `QwtSymbol.NoSymbol`: No Style. The symbol cannot be drawn.
      * `QwtSymbol.Ellipse`: Ellipse or circle
      * `QwtSymbol.Rect`: Rectangle
      * `QwtSymbol.Diamond`: Diamond
      * `QwtSymbol.Triangle`: Triangle pointing upwards
      * `QwtSymbol.DTriangle`: Triangle pointing downwards
      * `QwtSymbol.UTriangle`: Triangle pointing upwards
      * `QwtSymbol.LTriangle`: Triangle pointing left
      * `QwtSymbol.RTriangle`: Triangle pointing right
      * `QwtSymbol.Cross`: Cross (+)
      * `QwtSymbol.XCross`: Diagonal cross (X)
      * `QwtSymbol.HLine`: Horizontal line
      * `QwtSymbol.VLine`: Vertical line
      * `QwtSymbol.Star1`: X combined with +
      * `QwtSymbol.Star2`: Six-pointed star
      * `QwtSymbol.Hexagon`: Hexagon
      * `QwtSymbol.Path`: The symbol is represented by a painter path, where
        the origin (0, 0) of the path coordinate system is mapped to the
        position of the symbol

        ..seealso::

            :py:meth:`setPath()`, :py:meth:`path()`

      * `QwtSymbol.Pixmap`: The symbol is represented by a pixmap.
        The pixmap is centered or aligned to its pin point.

        ..seealso::

            :py:meth:`setPinPoint()`

      * `QwtSymbol.Graphic`: The symbol is represented by a graphic.
        The graphic is centered or aligned to its pin point.

        ..seealso::

            :py:meth:`setPinPoint()`

      * `QwtSymbol.SvgDocument`: The symbol is represented by a SVG graphic.
        The graphic is centered or aligned to its pin point.

        ..seealso::

            :py:meth:`setPinPoint()`

      * `QwtSymbol.UserStyle`: Styles >= `QwtSymbol.UserStyle` are reserved
        for derived classes of `QwtSymbol` that overload `drawSymbols()` with
        additional application specific symbol types.

    Cache policies:

        Depending on the render engine and the complexity of the
        symbol shape it might be faster to render the symbol
        to a pixmap and to paint this pixmap.

        F.e. the raster paint engine is a pure software renderer
        where in cache mode a draw operation usually ends in
        raster operation with the backing store, that are usually
        faster, than the algorithms for rendering polygons.
        But the opposite can be expected for graphic pipelines
        that can make use of hardware acceleration.

        The default setting is AutoCache

        ..seealso::

            :py:meth:`setCachePolicy()`, :py:meth:`cachePolicy()`

        .. note::

            The policy has no effect, when the symbol is painted
            to a vector graphics format (PDF, SVG).

        .. warning::

            Since Qt 4.8 raster is the default backend on X11

        Valid cache policies:

          * `QwtSymbol.NoCache`: Don't use a pixmap cache
          * `QwtSymbol.Cache`: Always use a pixmap cache
          * `QwtSymbol.AutoCache`: Use a cache when the symbol is rendered
            with the software renderer (`QPaintEngine.Raster`)

    .. py:class:: QwtSymbol([style=QwtSymbol.NoSymbol])

        The symbol is constructed with gray interior,
        black outline with zero width, no size and style 'NoSymbol'.

        :param int style: Symbol Style

    .. py:class:: QwtSymbol(style, brush, pen, size)
        :noindex:

        :param int style: Symbol Style
        :param QBrush brush: Brush to fill the interior
        :param QPen pen: Outline pen
        :param QSize size: Size

    .. py:class:: QwtSymbol(path, brush, pen)
        :noindex:

        :param QPainterPath path: Painter path
        :param QBrush brush: Brush to fill the interior
        :param QPen pen: Outline pen

    .. seealso::

        :py:meth:`setPath()`, :py:meth:`setBrush()`,
        :py:meth:`setPen()`, :py:meth:`setSize()`
    """

    # enum Style
    Style = int
    NoSymbol = -1
    (
        Ellipse,
        Rect,
        Diamond,
        Triangle,
        DTriangle,
        UTriangle,
        LTriangle,
        RTriangle,
        Cross,
        XCross,
        HLine,
        VLine,
        Star1,
        Star2,
        Hexagon,
        Path,
        Pixmap,
        Graphic,
        SvgDocument,
    ) = list(range(19))
    UserStyle = 1000

    # enum CachePolicy
    NoCache, Cache, AutoCache = list(range(3))

    def __init__(self, *args):
        if len(args) in (0, 1):
            if args:
                (style,) = args
            else:
                style = QwtSymbol.NoSymbol
            self.__data = QwtSymbol_PrivateData(
                style, QBrush(Qt.gray), QPen(Qt.black, 0), QSize()
            )
        elif len(args) == 4:
            style, brush, pen, size = args
            self.__data = QwtSymbol_PrivateData(style, brush, pen, size)
        elif len(args) == 3:
            path, brush, pen = args
            self.__data = QwtSymbol_PrivateData(QwtSymbol.Path, brush, pen, QSize())
            self.setPath(path)
        else:
            # BUGFIX: the message previously omitted the accepted 0-arg form
            raise TypeError(
                "%s() takes 0, 1, 3, or 4 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    @classmethod
    def make(
        cls,
        style=None,
        brush=None,
        pen=None,
        size=None,
        path=None,
        pixmap=None,
        graphic=None,
        svgdocument=None,
        pinpoint=None,
    ):
        """
        Create and setup a new `QwtSymbol` object (convenience function).

        :param style: Symbol Style
        :type style: int or None
        :param brush: Brush to fill the interior
        :type brush: QBrush or None
        :param pen: Outline pen
        :type pen: QPen or None
        :param size: Size
        :type size: QSize or None
        :param path: Painter path
        :type path: QPainterPath or None
        :param pixmap: Pixmap as symbol
        :type pixmap: QPixmap or None
        :param graphic: Graphic
        :type graphic: qwt.graphic.QwtGraphic or None
        :param svgdocument: SVG icon as symbol
        :param pinpoint: Pin point position
        :type pinpoint: QPointF or None

        .. seealso::

            :py:meth:`setPixmap()`, :py:meth:`setGraphic()`, :py:meth:`setPath()`
        """
        style = QwtSymbol.NoSymbol if style is None else style
        brush = QBrush(Qt.gray) if brush is None else QBrush(brush)
        pen = QPen(Qt.black, 0) if pen is None else QPen(pen)
        size = QSize() if size is None else size
        if not isinstance(size, QSize):
            if isinstance(size, tuple) and len(size) == 2:
                size = QSize(size[0], size[1])
            else:
                raise TypeError("Invalid size %r" % size)
        item = cls(style, brush, pen, size)
        if path is not None:
            item.setPath(path)
        elif pixmap is not None:
            item.setPixmap(pixmap)
        elif graphic is not None:
            item.setGraphic(graphic)
        elif svgdocument is not None:
            item.setSvgDocument(svgdocument)
        if pinpoint is not None:
            item.setPinPoint(pinpoint)
        return item

    def setCachePolicy(self, policy):
        """
        Change the cache policy

        The default policy is AutoCache

        :param int policy: Cache policy

        .. seealso::

            :py:meth:`cachePolicy()`
        """
        if self.__data.cache.policy != policy:
            self.__data.cache.policy = policy
            self.invalidateCache()

    def cachePolicy(self):
        """
        :return: Cache policy

        .. seealso::

            :py:meth:`setCachePolicy()`
        """
        return self.__data.cache.policy

    def setPath(self, path):
        """
        Set a painter path as symbol

        The symbol is represented by a painter path, where the
        origin (0, 0) of the path coordinate system is mapped to
        the position of the symbol.

        When the symbol has valid size the painter path gets scaled
        to fit into the size. Otherwise the symbol size depends on
        the bounding rectangle of the path.

        The following code defines a symbol drawing an arrow::

            from qtpy.QtGui import QApplication, QPen, QPainterPath, QTransform
            from qtpy.QtCore import Qt, QPointF
            from qwt import QwtPlot, QwtPlotCurve, QwtSymbol
            import numpy as np

            app = QApplication([])

            # --- Construct custom symbol ---

            path = QPainterPath()
            path.moveTo(0, 8)
            path.lineTo(0, 5)
            path.lineTo(-3, 5)
            path.lineTo(0, 0)
            path.lineTo(3, 5)
            path.lineTo(0, 5)

            transform = QTransform()
            transform.rotate(-30.0)
            path = transform.map(path)

            pen = QPen(Qt.black, 2 );
            pen.setJoinStyle(Qt.MiterJoin)

            symbol = QwtSymbol()
            symbol.setPen(pen)
            symbol.setBrush(Qt.red)
            symbol.setPath(path)
            symbol.setPinPoint(QPointF(0., 0.))
            symbol.setSize(10, 14)

            # --- Test it within a simple plot ---

            curve = QwtPlotCurve()
            curve_pen = QPen(Qt.blue)
            curve_pen.setStyle(Qt.DotLine)
            curve.setPen(curve_pen)
            curve.setSymbol(symbol)
            x = np.linspace(0, 10, 10)
            curve.setData(x, np.sin(x))

            plot = QwtPlot()
            curve.attach(plot)
            plot.resize(600, 300)
            plot.replot()
            plot.show()

            app.exec_()

        .. image:: /images/symbol_path_example.png

        :param QPainterPath path: Painter path

        .. seealso::

            :py:meth:`path()`, :py:meth:`setSize()`
        """
        self.__data.style = QwtSymbol.Path
        self.__data.path.path = path
        # discard the graphic recorded from the previous path
        self.__data.path.graphic.reset()

    def path(self):
        """
        :return: Painter path for displaying the symbol

        .. seealso::

            :py:meth:`setPath()`
        """
        return self.__data.path.path

    def setPixmap(self, pixmap):
        """
        Set a pixmap as symbol

        :param QPixmap pixmap: Pixmap

        .. seealso::

            :py:meth:`pixmap()`, :py:meth:`setGraphic()`

        .. note::

            The `style()` is set to `QwtSymbol.Pixmap`

        .. note::

            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.Pixmap
        if self.__data.pixmap is None:
            # BUGFIX: the private data may initialize the pixmap holder to
            # None (to avoid constructing a QPixmap before the QApplication
            # exists); create a minimal holder lazily instead of crashing
            class _PixmapHolder(object):
                pixmap = None

            self.__data.pixmap = _PixmapHolder()
        self.__data.pixmap.pixmap = pixmap

    def pixmap(self):
        """
        :return: Assigned pixmap (None if no pixmap was assigned yet)

        .. seealso::

            :py:meth:`setPixmap()`
        """
        # BUGFIX: guard against the lazily-initialized holder being None
        if self.__data.pixmap is None:
            return None
        return self.__data.pixmap.pixmap

    def setGraphic(self, graphic):
        """
        Set a graphic as symbol

        :param qwt.graphic.QwtGraphic graphic: Graphic

        .. seealso::

            :py:meth:`graphic()`, :py:meth:`setPixmap()`

        .. note::

            The `style()` is set to `QwtSymbol.Graphic`

        .. note::

            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.Graphic
        self.__data.graphic.graphic = graphic

    def graphic(self):
        """
        :return: Assigned graphic

        .. seealso::

            :py:meth:`setGraphic()`
        """
        return self.__data.graphic.graphic

    def setSvgDocument(self, svgDocument):
        """
        Set a SVG icon as symbol

        :param svgDocument: SVG icon

        .. seealso::

            :py:meth:`setGraphic()`, :py:meth:`setPixmap()`

        .. note::

            The `style()` is set to `QwtSymbol.SvgDocument`

        .. note::

            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.SvgDocument
        if self.__data.svg.renderer is None:
            self.__data.svg.renderer = QSvgRenderer()
        self.__data.svg.renderer.load(svgDocument)

    def setSize(self, *args):
        """
        Specify the symbol's size

        .. py:method:: setSize(width, [height=-1])
            :noindex:

            :param int width: Width
            :param int height: Height

        .. py:method:: setSize(size)
            :noindex:

            :param QSize size: Size

        .. seealso::

            :py:meth:`size()`
        """
        if len(args) == 2:
            width, height = args
            # a negative height means "square": use the width for both
            if width >= 0 and height < 0:
                height = width
            self.setSize(QSize(width, height))
        elif len(args) == 1:
            if isinstance(args[0], QSize):
                (size,) = args
                if size.isValid() and size != self.__data.size:
                    self.__data.size = size
                    self.invalidateCache()
            else:
                (width,) = args
                self.setSize(width, -1)
        else:
            raise TypeError(
                "%s().setSize() takes 1 or 2 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    def size(self):
        """
        :return: Size

        .. seealso::

            :py:meth:`setSize()`
        """
        return self.__data.size

    def setBrush(self, brush):
        """
        Assign a brush

        The brush is used to draw the interior of the symbol.

        :param QBrush brush: Brush

        .. seealso::

            :py:meth:`brush()`
        """
        if brush != self.__data.brush:
            self.__data.brush = brush
            self.invalidateCache()
            if self.__data.style == QwtSymbol.Path:
                self.__data.path.graphic.reset()

    def brush(self):
        """
        :return: Brush

        .. seealso::

            :py:meth:`setBrush()`
        """
        return self.__data.brush

    def setPen(self, *args):
        """
        Build and/or assign a pen, depending on the arguments.

        .. py:method:: setPen(color, width, style)
            :noindex:

            Build and assign a pen

            In Qt5 the default pen width is 1.0 ( 0.0 in Qt4 ) what makes it
            non cosmetic (see `QPen.isCosmetic()`). This method signature has
            been introduced to hide this incompatibility.

            :param QColor color: Pen color
            :param float width: Pen width
            :param Qt.PenStyle style: Pen style

        .. py:method:: setPen(pen)
            :noindex:

            Assign a pen

            :param QPen pen: New pen

        .. seealso::

            :py:meth:`pen()`, :py:meth:`brush()`
        """
        if len(args) == 3:
            color, width, style = args
            self.setPen(QPen(color, width, style))
        elif len(args) == 1:
            (pen,) = args
            if pen != self.__data.pen:
                self.__data.pen = pen
                self.invalidateCache()
                if self.__data.style == QwtSymbol.Path:
                    self.__data.path.graphic.reset()
        else:
            raise TypeError(
                "%s().setPen() takes 1 or 3 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    def pen(self):
        """
        :return: Pen

        .. seealso::

            :py:meth:`setPen()`, :py:meth:`brush()`
        """
        return self.__data.pen

    def setColor(self, color):
        """
        Set the color of the symbol

        Change the color of the brush for symbol types with a filled area.
        For all other symbol types the color will be assigned to the pen.

        :param QColor color: Color

        .. seealso::

            :py:meth:`setPen()`, :py:meth:`setBrush()`,
            :py:meth:`brush()`, :py:meth:`pen()`
        """
        if self.__data.style in (
            QwtSymbol.Ellipse,
            QwtSymbol.Rect,
            QwtSymbol.Diamond,
            QwtSymbol.Triangle,
            QwtSymbol.UTriangle,
            QwtSymbol.DTriangle,
            QwtSymbol.RTriangle,
            QwtSymbol.LTriangle,
            QwtSymbol.Star2,
            QwtSymbol.Hexagon,
        ):
            # filled symbols: the brush carries the color
            if self.__data.brush.color() != color:
                self.__data.brush.setColor(color)
                self.invalidateCache()
        elif self.__data.style in (
            QwtSymbol.Cross,
            QwtSymbol.XCross,
            QwtSymbol.HLine,
            QwtSymbol.VLine,
            QwtSymbol.Star1,
        ):
            # line-only symbols: the pen carries the color
            if self.__data.pen.color() != color:
                self.__data.pen.setColor(color)
                self.invalidateCache()
        else:
            if self.__data.brush.color() != color or self.__data.pen.color() != color:
                self.invalidateCache()
            self.__data.brush.setColor(color)
            self.__data.pen.setColor(color)

    def setPinPoint(self, pos, enable=True):
        """
        Set and enable a pin point

        The position of a complex symbol is not always aligned to its center
        ( f.e an arrow, where the peak points to a position ). The pin point
        defines the position inside of a Pixmap, Graphic, SvgDocument
        or PainterPath symbol where the represented point has to
        be aligned to.

        :param QPointF pos: Position
        :enable bool enable: En/Disable the pin point alignment

        .. seealso::

            :py:meth:`pinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        if self.__data.pinPoint != pos:
            self.__data.pinPoint = pos
            if self.__data.isPinPointEnabled:
                self.invalidateCache()
        self.setPinPointEnabled(enable)

    def pinPoint(self):
        """
        :return: Pin point

        .. seealso::

            :py:meth:`setPinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        return self.__data.pinPoint

    def setPinPointEnabled(self, on):
        """
        En/Disable the pin point alignment

        :param bool on: Enabled, when on is true

        .. seealso::

            :py:meth:`setPinPoint()`, :py:meth:`isPinPointEnabled()`
        """
        if self.__data.isPinPointEnabled != on:
            self.__data.isPinPointEnabled = on
            self.invalidateCache()

    def isPinPointEnabled(self):
        """
        :return: True, when the pin point translation is enabled

        .. seealso::

            :py:meth:`setPinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        return self.__data.isPinPointEnabled

    def drawSymbols(self, painter, points, numPoints=None):
        """
        Render an array of symbols

        Painting several symbols is more effective than drawing symbols
        one by one, as a couple of layout calculations and setting of pen/brush
        can be done once for the complete array.

        :param QPainter painter: Painter
        :param QPolygonF points: Positions of the symbols in screen coordinates
        """
        # TODO: remove argument numPoints (not necessary in `PythonQwt`)
        if numPoints is not None and numPoints <= 0:
            return
        painter.save()
        self.renderSymbols(painter, points, numPoints)
        painter.restore()

    def drawSymbol(self, painter, point_or_rect):
        """
        Draw the symbol into a rectangle

        The symbol is painted centered and scaled into the target rectangle.
        It is always painted uncached and the pin point is ignored.

        This method is primarily intended for drawing a symbol to the legend.

        :param QPainter painter: Painter
        :param point_or_rect: Position or target rectangle of the symbol in screen coordinates
        :type point_or_rect: QPointF or QPoint or QRectF
        """
        if isinstance(point_or_rect, (QPointF, QPoint)):
            # drawSymbol( QPainter *, const QPointF & )
            self.drawSymbols(painter, [point_or_rect])
            return
        # drawSymbol( QPainter *, const QRectF & )
        rect = point_or_rect
        assert isinstance(rect, QRectF)
        if self.__data.style == QwtSymbol.NoSymbol:
            return
        if self.__data.style == QwtSymbol.Graphic:
            self.__data.graphic.graphic.render(painter, rect, Qt.KeepAspectRatio)
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            self.__data.path.graphic.render(painter, rect, Qt.KeepAspectRatio)
            return
        elif self.__data.style == QwtSymbol.SvgDocument:
            if self.__data.svg.renderer is not None:
                scaledRect = QRectF()
                sz = QSizeF(self.__data.svg.renderer.viewBoxF().size())
                if not sz.isEmpty():
                    sz.scale(rect.size(), Qt.KeepAspectRatio)
                    scaledRect.setSize(sz)
                    scaledRect.moveCenter(rect.center())
                else:
                    scaledRect = rect
                self.__data.svg.renderer.render(painter, scaledRect)
        else:
            br = QRect(self.boundingRect())
            ratio = min([rect.width() / br.width(), rect.height() / br.height()])
            painter.save()
            painter.translate(rect.center())
            painter.scale(ratio, ratio)
            isPinPointEnabled = self.__data.isPinPointEnabled
            self.__data.isPinPointEnabled = False
            pos = QPointF()
            # BUGFIX: renderSymbols() iterates over its *points* argument and
            # requires numPoints to be None; the previous call passed a bare
            # QPointF and numPoints=1, which failed at runtime
            self.renderSymbols(painter, [pos])
            self.__data.isPinPointEnabled = isPinPointEnabled
            painter.restore()

    def renderSymbols(self, painter, points, numPoints=None):
        """
        Render the symbol to series of points

        :param QPainter painter: Painter
        :param point_or_rect: Positions of the symbols
        """
        # TODO: remove argument numPoints (not necessary in `PythonQwt`)
        if numPoints is not None:
            # BUGFIX: was an `assert` wrapped in try/except, which is silently
            # stripped when Python runs with -O; use an explicit check instead
            raise RuntimeError(
                "argument numPoints is not implemented " "in `PythonQwt`"
            )
        if self.__data.style == QwtSymbol.Ellipse:
            qwtDrawEllipseSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Rect:
            qwtDrawRectSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Diamond:
            qwtDrawDiamondSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Cross:
            qwtDrawLineSymbols(
                painter, Qt.Horizontal | Qt.Vertical, points, numPoints, self
            )
        elif self.__data.style == QwtSymbol.XCross:
            qwtDrawXCrossSymbols(painter, points, numPoints, self)
        elif self.__data.style in (QwtSymbol.Triangle, QwtSymbol.UTriangle):
            qwtDrawTriangleSymbols(painter, QwtTriangle.Up, points, numPoints, self)
        elif self.__data.style == QwtSymbol.DTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Down, points, numPoints, self)
        elif self.__data.style == QwtSymbol.RTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Right, points, numPoints, self)
        elif self.__data.style == QwtSymbol.LTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Left, points, numPoints, self)
        elif self.__data.style == QwtSymbol.HLine:
            qwtDrawLineSymbols(painter, Qt.Horizontal, points, numPoints, self)
        elif self.__data.style == QwtSymbol.VLine:
            qwtDrawLineSymbols(painter, Qt.Vertical, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Star1:
            qwtDrawStar1Symbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Star2:
            qwtDrawStar2Symbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Hexagon:
            qwtDrawHexagonSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            qwtDrawGraphicSymbols(
                painter, points, numPoints, self.__data.path.graphic, self
            )
        elif self.__data.style == QwtSymbol.Pixmap:
            qwtDrawPixmapSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Graphic:
            qwtDrawGraphicSymbols(
                painter, points, numPoints, self.__data.graphic.graphic, self
            )
        elif self.__data.style == QwtSymbol.SvgDocument:
            qwtDrawSvgSymbols(
                painter, points, numPoints, self.__data.svg.renderer, self
            )

    def boundingRect(self):
        """
        Calculate the bounding rectangle for a symbol at position (0,0).

        :return: Bounding rectangle
        """
        rect = QRectF()
        pinPointTranslation = False
        if self.__data.style in (QwtSymbol.Ellipse, QwtSymbol.Rect, QwtSymbol.Hexagon):
            pw = 0.0
            if self.__data.pen.style() != Qt.NoPen:
                pw = max([self.__data.pen.widthF(), 1.0])
            # BUGFIX: wrap in QSizeF -- QSize + QSizeF is not a valid
            # operation (and the other branch below already does this)
            rect.setSize(QSizeF(self.__data.size) + QSizeF(pw, pw))
            rect.moveCenter(QPointF(0.0, 0.0))
        elif self.__data.style in (
            QwtSymbol.XCross,
            QwtSymbol.Diamond,
            QwtSymbol.Triangle,
            QwtSymbol.UTriangle,
            QwtSymbol.DTriangle,
            QwtSymbol.RTriangle,
            QwtSymbol.LTriangle,
            QwtSymbol.Star1,
            QwtSymbol.Star2,
        ):
            pw = 0.0
            if self.__data.pen.style() != Qt.NoPen:
                pw = max([self.__data.pen.widthF(), 1.0])
            rect.setSize(QSizeF(self.__data.size) + QSizeF(2 * pw, 2 * pw))
            rect.moveCenter(QPointF(0.0, 0.0))
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            rect = qwtScaleBoundingRect(self.__data.path.graphic, self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.Pixmap:
            if self.__data.size.isEmpty():
                rect.setSize(self.__data.pixmap.pixmap.size())
            else:
                rect.setSize(self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.Graphic:
            rect = qwtScaleBoundingRect(self.__data.graphic.graphic, self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.SvgDocument:
            if self.__data.svg.renderer is not None:
                rect = self.__data.svg.renderer.viewBoxF()
                if self.__data.size.isValid() and not rect.isEmpty():
                    sz = QSizeF(rect.size())
                    sx = self.__data.size.width() / sz.width()
                    sy = self.__data.size.height() / sz.height()
                    transform = QTransform()
                    transform.scale(sx, sy)
                    rect = transform.mapRect(rect)
            pinPointTranslation = True
        else:
            rect.setSize(self.__data.size)
            rect.moveCenter(QPointF(0.0, 0.0))
        if pinPointTranslation:
            pinPoint = QPointF(0.0, 0.0)
            if self.__data.isPinPointEnabled:
                pinPoint = rect.center() - self.__data.pinPoint
            rect.moveCenter(pinPoint)
        r = QRect()
        # BUGFIX: QRect setters require ints; np.floor() returns a float
        r.setLeft(int(np.floor(rect.left())))
        r.setTop(int(np.floor(rect.top())))
        r.setRight(int(np.floor(rect.right())))
        r.setBottom(int(np.floor(rect.bottom())))
        if self.__data.style != QwtSymbol.Pixmap:
            # one extra pixel on each side for anti-aliased edges
            r.adjust(-1, -1, 1, 1)
        return r

    def invalidateCache(self):
        """
        Invalidate the cached symbol pixmap

        The symbol invalidates its cache, whenever an attribute is changed
        that has an effect ob how to display a symbol. In case of derived
        classes with individual styles (>= `QwtSymbol.UserStyle`) it
        might be necessary to call invalidateCache() for attributes
        that are relevant for this style.

        .. seealso::

            :py:meth:`setCachePolicy()`, :py:meth:`drawSymbols()`
        """
        if self.__data.cache.pixmap is not None:
            self.__data.cache.pixmap = QPixmap()

    def setStyle(self, style):
        """
        Specify the symbol style

        :param int style: Style

        .. seealso::

            :py:meth:`style()`
        """
        if self.__data.style != style:
            self.__data.style = style
            self.invalidateCache()

    def style(self):
        """
        :return: Current symbol style

        .. seealso::

            :py:meth:`setStyle()`
        """
        return self.__data.style
from qwt.painter import QwtPainter
from qtpy.QtGui import (
QPainter,
QTransform,
QPixmap,
QPen,
QPolygonF,
QPainterPath,
QBrush,
)
from qtpy.QtCore import QSize, QRect, QPointF, QRectF, QSizeF, Qt, QPoint
from qtpy.QtSvg import QSvgRenderer
import numpy as np
class QwtTriangle(object):
    """Orientation constants for `qwtDrawTriangleSymbols`."""

    # enum Type
    Left = 0
    Right = 1
    Up = 2
    Down = 3
def qwtPathGraphic(path, pen, brush):
    """Record *path*, stroked with *pen* and filled with *brush*, into a new
    `QwtGraphic` and return it."""
    recorder = QwtGraphic()
    recorder.setRenderHint(QwtGraphic.RenderPensUnscaled)
    p = QPainter(recorder)
    p.setPen(pen)
    p.setBrush(brush)
    p.drawPath(path)
    p.end()
    return recorder
def qwtScaleBoundingRect(graphic, size):
    """Return the bounding rectangle of *graphic* scaled to *size*
    (falling back to the graphic's default size when *size* is empty)."""
    target = QSize(size)
    if target.isEmpty():
        target = graphic.defaultSize()
    control = graphic.controlPointRect().size()
    sx = target.width() / control.width() if control.width() > 0.0 else 1.0
    sy = target.height() / control.height() if control.height() > 0.0 else 1.0
    return graphic.scaledBoundingRect(sx, sy)
def qwtDrawPixmapSymbols(painter, points, numPoints, symbol):
    """Draw pixmap symbols at each position, honoring painter scaling and the pin point."""
    target_size = symbol.size()
    if target_size.isEmpty():
        target_size = symbol.pixmap().size()
    world = QTransform(painter.transform())
    if world.isScaling():
        # use the device-space size so the pixmap is scaled only once
        bounds = QRect(0, 0, target_size.width(), target_size.height())
        target_size = world.mapRect(bounds).size()
    pm = QPixmap(symbol.pixmap())
    if pm.size() != target_size:
        pm = pm.scaled(target_size)
    if symbol.isPinPointEnabled():
        anchor = symbol.pinPoint()
    else:
        anchor = QPointF(0.5 * target_size.width(), 0.5 * target_size.height())
    painter.resetTransform()
    for pos in points:
        corner = QPointF(world.map(pos)) - anchor
        QwtPainter.drawPixmap(painter, QRect(corner.toPoint(), pm.size()), pm)
def qwtDrawSvgSymbols(painter, points, numPoints, renderer, symbol):
    """Render an SVG document at each symbol position."""
    if renderer is None or not renderer.isValid():
        return
    viewBox = QRectF(renderer.viewBoxF())
    if viewBox.isEmpty():
        return
    target = QSizeF(symbol.size())
    if not target.isValid():
        target = viewBox.size()
    sx = target.width() / viewBox.width()
    sy = target.height() / viewBox.height()
    anchor = symbol.pinPoint() if symbol.isPinPointEnabled() else QPointF(viewBox.center())
    # offset from the target rect's top-left corner to the anchor point
    dx = sx * (anchor.x() - viewBox.left())
    dy = sy * (anchor.y() - viewBox.top())
    for pos in points:
        renderer.render(
            painter, QRectF(pos.x() - dx, pos.y() - dy, target.width(), target.height())
        )
def qwtDrawGraphicSymbols(painter, points, numPoint, graphic, symbol):
    """Render a `QwtGraphic` at each symbol position, scaled to the symbol size."""
    pointRect = QRectF(graphic.controlPointRect())
    if pointRect.isEmpty():
        return
    sx = sy = 1.0
    sz = symbol.size()
    if sz.isValid():
        sx = sz.width() / pointRect.width()
        sy = sz.height() / pointRect.height()
    anchor = symbol.pinPoint() if symbol.isPinPointEnabled() else QPointF(pointRect.center())
    base = QTransform(painter.transform())
    for pos in points:
        local = QTransform(base)
        local.translate(pos.x(), pos.y())
        local.scale(sx, sy)
        local.translate(-anchor.x(), -anchor.y())
        painter.setTransform(local)
        graphic.render(painter)
    # restore the painter transform after the last symbol
    painter.setTransform(base)
def qwtDrawEllipseSymbols(painter, points, numPoints, symbol):
    """Draw ellipse symbols centered on each position."""
    painter.setBrush(symbol.brush())
    painter.setPen(symbol.pen())
    size = symbol.size()
    w, h = size.width(), size.height()
    half_w, half_h = 0.5 * w, 0.5 * h
    for pos in points:
        painter.drawEllipse(QRectF(pos.x() - half_w, pos.y() - half_h, w, h))
def qwtDrawRectSymbols(painter, points, numPoints, symbol):
    """Draw rectangle symbols centered on each position."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)  # sharp corners
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    painter.setRenderHint(QPainter.Antialiasing, False)
    w, h = size.width(), size.height()
    half_w, half_h = 0.5 * w, 0.5 * h
    for pos in points:
        painter.drawRect(QRectF(pos.x() - half_w, pos.y() - half_h, w, h))
def qwtDrawDiamondSymbols(painter, points, numPoints, symbol):
    """Draw diamond symbols centered on each position."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    for pos in points:
        left = pos.x() - 0.5 * size.width()
        top = pos.y() - 0.5 * size.height()
        right = left + size.width()
        bottom = top + size.height()
        corners = [
            QPointF(pos.x(), top),
            QPointF(left, pos.y()),
            QPointF(pos.x(), bottom),
            QPointF(right, pos.y()),
        ]
        painter.drawPolygon(QPolygonF(corners))
def qwtDrawTriangleSymbols(painter, type, points, numPoint, symbol):
    """Draw triangle symbols with the given `QwtTriangle` orientation."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    half_w = 0.5 * size.width()
    half_h = 0.5 * size.height()
    for pos in points:
        cx, cy = pos.x(), pos.y()
        left, top = cx - half_w, cy - half_h
        right, bottom = left + size.width(), top + size.height()
        if type == QwtTriangle.Left:
            corners = [QPointF(right, top), QPointF(left, cy), QPointF(right, bottom)]
        elif type == QwtTriangle.Right:
            corners = [QPointF(left, top), QPointF(right, cy), QPointF(left, bottom)]
        elif type == QwtTriangle.Up:
            corners = [QPointF(left, bottom), QPointF(cx, top), QPointF(right, bottom)]
        elif type == QwtTriangle.Down:
            corners = [QPointF(left, top), QPointF(cx, bottom), QPointF(right, top)]
        else:
            raise TypeError("Unknown triangle type %s" % type)
        painter.drawPolygon(QPolygonF(corners))
def qwtDrawLineSymbols(painter, orientations, points, numPoints, symbol):
    """Draw horizontal and/or vertical line symbols, as selected by *orientations*."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        pen.setCapStyle(Qt.FlatCap)
    painter.setPen(pen)
    painter.setRenderHint(QPainter.Antialiasing, False)
    w, h = size.width(), size.height()
    half_w, half_h = 0.5 * w, 0.5 * h
    for pos in points:
        if orientations & Qt.Horizontal:
            left = round(pos.x()) - half_w
            cy = round(pos.y())
            painter.drawLine(left, cy, left + w, cy)
        if orientations & Qt.Vertical:
            cx = round(pos.x())
            top = round(pos.y()) - half_h
            painter.drawLine(cx, top, cx, top + h)
def qwtDrawXCrossSymbols(painter, points, numPoints, symbol):
    """Draw diagonal cross (X) symbols centered on each position."""
    size = symbol.size()
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        pen.setCapStyle(Qt.FlatCap)
    painter.setPen(pen)
    w, h = size.width(), size.height()
    half_w, half_h = 0.5 * w, 0.5 * h
    for pos in points:
        left = pos.x() - half_w
        top = pos.y() - half_h
        right = left + w
        bottom = top + h
        painter.drawLine(left, top, right, bottom)
        painter.drawLine(right, top, left, bottom)
def qwtDrawStar1Symbols(painter, points, numPoints, symbol):
    """Draw 'Star1' symbols: a diagonal cross (X) overlaid on a straight cross (+)."""
    sz = symbol.size()
    painter.setPen(symbol.pen())
    # Inset for the diagonal end points so that all eight arms have equal length
    inset_factor = 1.0 - np.sqrt(0.5)
    bounds = QRectF(0, 0, sz.width(), sz.height())
    inset = bounds.width() / 2.0 * inset_factor
    for pos in points:
        bounds.moveCenter(pos.toPoint())
        center = QPointF(bounds.center())
        left = bounds.left() + inset
        right = bounds.right() - inset
        top = bounds.top() + inset
        bottom = bounds.bottom() - inset
        painter.drawLine(left, top, right, bottom)
        painter.drawLine(left, bottom, right, top)
        painter.drawLine(center.x(), bounds.top(), center.x(), bounds.bottom())
        painter.drawLine(bounds.left(), center.y(), bounds.right(), center.y())
def qwtDrawStar2Symbols(painter, points, numPoints, symbol):
    """Draw 'Star2' symbols: filled six-pointed stars."""
    pen = QPen(symbol.pen())
    if pen.width() > 1:
        pen.setCapStyle(Qt.FlatCap)
        pen.setJoinStyle(Qt.MiterJoin)
    painter.setPen(pen)
    painter.setBrush(symbol.brush())
    cos30 = np.cos(30 * np.pi / 180.0)
    dy = 0.25 * symbol.size().height()
    dx = 0.5 * symbol.size().width() * cos30 / 3.0
    # Vertex offsets in (dx, dy) units relative to the star's top-left corner,
    # walking the outline clockwise from the top point
    vertex_units = (
        (3, 0), (4, 1), (6, 1), (5, 2), (6, 3), (4, 3),
        (3, 4), (2, 3), (0, 3), (1, 2), (0, 1), (2, 1),
    )
    for pos in points:
        x0 = pos.x() - 3 * dx
        y0 = pos.y() - 2 * dy
        outline = [QPointF(x0 + i * dx, y0 + j * dy) for i, j in vertex_units]
        painter.drawPolygon(QPolygonF(outline))
def qwtDrawHexagonSymbols(painter, points, numPoints, symbol):
    """Draw hexagon symbols centered on each position."""
    painter.setBrush(symbol.brush())
    painter.setPen(symbol.pen())
    cos30 = np.cos(30 * np.pi / 180.0)
    dx = 0.5 * (symbol.size().width() - cos30)
    dy = 0.25 * symbol.size().height()
    # Corner offsets in (dx, dy) units from the hexagon's top-left corner
    corner_units = ((1, 0), (2, 1), (2, 3), (1, 4), (0, 3), (0, 1))
    for pos in points:
        x0 = pos.x() - dx
        y0 = pos.y() - 2 * dy
        corners = [QPointF(x0 + i * dx, y0 + j * dy) for i, j in corner_units]
        painter.drawPolygon(QPolygonF(corners))
class QwtSymbol_PrivateData(object):
    """Internal state of a `QwtSymbol`.

    :param int st: Symbol style
    :param QBrush br: Brush used to fill the symbol interior
    :param QPen pn: Pen used for the symbol outline
    :param QSize sz: Symbol size
    """

    def __init__(self, st, br, pn, sz):
        self.style = st
        self.size = sz
        self.brush = br
        self.pen = pn
        self.isPinPointEnabled = False
        self.pinPoint = QPointF()

        class Path(object):
            def __init__(self):
                self.path = QPainterPath()
                self.graphic = QwtGraphic()

        self.path = Path()

        class Pixmap(object):
            def __init__(self):
                # Deliberately not a QPixmap(): constructing a QPixmap requires
                # a running QGuiApplication, which may not exist yet when the
                # symbol is created. `QwtSymbol.setPixmap()` stores the actual
                # pixmap here.
                self.pixmap = None

        # BUGFIX: this attribute used to be initialized to None, which made
        # `QwtSymbol.setPixmap()` / `QwtSymbol.pixmap()` raise AttributeError
        # ("'NoneType' object has no attribute 'pixmap'"). Keep a real holder
        # object with a lazily-assigned inner pixmap instead.
        self.pixmap = Pixmap()

        class Graphic(object):
            def __init__(self):
                self.graphic = QwtGraphic()

        self.graphic = Graphic()

        class SVG(object):
            def __init__(self):
                self.renderer = QSvgRenderer()

        self.svg = SVG()

        class PaintCache(object):
            def __init__(self):
                self.policy = 0
                self.pixmap = None  # lazily-created QPixmap cache

        self.cache = PaintCache()
class QwtSymbol(object):
    """
    A class for drawing symbols
    Symbol styles:
    * `QwtSymbol.NoSymbol`: No Style. The symbol cannot be drawn.
    * `QwtSymbol.Ellipse`: Ellipse or circle
    * `QwtSymbol.Rect`: Rectangle
    * `QwtSymbol.Diamond`: Diamond
    * `QwtSymbol.Triangle`: Triangle pointing upwards
    * `QwtSymbol.DTriangle`: Triangle pointing downwards
    * `QwtSymbol.UTriangle`: Triangle pointing upwards
    * `QwtSymbol.LTriangle`: Triangle pointing left
    * `QwtSymbol.RTriangle`: Triangle pointing right
    * `QwtSymbol.Cross`: Cross (+)
    * `QwtSymbol.XCross`: Diagonal cross (X)
    * `QwtSymbol.HLine`: Horizontal line
    * `QwtSymbol.VLine`: Vertical line
    * `QwtSymbol.Star1`: X combined with +
    * `QwtSymbol.Star2`: Six-pointed star
    * `QwtSymbol.Hexagon`: Hexagon
    * `QwtSymbol.Path`: The symbol is represented by a painter path, where
      the origin (0, 0) of the path coordinate system is mapped to the
      position of the symbol
      ..seealso::
          :py:meth:`setPath()`, :py:meth:`path()`
    * `QwtSymbol.Pixmap`: The symbol is represented by a pixmap.
      The pixmap is centered or aligned to its pin point.
      ..seealso::
          :py:meth:`setPinPoint()`
    * `QwtSymbol.Graphic`: The symbol is represented by a graphic.
      The graphic is centered or aligned to its pin point.
      ..seealso::
          :py:meth:`setPinPoint()`
    * `QwtSymbol.SvgDocument`: The symbol is represented by a SVG graphic.
      The graphic is centered or aligned to its pin point.
      ..seealso::
          :py:meth:`setPinPoint()`
    * `QwtSymbol.UserStyle`: Styles >= `QwtSymbol.UserStyle` are reserved
      for derived classes of `QwtSymbol` that overload `drawSymbols()` with
      additional application specific symbol types.
    Cache policies:
    Depending on the render engine and the complexity of the
    symbol shape it might be faster to render the symbol
    to a pixmap and to paint this pixmap.
    F.e. the raster paint engine is a pure software renderer
    where in cache mode a draw operation usually ends in
    raster operation with the the backing store, that are usually
    faster, than the algorithms for rendering polygons.
    But the opposite can be expected for graphic pipelines
    that can make use of hardware acceleration.
    The default setting is AutoCache
    ..seealso::
        :py:meth:`setCachePolicy()`, :py:meth:`cachePolicy()`
    .. note::
        The policy has no effect, when the symbol is painted
        to a vector graphics format (PDF, SVG).
    .. warning::
        Since Qt 4.8 raster is the default backend on X11
    Valid cache policies:
    * `QwtSymbol.NoCache`: Don't use a pixmap cache
    * `QwtSymbol.Cache`: Always use a pixmap cache
    * `QwtSymbol.AutoCache`: Use a cache when the symbol is rendered
      with the software renderer (`QPaintEngine.Raster`)
    .. py:class:: QwtSymbol([style=QwtSymbol.NoSymbol])
        The symbol is constructed with gray interior,
        black outline with zero width, no size and style 'NoSymbol'.
        :param int style: Symbol Style
    .. py:class:: QwtSymbol(style, brush, pen, size)
        :noindex:
        :param int style: Symbol Style
        :param QBrush brush: Brush to fill the interior
        :param QPen pen: Outline pen
        :param QSize size: Size
    .. py:class:: QwtSymbol(path, brush, pen)
        :noindex:
        :param QPainterPath path: Painter path
        :param QBrush brush: Brush to fill the interior
        :param QPen pen: Outline pen
    .. seealso::
        :py:meth:`setPath()`, :py:meth:`setBrush()`,
        :py:meth:`setPen()`, :py:meth:`setSize()`
    """

    # enum Style
    Style = int
    NoSymbol = -1
    (
        Ellipse,
        Rect,
        Diamond,
        Triangle,
        DTriangle,
        UTriangle,
        LTriangle,
        RTriangle,
        Cross,
        XCross,
        HLine,
        VLine,
        Star1,
        Star2,
        Hexagon,
        Path,
        Pixmap,
        Graphic,
        SvgDocument,
    ) = list(range(19))
    UserStyle = 1000

    # enum CachePolicy
    NoCache, Cache, AutoCache = list(range(3))

    def __init__(self, *args):
        if len(args) in (0, 1):
            if args:
                (style,) = args
            else:
                style = QwtSymbol.NoSymbol
            self.__data = QwtSymbol_PrivateData(
                style, QBrush(Qt.gray), QPen(Qt.black, 0), QSize()
            )
        elif len(args) == 4:
            style, brush, pen, size = args
            self.__data = QwtSymbol_PrivateData(style, brush, pen, size)
        elif len(args) == 3:
            path, brush, pen = args
            self.__data = QwtSymbol_PrivateData(QwtSymbol.Path, brush, pen, QSize())
            self.setPath(path)
        else:
            # The zero-argument form is accepted above, so mention it here.
            raise TypeError(
                "%s() takes 0, 1, 3, or 4 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    @classmethod
    def make(
        cls,
        style=None,
        brush=None,
        pen=None,
        size=None,
        path=None,
        pixmap=None,
        graphic=None,
        svgdocument=None,
        pinpoint=None,
    ):
        """
        Create and setup a new `QwtSymbol` object (convenience function).
        :param style: Symbol Style
        :type style: int or None
        :param brush: Brush to fill the interior
        :type brush: QBrush or None
        :param pen: Outline pen
        :type pen: QPen or None
        :param size: Size
        :type size: QSize or tuple or None
        :param path: Painter path
        :type path: QPainterPath or None
        :param pixmap: Pixmap as symbol
        :type pixmap: QPixmap or None
        :param graphic: Graphic
        :type graphic: qwt.graphic.QwtGraphic or None
        :param svgdocument: SVG icon as symbol
        :param pinpoint: Pin point position
        :type pinpoint: QPointF or None
        .. seealso::
            :py:meth:`setPixmap()`, :py:meth:`setGraphic()`, :py:meth:`setPath()`
        """
        style = QwtSymbol.NoSymbol if style is None else style
        brush = QBrush(Qt.gray) if brush is None else QBrush(brush)
        pen = QPen(Qt.black, 0) if pen is None else QPen(pen)
        size = QSize() if size is None else size
        if not isinstance(size, QSize):
            # Accept a (width, height) tuple as a convenience.
            if isinstance(size, tuple) and len(size) == 2:
                size = QSize(size[0], size[1])
            else:
                raise TypeError("Invalid size %r" % size)
        item = cls(style, brush, pen, size)
        if path is not None:
            item.setPath(path)
        elif pixmap is not None:
            item.setPixmap(pixmap)
        elif graphic is not None:
            item.setGraphic(graphic)
        elif svgdocument is not None:
            item.setSvgDocument(svgdocument)
        if pinpoint is not None:
            item.setPinPoint(pinpoint)
        return item

    def setCachePolicy(self, policy):
        """
        Change the cache policy
        The default policy is AutoCache
        :param int policy: Cache policy
        .. seealso::
            :py:meth:`cachePolicy()`
        """
        if self.__data.cache.policy != policy:
            self.__data.cache.policy = policy
            self.invalidateCache()

    def cachePolicy(self):
        """
        :return: Cache policy
        .. seealso::
            :py:meth:`setCachePolicy()`
        """
        return self.__data.cache.policy

    def setPath(self, path):
        """
        Set a painter path as symbol
        The symbol is represented by a painter path, where the
        origin (0, 0) of the path coordinate system is mapped to
        the position of the symbol.
        When the symbol has valid size the painter path gets scaled
        to fit into the size. Otherwise the symbol size depends on
        the bounding rectangle of the path.
        The following code defines a symbol drawing an arrow::
            from qtpy.QtGui import QApplication, QPen, QPainterPath, QTransform
            from qtpy.QtCore import Qt, QPointF
            from qwt import QwtPlot, QwtPlotCurve, QwtSymbol
            import numpy as np
            app = QApplication([])
            # --- Construct custom symbol ---
            path = QPainterPath()
            path.moveTo(0, 8)
            path.lineTo(0, 5)
            path.lineTo(-3, 5)
            path.lineTo(0, 0)
            path.lineTo(3, 5)
            path.lineTo(0, 5)
            transform = QTransform()
            transform.rotate(-30.0)
            path = transform.map(path)
            pen = QPen(Qt.black, 2 );
            pen.setJoinStyle(Qt.MiterJoin)
            symbol = QwtSymbol()
            symbol.setPen(pen)
            symbol.setBrush(Qt.red)
            symbol.setPath(path)
            symbol.setPinPoint(QPointF(0., 0.))
            symbol.setSize(10, 14)
            # --- Test it within a simple plot ---
            curve = QwtPlotCurve()
            curve_pen = QPen(Qt.blue)
            curve_pen.setStyle(Qt.DotLine)
            curve.setPen(curve_pen)
            curve.setSymbol(symbol)
            x = np.linspace(0, 10, 10)
            curve.setData(x, np.sin(x))
            plot = QwtPlot()
            curve.attach(plot)
            plot.resize(600, 300)
            plot.replot()
            plot.show()
            app.exec_()
        .. image:: /images/symbol_path_example.png
        :param QPainterPath path: Painter path
        .. seealso::
            :py:meth:`path()`, :py:meth:`setSize()`
        """
        self.__data.style = QwtSymbol.Path
        self.__data.path.path = path
        # Drop the cached graphic; it is rebuilt lazily on the next draw.
        self.__data.path.graphic.reset()

    def path(self):
        """
        :return: Painter path for displaying the symbol
        .. seealso::
            :py:meth:`setPath()`
        """
        return self.__data.path.path

    def setPixmap(self, pixmap):
        """
        Set a pixmap as symbol
        :param QPixmap pixmap: Pixmap
        .. seealso::
            :py:meth:`pixmap()`, :py:meth:`setGraphic()`
        .. note::
            The `style()` is set to `QwtSymbol.Pixmap`
        .. note::
            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.Pixmap
        if self.__data.pixmap is None:
            # The pixmap holder is initialized to None in
            # QwtSymbol_PrivateData; assigning `.pixmap` on None would
            # raise AttributeError, so create the holder lazily here.
            from types import SimpleNamespace

            self.__data.pixmap = SimpleNamespace(pixmap=None)
        self.__data.pixmap.pixmap = pixmap

    def pixmap(self):
        """
        :return: Assigned pixmap
        .. seealso::
            :py:meth:`setPixmap()`
        """
        if self.__data.pixmap is None:
            # No pixmap was ever assigned; return a null pixmap instead of
            # raising AttributeError on the None holder.
            return QPixmap()
        return self.__data.pixmap.pixmap

    def setGraphic(self, graphic):
        """
        Set a graphic as symbol
        :param qwt.graphic.QwtGraphic graphic: Graphic
        .. seealso::
            :py:meth:`graphic()`, :py:meth:`setPixmap()`
        .. note::
            The `style()` is set to `QwtSymbol.Graphic`
        .. note::
            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.Graphic
        self.__data.graphic.graphic = graphic

    def graphic(self):
        """
        :return: Assigned graphic
        .. seealso::
            :py:meth:`setGraphic()`
        """
        return self.__data.graphic.graphic

    def setSvgDocument(self, svgDocument):
        """
        Set a SVG icon as symbol
        :param svgDocument: SVG icon
        .. seealso::
            :py:meth:`setGraphic()`, :py:meth:`setPixmap()`
        .. note::
            The `style()` is set to `QwtSymbol.SvgDocument`
        .. note::
            `brush()` and `pen()` have no effect
        """
        self.__data.style = QwtSymbol.SvgDocument
        if self.__data.svg.renderer is None:
            self.__data.svg.renderer = QSvgRenderer()
        self.__data.svg.renderer.load(svgDocument)

    def setSize(self, *args):
        """
        Specify the symbol's size
        .. py:method:: setSize(width, [height=-1])
            :noindex:
            :param int width: Width
            :param int height: Height
        .. py:method:: setSize(size)
            :noindex:
            :param QSize size: Size
        .. seealso::
            :py:meth:`size()`
        """
        if len(args) == 2:
            width, height = args
            # A negative height means "square": use the width for both.
            if width >= 0 and height < 0:
                height = width
            self.setSize(QSize(width, height))
        elif len(args) == 1:
            if isinstance(args[0], QSize):
                (size,) = args
                if size.isValid() and size != self.__data.size:
                    self.__data.size = size
                    self.invalidateCache()
            else:
                (width,) = args
                self.setSize(width, -1)
        else:
            raise TypeError(
                "%s().setSize() takes 1 or 2 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    def size(self):
        """
        :return: Size
        .. seealso::
            :py:meth:`setSize()`
        """
        return self.__data.size

    def setBrush(self, brush):
        """
        Assign a brush
        The brush is used to draw the interior of the symbol.
        :param QBrush brush: Brush
        .. seealso::
            :py:meth:`brush()`
        """
        if brush != self.__data.brush:
            self.__data.brush = brush
            self.invalidateCache()
            if self.__data.style == QwtSymbol.Path:
                # The path graphic bakes in the brush; force a rebuild.
                self.__data.path.graphic.reset()

    def brush(self):
        """
        :return: Brush
        .. seealso::
            :py:meth:`setBrush()`
        """
        return self.__data.brush

    def setPen(self, *args):
        """
        Build and/or assign a pen, depending on the arguments.
        .. py:method:: setPen(color, width, style)
            :noindex:
            Build and assign a pen
            In Qt5 the default pen width is 1.0 ( 0.0 in Qt4 ) what makes it
            non cosmetic (see `QPen.isCosmetic()`). This method signature has
            been introduced to hide this incompatibility.
            :param QColor color: Pen color
            :param float width: Pen width
            :param Qt.PenStyle style: Pen style
        .. py:method:: setPen(pen)
            :noindex:
            Assign a pen
            :param QPen pen: New pen
        .. seealso::
            :py:meth:`pen()`, :py:meth:`brush()`
        """
        if len(args) == 3:
            color, width, style = args
            self.setPen(QPen(color, width, style))
        elif len(args) == 1:
            (pen,) = args
            if pen != self.__data.pen:
                self.__data.pen = pen
                self.invalidateCache()
                if self.__data.style == QwtSymbol.Path:
                    # The path graphic bakes in the pen; force a rebuild.
                    self.__data.path.graphic.reset()
        else:
            raise TypeError(
                "%s().setPen() takes 1 or 3 argument(s) (%s given)"
                % (self.__class__.__name__, len(args))
            )

    def pen(self):
        """
        :return: Pen
        .. seealso::
            :py:meth:`setPen()`, :py:meth:`brush()`
        """
        return self.__data.pen

    def setColor(self, color):
        """
        Set the color of the symbol
        Change the color of the brush for symbol types with a filled area.
        For all other symbol types the color will be assigned to the pen.
        :param QColor color: Color
        .. seealso::
            :py:meth:`setPen()`, :py:meth:`setBrush()`,
            :py:meth:`brush()`, :py:meth:`pen()`
        """
        if self.__data.style in (
            QwtSymbol.Ellipse,
            QwtSymbol.Rect,
            QwtSymbol.Diamond,
            QwtSymbol.Triangle,
            QwtSymbol.UTriangle,
            QwtSymbol.DTriangle,
            QwtSymbol.RTriangle,
            QwtSymbol.LTriangle,
            QwtSymbol.Star2,
            QwtSymbol.Hexagon,
        ):
            # Filled shapes: the color goes to the brush.
            if self.__data.brush.color() != color:
                self.__data.brush.setColor(color)
                self.invalidateCache()
        elif self.__data.style in (
            QwtSymbol.Cross,
            QwtSymbol.XCross,
            QwtSymbol.HLine,
            QwtSymbol.VLine,
            QwtSymbol.Star1,
        ):
            # Line-only shapes: the color goes to the pen.
            if self.__data.pen.color() != color:
                self.__data.pen.setColor(color)
                self.invalidateCache()
        else:
            # Other styles: apply the color to both brush and pen.
            if self.__data.brush.color() != color or self.__data.pen.color() != color:
                self.invalidateCache()
            self.__data.brush.setColor(color)
            self.__data.pen.setColor(color)

    def setPinPoint(self, pos, enable=True):
        """
        Set and enable a pin point
        The position of a complex symbol is not always aligned to its center
        ( f.e an arrow, where the peak points to a position ). The pin point
        defines the position inside of a Pixmap, Graphic, SvgDocument
        or PainterPath symbol where the represented point has to
        be aligned to.
        :param QPointF pos: Position
        :param bool enable: En/Disable the pin point alignment
        .. seealso::
            :py:meth:`pinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        if self.__data.pinPoint != pos:
            self.__data.pinPoint = pos
            if self.__data.isPinPointEnabled:
                self.invalidateCache()
        self.setPinPointEnabled(enable)

    def pinPoint(self):
        """
        :return: Pin point
        .. seealso::
            :py:meth:`setPinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        return self.__data.pinPoint

    def setPinPointEnabled(self, on):
        """
        En/Disable the pin point alignment
        :param bool on: Enabled, when on is true
        .. seealso::
            :py:meth:`setPinPoint()`, :py:meth:`isPinPointEnabled()`
        """
        if self.__data.isPinPointEnabled != on:
            self.__data.isPinPointEnabled = on
            self.invalidateCache()

    def isPinPointEnabled(self):
        """
        :return: True, when the pin point translation is enabled
        .. seealso::
            :py:meth:`setPinPoint()`, :py:meth:`setPinPointEnabled()`
        """
        return self.__data.isPinPointEnabled

    def drawSymbols(self, painter, points, numPoints=None):
        """
        Render an array of symbols
        Painting several symbols is more effective than drawing symbols
        one by one, as a couple of layout calculations and setting of pen/brush
        can be done once for the complete array.
        :param QPainter painter: Painter
        :param QPolygonF points: Positions of the symbols in screen coordinates
        """
        # TODO: remove argument numPoints (not necessary in `PythonQwt`)
        if numPoints is not None and numPoints <= 0:
            return
        painter.save()
        self.renderSymbols(painter, points, numPoints)
        painter.restore()

    def drawSymbol(self, painter, point_or_rect):
        """
        Draw the symbol into a rectangle
        The symbol is painted centered and scaled into the target rectangle.
        It is always painted uncached and the pin point is ignored.
        This method is primarily intended for drawing a symbol to the legend.
        :param QPainter painter: Painter
        :param point_or_rect: Position or target rectangle of the symbol in screen coordinates
        :type point_or_rect: QPointF or QPoint or QRectF
        """
        if isinstance(point_or_rect, (QPointF, QPoint)):
            # drawSymbol( QPainter *, const QPointF & )
            self.drawSymbols(painter, [point_or_rect])
            return
        # drawSymbol( QPainter *, const QRectF & )
        rect = point_or_rect
        assert isinstance(rect, QRectF)
        if self.__data.style == QwtSymbol.NoSymbol:
            return
        if self.__data.style == QwtSymbol.Graphic:
            self.__data.graphic.graphic.render(painter, rect, Qt.KeepAspectRatio)
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            self.__data.path.graphic.render(painter, rect, Qt.KeepAspectRatio)
            return
        elif self.__data.style == QwtSymbol.SvgDocument:
            if self.__data.svg.renderer is not None:
                scaledRect = QRectF()
                sz = QSizeF(self.__data.svg.renderer.viewBoxF().size())
                if not sz.isEmpty():
                    sz.scale(rect.size(), Qt.KeepAspectRatio)
                    scaledRect.setSize(sz)
                    scaledRect.moveCenter(rect.center())
                else:
                    scaledRect = rect
                self.__data.svg.renderer.render(painter, scaledRect)
        else:
            br = QRect(self.boundingRect())
            ratio = min([rect.width() / br.width(), rect.height() / br.height()])
            painter.save()
            painter.translate(rect.center())
            painter.scale(ratio, ratio)
            # Temporarily disable pin-point alignment: the legend icon is
            # always centered.
            isPinPointEnabled = self.__data.isPinPointEnabled
            self.__data.isPinPointEnabled = False
            pos = QPointF()
            # Pass a single-element list: the render helpers iterate over
            # `points`, and renderSymbols() rejects a non-None numPoints
            # argument (the previous call `renderSymbols(painter, pos, 1)`
            # always raised RuntimeError).
            self.renderSymbols(painter, [pos])
            self.__data.isPinPointEnabled = isPinPointEnabled
            painter.restore()

    def renderSymbols(self, painter, points, numPoints=None):
        """
        Render the symbol to series of points
        :param QPainter painter: Painter
        :param points: Positions of the symbols
        """
        # TODO: remove argument numPoints (not necessary in `PythonQwt`)
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # which would silently accept the unsupported argument.
        if numPoints is not None:
            raise RuntimeError(
                "argument numPoints is not implemented in `PythonQwt`"
            )
        if self.__data.style == QwtSymbol.Ellipse:
            qwtDrawEllipseSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Rect:
            qwtDrawRectSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Diamond:
            qwtDrawDiamondSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Cross:
            qwtDrawLineSymbols(
                painter, Qt.Horizontal | Qt.Vertical, points, numPoints, self
            )
        elif self.__data.style == QwtSymbol.XCross:
            qwtDrawXCrossSymbols(painter, points, numPoints, self)
        elif self.__data.style in (QwtSymbol.Triangle, QwtSymbol.UTriangle):
            qwtDrawTriangleSymbols(painter, QwtTriangle.Up, points, numPoints, self)
        elif self.__data.style == QwtSymbol.DTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Down, points, numPoints, self)
        elif self.__data.style == QwtSymbol.RTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Right, points, numPoints, self)
        elif self.__data.style == QwtSymbol.LTriangle:
            qwtDrawTriangleSymbols(painter, QwtTriangle.Left, points, numPoints, self)
        elif self.__data.style == QwtSymbol.HLine:
            qwtDrawLineSymbols(painter, Qt.Horizontal, points, numPoints, self)
        elif self.__data.style == QwtSymbol.VLine:
            qwtDrawLineSymbols(painter, Qt.Vertical, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Star1:
            qwtDrawStar1Symbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Star2:
            qwtDrawStar2Symbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Hexagon:
            qwtDrawHexagonSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            qwtDrawGraphicSymbols(
                painter, points, numPoints, self.__data.path.graphic, self
            )
        elif self.__data.style == QwtSymbol.Pixmap:
            qwtDrawPixmapSymbols(painter, points, numPoints, self)
        elif self.__data.style == QwtSymbol.Graphic:
            qwtDrawGraphicSymbols(
                painter, points, numPoints, self.__data.graphic.graphic, self
            )
        elif self.__data.style == QwtSymbol.SvgDocument:
            qwtDrawSvgSymbols(
                painter, points, numPoints, self.__data.svg.renderer, self
            )

    def boundingRect(self):
        """
        Calculate the bounding rectangle for a symbol at position (0,0).
        :return: Bounding rectangle
        """
        rect = QRectF()
        pinPointTranslation = False
        if self.__data.style in (QwtSymbol.Ellipse, QwtSymbol.Rect, QwtSymbol.Hexagon):
            pw = 0.0
            if self.__data.pen.style() != Qt.NoPen:
                pw = max([self.__data.pen.widthF(), 1.0])
            rect.setSize(self.__data.size + QSizeF(pw, pw))
            rect.moveCenter(QPointF(0.0, 0.0))
        elif self.__data.style in (
            QwtSymbol.XCross,
            QwtSymbol.Diamond,
            QwtSymbol.Triangle,
            QwtSymbol.UTriangle,
            QwtSymbol.DTriangle,
            QwtSymbol.RTriangle,
            QwtSymbol.LTriangle,
            QwtSymbol.Star1,
            QwtSymbol.Star2,
        ):
            pw = 0.0
            if self.__data.pen.style() != Qt.NoPen:
                pw = max([self.__data.pen.widthF(), 1.0])
            rect.setSize(QSizeF(self.__data.size) + QSizeF(2 * pw, 2 * pw))
            rect.moveCenter(QPointF(0.0, 0.0))
        elif self.__data.style == QwtSymbol.Path:
            if self.__data.path.graphic.isNull():
                self.__data.path.graphic = qwtPathGraphic(
                    self.__data.path.path, self.__data.pen, self.__data.brush
                )
            rect = qwtScaleBoundingRect(self.__data.path.graphic, self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.Pixmap:
            if self.__data.size.isEmpty():
                rect.setSize(self.__data.pixmap.pixmap.size())
            else:
                rect.setSize(self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.Graphic:
            rect = qwtScaleBoundingRect(self.__data.graphic.graphic, self.__data.size)
            pinPointTranslation = True
        elif self.__data.style == QwtSymbol.SvgDocument:
            if self.__data.svg.renderer is not None:
                rect = self.__data.svg.renderer.viewBoxF()
            if self.__data.size.isValid() and not rect.isEmpty():
                sz = QSizeF(rect.size())
                sx = self.__data.size.width() / sz.width()
                sy = self.__data.size.height() / sz.height()
                transform = QTransform()
                transform.scale(sx, sy)
                rect = transform.mapRect(rect)
            pinPointTranslation = True
        else:
            rect.setSize(self.__data.size)
            rect.moveCenter(QPointF(0.0, 0.0))
        if pinPointTranslation:
            pinPoint = QPointF(0.0, 0.0)
            if self.__data.isPinPointEnabled:
                pinPoint = rect.center() - self.__data.pinPoint
            rect.moveCenter(pinPoint)
        r = QRect()
        # Round outward: floor the left/top edge and ceil the right/bottom
        # edge so the integer rectangle fully encloses the floating-point
        # bounds (flooring right/bottom clipped up to one pixel of the
        # symbol, as in Qwt's C++ implementation which uses qCeil here).
        r.setLeft(np.floor(rect.left()))
        r.setTop(np.floor(rect.top()))
        r.setRight(np.ceil(rect.right()))
        r.setBottom(np.ceil(rect.bottom()))
        if self.__data.style != QwtSymbol.Pixmap:
            # Expand by one pixel for anti-aliased edges.
            r.adjust(-1, -1, 1, 1)
        return r

    def invalidateCache(self):
        """
        Invalidate the cached symbol pixmap
        The symbol invalidates its cache, whenever an attribute is changed
        that has an effect ob how to display a symbol. In case of derived
        classes with individual styles (>= `QwtSymbol.UserStyle`) it
        might be necessary to call invalidateCache() for attributes
        that are relevant for this style.
        .. seealso::
            :py:meth:`setCachePolicy()`, :py:meth:`drawSymbols()`
        """
        if self.__data.cache.pixmap is not None:
            self.__data.cache.pixmap = QPixmap()

    def setStyle(self, style):
        """
        Specify the symbol style
        :param int style: Style
        .. seealso::
            :py:meth:`style()`
        """
        if self.__data.style != style:
            self.__data.style = style
            self.invalidateCache()

    def style(self):
        """
        :return: Current symbol style
        .. seealso::
            :py:meth:`setStyle()`
        """
        return self.__data.style
import os
import platform
import shutil
import sys
import tempfile
import uuid
import pytest
import host_tools.cargo_build as build_tools
import host_tools.network as net_tools
import host_tools.proc as proc
from framework.artifacts import ArtifactCollection
import framework.utils as utils
from framework.microvm import Microvm
from framework.s3fetcher import MicrovmImageS3Fetcher
from framework.scheduler import PytestScheduler
SPEC_S3_BUCKET = 'spec.ccfc.min'
"""The s3 bucket that holds global Firecracker specifications."""
DEFAULT_TEST_IMAGES_S3_BUCKET = 'spec.ccfc.min'
"""The default s3 bucket that holds Firecracker microvm test images."""
ENV_TEST_IMAGES_S3_BUCKET = 'TEST_MICROVM_IMAGES_S3_BUCKET'
"""Environment variable for configuring the test microvm s3 bucket.
If variable exists in `os.environ`, its value will be used as the s3 bucket
for microvm test images.
"""
SCRIPT_FOLDER = os.path.dirname(os.path.realpath(__file__))
# Fail fast when the interpreter is too old for the language features used
# throughout this test suite.
if sys.version_info < (3, 6):
    raise SystemError("This codebase requires Python 3.6 or above.")
# Many tests create system-level resources (network devices, cgroups, ...),
# so the whole session must run with root privileges.
if os.geteuid() != 0:
    raise PermissionError("Test session needs to be run as root.")
# The ignore list depends on the host CPU vendor: style tests run only on
# AMD hosts, while the snapshot performance test is skipped there.
if "AMD" in proc.proc_type():
    collect_ignore = [os.path.join(
        SCRIPT_FOLDER, "integration_tests/performance/test_snapshot_perf.py")]
else:
    collect_ignore = [os.path.join(SCRIPT_FOLDER, "integration_tests/style")]
def _test_images_s3_bucket():
    """Return the microvm image s3 bucket name for this session.

    The environment variable takes precedence over the built-in default.
    """
    bucket = os.environ.get(ENV_TEST_IMAGES_S3_BUCKET)
    if bucket is None:
        bucket = DEFAULT_TEST_IMAGES_S3_BUCKET
    return bucket
# Session-wide singletons for fetching test artifacts and microvm images
# from the configured s3 bucket.
ARTIFACTS_COLLECTION = ArtifactCollection(_test_images_s3_bucket())
MICROVM_S3_FETCHER = MicrovmImageS3Fetcher(_test_images_s3_bucket())
def init_microvm(root_path, bin_cloner_path,
                 fc_binary=None, jailer_binary=None):
    """Build and set up a fresh ``Microvm`` with a unique id.

    When either binary path is missing, both are resolved through the
    cargo build helpers.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    if fc_binary is None or jailer_binary is None:
        fc_binary, jailer_binary = build_tools.get_firecracker_binaries()
    # Make sure we always have both binaries.
    assert fc_binary
    assert jailer_binary
    vm = Microvm(
        resource_path=root_path,
        fc_binary_path=fc_binary,
        jailer_binary_path=jailer_binary,
        microvm_id=str(uuid.uuid4()),
        bin_cloner_path=bin_cloner_path)
    vm.setup()
    return vm
def pytest_configure(config):
    """Pytest hook - initialization.

    Register the custom marker, then wire up the test scheduler and its
    multi-process singletons.
    """
    config.addinivalue_line("markers", "nonci: mark test as nonci.")
    scheduler = PytestScheduler.instance()
    scheduler.register_mp_singleton(
        net_tools.UniqueIPv4Generator.instance()
    )
    config.pluginmanager.register(scheduler)
def pytest_addoption(parser):
    """Pytest hook. Add concurrency command line option.

    For some reason, pytest doesn't properly pick up this hook in our
    plugin class, so it is forwarded to the scheduler from here.
    """
    scheduler = PytestScheduler.instance()
    return scheduler.do_pytest_addoption(parser)
@pytest.fixture(autouse=True, scope='session')
def test_session_root_path():
    """Create, yield and finally remove the testrun root directory.

    The directory is unique per session, which matters because the
    scheduler runs multiple pytest sessions concurrently.
    """
    session_root = tempfile.mkdtemp(prefix="fctest-")
    yield session_root
    # Teardown: wipe everything created under this session's root.
    shutil.rmtree(session_root)
@pytest.fixture
def test_session_tmp_path(test_session_root_path):
    """Yield a random temporary directory. Destroyed on teardown."""
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    scratch_dir = tempfile.mkdtemp(prefix=test_session_root_path)
    yield scratch_dir
    shutil.rmtree(scratch_dir)
def _gcc_compile(src_file, output_file, extra_flags="-static -O3"):
    """Compile *src_file* into *output_file* with gcc.

    By default the binary is built statically and optimized, so it can be
    copied into a jail without library dependencies.
    """
    utils.run_cmd(
        'gcc {} -o {} {}'.format(src_file, output_file, extra_flags)
    )
@pytest.fixture(scope='session')
def bin_cloner_path(test_session_root_path):
    """Build a binary that `clone`s into the jailer.

    It's necessary because Python doesn't interface well with the
    `clone()` syscall directly.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    cloner_bin = os.path.join(test_session_root_path, 'newpid_cloner')
    _gcc_compile('host_tools/newpid_cloner.c', cloner_bin)
    yield cloner_bin
@pytest.fixture(scope='session')
def bin_vsock_path(test_session_root_path):
    """Build a simple vsock client/server application."""
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    helper_bin = os.path.join(test_session_root_path, 'vsock_helper')
    _gcc_compile('host_tools/vsock_helper.c', helper_bin)
    yield helper_bin
@pytest.fixture(scope='session')
def change_net_config_space_bin(test_session_root_path):
    """Build a binary that changes the MMIO config space."""
    # pylint: disable=redefined-outer-name
    out_path = os.path.join(
        test_session_root_path, 'change_net_config_space')
    # This helper must not be built statically, hence the empty flag set.
    _gcc_compile(
        'host_tools/change_net_config_space.c', out_path, extra_flags="")
    yield out_path
@pytest.fixture(scope='session')
def bin_seccomp_paths(test_session_root_path):
    """Build jailers and jailed binaries to test seccomp.

    They currently consist of:
    * a jailer with a simple syscall whitelist;
    * a jailer with a (syscall, arguments) advanced whitelist;
    * a jailed binary that follows the seccomp rules;
    * a jailed binary that breaks the seccomp rules.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    seccomp_build_path = os.path.join(
        test_session_root_path,
        build_tools.CARGO_RELEASE_REL_PATH
    )
    # Build statically against musl for the host architecture.
    target_args = '--release --target {}-unknown-linux-musl'.format(
        platform.machine()
    )
    build_tools.cargo_build(
        seccomp_build_path,
        extra_args=target_args,
        src_dir='integration_tests/security/demo_seccomp'
    )
    release_binaries_path = os.path.join(
        test_session_root_path,
        build_tools.CARGO_RELEASE_REL_PATH,
        build_tools.RELEASE_BINARIES_REL_PATH
    )
    binary_names = (
        'demo_basic_jailer',
        'demo_advanced_jailer',
        'demo_harmless',
        'demo_malicious',
    )
    # Map each demo binary name to its normalized on-disk path.
    yield {
        name: os.path.normpath(os.path.join(release_binaries_path, name))
        for name in binary_names
    }
@pytest.fixture()
def microvm(test_session_root_path, bin_cloner_path):
    """Instantiate a microvm and tear it down after the test."""
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    machine = init_microvm(test_session_root_path, bin_cloner_path)
    yield machine
    # Teardown: stop the VM and remove everything it left on disk.
    machine.kill()
    shutil.rmtree(os.path.join(test_session_root_path, machine.id))
@pytest.fixture
def network_config():
    """Yield the session-wide unique IPv4 address generator."""
    generator = net_tools.UniqueIPv4Generator.instance()
    yield generator
# Parametrized over every image in the bucket: pytest creates one test case
# per microvm image for each test that requests this fixture.
@pytest.fixture(
    params=MICROVM_S3_FETCHER.list_microvm_images(
        capability_filter=['*']
    )
)
def test_microvm_any(request, microvm):
    """Yield a microvm that can have any image in the spec bucket.
    A test case using this fixture will run for every microvm image.
    When using a pytest parameterized fixture, a test case is created for each
    parameter in the list. We generate the list dynamically based on the
    capability filter. This will result in
    `len(MICROVM_S3_FETCHER.list_microvm_images(capability_filter=['*']))`
    test cases for each test that depends on this fixture, each receiving a
    microvm instance with a different microvm image.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    # Download/link the image for this parametrization into the vm's jail.
    MICROVM_S3_FETCHER.init_vm_resources(request.param, microvm)
    yield microvm
@pytest.fixture
def test_multiple_microvms(
        test_session_root_path,
        context,
        bin_cloner_path
):
    """Yield one or more microvms based on the context provided.

    `context` is a dynamically parameterized fixture created inside the
    special function `pytest_generate_tests` and it holds a tuple containing
    the name of the guest image used to spawn a microvm and the number of
    microvms to spawn.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.
    (image, vm_count) = context
    # The first vm downloads its resources; the clones hardlink them instead
    # of fetching again.
    primary = init_microvm(test_session_root_path, bin_cloner_path)
    MICROVM_S3_FETCHER.init_vm_resources(image, primary)
    vms = [primary]
    # `context` is generated with a strictly positive vm count, so spawning
    # `vm_count - 1` clones is always safe.
    for _ in range(vm_count - 1):
        clone = init_microvm(test_session_root_path, bin_cloner_path)
        MICROVM_S3_FETCHER.hardlink_vm_resources(image, primary, clone)
        vms.append(clone)
    yield vms
    # Teardown: kill each vm and remove its jail directory.
    for vm in vms:
        vm.kill()
        shutil.rmtree(os.path.join(test_session_root_path, vm.id))
def pytest_generate_tests(metafunc):
"""Implement customized parametrization scheme.
This is a special hook which is called by the pytest infrastructure when
collecting a test function. The `metafunc` contains the requesting test
context. Amongst other things, the `metafunc` provides the list of fixture
names that the calling test function is using. If we find a fixture that
is called `context`, we check the calling function through the
`metafunc.function` field for the `_pool_size` attribute which we
previously set with a decorator. Then we create the list of parameters
for this fixture.
The parameter will be a list of tuples of the form (cap, pool_size).
For each parameter from the list (i.e. tuple) a different test case
scenario will be created.
"""
if 'context' in metafunc.fixturenames:
# In order to create the params for the current fixture, we need the
# capability and the number of vms we want to spawn.
# 1. Look if the test function set the pool size through the decorator.
# If it did not, we set it to 1.
how_many = int(getattr(metafunc.function, '_pool_size', None))
assert how_many > 0
# 2. Check if the test function set the capability field through
# the decorator. If it did not, we set it to any.
cap = getattr(metafunc.function, '_capability', '*')
# 3. Before parametrization, get the list of images that have the
# desired capability. By parametrize-ing the fixture with it, we
# trigger tests cases for each of them.
image_list = MICROVM_S3_FETCHER.list_microvm_images(
capability_filter=[cap]
)
metafunc.parametrize(
'context',
[(item, how_many) for item in image_list],
ids=['{}, {} instance(s)'.format(
item, how_many
) for item in image_list]
)
TEST_MICROVM_CAP_FIXTURE_TEMPLATE = (
"@pytest.fixture("
" params=MICROVM_S3_FETCHER.list_microvm_images(\n"
" capability_filter=['CAP']\n"
" )\n"
")\n"
"def test_microvm_with_CAP(request, microvm):\n"
" MICROVM_S3_FETCHER.init_vm_resources(\n"
" request.param, microvm\n"
" )\n"
" yield microvm"
)
# To make test writing easy, we want to dynamically create fixtures with all
# capabilities present in the test microvm images bucket. `pytest` doesn't
# provide a way to do that outright, but luckily all of python is just lists of
# of lists and a cursor, so exec() works fine here.
for capability in MICROVM_S3_FETCHER.enum_capabilities():
TEST_MICROVM_CAP_FIXTURE = (
TEST_MICROVM_CAP_FIXTURE_TEMPLATE.replace('CAP', capability)
)
# pylint: disable=exec-used
# This is the most straightforward way to achieve this result.
exec(TEST_MICROVM_CAP_FIXTURE) | tests/conftest.py | import os
import platform
import shutil
import sys
import tempfile
import uuid
import pytest
import host_tools.cargo_build as build_tools
import host_tools.network as net_tools
import host_tools.proc as proc
from framework.artifacts import ArtifactCollection
import framework.utils as utils
from framework.microvm import Microvm
from framework.s3fetcher import MicrovmImageS3Fetcher
from framework.scheduler import PytestScheduler
SPEC_S3_BUCKET = 'spec.ccfc.min'
"""The s3 bucket that holds global Firecracker specifications."""
DEFAULT_TEST_IMAGES_S3_BUCKET = 'spec.ccfc.min'
"""The default s3 bucket that holds Firecracker microvm test images."""
ENV_TEST_IMAGES_S3_BUCKET = 'TEST_MICROVM_IMAGES_S3_BUCKET'
"""Environment variable for configuring the test microvm s3 bucket.
If variable exists in `os.environ`, its value will be used as the s3 bucket
for microvm test images.
"""
SCRIPT_FOLDER = os.path.dirname(os.path.realpath(__file__))
# This codebase uses Python features available in Python 3.6 or above
if sys.version_info < (3, 6):
raise SystemError("This codebase requires Python 3.6 or above.")
# Some tests create system-level resources; ensure we run as root.
if os.geteuid() != 0:
raise PermissionError("Test session needs to be run as root.")
# Style related tests are run only on AMD.
if "AMD" not in proc.proc_type():
collect_ignore = [os.path.join(SCRIPT_FOLDER, "integration_tests/style")]
if "AMD" in proc.proc_type():
collect_ignore = [os.path.join(
SCRIPT_FOLDER, "integration_tests/performance/test_snapshot_perf.py")]
def _test_images_s3_bucket():
"""Auxiliary function for getting this session's bucket name."""
return os.environ.get(
ENV_TEST_IMAGES_S3_BUCKET,
DEFAULT_TEST_IMAGES_S3_BUCKET
)
ARTIFACTS_COLLECTION = ArtifactCollection(_test_images_s3_bucket())
MICROVM_S3_FETCHER = MicrovmImageS3Fetcher(_test_images_s3_bucket())
def init_microvm(root_path, bin_cloner_path,
fc_binary=None, jailer_binary=None):
"""Auxiliary function for instantiating a microvm and setting it up."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
microvm_id = str(uuid.uuid4())
if fc_binary is None or jailer_binary is None:
fc_binary, jailer_binary = build_tools.get_firecracker_binaries()
# Make sure we always have both binaries.
assert fc_binary
assert jailer_binary
vm = Microvm(
resource_path=root_path,
fc_binary_path=fc_binary,
jailer_binary_path=jailer_binary,
microvm_id=microvm_id,
bin_cloner_path=bin_cloner_path)
vm.setup()
return vm
def pytest_configure(config):
"""Pytest hook - initialization.
Initialize the test scheduler and IPC services.
"""
config.addinivalue_line("markers", "nonci: mark test as nonci.")
PytestScheduler.instance().register_mp_singleton(
net_tools.UniqueIPv4Generator.instance()
)
config.pluginmanager.register(PytestScheduler.instance())
def pytest_addoption(parser):
"""Pytest hook. Add concurrency command line option.
For some reason, pytest doesn't properly pick up this hook in our plugin
class, so we need to call it from here.
"""
return PytestScheduler.instance().do_pytest_addoption(parser)
@pytest.fixture(autouse=True, scope='session')
def test_session_root_path():
"""Ensure and yield the testrun root directory.
Created at session initialization time, this directory will be
session-unique. This is important, since the scheduler will run
multiple pytest sessions concurrently.
"""
root_path = tempfile.mkdtemp(prefix="fctest-")
yield root_path
shutil.rmtree(root_path)
@pytest.fixture
def test_session_tmp_path(test_session_root_path):
"""Yield a random temporary directory. Destroyed on teardown."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
tmp_path = tempfile.mkdtemp(prefix=test_session_root_path)
yield tmp_path
shutil.rmtree(tmp_path)
def _gcc_compile(src_file, output_file, extra_flags="-static -O3"):
"""Build a source file with gcc."""
compile_cmd = 'gcc {} -o {} {}'.format(
src_file,
output_file,
extra_flags
)
utils.run_cmd(compile_cmd)
@pytest.fixture(scope='session')
def bin_cloner_path(test_session_root_path):
"""Build a binary that `clone`s into the jailer.
It's necessary because Python doesn't interface well with the `clone()`
syscall directly.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
cloner_bin_path = os.path.join(test_session_root_path, 'newpid_cloner')
_gcc_compile(
'host_tools/newpid_cloner.c',
cloner_bin_path
)
yield cloner_bin_path
@pytest.fixture(scope='session')
def bin_vsock_path(test_session_root_path):
"""Build a simple vsock client/server application."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
vsock_helper_bin_path = os.path.join(
test_session_root_path,
'vsock_helper'
)
_gcc_compile(
'host_tools/vsock_helper.c',
vsock_helper_bin_path
)
yield vsock_helper_bin_path
@pytest.fixture(scope='session')
def change_net_config_space_bin(test_session_root_path):
"""Build a binary that changes the MMIO config space."""
# pylint: disable=redefined-outer-name
change_net_config_space_bin = os.path.join(
test_session_root_path,
'change_net_config_space'
)
_gcc_compile(
'host_tools/change_net_config_space.c',
change_net_config_space_bin,
extra_flags=""
)
yield change_net_config_space_bin
@pytest.fixture(scope='session')
def bin_seccomp_paths(test_session_root_path):
"""Build jailers and jailed binaries to test seccomp.
They currently consist of:
* a jailer with a simple syscall whitelist;
* a jailer with a (syscall, arguments) advanced whitelist;
* a jailed binary that follows the seccomp rules;
* a jailed binary that breaks the seccomp rules.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
seccomp_build_path = os.path.join(
test_session_root_path,
build_tools.CARGO_RELEASE_REL_PATH
)
extra_args = '--release --target {}-unknown-linux-musl'
extra_args = extra_args.format(platform.machine())
build_tools.cargo_build(seccomp_build_path,
extra_args=extra_args,
src_dir='integration_tests/security/demo_seccomp')
release_binaries_path = os.path.join(
test_session_root_path,
build_tools.CARGO_RELEASE_REL_PATH,
build_tools.RELEASE_BINARIES_REL_PATH
)
demo_basic_jailer = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_basic_jailer'
)
)
demo_advanced_jailer = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_advanced_jailer'
)
)
demo_harmless = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_harmless'
)
)
demo_malicious = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_malicious'
)
)
yield {
'demo_basic_jailer': demo_basic_jailer,
'demo_advanced_jailer': demo_advanced_jailer,
'demo_harmless': demo_harmless,
'demo_malicious': demo_malicious
}
@pytest.fixture()
def microvm(test_session_root_path, bin_cloner_path):
"""Instantiate a microvm."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
# Make sure the necessary binaries are there before instantiating the
# microvm.
vm = init_microvm(test_session_root_path, bin_cloner_path)
yield vm
vm.kill()
shutil.rmtree(os.path.join(test_session_root_path, vm.id))
@pytest.fixture
def network_config():
"""Yield a UniqueIPv4Generator."""
yield net_tools.UniqueIPv4Generator.instance()
@pytest.fixture(
params=MICROVM_S3_FETCHER.list_microvm_images(
capability_filter=['*']
)
)
def test_microvm_any(request, microvm):
"""Yield a microvm that can have any image in the spec bucket.
A test case using this fixture will run for every microvm image.
When using a pytest parameterized fixture, a test case is created for each
parameter in the list. We generate the list dynamically based on the
capability filter. This will result in
`len(MICROVM_S3_FETCHER.list_microvm_images(capability_filter=['*']))`
test cases for each test that depends on this fixture, each receiving a
microvm instance with a different microvm image.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
MICROVM_S3_FETCHER.init_vm_resources(request.param, microvm)
yield microvm
@pytest.fixture
def test_multiple_microvms(
test_session_root_path,
context,
bin_cloner_path
):
"""Yield one or more microvms based on the context provided.
`context` is a dynamically parameterized fixture created inside the special
function `pytest_generate_tests` and it holds a tuple containing the name
of the guest image used to spawn a microvm and the number of microvms
to spawn.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
microvms = []
(microvm_resources, how_many) = context
# When the context specifies multiple microvms, we use the first vm to
# populate the other ones by hardlinking its resources.
first_vm = init_microvm(test_session_root_path, bin_cloner_path)
MICROVM_S3_FETCHER.init_vm_resources(
microvm_resources,
first_vm
)
microvms.append(first_vm)
# It is safe to do this as the dynamically generated fixture `context`
# asserts that the `how_many` parameter is always positive
# (i.e strictly greater than 0).
for _ in range(how_many - 1):
vm = init_microvm(test_session_root_path, bin_cloner_path)
MICROVM_S3_FETCHER.hardlink_vm_resources(
microvm_resources,
first_vm,
vm
)
microvms.append(vm)
yield microvms
for i in range(how_many):
microvms[i].kill()
shutil.rmtree(os.path.join(test_session_root_path, microvms[i].id))
def pytest_generate_tests(metafunc):
"""Implement customized parametrization scheme.
This is a special hook which is called by the pytest infrastructure when
collecting a test function. The `metafunc` contains the requesting test
context. Amongst other things, the `metafunc` provides the list of fixture
names that the calling test function is using. If we find a fixture that
is called `context`, we check the calling function through the
`metafunc.function` field for the `_pool_size` attribute which we
previously set with a decorator. Then we create the list of parameters
for this fixture.
The parameter will be a list of tuples of the form (cap, pool_size).
For each parameter from the list (i.e. tuple) a different test case
scenario will be created.
"""
if 'context' in metafunc.fixturenames:
# In order to create the params for the current fixture, we need the
# capability and the number of vms we want to spawn.
# 1. Look if the test function set the pool size through the decorator.
# If it did not, we set it to 1.
how_many = int(getattr(metafunc.function, '_pool_size', None))
assert how_many > 0
# 2. Check if the test function set the capability field through
# the decorator. If it did not, we set it to any.
cap = getattr(metafunc.function, '_capability', '*')
# 3. Before parametrization, get the list of images that have the
# desired capability. By parametrize-ing the fixture with it, we
# trigger tests cases for each of them.
image_list = MICROVM_S3_FETCHER.list_microvm_images(
capability_filter=[cap]
)
metafunc.parametrize(
'context',
[(item, how_many) for item in image_list],
ids=['{}, {} instance(s)'.format(
item, how_many
) for item in image_list]
)
TEST_MICROVM_CAP_FIXTURE_TEMPLATE = (
"@pytest.fixture("
" params=MICROVM_S3_FETCHER.list_microvm_images(\n"
" capability_filter=['CAP']\n"
" )\n"
")\n"
"def test_microvm_with_CAP(request, microvm):\n"
" MICROVM_S3_FETCHER.init_vm_resources(\n"
" request.param, microvm\n"
" )\n"
" yield microvm"
)
# To make test writing easy, we want to dynamically create fixtures with all
# capabilities present in the test microvm images bucket. `pytest` doesn't
# provide a way to do that outright, but luckily all of python is just lists of
# of lists and a cursor, so exec() works fine here.
for capability in MICROVM_S3_FETCHER.enum_capabilities():
TEST_MICROVM_CAP_FIXTURE = (
TEST_MICROVM_CAP_FIXTURE_TEMPLATE.replace('CAP', capability)
)
# pylint: disable=exec-used
# This is the most straightforward way to achieve this result.
exec(TEST_MICROVM_CAP_FIXTURE) | 0.550366 | 0.275485 |
from farmconfig import FarmConfig
class Config(FarmConfig):
# Wiki identity ----------------------------------------------------
# Site name, used by default for wiki name-logo [Unicode]
sitename = u'JythonWiki'
# Wiki logo. You can use an image, text or both. [Unicode]
# Example: u'<img src="/wiki/mywiki.png" alt="My Wiki">My Wiki'
# For no logo or text, use ''
logo_string = u'<img src="/wiki/europython/img/jython-new-small.gif" alt="JythonWiki"> '
# The interwiki name used in interwiki links
interwikiname = 'JythonWiki'
# Critical setup ---------------------------------------------------
# Misconfiguration here will render your wiki unusable. Check that
# all directories are accessible by the web server or moin server.
# If you encounter problems, try to set data_dir and data_underlay_dir
# to absolute paths.
# Where your mutable wiki pages are. You want to make regular
# backups of this directory.
data_dir = '/data/moin/instances/jython/data/'
# Where read-only system and help page are. You might want to share
# this directory between several wikis. When you update MoinMoin,
# you can safely replace the underlay directory with a new one. This
# directory is part of MoinMoin distribution, you don't have to
# backup it.
## data_underlay_dir = '/data/moin/instances/underlay/'
# This must be '/wiki' for twisted and standalone. For CGI, it should
# match your Apache Alias setting.
## url_prefix = 'http://wiki.python.org/wiki'
# Security ----------------------------------------------------------
# Security critical actions (disabled by default)
# Uncomment to enable options you like.
# IMPORTANT: grant yourself admin rights! replace YourName with
# your user name. See HelpOnAccessControlLists for more help.
acl_rights_before = u"BlockedUsersGroup: AdminGroup:read,write,delete,revert,admin"
# Only users in the NewUsersGroup may edit pages, since we're simply
# getting too much spam and vandalism. MAL 2014-05-31
acl_rights_default = u"EditorsGroup:read,write,delete,revert All:read"
# Link spam protection for public wikis (Uncomment to enable)
# Needs a reliable internet connection.
from MoinMoin.security.antispam import SecurityPolicy
# User interface ----------------------------------------------------
# Add your wikis important pages at the end. It is not recommended to
# remove the default links. Leave room for user links - don't use
# more than 6 short items.
# You MUST use Unicode strings here, but you need not use localized
# page names for system and help pages, those will be used automatically
# according to the user selected language. [Unicode]
navi_bar = [
# Will use page_front_page, (default FrontPage)
u'%(page_front_page)s',
u'RecentChanges',
u'FindPage',
u'HelpContents',
]
# The default theme anonymous or new users get
##theme_default = 'jythonwiki'
# Language options --------------------------------------------------
# See http://moinmoin.wikiwikiweb.de/ConfigMarket for configuration in
# YOUR language that other people contributed.
# The main wiki language, set the direction of the wiki pages
default_lang = 'en'
# Content options ---------------------------------------------------
# Show users hostnames in RecentChanges
show_hosts = 1
# Enumerate headlines?
show_section_numbers = 0
# Charts size, require gdchart (Set to None to disable).
chart_options = {'width': 600, 'height': 300}
# Enable textchas.
textchas_disabled_group = u"TrustedEditorsGroup"
textchas = {
'en': {
#u"Type peanut in here:": ur" *(?i)peanut *",
#u"Are you a friend or foe?": ur" *(?i)(friend|foe) *",
#u"Are you a bot, yes or no?": ur" *(?i)(no|yes) *",
#u"Say cheese:": ur" *(?i)cheese *",
#u"Say friend and enter:": ur" *(?i)friend *",
#u"What does J in jython stand for?": ur" *(?i)java *",
# New ones (2013-02-14):
#u"What does Jython's interactive prompt look like?": ur" *>>> *",
#u"Say green but do not say blue.": ur" *(?i)green *",
#u"What is the smallest number in 10, 5, 11, 15?": ur" *(?i)(5|five) *",
#u"What is <NAME>'s last name?": ur" *(?i)washington *",
#u"How many wings does a typical bird have?": ur" .*(?i)(2|two) .*",
# New ones 2013-03-20:
u"Which programming language does Jython implement?": ur" *(?i)python *",
u"Jython is written in": ur" *(?i)java *",
#u"What does Jython's interactive prompt look like?": ur" *>>> *",
u"What is <NAME>'s first name?": ur" *(?i)guido *",
u"Which foundation protects the Jython IP?": ur" *(?i)(psf|python +software +foundation|python +software|python +foundation) *",
u"x = 1; x += 1; x ==": ur" *2 *",
u"x = 2; x /= 2; x ==": ur" *(1|1.0) *",
u"l = [1,2,3]; l.remove(1); l[0] ==": ur" *2 *",
u"l = [1,2,3]; del l[1]; l[0] ==": ur" *1 *",
u"s = 'guido'; s[3:5] ==": ur" *(?i)do *",
u"x = range(10,18,2)[2]; x ==": ur" *14 *",
u"x = map(lambda x:x**2,range(10))[3]; x ==": ur" *9 *",
},
} | salt/moin/configs/jython.py | from farmconfig import FarmConfig
class Config(FarmConfig):
# Wiki identity ----------------------------------------------------
# Site name, used by default for wiki name-logo [Unicode]
sitename = u'JythonWiki'
# Wiki logo. You can use an image, text or both. [Unicode]
# Example: u'<img src="/wiki/mywiki.png" alt="My Wiki">My Wiki'
# For no logo or text, use ''
logo_string = u'<img src="/wiki/europython/img/jython-new-small.gif" alt="JythonWiki"> '
# The interwiki name used in interwiki links
interwikiname = 'JythonWiki'
# Critical setup ---------------------------------------------------
# Misconfiguration here will render your wiki unusable. Check that
# all directories are accessible by the web server or moin server.
# If you encounter problems, try to set data_dir and data_underlay_dir
# to absolute paths.
# Where your mutable wiki pages are. You want to make regular
# backups of this directory.
data_dir = '/data/moin/instances/jython/data/'
# Where read-only system and help page are. You might want to share
# this directory between several wikis. When you update MoinMoin,
# you can safely replace the underlay directory with a new one. This
# directory is part of MoinMoin distribution, you don't have to
# backup it.
## data_underlay_dir = '/data/moin/instances/underlay/'
# This must be '/wiki' for twisted and standalone. For CGI, it should
# match your Apache Alias setting.
## url_prefix = 'http://wiki.python.org/wiki'
# Security ----------------------------------------------------------
# Security critical actions (disabled by default)
# Uncomment to enable options you like.
# IMPORTANT: grant yourself admin rights! replace YourName with
# your user name. See HelpOnAccessControlLists for more help.
acl_rights_before = u"BlockedUsersGroup: AdminGroup:read,write,delete,revert,admin"
# Only users in the NewUsersGroup may edit pages, since we're simply
# getting too much spam and vandalism. MAL 2014-05-31
acl_rights_default = u"EditorsGroup:read,write,delete,revert All:read"
# Link spam protection for public wikis (Uncomment to enable)
# Needs a reliable internet connection.
from MoinMoin.security.antispam import SecurityPolicy
# User interface ----------------------------------------------------
# Add your wikis important pages at the end. It is not recommended to
# remove the default links. Leave room for user links - don't use
# more than 6 short items.
# You MUST use Unicode strings here, but you need not use localized
# page names for system and help pages, those will be used automatically
# according to the user selected language. [Unicode]
navi_bar = [
# Will use page_front_page, (default FrontPage)
u'%(page_front_page)s',
u'RecentChanges',
u'FindPage',
u'HelpContents',
]
# The default theme anonymous or new users get
##theme_default = 'jythonwiki'
# Language options --------------------------------------------------
# See http://moinmoin.wikiwikiweb.de/ConfigMarket for configuration in
# YOUR language that other people contributed.
# The main wiki language, set the direction of the wiki pages
default_lang = 'en'
# Content options ---------------------------------------------------
# Show users hostnames in RecentChanges
show_hosts = 1
# Enumerate headlines?
show_section_numbers = 0
# Charts size, require gdchart (Set to None to disable).
chart_options = {'width': 600, 'height': 300}
# Enable textchas.
textchas_disabled_group = u"TrustedEditorsGroup"
textchas = {
'en': {
#u"Type peanut in here:": ur" *(?i)peanut *",
#u"Are you a friend or foe?": ur" *(?i)(friend|foe) *",
#u"Are you a bot, yes or no?": ur" *(?i)(no|yes) *",
#u"Say cheese:": ur" *(?i)cheese *",
#u"Say friend and enter:": ur" *(?i)friend *",
#u"What does J in jython stand for?": ur" *(?i)java *",
# New ones (2013-02-14):
#u"What does Jython's interactive prompt look like?": ur" *>>> *",
#u"Say green but do not say blue.": ur" *(?i)green *",
#u"What is the smallest number in 10, 5, 11, 15?": ur" *(?i)(5|five) *",
#u"What is <NAME>'s last name?": ur" *(?i)washington *",
#u"How many wings does a typical bird have?": ur" .*(?i)(2|two) .*",
# New ones 2013-03-20:
u"Which programming language does Jython implement?": ur" *(?i)python *",
u"Jython is written in": ur" *(?i)java *",
#u"What does Jython's interactive prompt look like?": ur" *>>> *",
u"What is <NAME>'s first name?": ur" *(?i)guido *",
u"Which foundation protects the Jython IP?": ur" *(?i)(psf|python +software +foundation|python +software|python +foundation) *",
u"x = 1; x += 1; x ==": ur" *2 *",
u"x = 2; x /= 2; x ==": ur" *(1|1.0) *",
u"l = [1,2,3]; l.remove(1); l[0] ==": ur" *2 *",
u"l = [1,2,3]; del l[1]; l[0] ==": ur" *1 *",
u"s = 'guido'; s[3:5] ==": ur" *(?i)do *",
u"x = range(10,18,2)[2]; x ==": ur" *14 *",
u"x = map(lambda x:x**2,range(10))[3]; x ==": ur" *9 *",
},
} | 0.520253 | 0.22531 |
import argparse
import glob
import os
import os.path as osp
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
import numpy as np
from skimage.io import imread, imsave
from skimage.measure import label
from joblib import Parallel, delayed
from utils.metrics import *
parser = argparse.ArgumentParser()
parser.add_argument('pred_root')
args = parser.parse_args()
pred_root = args.pred_root
new_pred_root = pred_root + '-new'
if not osp.exists(new_pred_root):
os.mkdir(new_pred_root)
executor = Parallel(n_jobs=os.cpu_count())
def postprocess(pred):
regions = label(pred)
for region_idx in range(regions.max() + 1):
region_mask = regions == region_idx
if region_mask.sum() < 5000:
pred[region_mask] = 0
revert_regions = label(1 - pred)
for region_idx in range(revert_regions.max() + 1):
region_mask = revert_regions == region_idx
if region_mask.sum() < 5000:
pred[region_mask] = 1
return pred
def compute_metrics(iterable):
accuracies = executor(delayed(accuracy)(pred, gt) for pred, gt in iterable)
print('Accuracy:', np.mean(accuracies))
dices = executor(delayed(dice)(pred, gt) for pred, gt in iterable)
print('Dice:', np.mean(dices))
detection_f1s = executor(delayed(detection_f1)(pred, gt) for pred, gt in iterable)
print('Detection F1:', np.mean(detection_f1s))
object_dices = executor(delayed(object_dice)(pred, gt) for pred, gt in iterable)
print('Object Dice:', np.mean(object_dices))
object_hausdorffs = executor(delayed(object_hausdorff)(pred, gt) for pred, gt in iterable)
print('Object Hausdorff:', np.mean(object_hausdorffs))
print('Reading predictions and gts ...')
pred_paths = sorted(glob.glob(osp.join(pred_root, '*.png')))
predictions = executor(delayed(postprocess)(imread(pred_path) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted(glob.glob('/home/mrc/data/CRAG/test/masks/*.png')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(pred_path.replace(pred_root, pred_root + '-new'), (pred * 255).astype('uint8'))
compute_metrics(list(zip(predictions, gts))) | scripts/evaluate_crag.py | import argparse
import glob
import os
import os.path as osp
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
import numpy as np
from skimage.io import imread, imsave
from skimage.measure import label
from joblib import Parallel, delayed
from utils.metrics import *
parser = argparse.ArgumentParser()
parser.add_argument('pred_root')
args = parser.parse_args()
pred_root = args.pred_root
new_pred_root = pred_root + '-new'
if not osp.exists(new_pred_root):
os.mkdir(new_pred_root)
executor = Parallel(n_jobs=os.cpu_count())
def postprocess(pred):
regions = label(pred)
for region_idx in range(regions.max() + 1):
region_mask = regions == region_idx
if region_mask.sum() < 5000:
pred[region_mask] = 0
revert_regions = label(1 - pred)
for region_idx in range(revert_regions.max() + 1):
region_mask = revert_regions == region_idx
if region_mask.sum() < 5000:
pred[region_mask] = 1
return pred
def compute_metrics(iterable):
accuracies = executor(delayed(accuracy)(pred, gt) for pred, gt in iterable)
print('Accuracy:', np.mean(accuracies))
dices = executor(delayed(dice)(pred, gt) for pred, gt in iterable)
print('Dice:', np.mean(dices))
detection_f1s = executor(delayed(detection_f1)(pred, gt) for pred, gt in iterable)
print('Detection F1:', np.mean(detection_f1s))
object_dices = executor(delayed(object_dice)(pred, gt) for pred, gt in iterable)
print('Object Dice:', np.mean(object_dices))
object_hausdorffs = executor(delayed(object_hausdorff)(pred, gt) for pred, gt in iterable)
print('Object Hausdorff:', np.mean(object_hausdorffs))
print('Reading predictions and gts ...')
pred_paths = sorted(glob.glob(osp.join(pred_root, '*.png')))
predictions = executor(delayed(postprocess)(imread(pred_path) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted(glob.glob('/home/mrc/data/CRAG/test/masks/*.png')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(pred_path.replace(pred_root, pred_root + '-new'), (pred * 255).astype('uint8'))
compute_metrics(list(zip(predictions, gts))) | 0.267983 | 0.192331 |
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class ClearTimer(AWSProperty):
props = {
"TimerName": (str, False),
}
class Firehose(AWSProperty):
props = {
"DeliveryStreamName": (str, False),
"Separator": (str, False),
}
class IotEvents(AWSProperty):
props = {
"InputName": (str, False),
}
class IotTopicPublish(AWSProperty):
props = {
"MqttTopic": (str, False),
}
class Lambda(AWSProperty):
props = {
"FunctionArn": (str, False),
}
class ResetTimer(AWSProperty):
props = {
"TimerName": (str, False),
}
class SetTimer(AWSProperty):
props = {
"Seconds": (integer, False),
"TimerName": (str, False),
}
class SetVariable(AWSProperty):
props = {
"Value": (str, False),
"VariableName": (str, False),
}
class Sns(AWSProperty):
props = {
"TargetArn": (str, False),
}
class Sqs(AWSProperty):
props = {
"QueueUrl": (str, False),
"UseBase64": (boolean, False),
}
class Action(AWSProperty):
props = {
"ClearTimer": (ClearTimer, False),
"Firehose": (Firehose, False),
"IotEvents": (IotEvents, False),
"IotTopicPublish": (IotTopicPublish, False),
"Lambda": (Lambda, False),
"ResetTimer": (ResetTimer, False),
"SetTimer": (SetTimer, False),
"SetVariable": (SetVariable, False),
"Sns": (Sns, False),
"Sqs": (Sqs, False),
}
class Event(AWSProperty):
props = {
"Actions": ([Action], False),
"Condition": (str, False),
"EventName": (str, False),
}
class OnEnter(AWSProperty):
props = {
"Events": ([Event], False),
}
class OnExit(AWSProperty):
props = {
"Events": ([Event], False),
}
class TransitionEvent(AWSProperty):
props = {
"Actions": ([Action], False),
"Condition": (str, False),
"EventName": (str, False),
"NextState": (str, False),
}
class OnInput(AWSProperty):
props = {
"Events": ([Event], False),
"TransitionEvents": ([TransitionEvent], False),
}
class State(AWSProperty):
props = {
"OnEnter": (OnEnter, False),
"OnExit": (OnExit, False),
"OnInput": (OnInput, False),
"StateName": (str, False),
}
class DetectorModelDefinition(AWSProperty):
props = {
"InitialStateName": (str, False),
"States": ([State], False),
}
class DetectorModel(AWSObject):
resource_type = "AWS::IoTEvents::DetectorModel"
props = {
"DetectorModelDefinition": (DetectorModelDefinition, False),
"DetectorModelDescription": (str, False),
"DetectorModelName": (str, False),
"Key": (str, False),
"RoleArn": (str, False),
"Tags": (Tags, False),
}
class Attribute(AWSProperty):
props = {
"JsonPath": (str, False),
}
class InputDefinition(AWSProperty):
props = {
"Attributes": ([Attribute], False),
}
class Input(AWSObject):
resource_type = "AWS::IoTEvents::Input"
props = {
"InputDefinition": (InputDefinition, False),
"InputDescription": (str, False),
"InputName": (str, False),
"Tags": (Tags, False),
} | troposphere/iotevents.py |
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class ClearTimer(AWSProperty):
props = {
"TimerName": (str, False),
}
class Firehose(AWSProperty):
props = {
"DeliveryStreamName": (str, False),
"Separator": (str, False),
}
class IotEvents(AWSProperty):
props = {
"InputName": (str, False),
}
class IotTopicPublish(AWSProperty):
props = {
"MqttTopic": (str, False),
}
class Lambda(AWSProperty):
props = {
"FunctionArn": (str, False),
}
class ResetTimer(AWSProperty):
props = {
"TimerName": (str, False),
}
class SetTimer(AWSProperty):
props = {
"Seconds": (integer, False),
"TimerName": (str, False),
}
class SetVariable(AWSProperty):
props = {
"Value": (str, False),
"VariableName": (str, False),
}
class Sns(AWSProperty):
props = {
"TargetArn": (str, False),
}
class Sqs(AWSProperty):
props = {
"QueueUrl": (str, False),
"UseBase64": (boolean, False),
}
class Action(AWSProperty):
props = {
"ClearTimer": (ClearTimer, False),
"Firehose": (Firehose, False),
"IotEvents": (IotEvents, False),
"IotTopicPublish": (IotTopicPublish, False),
"Lambda": (Lambda, False),
"ResetTimer": (ResetTimer, False),
"SetTimer": (SetTimer, False),
"SetVariable": (SetVariable, False),
"Sns": (Sns, False),
"Sqs": (Sqs, False),
}
class Event(AWSProperty):
props = {
"Actions": ([Action], False),
"Condition": (str, False),
"EventName": (str, False),
}
class OnEnter(AWSProperty):
props = {
"Events": ([Event], False),
}
class OnExit(AWSProperty):
props = {
"Events": ([Event], False),
}
class TransitionEvent(AWSProperty):
props = {
"Actions": ([Action], False),
"Condition": (str, False),
"EventName": (str, False),
"NextState": (str, False),
}
class OnInput(AWSProperty):
props = {
"Events": ([Event], False),
"TransitionEvents": ([TransitionEvent], False),
}
class State(AWSProperty):
props = {
"OnEnter": (OnEnter, False),
"OnExit": (OnExit, False),
"OnInput": (OnInput, False),
"StateName": (str, False),
}
class DetectorModelDefinition(AWSProperty):
props = {
"InitialStateName": (str, False),
"States": ([State], False),
}
class DetectorModel(AWSObject):
resource_type = "AWS::IoTEvents::DetectorModel"
props = {
"DetectorModelDefinition": (DetectorModelDefinition, False),
"DetectorModelDescription": (str, False),
"DetectorModelName": (str, False),
"Key": (str, False),
"RoleArn": (str, False),
"Tags": (Tags, False),
}
class Attribute(AWSProperty):
props = {
"JsonPath": (str, False),
}
class InputDefinition(AWSProperty):
props = {
"Attributes": ([Attribute], False),
}
class Input(AWSObject):
resource_type = "AWS::IoTEvents::Input"
props = {
"InputDefinition": (InputDefinition, False),
"InputDescription": (str, False),
"InputName": (str, False),
"Tags": (Tags, False),
} | 0.603465 | 0.270435 |
import os
import sys
import subprocess
import traceback
import importlib.util as il
spec = il.spec_from_file_location("config", snakemake.params.config)
config = il.module_from_spec(spec)
sys.modules[spec.name] = config
spec.loader.exec_module(config)
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
def main():
mccutils.log("teflon","Running TEFLoN")
consensus = snakemake.input.consensus
reference_genome = snakemake.input.reference_genome
ref_bed = snakemake.input.ref_bed
teflon_taxonomy = snakemake.input.teflon_taxonomy
bam = snakemake.input.bam
threads = snakemake.threads
out_dir = snakemake.params.out_dir
script_dir = snakemake.params.script_dir
log = snakemake.params.log
status_log = snakemake.params.status_log
prev_steps_succeeded = mccutils.check_status_file(status_log)
if prev_steps_succeeded:
try:
sample_table = make_sample_table(out_dir, bam)
run_teflon(
script_dir,
out_dir,
sample_table,
threads=threads,
log=log,
quality_threshold=config.PARAMS['-q'],
stdev=config.PARAMS['-sd'],
cov=config.PARAMS['-cov'],
te_support1=config.PARAMS['-n1'],
te_support2=config.PARAMS['-n2'],
read_count_lower_threshold=config.PARAMS['-lt'],
read_count_higher_threshold=config.PARAMS['-ht']
)
mccutils.check_file_exists(snakemake.output[0])
with open(status_log,"w") as l:
l.write("COMPLETED\n")
except Exception as e:
track = traceback.format_exc()
print(track, file=sys.stderr)
with open(log,"a") as l:
print(track, file=l)
mccutils.log("teflon","teflon run failed")
with open(status_log,"w") as l:
l.write("FAILED\n")
mccutils.run_command(["touch", snakemake.output[0]])
else:
mccutils.run_command(["touch", snakemake.output[0]])
def make_sample_table(out_dir, bam):
with open(out_dir+"samples.tsv", "w") as samples:
samples.write(bam+"\tsample\n")
return out_dir+"samples.tsv"
def run_teflon(script_dir, out_dir, sample_file, threads=1, log=None, quality_threshold=20, stdev=None, cov=None, te_support1=1, te_support2=1, read_count_lower_threshold=1, read_count_higher_threshold=None):
command = [
"python", script_dir+"teflon.v0.4.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-i", "sample",
"-l1", 'family',
"-l2", 'family',
"-t", str(threads),
"-q", str(quality_threshold)
]
if stdev is not None:
command += ["-sd", str(stdev)]
if cov is not None:
command += ["-cov", str(cov)]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_collapse.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-t", str(threads),
"-n1", str(te_support1),
"-n2", str(te_support2),
"-q", str(quality_threshold)
]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_count.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-i", "sample",
"-l2", "family",
"-t", str(threads),
"-q", str(quality_threshold)
]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_genotype.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-lt", str(read_count_lower_threshold),
"-dt", "pooled"
]
if read_count_higher_threshold is not None:
command += ["-ht", str(read_count_higher_threshold)]
mccutils.run_command(command, log=log)
if __name__ == "__main__":
main() | scripts/teflon/teflon_run.py | import os
import sys
import subprocess
import traceback
import importlib.util as il
spec = il.spec_from_file_location("config", snakemake.params.config)
config = il.module_from_spec(spec)
sys.modules[spec.name] = config
spec.loader.exec_module(config)
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
def main():
mccutils.log("teflon","Running TEFLoN")
consensus = snakemake.input.consensus
reference_genome = snakemake.input.reference_genome
ref_bed = snakemake.input.ref_bed
teflon_taxonomy = snakemake.input.teflon_taxonomy
bam = snakemake.input.bam
threads = snakemake.threads
out_dir = snakemake.params.out_dir
script_dir = snakemake.params.script_dir
log = snakemake.params.log
status_log = snakemake.params.status_log
prev_steps_succeeded = mccutils.check_status_file(status_log)
if prev_steps_succeeded:
try:
sample_table = make_sample_table(out_dir, bam)
run_teflon(
script_dir,
out_dir,
sample_table,
threads=threads,
log=log,
quality_threshold=config.PARAMS['-q'],
stdev=config.PARAMS['-sd'],
cov=config.PARAMS['-cov'],
te_support1=config.PARAMS['-n1'],
te_support2=config.PARAMS['-n2'],
read_count_lower_threshold=config.PARAMS['-lt'],
read_count_higher_threshold=config.PARAMS['-ht']
)
mccutils.check_file_exists(snakemake.output[0])
with open(status_log,"w") as l:
l.write("COMPLETED\n")
except Exception as e:
track = traceback.format_exc()
print(track, file=sys.stderr)
with open(log,"a") as l:
print(track, file=l)
mccutils.log("teflon","teflon run failed")
with open(status_log,"w") as l:
l.write("FAILED\n")
mccutils.run_command(["touch", snakemake.output[0]])
else:
mccutils.run_command(["touch", snakemake.output[0]])
def make_sample_table(out_dir, bam):
with open(out_dir+"samples.tsv", "w") as samples:
samples.write(bam+"\tsample\n")
return out_dir+"samples.tsv"
def run_teflon(script_dir, out_dir, sample_file, threads=1, log=None, quality_threshold=20, stdev=None, cov=None, te_support1=1, te_support2=1, read_count_lower_threshold=1, read_count_higher_threshold=None):
command = [
"python", script_dir+"teflon.v0.4.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-i", "sample",
"-l1", 'family',
"-l2", 'family',
"-t", str(threads),
"-q", str(quality_threshold)
]
if stdev is not None:
command += ["-sd", str(stdev)]
if cov is not None:
command += ["-cov", str(cov)]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_collapse.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-t", str(threads),
"-n1", str(te_support1),
"-n2", str(te_support2),
"-q", str(quality_threshold)
]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_count.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-i", "sample",
"-l2", "family",
"-t", str(threads),
"-q", str(quality_threshold)
]
mccutils.run_command(command, log=log)
command = [
"python", script_dir+"teflon_genotype.py",
"-wd", out_dir,
"-d", out_dir+"teflon.prep_TF/",
"-s", sample_file,
"-lt", str(read_count_lower_threshold),
"-dt", "pooled"
]
if read_count_higher_threshold is not None:
command += ["-ht", str(read_count_higher_threshold)]
mccutils.run_command(command, log=log)
if __name__ == "__main__":
main() | 0.09003 | 0.129485 |
import logging
from bisect import bisect
from typing import Union, List, Callable, Tuple, Dict, Any
from hanlp_common.constant import IDX
from hanlp.layers.transformers.utils import build_optimizer_scheduler_with_transformer
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import PadSequenceDataLoader, SortingSampler
from hanlp.common.torch_component import TorchComponent
from hanlp.common.transform import FieldLength
from hanlp.common.vocab import Vocab
from hanlp.components.srl.span_rank.inference_utils import srl_decode
from hanlp.components.srl.span_rank.span_ranking_srl_model import SpanRankingSRLModel
from hanlp.components.srl.span_rank.srl_eval_utils import compute_srl_f1
from hanlp.datasets.srl.conll2012 import CoNLL2012SRLDataset, filter_v_args, unpack_srl, \
group_pa_by_p
from hanlp.layers.embeddings.embedding import Embedding
from hanlp.metrics.f1 import F1
from hanlp_common.visualization import markdown_table
from hanlp.utils.time_util import CountdownTimer
from hanlp_common.util import merge_locals_kwargs, reorder
class SpanRankingSemanticRoleLabeler(TorchComponent):
def __init__(self, **kwargs) -> None:
"""An implementation of "Jointly Predicting Predicates and Arguments in Neural Semantic Role Labeling"
(:cite:`he-etal-2018-jointly`). It generates candidates triples of (predicate, arg_start, arg_end) and rank them.
Args:
**kwargs: Predefined config.
"""
super().__init__(**kwargs)
self.model: SpanRankingSRLModel = None
def build_optimizer(self,
trn,
epochs,
lr,
adam_epsilon,
weight_decay,
warmup_steps,
transformer_lr,
**kwargs):
# noinspection PyProtectedMember
transformer = self._get_transformer()
if transformer:
num_training_steps = len(trn) * epochs // self.config.get('gradient_accumulation', 1)
optimizer, scheduler = build_optimizer_scheduler_with_transformer(self.model,
transformer,
lr, transformer_lr,
num_training_steps, warmup_steps,
weight_decay, adam_epsilon)
else:
optimizer = torch.optim.Adam(self.model.parameters(), self.config.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='max',
factor=0.5,
patience=2,
verbose=True,
)
return optimizer, scheduler
def _get_transformer(self):
return getattr(self.model_.embed, 'transformer', None)
def build_criterion(self, **kwargs):
pass
# noinspection PyProtectedMember
def build_metric(self, **kwargs) -> Tuple[F1, F1]:
predicate_f1 = F1()
end_to_end_f1 = F1()
return predicate_f1, end_to_end_f1
def execute_training_loop(self,
trn: DataLoader,
dev: DataLoader,
epochs,
criterion,
optimizer,
metric,
save_dir,
logger: logging.Logger,
devices,
**kwargs):
best_epoch, best_metric = 0, -1
predicate, end_to_end = metric
optimizer, scheduler = optimizer
timer = CountdownTimer(epochs)
ratio_width = len(f'{len(trn)}/{len(trn)}')
for epoch in range(1, epochs + 1):
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, criterion, optimizer, metric, logger,
linear_scheduler=scheduler if self._get_transformer() else None)
if dev:
self.evaluate_dataloader(dev, criterion, metric, logger, ratio_width=ratio_width)
report = f'{timer.elapsed_human}/{timer.total_time_human}'
dev_score = end_to_end.score
if not self._get_transformer():
scheduler.step(dev_score)
if dev_score > best_metric:
self.save_weights(save_dir)
best_metric = dev_score
report += ' [red]saved[/red]'
timer.log(report, ratio_percentage=False, newline=True, ratio=False)
def fit_dataloader(self,
trn: DataLoader,
criterion,
optimizer,
metric,
logger: logging.Logger,
linear_scheduler=None,
gradient_accumulation=1,
**kwargs):
self.model.train()
timer = CountdownTimer(len(trn) // gradient_accumulation)
total_loss = 0
self.reset_metrics(metric)
for idx, batch in enumerate(trn):
output_dict = self.feed_batch(batch)
self.update_metrics(batch, output_dict, metric)
loss = output_dict['loss']
loss = loss.sum() # For data parallel
loss.backward()
if gradient_accumulation and gradient_accumulation > 1:
loss /= gradient_accumulation
if self.config.grad_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm)
if (idx + 1) % gradient_accumulation == 0:
self._step(optimizer, linear_scheduler)
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger)
total_loss += loss.item()
del loss
if len(trn) % gradient_accumulation:
self._step(optimizer, linear_scheduler)
return total_loss / timer.total
def _step(self, optimizer, linear_scheduler):
optimizer.step()
optimizer.zero_grad()
if linear_scheduler:
linear_scheduler.step()
# noinspection PyMethodOverriding
@torch.no_grad()
def evaluate_dataloader(self,
data: DataLoader,
criterion: Callable,
metric,
logger,
ratio_width=None,
output=False,
official=False,
confusion_matrix=False,
**kwargs):
self.model.eval()
self.reset_metrics(metric)
timer = CountdownTimer(len(data))
total_loss = 0
if official:
sentences = []
gold = []
pred = []
for batch in data:
output_dict = self.feed_batch(batch)
if official:
sentences += batch['token']
gold += batch['srl']
pred += output_dict['prediction']
self.update_metrics(batch, output_dict, metric)
loss = output_dict['loss']
total_loss += loss.item()
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger,
ratio_width=ratio_width)
del loss
if official:
scores = compute_srl_f1(sentences, gold, pred)
if logger:
if confusion_matrix:
labels = sorted(set(y for x in scores.label_confusions.keys() for y in x))
headings = ['GOLD↓PRED→'] + labels
matrix = []
for i, gold in enumerate(labels):
row = [gold]
matrix.append(row)
for j, pred in enumerate(labels):
row.append(scores.label_confusions.get((gold, pred), 0))
matrix = markdown_table(headings, matrix)
logger.info(f'{"Confusion Matrix": ^{len(matrix.splitlines()[0])}}')
logger.info(matrix)
headings = ['Settings', 'Precision', 'Recall', 'F1']
data = []
for h, (p, r, f) in zip(['Unlabeled', 'Labeled', 'Official'], [
[scores.unlabeled_precision, scores.unlabeled_recall, scores.unlabeled_f1],
[scores.precision, scores.recall, scores.f1],
[scores.conll_precision, scores.conll_recall, scores.conll_f1],
]):
data.append([h] + [f'{x:.2%}' for x in [p, r, f]])
table = markdown_table(headings, data)
logger.info(f'{"Scores": ^{len(table.splitlines()[0])}}')
logger.info(table)
else:
scores = metric
return total_loss / timer.total, scores
def build_model(self,
training=True,
**kwargs) -> torch.nn.Module:
# noinspection PyTypeChecker
# embed: torch.nn.Embedding = self.config.embed.module(vocabs=self.vocabs)[0].embed
model = SpanRankingSRLModel(self.config,
self.config.embed.module(vocabs=self.vocabs, training=training),
self.config.context_layer,
len(self.vocabs.srl_label))
return model
# noinspection PyMethodOverriding
def build_dataloader(self, data, batch_size, shuffle, device, logger: logging.Logger,
generate_idx=False, **kwargs) -> DataLoader:
batch_max_tokens = self.config.batch_max_tokens
gradient_accumulation = self.config.get('gradient_accumulation', 1)
if batch_size:
batch_size //= gradient_accumulation
if batch_max_tokens:
batch_max_tokens //= gradient_accumulation
dataset = self.build_dataset(data, generate_idx, logger)
sampler = SortingSampler([x['token_length'] for x in dataset],
batch_size=batch_size,
batch_max_tokens=batch_max_tokens,
shuffle=shuffle)
return PadSequenceDataLoader(batch_sampler=sampler,
device=device,
dataset=dataset)
def build_dataset(self, data, generate_idx, logger, transform=None):
dataset = CoNLL2012SRLDataset(data, transform=[filter_v_args, unpack_srl, group_pa_by_p],
doc_level_offset=self.config.doc_level_offset, generate_idx=generate_idx)
if transform:
dataset.append_transform(transform)
if isinstance(self.config.get('embed', None), Embedding):
transform = self.config.embed.transform(vocabs=self.vocabs)
if transform:
dataset.append_transform(transform)
dataset.append_transform(self.vocabs)
dataset.append_transform(FieldLength('token'))
if isinstance(data, str):
dataset.purge_cache() # Enable cache
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
return dataset
def predict(self, data: Union[str, List[str]], batch_size: int = None, fmt='dict', **kwargs):
if not data:
return []
flat = self.input_is_flat(data)
if flat:
data = [data]
samples = []
for token in data:
sample = dict()
sample['token'] = token
samples.append(sample)
batch_size = batch_size or self.config.batch_size
dataloader = self.build_dataloader(samples, batch_size, False, self.device, None, generate_idx=True)
outputs = []
order = []
for batch in dataloader:
output_dict = self.feed_batch(batch)
outputs.extend(output_dict['prediction'])
order.extend(batch[IDX])
outputs = reorder(outputs, order)
if fmt == 'list':
outputs = self.format_dict_to_results(data, outputs)
if flat:
return outputs[0]
return outputs
@staticmethod
def format_dict_to_results(data, outputs, exclusive_offset=False, with_predicate=False, with_argument=False,
label_first=False):
results = []
for i in range(len(outputs)):
tokens = data[i]
output = []
for p, a in outputs[i].items():
# a: [(0, 0, 'ARG0')]
if with_predicate:
a.insert(bisect([x[0] for x in a], p), (p, p, 'PRED'))
if with_argument is not False:
a = [x + (tokens[x[0]:x[1] + 1],) for x in a]
if isinstance(with_argument, str):
a = [x[:-1] + (with_argument.join(x[-1]),) for x in a]
if exclusive_offset:
a = [(x[0], x[1] + 1) + x[2:] for x in a]
if label_first:
a = [tuple(reversed(x[2:])) + x[:2] for x in a]
output.append(a)
results.append(output)
return results
def input_is_flat(self, data):
return isinstance(data[0], str)
# noinspection PyMethodOverriding
def fit(self,
trn_data,
dev_data,
save_dir,
embed,
context_layer,
batch_size=40,
batch_max_tokens=700,
lexical_dropout=0.5,
dropout=0.2,
span_width_feature_size=20,
ffnn_size=150,
ffnn_depth=2,
argument_ratio=0.8,
predicate_ratio=0.4,
max_arg_width=30,
mlp_label_size=100,
enforce_srl_constraint=False,
use_gold_predicates=False,
doc_level_offset=True,
use_biaffine=False,
lr=1e-3,
transformer_lr=1e-5,
adam_epsilon=1e-6,
weight_decay=0.01,
warmup_steps=0.1,
grad_norm=5.0,
gradient_accumulation=1,
loss_reduction='sum',
devices=None,
logger=None,
seed=None,
**kwargs
):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def build_vocabs(self, dataset, logger, **kwargs):
self.vocabs.srl_label = Vocab(pad_token=None, unk_token=None)
# Use null to indicate no relationship
self.vocabs.srl_label.add('<null>')
timer = CountdownTimer(len(dataset))
max_seq_len = 0
for each in dataset:
max_seq_len = max(max_seq_len, len(each['token_input_ids']))
timer.log(f'Building vocabs (max sequence length {max_seq_len}) [blink][yellow]...[/yellow][/blink]')
pass
timer.stop()
timer.erase()
self.vocabs['srl_label'].set_unk_as_safe_unk()
self.vocabs.lock()
self.vocabs.summary(logger)
def reset_metrics(self, metrics):
for each in metrics:
each.reset()
def report_metrics(self, loss, metrics):
predicate, end_to_end = metrics
return f'loss: {loss:.4f} predicate: {predicate.score:.2%} end_to_end: {end_to_end.score:.2%}'
def feed_batch(self, batch) -> Dict[str, Any]:
output_dict = self.model(batch)
prediction = self.decode_output(output_dict, batch, self.model.training)
output_dict['prediction'] = prediction
return output_dict
def decode_output(self, output_dict, batch, training=False):
idx_to_label = self.vocabs['srl_label'].idx_to_token
if training:
# Use fast decoding during training,
prediction = []
top_predicate_indices = output_dict['predicates'].tolist()
top_spans = torch.stack([output_dict['arg_starts'], output_dict['arg_ends']], dim=-1).tolist()
srl_mask = output_dict['srl_mask'].tolist()
for n, (pal, predicate_indices, argument_spans) in enumerate(
zip(output_dict['srl_scores'].argmax(-1).tolist(), top_predicate_indices, top_spans)):
srl_per_sentence = {}
for p, (al, predicate_index) in enumerate(zip(pal, predicate_indices)):
for a, (l, argument_span) in enumerate(zip(al, argument_spans)):
if l and srl_mask[n][p][a]:
args = srl_per_sentence.get(p, None)
if args is None:
args = srl_per_sentence[p] = []
args.append((*argument_span, idx_to_label[l]))
prediction.append(srl_per_sentence)
else:
prediction = srl_decode(batch['token_length'], output_dict, idx_to_label, self.config)
return prediction
def update_metrics(self, batch: dict, output_dict: dict, metrics):
def unpack(y: dict):
return set((p, bel) for p, a in y.items() for bel in a)
predicate, end_to_end = metrics
for pred, gold in zip(output_dict['prediction'], batch['srl']):
predicate(pred.keys(), gold.keys())
end_to_end(unpack(pred), unpack(gold)) | hanlp/components/srl/span_rank/span_rank.py | import logging
from bisect import bisect
from typing import Union, List, Callable, Tuple, Dict, Any
from hanlp_common.constant import IDX
from hanlp.layers.transformers.utils import build_optimizer_scheduler_with_transformer
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import PadSequenceDataLoader, SortingSampler
from hanlp.common.torch_component import TorchComponent
from hanlp.common.transform import FieldLength
from hanlp.common.vocab import Vocab
from hanlp.components.srl.span_rank.inference_utils import srl_decode
from hanlp.components.srl.span_rank.span_ranking_srl_model import SpanRankingSRLModel
from hanlp.components.srl.span_rank.srl_eval_utils import compute_srl_f1
from hanlp.datasets.srl.conll2012 import CoNLL2012SRLDataset, filter_v_args, unpack_srl, \
group_pa_by_p
from hanlp.layers.embeddings.embedding import Embedding
from hanlp.metrics.f1 import F1
from hanlp_common.visualization import markdown_table
from hanlp.utils.time_util import CountdownTimer
from hanlp_common.util import merge_locals_kwargs, reorder
class SpanRankingSemanticRoleLabeler(TorchComponent):
def __init__(self, **kwargs) -> None:
"""An implementation of "Jointly Predicting Predicates and Arguments in Neural Semantic Role Labeling"
(:cite:`he-etal-2018-jointly`). It generates candidates triples of (predicate, arg_start, arg_end) and rank them.
Args:
**kwargs: Predefined config.
"""
super().__init__(**kwargs)
self.model: SpanRankingSRLModel = None
def build_optimizer(self,
trn,
epochs,
lr,
adam_epsilon,
weight_decay,
warmup_steps,
transformer_lr,
**kwargs):
# noinspection PyProtectedMember
transformer = self._get_transformer()
if transformer:
num_training_steps = len(trn) * epochs // self.config.get('gradient_accumulation', 1)
optimizer, scheduler = build_optimizer_scheduler_with_transformer(self.model,
transformer,
lr, transformer_lr,
num_training_steps, warmup_steps,
weight_decay, adam_epsilon)
else:
optimizer = torch.optim.Adam(self.model.parameters(), self.config.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='max',
factor=0.5,
patience=2,
verbose=True,
)
return optimizer, scheduler
def _get_transformer(self):
return getattr(self.model_.embed, 'transformer', None)
def build_criterion(self, **kwargs):
pass
# noinspection PyProtectedMember
def build_metric(self, **kwargs) -> Tuple[F1, F1]:
predicate_f1 = F1()
end_to_end_f1 = F1()
return predicate_f1, end_to_end_f1
def execute_training_loop(self,
trn: DataLoader,
dev: DataLoader,
epochs,
criterion,
optimizer,
metric,
save_dir,
logger: logging.Logger,
devices,
**kwargs):
best_epoch, best_metric = 0, -1
predicate, end_to_end = metric
optimizer, scheduler = optimizer
timer = CountdownTimer(epochs)
ratio_width = len(f'{len(trn)}/{len(trn)}')
for epoch in range(1, epochs + 1):
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, criterion, optimizer, metric, logger,
linear_scheduler=scheduler if self._get_transformer() else None)
if dev:
self.evaluate_dataloader(dev, criterion, metric, logger, ratio_width=ratio_width)
report = f'{timer.elapsed_human}/{timer.total_time_human}'
dev_score = end_to_end.score
if not self._get_transformer():
scheduler.step(dev_score)
if dev_score > best_metric:
self.save_weights(save_dir)
best_metric = dev_score
report += ' [red]saved[/red]'
timer.log(report, ratio_percentage=False, newline=True, ratio=False)
def fit_dataloader(self,
trn: DataLoader,
criterion,
optimizer,
metric,
logger: logging.Logger,
linear_scheduler=None,
gradient_accumulation=1,
**kwargs):
self.model.train()
timer = CountdownTimer(len(trn) // gradient_accumulation)
total_loss = 0
self.reset_metrics(metric)
for idx, batch in enumerate(trn):
output_dict = self.feed_batch(batch)
self.update_metrics(batch, output_dict, metric)
loss = output_dict['loss']
loss = loss.sum() # For data parallel
loss.backward()
if gradient_accumulation and gradient_accumulation > 1:
loss /= gradient_accumulation
if self.config.grad_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm)
if (idx + 1) % gradient_accumulation == 0:
self._step(optimizer, linear_scheduler)
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger)
total_loss += loss.item()
del loss
if len(trn) % gradient_accumulation:
self._step(optimizer, linear_scheduler)
return total_loss / timer.total
def _step(self, optimizer, linear_scheduler):
optimizer.step()
optimizer.zero_grad()
if linear_scheduler:
linear_scheduler.step()
# noinspection PyMethodOverriding
@torch.no_grad()
def evaluate_dataloader(self,
data: DataLoader,
criterion: Callable,
metric,
logger,
ratio_width=None,
output=False,
official=False,
confusion_matrix=False,
**kwargs):
self.model.eval()
self.reset_metrics(metric)
timer = CountdownTimer(len(data))
total_loss = 0
if official:
sentences = []
gold = []
pred = []
for batch in data:
output_dict = self.feed_batch(batch)
if official:
sentences += batch['token']
gold += batch['srl']
pred += output_dict['prediction']
self.update_metrics(batch, output_dict, metric)
loss = output_dict['loss']
total_loss += loss.item()
timer.log(self.report_metrics(total_loss / (timer.current + 1), metric), ratio_percentage=None,
logger=logger,
ratio_width=ratio_width)
del loss
if official:
scores = compute_srl_f1(sentences, gold, pred)
if logger:
if confusion_matrix:
labels = sorted(set(y for x in scores.label_confusions.keys() for y in x))
headings = ['GOLD↓PRED→'] + labels
matrix = []
for i, gold in enumerate(labels):
row = [gold]
matrix.append(row)
for j, pred in enumerate(labels):
row.append(scores.label_confusions.get((gold, pred), 0))
matrix = markdown_table(headings, matrix)
logger.info(f'{"Confusion Matrix": ^{len(matrix.splitlines()[0])}}')
logger.info(matrix)
headings = ['Settings', 'Precision', 'Recall', 'F1']
data = []
for h, (p, r, f) in zip(['Unlabeled', 'Labeled', 'Official'], [
[scores.unlabeled_precision, scores.unlabeled_recall, scores.unlabeled_f1],
[scores.precision, scores.recall, scores.f1],
[scores.conll_precision, scores.conll_recall, scores.conll_f1],
]):
data.append([h] + [f'{x:.2%}' for x in [p, r, f]])
table = markdown_table(headings, data)
logger.info(f'{"Scores": ^{len(table.splitlines()[0])}}')
logger.info(table)
else:
scores = metric
return total_loss / timer.total, scores
def build_model(self,
training=True,
**kwargs) -> torch.nn.Module:
# noinspection PyTypeChecker
# embed: torch.nn.Embedding = self.config.embed.module(vocabs=self.vocabs)[0].embed
model = SpanRankingSRLModel(self.config,
self.config.embed.module(vocabs=self.vocabs, training=training),
self.config.context_layer,
len(self.vocabs.srl_label))
return model
# noinspection PyMethodOverriding
def build_dataloader(self, data, batch_size, shuffle, device, logger: logging.Logger,
generate_idx=False, **kwargs) -> DataLoader:
batch_max_tokens = self.config.batch_max_tokens
gradient_accumulation = self.config.get('gradient_accumulation', 1)
if batch_size:
batch_size //= gradient_accumulation
if batch_max_tokens:
batch_max_tokens //= gradient_accumulation
dataset = self.build_dataset(data, generate_idx, logger)
sampler = SortingSampler([x['token_length'] for x in dataset],
batch_size=batch_size,
batch_max_tokens=batch_max_tokens,
shuffle=shuffle)
return PadSequenceDataLoader(batch_sampler=sampler,
device=device,
dataset=dataset)
def build_dataset(self, data, generate_idx, logger, transform=None):
dataset = CoNLL2012SRLDataset(data, transform=[filter_v_args, unpack_srl, group_pa_by_p],
doc_level_offset=self.config.doc_level_offset, generate_idx=generate_idx)
if transform:
dataset.append_transform(transform)
if isinstance(self.config.get('embed', None), Embedding):
transform = self.config.embed.transform(vocabs=self.vocabs)
if transform:
dataset.append_transform(transform)
dataset.append_transform(self.vocabs)
dataset.append_transform(FieldLength('token'))
if isinstance(data, str):
dataset.purge_cache() # Enable cache
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
return dataset
def predict(self, data: Union[str, List[str]], batch_size: int = None, fmt='dict', **kwargs):
if not data:
return []
flat = self.input_is_flat(data)
if flat:
data = [data]
samples = []
for token in data:
sample = dict()
sample['token'] = token
samples.append(sample)
batch_size = batch_size or self.config.batch_size
dataloader = self.build_dataloader(samples, batch_size, False, self.device, None, generate_idx=True)
outputs = []
order = []
for batch in dataloader:
output_dict = self.feed_batch(batch)
outputs.extend(output_dict['prediction'])
order.extend(batch[IDX])
outputs = reorder(outputs, order)
if fmt == 'list':
outputs = self.format_dict_to_results(data, outputs)
if flat:
return outputs[0]
return outputs
@staticmethod
def format_dict_to_results(data, outputs, exclusive_offset=False, with_predicate=False, with_argument=False,
label_first=False):
results = []
for i in range(len(outputs)):
tokens = data[i]
output = []
for p, a in outputs[i].items():
# a: [(0, 0, 'ARG0')]
if with_predicate:
a.insert(bisect([x[0] for x in a], p), (p, p, 'PRED'))
if with_argument is not False:
a = [x + (tokens[x[0]:x[1] + 1],) for x in a]
if isinstance(with_argument, str):
a = [x[:-1] + (with_argument.join(x[-1]),) for x in a]
if exclusive_offset:
a = [(x[0], x[1] + 1) + x[2:] for x in a]
if label_first:
a = [tuple(reversed(x[2:])) + x[:2] for x in a]
output.append(a)
results.append(output)
return results
def input_is_flat(self, data):
return isinstance(data[0], str)
# noinspection PyMethodOverriding
def fit(self,
trn_data,
dev_data,
save_dir,
embed,
context_layer,
batch_size=40,
batch_max_tokens=700,
lexical_dropout=0.5,
dropout=0.2,
span_width_feature_size=20,
ffnn_size=150,
ffnn_depth=2,
argument_ratio=0.8,
predicate_ratio=0.4,
max_arg_width=30,
mlp_label_size=100,
enforce_srl_constraint=False,
use_gold_predicates=False,
doc_level_offset=True,
use_biaffine=False,
lr=1e-3,
transformer_lr=1e-5,
adam_epsilon=1e-6,
weight_decay=0.01,
warmup_steps=0.1,
grad_norm=5.0,
gradient_accumulation=1,
loss_reduction='sum',
devices=None,
logger=None,
seed=None,
**kwargs
):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def build_vocabs(self, dataset, logger, **kwargs):
self.vocabs.srl_label = Vocab(pad_token=None, unk_token=None)
# Use null to indicate no relationship
self.vocabs.srl_label.add('<null>')
timer = CountdownTimer(len(dataset))
max_seq_len = 0
for each in dataset:
max_seq_len = max(max_seq_len, len(each['token_input_ids']))
timer.log(f'Building vocabs (max sequence length {max_seq_len}) [blink][yellow]...[/yellow][/blink]')
pass
timer.stop()
timer.erase()
self.vocabs['srl_label'].set_unk_as_safe_unk()
self.vocabs.lock()
self.vocabs.summary(logger)
def reset_metrics(self, metrics):
for each in metrics:
each.reset()
def report_metrics(self, loss, metrics):
predicate, end_to_end = metrics
return f'loss: {loss:.4f} predicate: {predicate.score:.2%} end_to_end: {end_to_end.score:.2%}'
    def feed_batch(self, batch) -> Dict[str, Any]:
        """Run the model on one batch and attach decoded predictions.

        Returns the model's output dict augmented with a ``'prediction'`` entry
        produced by :meth:`decode_output` (fast decoding while the model is in
        training mode, full decoding otherwise).
        """
        output_dict = self.model(batch)
        prediction = self.decode_output(output_dict, batch, self.model.training)
        output_dict['prediction'] = prediction
        return output_dict
    def decode_output(self, output_dict, batch, training=False):
        """Decode model scores into one ``{predicate: [(start, end, label), ...]}`` dict per sentence.

        During training an unconstrained argmax over the pruned candidate
        predicates/spans is used for speed; at inference time the full
        constrained ``srl_decode`` is applied instead.

        NOTE(review): in the training branch the dict keys are candidate
        positions ``p`` (``predicate_index`` is unpacked but unused) — confirm
        downstream consumers expect that, not token offsets.
        """
        idx_to_label = self.vocabs['srl_label'].idx_to_token
        if training:
            # Use fast decoding during training,
            prediction = []
            top_predicate_indices = output_dict['predicates'].tolist()
            top_spans = torch.stack([output_dict['arg_starts'], output_dict['arg_ends']], dim=-1).tolist()
            srl_mask = output_dict['srl_mask'].tolist()
            for n, (pal, predicate_indices, argument_spans) in enumerate(
                    zip(output_dict['srl_scores'].argmax(-1).tolist(), top_predicate_indices, top_spans)):
                srl_per_sentence = {}
                for p, (al, predicate_index) in enumerate(zip(pal, predicate_indices)):
                    for a, (l, argument_span) in enumerate(zip(al, argument_spans)):
                        # Label id 0 is '<null>' (no relation); also honor the candidate mask.
                        if l and srl_mask[n][p][a]:
                            args = srl_per_sentence.get(p, None)
                            if args is None:
                                args = srl_per_sentence[p] = []
                            args.append((*argument_span, idx_to_label[l]))
                prediction.append(srl_per_sentence)
        else:
            prediction = srl_decode(batch['token_length'], output_dict, idx_to_label, self.config)
        return prediction
def update_metrics(self, batch: dict, output_dict: dict, metrics):
def unpack(y: dict):
return set((p, bel) for p, a in y.items() for bel in a)
predicate, end_to_end = metrics
for pred, gold in zip(output_dict['prediction'], batch['srl']):
predicate(pred.keys(), gold.keys())
end_to_end(unpack(pred), unpack(gold)) | 0.933363 | 0.310917 |
import _init_paths
import os
import cv2
import random
import argparse
import numpy as np
import models.models as models
from os.path import exists, join
from torch.autograd import Variable
from tracker.siamfc import SiamFC
from tracker.siamrpn import SiamRPN
from easydict import EasyDict as edict
from utils.utils import load_pretrain, cxy_wh_2_rect, get_axis_aligned_bbox, load_dataset, poly_iou
def parse_args():
    """Parse command-line options for single-video SiamFC/SiamRPN tracking."""
    parser = argparse.ArgumentParser(description='PyTorch SiamFC Tracking Test')
    parser.add_argument('--arch', default='SiamRPNRes22', type=str,
                        help='backbone architecture')
    parser.add_argument('--resume',
                        default='/data/zpzhang/project4/siamese/Siamese/snapshot/CIResNet22RPN.model',
                        type=str, help='pretrained model')
    parser.add_argument('--video',
                        default='/data/zpzhang/project4/siamese/Siamese/videos/bag.mp4',
                        type=str, help='video file path')
    parser.add_argument('--init_bbox', default=None,
                        help='bbox in the first frame None or [lx, ly, w, h]')
    return parser.parse_args()
def track_video(tracker, model, video_path, init_box=None):
    """Interactively track a single target through a video file with OpenCV.

    Args:
        tracker: SiamFC or SiamRPN tracker (provides ``init()`` / ``track()``).
        model: loaded network, passed through to ``tracker.init()``.
        video_path: path to the video file.
        init_box: optional ``[lx, ly, w, h]`` box for the first frame; when
            None the user draws the ROI interactively.

    Keys while running: 'q' quits, 'r' re-selects the target.
    """
    assert os.path.isfile(video_path), "please provide a valid video file"
    cap = cv2.VideoCapture(video_path)
    display_name = 'Video: {}'.format(video_path.split('/')[-1])
    cv2.namedWindow(display_name, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow(display_name, 960, 720)
    success, frame = cap.read()
    cv2.imshow(display_name, frame)
    if success is not True:
        print("Read failed.")
        exit(-1)
    # init: seed tracker state either from the given box or an interactive ROI
    if init_box is not None:
        lx, ly, w, h = init_box
        # tracker state is (center position, size)
        target_pos = np.array([lx + w/2, ly + h/2])
        target_sz = np.array([w, h])
        state = tracker.init(frame, target_pos, target_sz, model)  # init tracker
    else:
        while True:
            frame_disp = frame.copy()
            cv2.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1, (0, 0, 255), 1)
            lx, ly, w, h = cv2.selectROI(display_name, frame_disp, fromCenter=False)
            target_pos = np.array([lx + w / 2, ly + h / 2])
            target_sz = np.array([w, h])
            state = tracker.init(frame_disp, target_pos, target_sz, model)  # init tracker
            break
    # main tracking loop: one iteration per video frame
    while True:
        ret, frame = cap.read()
        if frame is None:
            return
        frame_disp = frame.copy()
        # Draw box
        state = tracker.track(state, frame_disp)  # track
        # convert (center, size) state back to a corner rectangle for drawing
        location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
        x1, y1, x2, y2 = int(location[0]), int(location[1]), int(location[0] + location[2]), int(location[1] + location[3])
        cv2.rectangle(frame_disp, (x1, y1), (x2, y2), (0, 255, 0), 5)
        font_color = (0, 0, 0)
        cv2.putText(frame_disp, 'Tracking!', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        cv2.putText(frame_disp, 'Press r to reset', (20, 55), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        cv2.putText(frame_disp, 'Press q to quit', (20, 80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        # Display the resulting frame
        cv2.imshow(display_name, frame_disp)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('r'):
            # re-select the target on a fresh frame and re-initialize the tracker
            ret, frame = cap.read()
            frame_disp = frame.copy()
            cv2.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1.5,
                        (0, 0, 0), 1)
            cv2.imshow(display_name, frame_disp)
            lx, ly, w, h = cv2.selectROI(display_name, frame_disp, fromCenter=False)
            target_pos = np.array([lx + w / 2, ly + h / 2])
            target_sz = np.array([w, h])
            state = tracker.init(frame_disp, target_pos, target_sz, model)
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: build the network + tracker from CLI args and track the video."""
    args = parse_args()

    # prepare tracker configuration shared by SiamFC and SiamRPN
    info = edict()
    info.arch = args.arch
    info.dataset = args.video
    info.epoch_test = True
    info.cls_type = 'thinner'

    # prepare model (SiamRPN or SiamFC) based on the architecture name
    if 'FC' in args.arch:
        net = models.__dict__[args.arch]()
        tracker = SiamFC(info)
    else:
        net = models.__dict__[args.arch](anchors_nums=5, cls_type='thinner')
        tracker = SiamRPN(info)
    print('[*] ======= Track video with {} ======='.format(args.arch))

    net = load_pretrain(net, args.resume)
    net.eval()
    net = net.cuda()

    # --init_bbox arrives as a string such as "[10, 20, 30, 40]"; parse it
    # safely. ast.literal_eval only accepts Python literals, unlike eval()
    # which would execute arbitrary code taken from the command line.
    if not isinstance(args.init_bbox, list) and args.init_bbox is not None:
        import ast
        args.init_bbox = list(ast.literal_eval(args.init_bbox))

    track_video(tracker, net, args.video, init_box=args.init_bbox)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
import _init_paths
import os
import cv2
import random
import argparse
import numpy as np
import models.models as models
from os.path import exists, join
from torch.autograd import Variable
from tracker.siamfc import SiamFC
from tracker.siamrpn import SiamRPN
from easydict import EasyDict as edict
from utils.utils import load_pretrain, cxy_wh_2_rect, get_axis_aligned_bbox, load_dataset, poly_iou
def parse_args():
    """Parse command-line options for single-video SiamFC/SiamRPN tracking."""
    parser = argparse.ArgumentParser(description='PyTorch SiamFC Tracking Test')
    parser.add_argument('--arch', default='SiamRPNRes22', type=str,
                        help='backbone architecture')
    parser.add_argument('--resume',
                        default='/data/zpzhang/project4/siamese/Siamese/snapshot/CIResNet22RPN.model',
                        type=str, help='pretrained model')
    parser.add_argument('--video',
                        default='/data/zpzhang/project4/siamese/Siamese/videos/bag.mp4',
                        type=str, help='video file path')
    parser.add_argument('--init_bbox', default=None,
                        help='bbox in the first frame None or [lx, ly, w, h]')
    return parser.parse_args()
def track_video(tracker, model, video_path, init_box=None):
    """Interactively track a single target through a video file with OpenCV.

    Args:
        tracker: SiamFC or SiamRPN tracker (provides ``init()`` / ``track()``).
        model: loaded network, passed through to ``tracker.init()``.
        video_path: path to the video file.
        init_box: optional ``[lx, ly, w, h]`` box for the first frame; when
            None the user draws the ROI interactively.

    Keys while running: 'q' quits, 'r' re-selects the target.
    """
    assert os.path.isfile(video_path), "please provide a valid video file"
    cap = cv2.VideoCapture(video_path)
    display_name = 'Video: {}'.format(video_path.split('/')[-1])
    cv2.namedWindow(display_name, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
    cv2.resizeWindow(display_name, 960, 720)
    success, frame = cap.read()
    cv2.imshow(display_name, frame)
    if success is not True:
        print("Read failed.")
        exit(-1)
    # init: seed tracker state either from the given box or an interactive ROI
    if init_box is not None:
        lx, ly, w, h = init_box
        # tracker state is (center position, size)
        target_pos = np.array([lx + w/2, ly + h/2])
        target_sz = np.array([w, h])
        state = tracker.init(frame, target_pos, target_sz, model)  # init tracker
    else:
        while True:
            frame_disp = frame.copy()
            cv2.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1, (0, 0, 255), 1)
            lx, ly, w, h = cv2.selectROI(display_name, frame_disp, fromCenter=False)
            target_pos = np.array([lx + w / 2, ly + h / 2])
            target_sz = np.array([w, h])
            state = tracker.init(frame_disp, target_pos, target_sz, model)  # init tracker
            break
    # main tracking loop: one iteration per video frame
    while True:
        ret, frame = cap.read()
        if frame is None:
            return
        frame_disp = frame.copy()
        # Draw box
        state = tracker.track(state, frame_disp)  # track
        # convert (center, size) state back to a corner rectangle for drawing
        location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
        x1, y1, x2, y2 = int(location[0]), int(location[1]), int(location[0] + location[2]), int(location[1] + location[3])
        cv2.rectangle(frame_disp, (x1, y1), (x2, y2), (0, 255, 0), 5)
        font_color = (0, 0, 0)
        cv2.putText(frame_disp, 'Tracking!', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        cv2.putText(frame_disp, 'Press r to reset', (20, 55), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        cv2.putText(frame_disp, 'Press q to quit', (20, 80), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                    font_color, 1)
        # Display the resulting frame
        cv2.imshow(display_name, frame_disp)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('r'):
            # re-select the target on a fresh frame and re-initialize the tracker
            ret, frame = cap.read()
            frame_disp = frame.copy()
            cv2.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1.5,
                        (0, 0, 0), 1)
            cv2.imshow(display_name, frame_disp)
            lx, ly, w, h = cv2.selectROI(display_name, frame_disp, fromCenter=False)
            target_pos = np.array([lx + w / 2, ly + h / 2])
            target_sz = np.array([w, h])
            state = tracker.init(frame_disp, target_pos, target_sz, model)
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: build the network + tracker from CLI args and track the video."""
    args = parse_args()

    # prepare tracker configuration shared by SiamFC and SiamRPN
    info = edict()
    info.arch = args.arch
    info.dataset = args.video
    info.epoch_test = True
    info.cls_type = 'thinner'

    # prepare model (SiamRPN or SiamFC) based on the architecture name
    if 'FC' in args.arch:
        net = models.__dict__[args.arch]()
        tracker = SiamFC(info)
    else:
        net = models.__dict__[args.arch](anchors_nums=5, cls_type='thinner')
        tracker = SiamRPN(info)
    print('[*] ======= Track video with {} ======='.format(args.arch))

    net = load_pretrain(net, args.resume)
    net.eval()
    net = net.cuda()

    # --init_bbox arrives as a string such as "[10, 20, 30, 40]"; parse it
    # safely. ast.literal_eval only accepts Python literals, unlike eval()
    # which would execute arbitrary code taken from the command line.
    if not isinstance(args.init_bbox, list) and args.init_bbox is not None:
        import ast
        args.init_bbox = list(ast.literal_eval(args.init_bbox))

    track_video(tracker, net, args.video, init_box=args.init_bbox)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
# Standard library imports
import json
import os
from datetime import datetime, timedelta
# Third party imports
import boto3
# AWS clients/resources are created once at import time so warm Lambda
# invocations reuse them.
logs_client = boto3.client("logs")
# NOTE(review): the env var is named LOG_STREAM_NAME but the value is used as
# a log *group* name (passed as logGroupName below) — consider renaming.
log_group_name = os.environ.get("LOG_STREAM_NAME")
ddb_table_name = os.environ.get("DDB_TABLE")
ddb_table = boto3.resource("dynamodb").Table(name=ddb_table_name)
def event_handler(event, _context):
    """Assert and Clean Up: verify the audit-log event and delete the test user.

    Reads the arrange/act step's payload from ``event['arrange_act_payload']``,
    searches the CloudWatch Log Group for exactly one matching ``UserCreated``
    event, then deletes the DynamoDB test item and returns a success or error
    payload.
    """
    # If the arrange / act step returned an error, bail early
    if not event["arrange_act_payload"]["act_success"]:
        return error_response(event["arrange_act_payload"]["error_message"])
    test_user_key = event["arrange_act_payload"]["test_user_key"]
    test_user_pk = test_user_key["PK"]
    test_user_sk = test_user_key["SK"]
    expected_json = {
        "EventType": "UserCreated",
        "PK": {"S": test_user_pk},
        "SK": {"S": test_user_sk},
    }
    # 3. Assert
    # Filter the sought event from the CloudWatch Log Group.
    # Fix: PK/SK were swapped here ($.SK.S compared against the PK value and
    # vice versa), so any event matching the filter would then fail the
    # expected_json equality check below.
    filter_pattern = (
        f'{{ ($.EventType = "UserCreated") && ($.PK.S = "{test_user_pk}") '
        f'&& ($.SK.S = "{test_user_sk}") }}'
    )
    # Set the search horizon to one minute ago (milliseconds since the epoch)
    start_time = int((datetime.today() - timedelta(minutes=1)).timestamp()) * 1000
    # Execute the search
    response = logs_client.filter_log_events(
        logGroupName=log_group_name, startTime=start_time, filterPattern=filter_pattern
    )
    # Assert exactly one event matching the pattern is found
    if "events" not in response:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "events not found"
        )
    if len(response["events"]) == 0:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "event not found"
        )
    if len(response["events"]) != 1:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "more than one event found"
        )
    if json.loads(response["events"][0]["message"]) != expected_json:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "log event does not match expected JSON"
        )
    # Return success
    return clean_up_with_success_response(test_user_pk, test_user_sk)
def error_response(error_message):
    """Build the standard failure payload for this integration test."""
    payload = {
        "success": False,
        "test_name": "ddb_user_audit_log",
    }
    payload["error_message"] = error_message
    return payload
def clean_up_with_error_response(test_user_pk, test_user_sk, error_message):
    """Delete the test user item from DynamoDB, then return the error payload."""
    key = {"PK": test_user_pk, "SK": test_user_sk}
    ddb_table.delete_item(Key=key)
    return error_response(error_message)
def clean_up_with_success_response(test_user_pk, test_user_sk):
    """Delete the test user item from DynamoDB, then return the success payload."""
    key = {"PK": test_user_pk, "SK": test_user_sk}
    ddb_table.delete_item(Key=key)
    return {"success": True, "test_name": "ddb_user_audit_log"}
# Standard library imports
import json
import os
from datetime import datetime, timedelta
# Third party imports
import boto3
# AWS clients/resources are created once at import time so warm Lambda
# invocations reuse them.
logs_client = boto3.client("logs")
# NOTE(review): the env var is named LOG_STREAM_NAME but the value is used as
# a log *group* name (passed as logGroupName below) — consider renaming.
log_group_name = os.environ.get("LOG_STREAM_NAME")
ddb_table_name = os.environ.get("DDB_TABLE")
ddb_table = boto3.resource("dynamodb").Table(name=ddb_table_name)
def event_handler(event, _context):
    """Assert and Clean Up: verify the audit-log event and delete the test user.

    Reads the arrange/act step's payload from ``event['arrange_act_payload']``,
    searches the CloudWatch Log Group for exactly one matching ``UserCreated``
    event, then deletes the DynamoDB test item and returns a success or error
    payload.
    """
    # If the arrange / act step returned an error, bail early
    if not event["arrange_act_payload"]["act_success"]:
        return error_response(event["arrange_act_payload"]["error_message"])
    test_user_key = event["arrange_act_payload"]["test_user_key"]
    test_user_pk = test_user_key["PK"]
    test_user_sk = test_user_key["SK"]
    expected_json = {
        "EventType": "UserCreated",
        "PK": {"S": test_user_pk},
        "SK": {"S": test_user_sk},
    }
    # 3. Assert
    # Filter the sought event from the CloudWatch Log Group.
    # Fix: PK/SK were swapped here ($.SK.S compared against the PK value and
    # vice versa), so any event matching the filter would then fail the
    # expected_json equality check below.
    filter_pattern = (
        f'{{ ($.EventType = "UserCreated") && ($.PK.S = "{test_user_pk}") '
        f'&& ($.SK.S = "{test_user_sk}") }}'
    )
    # Set the search horizon to one minute ago (milliseconds since the epoch)
    start_time = int((datetime.today() - timedelta(minutes=1)).timestamp()) * 1000
    # Execute the search
    response = logs_client.filter_log_events(
        logGroupName=log_group_name, startTime=start_time, filterPattern=filter_pattern
    )
    # Assert exactly one event matching the pattern is found
    if "events" not in response:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "events not found"
        )
    if len(response["events"]) == 0:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "event not found"
        )
    if len(response["events"]) != 1:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "more than one event found"
        )
    if json.loads(response["events"][0]["message"]) != expected_json:
        return clean_up_with_error_response(
            test_user_pk, test_user_sk, "log event does not match expected JSON"
        )
    # Return success
    return clean_up_with_success_response(test_user_pk, test_user_sk)
def error_response(error_message):
    """Build the standard failure payload for this integration test."""
    payload = {
        "success": False,
        "test_name": "ddb_user_audit_log",
    }
    payload["error_message"] = error_message
    return payload
def clean_up_with_error_response(test_user_pk, test_user_sk, error_message):
    """Delete the test user item from DynamoDB, then return the error payload."""
    key = {"PK": test_user_pk, "SK": test_user_sk}
    ddb_table.delete_item(Key=key)
    return error_response(error_message)
def clean_up_with_success_response(test_user_pk, test_user_sk):
    """Delete the test user item from DynamoDB, then return the success payload."""
    key = {"PK": test_user_pk, "SK": test_user_sk}
    ddb_table.delete_item(Key=key)
    return {"success": True, "test_name": "ddb_user_audit_log"}
import errno
import socket
import unittest
from tuntap.char_dev_harness import TunCharDevHarness, TapCharDevHarness
from tuntap.interface_harness import Address, InterfaceHarness
from tuntap.sockaddr import SockaddrDl, SockaddrIn, SockaddrIn6
from tuntap.tun_tap_harness import TunHarness, TapHarness
class TestInterface(unittest.TestCase):
    """Interface tests shared by the tun and tap harnesses.

    Uses assertEqual throughout: the assertEquals alias is deprecated and
    was removed in Python 3.12.
    """

    def __init__(self, name, harness):
        super(TestInterface, self).__init__(name)
        self.harness = harness

    def setUp(self):
        self.harness.start()

    def tearDown(self):
        self.harness.stop()

    def test_CloseWhileUp(self):
        # Closing the char dev while the interface is up must leave it restartable.
        self.harness.interface.flags |= InterfaceHarness.IFF_UP
        self.harness.char_dev.close()
        self.harness.start()

    def test_UpDown(self):
        self.harness.interface.flags |= InterfaceHarness.IFF_UP
        self.assertEqual(InterfaceHarness.IFF_UP,
                         self.harness.interface.flags & InterfaceHarness.IFF_UP)
        self.harness.interface.flags &= ~InterfaceHarness.IFF_UP
        self.assertEqual(0,
                         self.harness.interface.flags & InterfaceHarness.IFF_UP)

    def test_NetmaskAFFix(self):
        # A netmask passed with af = 0 should be fixed up to the proper family.
        self.harness.interface.addIfAddr(local = self.harness.addr.sa_local,
                                         dst = self.harness.addr.sa_dst,
                                         mask = SockaddrIn(af = 0, addr = self.harness.addr.mask))
        for addr in self.harness.interface.getAddrs(socket.AF_INET):
            if addr[1] == self.harness.addr.sa_mask:
                return
        self.fail()

    def test_Address(self):
        self.harness.interface.addIfAddr(local = self.harness.addr.sa_local,
                                         dst = self.harness.addr.sa_dst,
                                         mask = self.harness.addr.sa_mask)
        for addr in self.harness.interface.getAddrs(socket.AF_INET):
            if (addr[0] == self.harness.addr.sa_local and
                addr[1] == self.harness.addr.sa_mask and
                addr[2] == self.harness.addr.sa_dst):
                return
        self.fail()

    def test_Address6(self):
        def compare(expected, actual):
            # Treat a missing (None/falsy) address as the empty sockaddr.
            return (expected or SockaddrIn6(af = 0, addr = None)) == actual
        self.harness.interface.addIfAddr6(local = self.harness.addr6.sa_local,
                                          dst = self.harness.addr6.sa_dst,
                                          mask = self.harness.addr6.sa_mask)
        for addr in self.harness.interface.getAddrs(socket.AF_INET6):
            if (compare(addr[0], self.harness.addr6.sa_local) and
                compare(addr[1], self.harness.addr6.sa_mask) and
                compare(addr[2], self.harness.addr6.sa_dst)):
                return
        self.fail()
class TestTunInterface(TestInterface):
    """Tests specific to the tun (point-to-point) interface."""

    def __init__(self, name):
        super(TestTunInterface, self).__init__(name, TunHarness())

    def test_Flags(self):
        # assertEqual: the assertEquals alias was removed in Python 3.12.
        self.assertEqual(InterfaceHarness.IFF_POINTOPOINT |
                         InterfaceHarness.IFF_RUNNING |
                         InterfaceHarness.IFF_SIMPLEX |
                         InterfaceHarness.IFF_MULTICAST,
                         self.harness.interface.flags)
class TestTapInterface(TestInterface):
    """Tests specific to the tap (broadcast/Ethernet) interface."""

    def __init__(self, name):
        super(TestTapInterface, self).__init__(name, TapHarness())

    def test_Flags(self):
        # assertEqual: the assertEquals alias was removed in Python 3.12.
        self.assertEqual(InterfaceHarness.IFF_BROADCAST |
                         InterfaceHarness.IFF_RUNNING |
                         InterfaceHarness.IFF_SIMPLEX |
                         InterfaceHarness.IFF_MULTICAST,
                         self.harness.interface.flags)

    def test_SetLladdr(self):
        # Setting the link-level address must round-trip through the interface.
        addr = SockaddrDl(name = '', addr = '\x11\x22\x33\x44\x55\x66', type = 0)
        self.harness.interface.lladdr = addr
        self.assertEqual(addr.addr, self.harness.interface.lladdr.addr)
import errno
import socket
import unittest
from tuntap.char_dev_harness import TunCharDevHarness, TapCharDevHarness
from tuntap.interface_harness import Address, InterfaceHarness
from tuntap.sockaddr import SockaddrDl, SockaddrIn, SockaddrIn6
from tuntap.tun_tap_harness import TunHarness, TapHarness
class TestInterface(unittest.TestCase):
    """Interface tests shared by the tun and tap harnesses.

    Uses assertEqual throughout: the assertEquals alias is deprecated and
    was removed in Python 3.12.
    """

    def __init__(self, name, harness):
        super(TestInterface, self).__init__(name)
        self.harness = harness

    def setUp(self):
        self.harness.start()

    def tearDown(self):
        self.harness.stop()

    def test_CloseWhileUp(self):
        # Closing the char dev while the interface is up must leave it restartable.
        self.harness.interface.flags |= InterfaceHarness.IFF_UP
        self.harness.char_dev.close()
        self.harness.start()

    def test_UpDown(self):
        self.harness.interface.flags |= InterfaceHarness.IFF_UP
        self.assertEqual(InterfaceHarness.IFF_UP,
                         self.harness.interface.flags & InterfaceHarness.IFF_UP)
        self.harness.interface.flags &= ~InterfaceHarness.IFF_UP
        self.assertEqual(0,
                         self.harness.interface.flags & InterfaceHarness.IFF_UP)

    def test_NetmaskAFFix(self):
        # A netmask passed with af = 0 should be fixed up to the proper family.
        self.harness.interface.addIfAddr(local = self.harness.addr.sa_local,
                                         dst = self.harness.addr.sa_dst,
                                         mask = SockaddrIn(af = 0, addr = self.harness.addr.mask))
        for addr in self.harness.interface.getAddrs(socket.AF_INET):
            if addr[1] == self.harness.addr.sa_mask:
                return
        self.fail()

    def test_Address(self):
        self.harness.interface.addIfAddr(local = self.harness.addr.sa_local,
                                         dst = self.harness.addr.sa_dst,
                                         mask = self.harness.addr.sa_mask)
        for addr in self.harness.interface.getAddrs(socket.AF_INET):
            if (addr[0] == self.harness.addr.sa_local and
                addr[1] == self.harness.addr.sa_mask and
                addr[2] == self.harness.addr.sa_dst):
                return
        self.fail()

    def test_Address6(self):
        def compare(expected, actual):
            # Treat a missing (None/falsy) address as the empty sockaddr.
            return (expected or SockaddrIn6(af = 0, addr = None)) == actual
        self.harness.interface.addIfAddr6(local = self.harness.addr6.sa_local,
                                          dst = self.harness.addr6.sa_dst,
                                          mask = self.harness.addr6.sa_mask)
        for addr in self.harness.interface.getAddrs(socket.AF_INET6):
            if (compare(addr[0], self.harness.addr6.sa_local) and
                compare(addr[1], self.harness.addr6.sa_mask) and
                compare(addr[2], self.harness.addr6.sa_dst)):
                return
        self.fail()
class TestTunInterface(TestInterface):
    """Tests specific to the tun (point-to-point) interface."""

    def __init__(self, name):
        super(TestTunInterface, self).__init__(name, TunHarness())

    def test_Flags(self):
        # assertEqual: the assertEquals alias was removed in Python 3.12.
        self.assertEqual(InterfaceHarness.IFF_POINTOPOINT |
                         InterfaceHarness.IFF_RUNNING |
                         InterfaceHarness.IFF_SIMPLEX |
                         InterfaceHarness.IFF_MULTICAST,
                         self.harness.interface.flags)
class TestTapInterface(TestInterface):
    """Tests specific to the tap (broadcast/Ethernet) interface."""

    def __init__(self, name):
        super(TestTapInterface, self).__init__(name, TapHarness())

    def test_Flags(self):
        # assertEqual: the assertEquals alias was removed in Python 3.12.
        self.assertEqual(InterfaceHarness.IFF_BROADCAST |
                         InterfaceHarness.IFF_RUNNING |
                         InterfaceHarness.IFF_SIMPLEX |
                         InterfaceHarness.IFF_MULTICAST,
                         self.harness.interface.flags)

    def test_SetLladdr(self):
        # Setting the link-level address must round-trip through the interface.
        addr = SockaddrDl(name = '', addr = '\x11\x22\x33\x44\x55\x66', type = 0)
        self.harness.interface.lladdr = addr
        self.assertEqual(addr.addr, self.harness.interface.lladdr.addr)
import math
from pid_controller.pid import PID # TODO: update to simple_pid (pip install simple-pid)
from carla08.client import VehicleControl
# PID based controller, we can have different ones
class Controller:
    """Waypoint-following vehicle controller built around a PID speed loop.

    Receives waypoint angles from the agent and produces a VehicleControl
    (steer / throttle / brake) command.
    """

    def __init__(self, params):
        """Store the agent-supplied parameters and build the speed PID."""
        self.params = params
        # PID speed controller
        self.pid = PID(p=params['pid_p'], i=params['pid_i'], d=params['pid_d'])

    def get_control(self, wp_angle, wp_angle_speed, speed_factor, current_speed):
        """Turn waypoint angles and the current speed into a VehicleControl."""
        control = VehicleControl()
        current_speed = max(current_speed, 0)

        # Steering: proportional to the waypoint angle, clipped to [-1, 1].
        steer = self.params['steer_gain'] * wp_angle
        control.steer = min(steer, 1) if steer > 0 else max(steer, -1)

        # Don't go too fast around corners: pick the target speed from the
        # magnitude of the waypoint angular speed.
        abs_angle_speed = math.fabs(wp_angle_speed)
        if abs_angle_speed < 0.1:
            target_speed_adjusted = self.params['target_speed'] * speed_factor
        elif abs_angle_speed < 0.5:
            target_speed_adjusted = 20 * speed_factor
        else:
            target_speed_adjusted = 15 * speed_factor

        self.pid.target = target_speed_adjusted
        pid_gain = self.pid(feedback=current_speed)
        print('Target: ', self.pid.target, 'Error: ', self.pid.error, 'Gain: ', pid_gain)
        print(f'Target Speed: {target_speed_adjusted} Current Speed: {current_speed} Speed Factor: {speed_factor}')

        throttle = min(max(self.params['default_throttle'] - 1.3 * pid_gain, 0), self.params['throttle_max'])
        brake = min(0.35 * pid_gain * self.params['brake_strength'], 1) if pid_gain > 0.5 else 0

        control.throttle = max(throttle, 0)  # Prevent N by putting at least 0.01
        control.brake = brake
        print(f'Throttle: {control.throttle} Brake: {control.brake} Steering Angle: {control.steer}')
        return control
from pid_controller.pid import PID # TODO: update to simple_pid (pip install simple-pid)
from carla08.client import VehicleControl
# PID based controller, we can have different ones
class Controller:
    """Waypoint-following vehicle controller built around a PID speed loop.

    Receives waypoint angles from the agent and produces a VehicleControl
    (steer / throttle / brake) command.
    """

    def __init__(self, params):
        """Store the agent-supplied parameters and build the speed PID."""
        self.params = params
        # PID speed controller
        self.pid = PID(p=params['pid_p'], i=params['pid_i'], d=params['pid_d'])

    def get_control(self, wp_angle, wp_angle_speed, speed_factor, current_speed):
        """Turn waypoint angles and the current speed into a VehicleControl."""
        control = VehicleControl()
        current_speed = max(current_speed, 0)

        # Steering: proportional to the waypoint angle, clipped to [-1, 1].
        steer = self.params['steer_gain'] * wp_angle
        control.steer = min(steer, 1) if steer > 0 else max(steer, -1)

        # Don't go too fast around corners: pick the target speed from the
        # magnitude of the waypoint angular speed.
        abs_angle_speed = math.fabs(wp_angle_speed)
        if abs_angle_speed < 0.1:
            target_speed_adjusted = self.params['target_speed'] * speed_factor
        elif abs_angle_speed < 0.5:
            target_speed_adjusted = 20 * speed_factor
        else:
            target_speed_adjusted = 15 * speed_factor

        self.pid.target = target_speed_adjusted
        pid_gain = self.pid(feedback=current_speed)
        print('Target: ', self.pid.target, 'Error: ', self.pid.error, 'Gain: ', pid_gain)
        print(f'Target Speed: {target_speed_adjusted} Current Speed: {current_speed} Speed Factor: {speed_factor}')

        throttle = min(max(self.params['default_throttle'] - 1.3 * pid_gain, 0), self.params['throttle_max'])
        brake = min(0.35 * pid_gain * self.params['brake_strength'], 1) if pid_gain > 0.5 else 0

        control.throttle = max(throttle, 0)  # Prevent N by putting at least 0.01
        control.brake = brake
        print(f'Throttle: {control.throttle} Brake: {control.brake} Steering Angle: {control.steer}')
        return control
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import plot_bloch_multivector, plot_histogram
from qiskit.extensions import Initialize
from math import sqrt, pi
import numpy as np
import matplotlib.pyplot as plt
from random import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# DEFINITIONS
# Implementation of the measurement in the {|+>, |->} basis: HZH = X
def x_measure (quantumcircuit, qubit, cbit):
    """Measure ``qubit`` in the X ({|+>, |->}) basis.

    A Hadamard maps the X basis onto the computational basis, so the
    following Z-basis measurement (stored into classical bit ``cbit``) is
    equivalent to an X measurement. The mutated circuit is returned.
    """
    quantumcircuit.h(qubit)
    quantumcircuit.measure(qubit, cbit)
    return quantumcircuit
# Notice: in x_measure (Bob's measurements), we DO NOT apply the final H-Gate.
# We apply H-Gate only in the function x_measure_eve (Eve's measurement).
# That's because in the experiment we MUST END THE CIRCUIT WITH A MEASUREMENT
# (not with a gate).
def x_measure_eve (quantumcircuit, qubit, cbit):
    """Eve's X-basis measurement: H, Z-basis measure, then H again.

    The trailing Hadamard rotates the post-measurement state back to the
    X basis so the qubit can be forwarded to Bob. Bob's ``x_measure`` omits
    that final H because the hardware requires circuits to end with a
    measurement, not a gate.
    """
    quantumcircuit.h(qubit)
    quantumcircuit.measure(qubit, cbit)
    quantumcircuit.h(qubit)
    return quantumcircuit
# Implementation of a function which builds up a single qubit circuit based on Alice's strings
def encoding_circuit_builder (alice_bits, alice_basis, nth_circuit):
    """Build the 5-qubit circuit encoding Alice's bits in her chosen bases.

    Qubit ``k`` of circuit ``nth_circuit`` encodes ``alice_bits[5*nth_circuit+k]``
    in the Z basis ({|0>, |1>}) when the matching ``alice_basis`` entry is 0,
    and in the X basis ({|+>, |->}) when it is 1.
    """
    offset = 5 * nth_circuit
    encoding_circuit = QuantumCircuit(5, 5)
    for k in range(5):
        bit = alice_bits[offset + k]
        basis = alice_basis[offset + k]
        if basis == 0:
            # Z basis: identity keeps |0>, X flips it to |1>.
            if bit == 1:
                encoding_circuit.x(k)
        else:
            # X basis: (optional X then) H maps |0> -> |+> and |1> -> |->.
            if bit == 1:
                encoding_circuit.x(k)
            encoding_circuit.h(k)
    encoding_circuit.barrier()
    return encoding_circuit
# Implementation of the function with which Bob measures Alice's qubit
def circuit_measure (backend_name, encoding_circuit, bob_basis, nth_circuit):
    """Add Bob's basis-dependent measurements, run one shot, and return the bits.

    Each qubit k is measured in the Z basis when ``bob_basis[5*i+k] == 0`` or
    in the X basis (via ``x_measure``) when it is 1. The single-shot bitstring
    returned by Qiskit is little-endian, so it is reversed before returning.
    """
    i = nth_circuit
    list_of_results = []
    inverted_list = []
    definitive_results = []
    for k in range(5):
        if bob_basis[5*i + k] == 0: # Bob chooses {|0>, |1>} basis
            # Measurement with the default {|0>, |1>} basis
            encoding_circuit.measure(k,k)
        if bob_basis[5*i + k] == 1: # Bob chooses {|+>, |->} basis
            # Measurement with the {|+>, |->} basis
            x_measure(encoding_circuit, k, k)
    # Run a single shot on the selected IBM Q backend and read the raw memory.
    backend = provider.get_backend(backend_name)
    job = execute(encoding_circuit, backend, shots=1, memory=True)
    result = job.result()
    list_of_results = result.get_memory()
    list_of_results = list(map(int, str(list_of_results[0])))
    # But these results are ordered backwards!
    # Their order must be inverted!
    for k in range(5):
        inverted_list.append(list_of_results[4-k])
    # Scenario 1-2:
    definitive_results = inverted_list
    """
    # Scenario 3:
    # We have to consider ONLY qubits q3, which has been entangled with q4
    definitive_results.append(inverted_list[3])
    """
    """
    encoding_circuit.draw("mpl")
    print("\nlist_of_results:", list_of_results)
    print("inverted_list:", inverted_list)
    print("definitive_results:", definitive_results)
    """
    return definitive_results
def eve_hacking_measures (hacker_activated, encoding_circuit, alice_basis, nth_circuit):
    """Measurement-based attack: emulate Eve measuring each qubit in a random basis.

    The "measurement" is modelled with gates: a correct basis guess applies a
    gate that leaves the encoded state unchanged (up to a phase), a wrong guess
    applies H to rotate the state into the other basis.

    NOTE(review): ``eve_basis`` is a module-level global that only exists when
    option 2 is chosen at the prompt — confirm this is never called otherwise.
    Also note the function returns None when ``hacker_activated`` is False.
    """
    if hacker_activated == True:
        # Eve measures each qubit sent by Alice. After that, Eve sends it to Bob:
        i = nth_circuit
        for k in range(5):
            if eve_basis[5*i+k] == 0: # Eve chooses {|0>, |1>} basis
                # Measurement with the default {|0>, |1>} basis
                if alice_basis[5*i+k] == 0:
                    encoding_circuit.z(k) # --> ''right'' choice --> the state remains the same
                if alice_basis[5*i+k] == 1:
                    encoding_circuit.h(k) # --> ''wrong'' choice --> change basis
            if eve_basis[5*i+k] == 1: # Eve chooses {|+>, |->} basis
                # Measurement with the {|+>, |->} basis
                if alice_basis[5*i+k] == 1:
                    encoding_circuit.x(k) # --> ''right'' choice --> the state remains the same
                if alice_basis[5*i+k] == 0:
                    encoding_circuit.h(k) # --> ''wrong'' choice --> change basis
        encoding_circuit.barrier()
        return encoding_circuit
    else:
        pass
def eve_hacking_entangle (hacker_activated, encoding_circuit):
    """Entanglement-based attack: CNOT-entangle Alice's qubit q3 with ancilla q4.

    NOTE(review): an earlier comment claimed q0 and q3 were both entangled,
    but the code only applies cx(3, 4). Returns None when the attack is off.
    """
    if hacker_activated == True:
        # Eve entangles qubit q3 (sent by Alice) with the |0>-state ancilla q4.
        # After that, Eve sends the entangled qubit on to Bob:
        encoding_circuit.cx(3, 4)
        encoding_circuit.barrier()
        #encoding_circuit.draw("mpl")
        return encoding_circuit
# ---------------------------------- MAIN PROGRAM ------------------------------------------
# Number of qubits that Alice is going to use:
number_of_circuits = 2
number_of_qubits = 5 * number_of_circuits

# Backend:
backend_name = "ibmq_5_yorktown"

# Alice generates n random bits (some of these bits will form the key)
alice_bits = [randint(0, 1) for _ in range(number_of_qubits)]
# BUG FIX: slices were [0:19] (19 items) while the labels say "first 20 bits".
print("\nAlice's bits (first 20 bits):\n", alice_bits[0:20])

# Alice randomly chooses the bases in which she is going to encode
alice_basis = [randint(0, 1) for _ in range(number_of_qubits)]
print("\nAlice's basis (first 20 bits):\n", alice_basis[0:20])

# Bob also randomly chooses the bases in which he is going to measure
bob_basis = [randint(0, 1) for _ in range(number_of_qubits)]
print("\nBob's basis (first 20 bits):\n", bob_basis[0:20])

# Keep prompting until a valid scenario is chosen.
# BUG FIX: on invalid input the original fell through with
# hacker_activated1/2 undefined and crashed later with a NameError.
while True:
    print("\nChoose an option [digit 1, 2 or 3]:\n\n1. Transmission without hacker's attack"
          "\n2. Transmission with a measurement-based hacker's attack"
          "\n3. Transmission with an Entanglement-based hacker's attack\n")
    scelta = input()
    if scelta in ("1", "2", "3"):
        break
    print("\nTry again (digit only 1, 2 or 3)")

hacker_activated1 = (scelta == "2")   # measurement-based attack
hacker_activated2 = (scelta == "3")   # entanglement-based attack

# Eve randomly chooses the bases in which she is going to measure (like Bob)
if hacker_activated1:
    eve_basis = [randint(0, 1) for _ in range(number_of_qubits)]
    print("\nEve's basis (first 20 bits):\n", eve_basis[0:20])

print("-----------------------------------------------------------------------")
print("\nThe experiment has been launched!\n")
print("Backend:", backend_name)
print("Number of circuits:", number_of_circuits)
print("Number of qubits per circuit:", 5)
print("Total number of qubits:", number_of_qubits)
print("\n\n-----------------------------------------------------------------------")

# For each block of 5 classical bits, Alice encodes a circuit and sends it to Bob:
bob_measures = []
for n in range(number_of_circuits):
    # Alice encodes bits 5n..5n+4 into the n-th 5-qubit circuit
    circuit = encoding_circuit_builder(alice_bits, alice_basis, n)
    # Eve may tamper with the circuit before it reaches Bob
    eve_hacking_measures(hacker_activated1, circuit, alice_basis, n)
    eve_hacking_entangle(hacker_activated2, circuit)
    # Bob measures the (possibly tampered) circuit with his own bases
    new_results = circuit_measure(backend_name, circuit, bob_basis, n)
    bob_measures = bob_measures + new_results

    counter = 0  # sanity check that the data have been saved
    # For each job, immediately append the results to the data file.
    # The context manager guarantees the file is closed even on error.
    with open("bb84_yorktown_scenario1_data.txt", "a") as data_file:
        # Scenario 1-2: log all five qubits of this circuit
        for k in range(5):
            data_file.write(str(alice_bits[5*n+k]))
            data_file.write("\t")
            data_file.write(str(alice_basis[5*n+k]))
            data_file.write("\t")
            data_file.write(str(bob_basis[5*n+k]))
            data_file.write("\t")
            data_file.write(str(bob_measures[5*n+k]))
            data_file.write("\n")
            counter = counter + 1
        # Scenario 3 (disabled): only qubit q3 would be logged, using
        # bob_measures[n] because scenario 3 yields one result per circuit.
    print(n, "-th job")
    print("Results:", new_results)
    # BUG FIX: the original compared the *string* scelta against the
    # integers 1/2/3, so these confirmation checks never executed.
    if scelta in ("1", "2"):
        if counter == 5:
            print("Datas have been correctly saved")
        else:
            print("DATA WERE NOT CORRECTLY SAVED!!")
    if scelta == "3":
        if counter == 1:
            print("Datas have been correctly saved")
        else:
            print("DATA WERE NOT CORRECTLY SAVED!!")
    print("-----------------------------------------------------------------------")

# Let's see the first 20 results of the measurements!
print("\nBob's measurements (first 20 measurements):\n")
print(bob_measures[0:20])
print("\nThe experiment ended with success!")
plt.show() | Programs/Experiment/bb84_qkd_realdevice.py |
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import plot_bloch_multivector, plot_histogram
from qiskit.extensions import Initialize
from math import sqrt, pi
import numpy as np
import matplotlib.pyplot as plt
from random import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# DEFINITIONS
# Implementation of the measurement in the {|+>, |->} basis: HZH = X
def x_measure (quantumcircuit, qubit, cbit):
    """Measure *qubit* in the X ({|+>, |->}) basis into classical bit *cbit*.

    An H gate maps |+>/|-> onto |0>/|1>, so a standard measurement after the
    H realizes the X-basis measurement. The circuit is modified in place and
    also returned for convenience.
    """
    quantumcircuit.h(qubit)
    quantumcircuit.measure(qubit, cbit)
    return quantumcircuit
# Notice: in x_measure (Bob's measurements), we DO NOT apply the final H-Gate.
# We apply H-Gate only in the function x_measure_eve (Eve's measurement).
# That's because in the experiment we MUST END THE CIRCUIT WITH A MEASUREMENT
# (not with a gate).
def x_measure_eve (quantumcircuit, qubit, cbit):
    """X-basis measurement for Eve: H, measure, then H again.

    The trailing H rotates the post-measurement state back into the
    {|+>, |->} basis before the qubit is forwarded to Bob. The circuit is
    modified in place and also returned.
    """
    quantumcircuit.h(qubit)
    quantumcircuit.measure(qubit, cbit)
    quantumcircuit.h(qubit)
    return quantumcircuit
# Implementation of a function which builds up a 5-qubit circuit from Alice's strings
def encoding_circuit_builder (alice_bits, alice_basis, nth_circuit):
    """Build the n-th 5-qubit circuit encoding Alice's bits in her chosen bases.

    For each qubit k (encoding classical bit 5*n+k):
      bit 1  -> X gate flips |0> to |1>;
      basis 1 -> H gate rotates into the {|+>, |->} basis.
    The four original (bit, basis) cases are exactly these two independent
    conditions applied in order: I, X, H, or X-then-H.
    """
    offset = 5 * nth_circuit
    encoding_circuit = QuantumCircuit(5, 5)
    for k in range(5):
        if alice_bits[offset + k] == 1:
            encoding_circuit.x(k)   # flip |0> to |1>
        if alice_basis[offset + k] == 1:
            encoding_circuit.h(k)   # rotate into the {|+>, |->} basis
    encoding_circuit.barrier()
    return encoding_circuit
# Implementation of the function with which Bob measures Alice's qubits
def circuit_measure (backend_name, encoding_circuit, bob_basis, nth_circuit):
    """Append Bob's basis-dependent measurements, run one shot on the IBMQ
    backend, and return the 5 measured bits in qubit order (q0..q4).

    Uses the module-level ``provider`` (loaded IBMQ account) and the helper
    ``x_measure``. shots=1 with memory=True yields a single 5-character
    bitstring for this circuit.
    """
    i = nth_circuit
    list_of_results = []
    inverted_list = []
    definitive_results = []
    for k in range(5):
        if bob_basis[5*i + k] == 0: # Bob chooses {|0>, |1>} basis
            # Measurement with the default {|0>, |1>} basis
            encoding_circuit.measure(k,k)
        if bob_basis[5*i + k] == 1: # Bob chooses {|+>, |->} basis
            # Measurement with the {|+>, |->} basis
            x_measure(encoding_circuit, k, k)
    backend = provider.get_backend(backend_name)
    # Single shot: memory=True exposes the per-shot bitstring
    job = execute(encoding_circuit, backend, shots=1, memory=True)
    result = job.result()
    list_of_results = result.get_memory()
    # get_memory()[0] is a string like "01011"; split into a list of ints
    list_of_results = list(map(int, str(list_of_results[0])))
    # But these results are ordered backwards (Qiskit bit order c4..c0)!
    # Their order must be inverted so index k corresponds to qubit k:
    for k in range(5):
        inverted_list.append(list_of_results[4-k])
    # Scenario 1-2: keep every qubit's result
    definitive_results = inverted_list
    """
    # Scenario 3:
    # We have to consider ONLY qubits q3, which has been entangled with q4
    definitive_results.append(inverted_list[3])
    """
    """
    encoding_circuit.draw("mpl")
    print("\nlist_of_results:", list_of_results)
    print("inverted_list:", inverted_list)
    print("definitive_results:", definitive_results)
    """
    return definitive_results
def eve_hacking_measures (hacker_activated, encoding_circuit, alice_basis, nth_circuit):
    """Simulate Eve's intercept-and-resend measurement attack on the n-th 5-qubit circuit.

    Relies on the module-level ``eve_basis`` list (defined only when the
    measurement attack scenario is selected). For each qubit, the gate applied
    mimics the state Bob receives after Eve measured in her randomly chosen
    basis: a 'wrong' basis choice rotates the qubit with H, a 'right' choice
    leaves the encoded state intact (Z on a computational-basis state / X on
    |+>/|-> change the state at most by a phase).

    Bug fix: the original returned None (via ``else: pass``) when the attack
    was disabled; the circuit is now returned unconditionally, which is
    backward compatible since callers mutate the circuit in place.
    """
    if not hacker_activated:
        return encoding_circuit  # no attack: circuit passes through untouched
    i = nth_circuit
    for k in range(5):
        eve_choice = eve_basis[5*i + k]
        alice_choice = alice_basis[5*i + k]
        if eve_choice == 0:
            # Eve measures in the computational {|0>, |1>} basis
            if alice_choice == 0:
                encoding_circuit.z(k)  # 'right' choice --> state preserved (up to phase)
            else:
                encoding_circuit.h(k)  # 'wrong' choice --> basis is changed
        else:
            # Eve measures in the {|+>, |->} basis
            if alice_choice == 1:
                encoding_circuit.x(k)  # 'right' choice --> state preserved (up to phase)
            else:
                encoding_circuit.h(k)  # 'wrong' choice --> basis is changed
    encoding_circuit.barrier()
    return encoding_circuit
def eve_hacking_entangle (hacker_activated, encoding_circuit):
    """Simulate Eve's entanglement-based attack.

    A CNOT entangles Alice's qubit q3 (control) with the |0>-state ancilla q4
    (target) before the circuit reaches Bob. (The original comment also
    mentioned q0/q2, but the code only ever entangles q3 with q4.)

    Bug fix: the original implicitly returned None when the attack was
    disabled; the circuit is now returned in every case.
    """
    if hacker_activated == True:
        # Eve ENTANGLES Alice's qubit q3 with the |0>-state ancilla q4.
        # After that, Eve sends the entangled qubit on to Bob:
        encoding_circuit.cx(3, 4)
        encoding_circuit.barrier()
        #encoding_circuit.draw("mpl")
    return encoding_circuit
# ---------------------------------- MAIN PROGRAM ------------------------------------------
# Number of qubits that Alice is going to use:
number_of_circuits = 2
number_of_qubits = 5 * number_of_circuits

# Backend:
backend_name = "ibmq_5_yorktown"

# Alice generates n random bits (some of these bits will form the key)
alice_bits = [randint(0, 1) for _ in range(number_of_qubits)]
# BUG FIX: slices were [0:19] (19 items) while the labels say "first 20 bits".
print("\nAlice's bits (first 20 bits):\n", alice_bits[0:20])

# Alice randomly chooses the bases in which she is going to encode
alice_basis = [randint(0, 1) for _ in range(number_of_qubits)]
print("\nAlice's basis (first 20 bits):\n", alice_basis[0:20])

# Bob also randomly chooses the bases in which he is going to measure
bob_basis = [randint(0, 1) for _ in range(number_of_qubits)]
print("\nBob's basis (first 20 bits):\n", bob_basis[0:20])

# Keep prompting until a valid scenario is chosen.
# BUG FIX: on invalid input the original fell through with
# hacker_activated1/2 undefined and crashed later with a NameError.
while True:
    print("\nChoose an option [digit 1, 2 or 3]:\n\n1. Transmission without hacker's attack"
          "\n2. Transmission with a measurement-based hacker's attack"
          "\n3. Transmission with an Entanglement-based hacker's attack\n")
    scelta = input()
    if scelta in ("1", "2", "3"):
        break
    print("\nTry again (digit only 1, 2 or 3)")

hacker_activated1 = (scelta == "2")   # measurement-based attack
hacker_activated2 = (scelta == "3")   # entanglement-based attack

# Eve randomly chooses the bases in which she is going to measure (like Bob)
if hacker_activated1:
    eve_basis = [randint(0, 1) for _ in range(number_of_qubits)]
    print("\nEve's basis (first 20 bits):\n", eve_basis[0:20])

print("-----------------------------------------------------------------------")
print("\nThe experiment has been launched!\n")
print("Backend:", backend_name)
print("Number of circuits:", number_of_circuits)
print("Number of qubits per circuit:", 5)
print("Total number of qubits:", number_of_qubits)
print("\n\n-----------------------------------------------------------------------")

# For each block of 5 classical bits, Alice encodes a circuit and sends it to Bob:
bob_measures = []
for n in range(number_of_circuits):
    # Alice encodes bits 5n..5n+4 into the n-th 5-qubit circuit
    circuit = encoding_circuit_builder(alice_bits, alice_basis, n)
    # Eve may tamper with the circuit before it reaches Bob
    eve_hacking_measures(hacker_activated1, circuit, alice_basis, n)
    eve_hacking_entangle(hacker_activated2, circuit)
    # Bob measures the (possibly tampered) circuit with his own bases
    new_results = circuit_measure(backend_name, circuit, bob_basis, n)
    bob_measures = bob_measures + new_results

    counter = 0  # sanity check that the data have been saved
    # For each job, immediately append the results to the data file.
    # The context manager guarantees the file is closed even on error.
    with open("bb84_yorktown_scenario1_data.txt", "a") as data_file:
        # Scenario 1-2: log all five qubits of this circuit
        for k in range(5):
            data_file.write(str(alice_bits[5*n+k]))
            data_file.write("\t")
            data_file.write(str(alice_basis[5*n+k]))
            data_file.write("\t")
            data_file.write(str(bob_basis[5*n+k]))
            data_file.write("\t")
            data_file.write(str(bob_measures[5*n+k]))
            data_file.write("\n")
            counter = counter + 1
        # Scenario 3 (disabled): only qubit q3 would be logged, using
        # bob_measures[n] because scenario 3 yields one result per circuit.
    print(n, "-th job")
    print("Results:", new_results)
    # BUG FIX: the original compared the *string* scelta against the
    # integers 1/2/3, so these confirmation checks never executed.
    if scelta in ("1", "2"):
        if counter == 5:
            print("Datas have been correctly saved")
        else:
            print("DATA WERE NOT CORRECTLY SAVED!!")
    if scelta == "3":
        if counter == 1:
            print("Datas have been correctly saved")
        else:
            print("DATA WERE NOT CORRECTLY SAVED!!")
    print("-----------------------------------------------------------------------")

# Let's see the first 20 results of the measurements!
print("\nBob's measurements (first 20 measurements):\n")
print(bob_measures[0:20])
print("\nThe experiment ended with success!")
plt.show() | 0.672869 | 0.818809 |
import tensorflow as tf
import numpy as np
import random
import enum
import warnings
warnings.filterwarnings('ignore')
random.seed(a=123456789)
np.random.seed(123456789)
tf.set_random_seed(123456789)
## Learning Model
class AutoEncoder:
    """Convolutional autoencoder for 512x512 grayscale images with optional
    U-net style skip connections, built on the TensorFlow 1.x graph API with
    a queue-based CSV input pipeline.
    """
    # input image size
    nWidth = 512
    nHeight = 512
    nPixels = nWidth * nHeight
    # number of parallel CSV readers feeding the input pipeline
    read_threads = 1
    # network is fully convolutional: output resolution equals the input's
    outputWidth = nWidth
    outputHeight = nHeight
# loss function with L1 distance
@staticmethod
def L1(output, target):
return tf.reduce_sum(tf.abs(target-output))
# loss function with L2 distance
@staticmethod
def L2(output, target):
return tf.reduce_sum(tf.square(target-output))
def __init__(self, training_csv_file_name, **options):
# options by argument
self.batch_size = options.get('batch_size', 1)
self.is_data_augmentation = options.get('is_data_augmentation', True)
# Option to skip conecctions between corresponding layers of encoder and decoder as in U-net
self.is_skip_connection = options.get('is_skip_connection', True)
self.loss_function = options.get('loss_function', AutoEncoder.L1)
isDebug = True
if isDebug:
print("batch_size : {0}".format(self.batch_size))
print("data_augmentation : {0}".format(self.is_data_augmentation))
print("skip_connection : {0}".format(self.is_skip_connection))
print("loss_function : {0}".format(self.loss_function))
with tf.Graph().as_default():
self.prepare_model()
self.prepare_session()
self.prepare_batch(training_csv_file_name)
    def prepare_model(self):
        """Build the TF1 graph: placeholders, optional augmentation, an
        8-stage strided-conv encoder, an 8-stage transposed-conv decoder
        (optionally with U-net skip connections), the loss, and the Adam
        training op.

        Exposes via attributes: x, t (flattened-image placeholders), x_image,
        keep_prob, train_step, output, t_compare, loss.
        """
        with tf.name_scope("input"):
            # x: network input, t: target; both flattened 512*512 grayscale images
            x = tf.placeholder(tf.float32, [None, AutoEncoder.nPixels])
            x_image = tf.cast(tf.reshape(x, [self.batch_size, AutoEncoder.nWidth, AutoEncoder.nHeight, 1]), tf.float32)
            t = tf.placeholder(tf.float32, [None, AutoEncoder.nPixels])
            t_image = tf.reshape(t, [self.batch_size, AutoEncoder.nWidth, AutoEncoder.nHeight, 1])
        # keep probabilities for dropout layer (keep_all disables dropout)
        keep_prob = tf.placeholder(tf.float32)
        keep_all = tf.constant(1.0, dtype=tf.float32)
        ## Data Augmentation
        # NOTE(review): random.randint runs at graph-construction time, so the
        # flip/transpose pattern is frozen per batch slot for the whole run,
        # not re-drawn every training step -- confirm this is intended.
        if self.is_data_augmentation:
            x_tmp_array = []
            t_tmp_array = []
            for i in range(self.batch_size):
                x_tmp = x_image[i, :, :, :]
                t_tmp = t_image[i, :, :, :]
                # flip each image left-right randomly (input and target together)
                rint = random.randint(0, 2)
                if rint%2 != 0:
                    x_tmp = tf.image.flip_left_right(x_tmp)
                    t_tmp = tf.image.flip_left_right(t_tmp)
                rint = random.randint(0, 4)
                # Some images have meaning in the vertical direction, so images
                # are flipped vertically with lower probability than horizontally
                if rint%4 == 0:
                    x_tmp = tf.image.flip_up_down(x_tmp)
                    t_tmp = tf.image.flip_up_down(t_tmp)
                rint = random.randint(0, 4)
                # transposed with lower probability, for the same reason
                if rint%4 == 0:
                    x_tmp = tf.image.transpose_image(x_tmp)
                    t_tmp = tf.image.transpose_image(t_tmp)
                x_tmp_array.append(tf.expand_dims(x_tmp, 0))
                t_tmp_array.append(tf.expand_dims(t_tmp, 0))
            x_image = tf.concat(x_tmp_array, axis=0)
            t_image = tf.concat(t_tmp_array, axis=0)
        self.x_image = x_image
        # Encoding function: strided convolution that halves the spatial size
        def encode(batch_input, out_channels, stride, filter_size):
            with tf.variable_scope("encode"):
                in_channels = batch_input.get_shape()[3]
                filter = tf.get_variable("filter", [filter_size, filter_size, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
                # [batch, in_height, in_width, in_channels], [filter_width, filter_height, in_channels, out_channels]
                # => [batch, out_height, out_width, out_channels]
                # pad so a VALID conv with this filter_size/stride keeps out = in/stride
                pad_size = int(filter_size - stride)
                if pad_size > 0:
                    pad0 = pad_size//2
                    pad1 = pad_size//2 + pad_size%2
                    batch_input = tf.pad(batch_input, [[0, 0], [pad0, pad1], [pad0, pad1], [0, 0]], mode="CONSTANT")
                conved = tf.nn.conv2d(batch_input, filter, [1, stride, stride, 1], padding="VALID")
                return conved
        # Leaky ReLU
        def lrelu(x, a):
            with tf.name_scope("LeakyReLU"):
                x = tf.identity(x)
                # leak[a*x/2 - a*abs(x)/2] + linear[x/2 + abs(x)/2]
                return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
        # Batch Normalization (per-batch statistics; no moving averages are kept,
        # so inference uses batch stats too)
        def batchnorm(input):
            with tf.variable_scope("BatchNormalization"):
                input = tf.identity(input)
                channels = input.get_shape()[3]
                offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer)
                scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
                mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)
                variance_epsilon = 1e-5
                normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
                return normalized
        # Decoding function: transposed convolution that doubles the spatial size
        def decode(batch_input, out_channels, filter_size):
            with tf.variable_scope("decode"):
                batch, in_height, in_width, in_channels = [int(d) for d in batch_input.get_shape()]
                filter = tf.get_variable("filter", [filter_size, filter_size, out_channels, in_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
                # [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
                # => [batch, out_height, out_width, out_channels]
                conved = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
                return conved
        # List to contain each layer of AutoEncoder
        layers = []
        # 3 if used for color image
        num_channels = 1
        out_channels_base = 32
        # (out_channels, filter_size) per encoder stage; each stage halves H and W.
        # (Shape comments below are corrected from the original, which had
        # inconsistent channel counts.)
        encode_output_channels = [
            (out_channels_base    , 16), # encoder_1: [b, 512, 512, 1]       => [b, 256, 256, base]
            (out_channels_base * 2, 8),  # encoder_2: [b, 256, 256, base]    => [b, 128, 128, base*2]
            (out_channels_base * 4, 4),  # encoder_3: [b, 128, 128, base*2]  => [b, 64, 64, base*4]
            (out_channels_base * 8, 4),  # encoder_4: [b, 64, 64, base*4]    => [b, 32, 32, base*8]
            (out_channels_base * 8, 4),  # encoder_5: [b, 32, 32, base*8]    => [b, 16, 16, base*8]
            (out_channels_base * 8, 4),  # encoder_6: [b, 16, 16, base*8]    => [b, 8, 8, base*8]
            (out_channels_base * 8, 4),  # encoder_7: [b, 8, 8, base*8]      => [b, 4, 4, base*8]
            (out_channels_base * 8, 4),  # encoder_8: [b, 4, 4, base*8]      => [b, 2, 2, base*8]
            #(out_channels_base * 8, 2)  # encoder_9: [b, 2, 2, base*8]      => [b, 1, 1, base*8]
        ]
        for encoder_index, (out_channels, filter_size) in enumerate(encode_output_channels):
            with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
                if encoder_index == 0:
                    # first stage feeds directly from the (augmented) input image
                    output = encode(x_image, out_channels, 2, filter_size)
                else:
                    rectified = lrelu(layers[-1], 0.2)
                    # [b, h, w, in_channels] => [b, h/2, w/2, out_channels]
                    encoded = encode(rectified, out_channels, 2, filter_size)
                    output = batchnorm(encoded)
                layers.append(output)
                print(output)
        # (out_channels, dropout keep-prob tensor, filter_size) per decoder stage;
        # with skip connections the input channels double (concat with encoder).
        decode_output_channels = [
            #(out_channels_base * 8, keep_prob, 2), # decoder_9: [b, 1, 1, base*8]   => [b, 2, 2, base*8]
            (out_channels_base * 8, keep_prob, 4),  # decoder_8: [b, 2, 2, base*8]   => [b, 4, 4, base*8]
            (out_channels_base * 8, keep_prob, 4),  # decoder_7: [b, 4, 4, base*8(*2 w/ skip)]   => [b, 8, 8, base*8]
            (out_channels_base * 8, keep_all, 4),   # decoder_6: [b, 8, 8, base*8(*2 w/ skip)]   => [b, 16, 16, base*8]
            (out_channels_base * 8, keep_all, 4),   # decoder_5: [b, 16, 16, base*8(*2 w/ skip)] => [b, 32, 32, base*8]
            (out_channels_base * 4, keep_all, 4),   # decoder_4: [b, 32, 32, base*8(*2 w/ skip)] => [b, 64, 64, base*4]
            (out_channels_base * 2, keep_all, 4),   # decoder_3: [b, 64, 64, base*4(*2 w/ skip)] => [b, 128, 128, base*2]
            (out_channels_base    , keep_all, 8),   # decoder_2: [b, 128, 128, base*2(*2 w/ skip)] => [b, 256, 256, base]
            (num_channels         , keep_all, 16),  # decoder_1: [b, 256, 256, base(*2 w/ skip)]   => [b, 512, 512, num_channels]
        ]
        num_encoder_layers = len(layers)
        for decoder_index, (out_channels, dropout_keep_prob, filter_size) in enumerate(decode_output_channels):
            # encoder layer whose output is concatenated into this decoder stage
            skip_layer = num_encoder_layers - decoder_index - 1
            with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
                if decoder_index == 0:
                    # Even with "skip connection", the first decoder layer is fed
                    # only by the deepest encoder layer
                    input = layers[-1]
                else:
                    if self.is_skip_connection == True:
                        # Concat output from the matching encoder layer to keep detail
                        input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
                    else:
                        input = layers[-1]
                rectified = tf.nn.relu(input)
                # [b, h, w, in_channels] => [b, h*2, w*2, out_channels]
                output = decode(rectified, out_channels, filter_size)
                if decoder_index != num_encoder_layers-1:
                    output = batchnorm(output)
                #else:
                    # final decoder
                    #output = tf.tanh(output)
                # dropout layer: applied only when this stage's keep-prob can be < 1
                output = tf.cond(dropout_keep_prob < 1.0, lambda: tf.nn.dropout(output, keep_prob=dropout_keep_prob), lambda: output)
                layers.append(output)
                print(output)
        output = layers[-1]
        with tf.name_scope("Optimizer"):
            ## Apply loss function (difference between training data and predicted data), and learning algorithm.
            t_compare = t_image
            loss = self.loss_function(output, t_compare)
            train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
            tf.summary.scalar("loss", loss)
            #tf.histogram_summary("Convolution_1:biases", b_conv1)
        self.x = x
        self.t = t
        self.keep_prob = keep_prob
        self.train_step = train_step
        self.output = output
        self.t_compare = t_compare
        self.loss = loss
def prepare_session(self):
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("board/learn_logs", sess.graph)
self.sess = sess
self.saver = saver
self.summary = summary
self.writer = writer
# https://www.tensorflow.org/versions/r0.12/how_tos/reading_data/index.html#batching
def read_my_file_format(self, filename_queue):
reader = tf.TextLineReader()
key, record_string = reader.read(filename_queue)
# "a" means representative value to indicate type for csv cell value.
image_file_name, depth_file_name = tf.decode_csv(record_string, [["a"], ["a"]])
image_png_data = tf.read_file(image_file_name)
depth_png_data = tf.read_file(depth_file_name)
# channels=1 means image is read as gray-scale
image_decoded = tf.image.decode_png(image_png_data, channels=1)
image_decoded.set_shape([512, 512, 1])
depth_decoded = tf.image.decode_png(depth_png_data, channels=1)
depth_decoded.set_shape([512, 512, 1])
return image_decoded, depth_decoded
def input_pipeline(self, filenames, batch_size, read_threads, num_epochs=None):
filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
min_after_dequeue = 2
capacity = min_after_dequeue + 3 * batch_size
example_list = [self.read_my_file_format(filename_queue) for _ in range(read_threads)]
example_batch, label_batch = tf.train.shuffle_batch_join(
example_list, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return example_batch, label_batch
def prepare_batch(self, training_csv_file_name):
image_batch, depth_batch = self.input_pipeline([training_csv_file_name], self.batch_size, AutoEncoder.read_threads)
self.image_batch = image_batch
self.depth_batch = depth_batch | AutoEncoder.py | import tensorflow as tf
import numpy as np
import random
import enum
import warnings
warnings.filterwarnings('ignore')
random.seed(a=123456789)
np.random.seed(123456789)
tf.set_random_seed(123456789)
## Learning Model
class AutoEncoder:
# input image size
nWidth = 512
nHeight = 512
nPixels = nWidth * nHeight
read_threads = 1
outputWidth = nWidth
outputHeight = nHeight
# loss function with L1 distance
@staticmethod
def L1(output, target):
return tf.reduce_sum(tf.abs(target-output))
# loss function with L2 distance
@staticmethod
def L2(output, target):
return tf.reduce_sum(tf.square(target-output))
def __init__(self, training_csv_file_name, **options):
# options by argument
self.batch_size = options.get('batch_size', 1)
self.is_data_augmentation = options.get('is_data_augmentation', True)
# Option to skip conecctions between corresponding layers of encoder and decoder as in U-net
self.is_skip_connection = options.get('is_skip_connection', True)
self.loss_function = options.get('loss_function', AutoEncoder.L1)
isDebug = True
if isDebug:
print("batch_size : {0}".format(self.batch_size))
print("data_augmentation : {0}".format(self.is_data_augmentation))
print("skip_connection : {0}".format(self.is_skip_connection))
print("loss_function : {0}".format(self.loss_function))
with tf.Graph().as_default():
self.prepare_model()
self.prepare_session()
self.prepare_batch(training_csv_file_name)
def prepare_model(self):
with tf.name_scope("input"):
x = tf.placeholder(tf.float32, [None, AutoEncoder.nPixels])
x_image = tf.cast(tf.reshape(x, [self.batch_size, AutoEncoder.nWidth, AutoEncoder.nHeight, 1]), tf.float32)
t = tf.placeholder(tf.float32, [None, AutoEncoder.nPixels])
t_image = tf.reshape(t, [self.batch_size, AutoEncoder.nWidth, AutoEncoder.nHeight, 1])
# keep probabilities for dropout layer
keep_prob = tf.placeholder(tf.float32)
keep_all = tf.constant(1.0, dtype=tf.float32)
## Data Augmentation
if self.is_data_augmentation:
x_tmp_array = []
t_tmp_array = []
for i in range(self.batch_size):
x_tmp = x_image[i, :, :, :]
t_tmp = t_image[i, :, :, :]
# flip each images left right and up down randomly
rint = random.randint(0, 2)
if rint%2 != 0:
x_tmp = tf.image.flip_left_right(x_tmp)
t_tmp = tf.image.flip_left_right(t_tmp)
rint = random.randint(0, 4)
# Some images has meaning in vertical direction,
# so images are flipped vertically in lower probability than horizontal flipping
if rint%4 == 0:
x_tmp = tf.image.flip_up_down(x_tmp)
t_tmp = tf.image.flip_up_down(t_tmp)
rint = random.randint(0, 4)
# Some images has meaning in vertical direction,
# so images are transposed in lower probability than horizontal flipping
if rint%4 == 0:
x_tmp = tf.image.transpose_image(x_tmp)
t_tmp = tf.image.transpose_image(t_tmp)
x_tmp_array.append(tf.expand_dims(x_tmp, 0))
t_tmp_array.append(tf.expand_dims(t_tmp, 0))
x_image = tf.concat(x_tmp_array, axis=0)
t_image = tf.concat(t_tmp_array, axis=0)
self.x_image = x_image
# Encoding function for AutoEncoder
def encode(batch_input, out_channels, stride, filter_size):
with tf.variable_scope("encode"):
in_channels = batch_input.get_shape()[3]
filter = tf.get_variable("filter", [filter_size, filter_size, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, in_channels, out_channels]
# => [batch, out_height, out_width, out_channels]
# padding if needed with the values of filter_size and stride to fit output size to out_channels
pad_size = int(filter_size - stride)
if pad_size > 0:
pad0 = pad_size//2
pad1 = pad_size//2 + pad_size%2
batch_input = tf.pad(batch_input, [[0, 0], [pad0, pad1], [pad0, pad1], [0, 0]], mode="CONSTANT")
conved = tf.nn.conv2d(batch_input, filter, [1, stride, stride, 1], padding="VALID")
return conved
# Leaky ReLU
def lrelu(x, a):
with tf.name_scope("LeakyReLU"):
x = tf.identity(x)
# leak[a*x/2 - a*abs(x)/2] + linear[x/2 + abs(x)/2]
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
# Batch Normalization
def batchnorm(input):
with tf.variable_scope("BatchNormalization"):
input = tf.identity(input)
channels = input.get_shape()[3]
offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer)
scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)
variance_epsilon = 1e-5
normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
return normalized
# Decoding function for AutoEncoder
def decode(batch_input, out_channels, filter_size):
with tf.variable_scope("decode"):
batch, in_height, in_width, in_channels = [int(d) for d in batch_input.get_shape()]
filter = tf.get_variable("filter", [filter_size, filter_size, out_channels, in_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
# => [batch, out_height, out_width, out_channels]
conved = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
return conved
# List to contain each layer of AutoEncoder
layers = []
# 3 if used for color image
num_channels = 1
out_channels_base = 32
encode_output_channels = [
(out_channels_base , 16), # encoder_1: [batch_size, 512, 512, num_channels] => [batch_size, 256, 256, out_channels_base * 2]
(out_channels_base * 2, 8), # encoder_2: [batch_size, 256, 256, out_channels_base] => [batch_size, 128, 128, out_channels_base * 2]
(out_channels_base * 4, 4), # encoder_3: [batch_size, 128, 128, out_channels_base] => [batch_size, 64, 64, out_channels_base * 2]
(out_channels_base * 8, 4), # encoder_4: [batch_size, 64, 64, out_channels_base * 2] => [batch_size, 32, 32, out_channels_base * 4]
(out_channels_base * 8, 4), # encoder_5: [batch_size, 32, 32, out_channels_base * 4] => [batch_size, 16, 16, out_channels_base * 8]
(out_channels_base * 8, 4), # encoder_6: [batch_size, 16, 16, out_channels_base * 8] => [batch_size, 8, 8, out_channels_base * 8]
(out_channels_base * 8, 4), # encoder_7: [batch_size, 8, 8, out_channels_base * 8] => [batch_size, 4, 4, out_channels_base * 8]
(out_channels_base * 8, 4), # encoder_8: [batch_size, 4, 4, out_channels_base * 8] => [batch_size, 2, 2, out_channels_base * 8]
#(out_channels_base * 8, 2) # encoder_9: [batch_size, 2, 2, out_channels_base * 8] => [batch_size, 1, 1, out_channels_base * 8]
]
for encoder_index, (out_channels, filter_size) in enumerate(encode_output_channels):
with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
if encoder_index == 0:
output = encode(x_image, out_channels, 2, filter_size)
else:
rectified = lrelu(layers[-1], 0.2)
# [batch_size, height, width, in_channels] => [batch_size, height/2, width/2, out_channels]
encoded = encode(rectified, out_channels, 2, filter_size)
output = batchnorm(encoded)
layers.append(output)
print(output)
decode_output_channels = [
#(out_channels_base * 8, keep_prob, 2), # decoder_9: [batch_size, 1, 1, out_channels_base * 8] => [batch_size, 2, 2, out_channels_base * 8 * 2]
(out_channels_base * 8, keep_prob, 4), # decoder_8: [batch_size, 2, 2, out_channels_base * 8 * 2] => [batch_size, 4, 4, out_channels_base * 8 * 2]
(out_channels_base * 8, keep_prob, 4), # decoder_7: [batch_size, 4, 4, out_channels_base * 8 * 2] => [batch_size, 8, 8, out_channels_base * 8 * 2]
(out_channels_base * 8, keep_all, 4), # decoder_6: [batch_size, 8, 8, out_channels_base * 8 * 2] => [batch_size, 16, 16, out_channels_base * 8 * 2]
(out_channels_base * 8, keep_all, 4), # decoder_5: [batch_size, 16, 16, out_channels_base * 8 * 2] => [batch_size, 32, 32, out_channels_base * 4 * 2]
(out_channels_base * 4, keep_all, 4), # decoder_4: [batch_size, 32, 32, out_channels_base * 4 * 2] => [batch_size, 64, 64, out_channels_base * 2 * 2]
(out_channels_base * 2, keep_all, 4), # decoder_3: [batch_size, 64, 64, out_channels_base * 4 * 2] => [batch_size, 128, 128, out_channels_base * 2 * 2]
(out_channels_base , keep_all, 8), # decoder_2: [batch_size, 128, 128, out_channels_base * 2 * 2] => [batch_size, 256, 256, out_channels_base * 2]
(num_channels , keep_all, 16), # decoder_1: [batch, 256, 256, out_channels_base * 2] => [batch, 512, 512, num_channels]
]
num_encoder_layers = len(layers)
for decoder_index, (out_channels, dropout_keep_prob, filter_size) in enumerate(decode_output_channels):
skip_layer = num_encoder_layers - decoder_index - 1
with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
if decoder_index == 0:
# Even if "skip connection", first layer in decode layers is connected only from encode layer
input = layers[-1]
else:
if self.is_skip_connection == True:
# Concat output from encoder layer to keep detailed information
input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
else:
input = layers[-1]
rectified = tf.nn.relu(input)
# [batch_size, height, width, in_channels] => [batch_size, height*2, width*2, out_channels]
output = decode(rectified, out_channels, filter_size)
if decoder_index != num_encoder_layers-1:
output = batchnorm(output)
#else:
# final decoder
#output = tf.tanh(output)
# dropout layer
output = tf.cond(dropout_keep_prob < 1.0, lambda: tf.nn.dropout(output, keep_prob=dropout_keep_prob), lambda: output)
layers.append(output)
print(output)
output = layers[-1]
with tf.name_scope("Optimizer"):
## Apply loss function (difference between training data and predicted data), and learning algorithm.
t_compare = t_image
loss = self.loss_function(output, t_compare)
train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
tf.summary.scalar("loss", loss)
#tf.histogram_summary("Convolution_1:biases", b_conv1)
self.x = x
self.t = t
self.keep_prob = keep_prob
self.train_step = train_step
self.output = output
self.t_compare = t_compare
self.loss = loss
def prepare_session(self):
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("board/learn_logs", sess.graph)
self.sess = sess
self.saver = saver
self.summary = summary
self.writer = writer
# https://www.tensorflow.org/versions/r0.12/how_tos/reading_data/index.html#batching
    def read_my_file_format(self, filename_queue):
        """Read one CSV record from *filename_queue* and decode its image pair.

        Each CSV line is expected to hold two PNG paths: an input image and
        its depth map. Returns a (image, depth) pair of uint8 tensors with
        static shape [512, 512, 1].
        """
        reader = tf.TextLineReader()
        key, record_string = reader.read(filename_queue)
        # "a" means representative value to indicate type for csv cell value.
        image_file_name, depth_file_name = tf.decode_csv(record_string, [["a"], ["a"]])
        image_png_data = tf.read_file(image_file_name)
        depth_png_data = tf.read_file(depth_file_name)
        # channels=1 means image is read as gray-scale
        image_decoded = tf.image.decode_png(image_png_data, channels=1)
        # decode_png yields an unknown static shape; pin it so downstream
        # batching ops can infer shapes.
        image_decoded.set_shape([512, 512, 1])
        depth_decoded = tf.image.decode_png(depth_png_data, channels=1)
        depth_decoded.set_shape([512, 512, 1])
        return image_decoded, depth_decoded
def input_pipeline(self, filenames, batch_size, read_threads, num_epochs=None):
filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
min_after_dequeue = 2
capacity = min_after_dequeue + 3 * batch_size
example_list = [self.read_my_file_format(filename_queue) for _ in range(read_threads)]
example_batch, label_batch = tf.train.shuffle_batch_join(
example_list, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return example_batch, label_batch
    def prepare_batch(self, training_csv_file_name):
        """Wire the input pipeline for one CSV file and cache the batch tensors."""
        image_batch, depth_batch = self.input_pipeline([training_csv_file_name], self.batch_size, AutoEncoder.read_threads)
        self.image_batch = image_batch
        # NOTE(review): the trailing "| 0.7586 | 0.390098 |" below is
        # dataset-table residue fused onto this line, not valid Python —
        # confirm against the original source file.
        self.depth_batch = depth_batch | 0.7586 | 0.390098 |
import cv2
import numpy as np
import os
import sys
from Output import Output
class YoloObjectDetector:
    """YOLOv3 object detector built on the OpenCV DNN module.

    Loads either the tiny or full YOLOv3 network from the ``models/``
    directory and draws NMS-filtered detections onto images via the
    project's ``Output`` helper.
    """
    def __init__(self, modelSize):
        # Both weight files must be present even though only one net is loaded.
        if not (os.path.exists("models/yolo-tiny/yolov3-tiny.weights") and os.path.exists("models/yolo/yolov3.weights")):
            sys.exit("Please download the YOLOv3 weight files first (using downloadYOLO.sh)")
        if modelSize == "tiny":
            # Tiny model always runs at the canonical 416x416 input size.
            self.modelSize = 416
            self.model = cv2.dnn.readNet("models/yolo-tiny/yolov3-tiny.weights",
                                         "models/yolo-tiny/yolov3-tiny.cfg")
        else:
            # Any other value is interpreted as the square input size in pixels.
            self.modelSize = int(modelSize)
            self.model = cv2.dnn.readNet("models/yolo/yolov3.weights",
                                         "models/yolo/yolov3.cfg")
        # One class label per line in the text file.
        with open("models/yolo/yolov3.txt", 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        self.output = Output(self.classes)
        # scale is 0.00392 for YOLO as it does not use 0..255 but 0..1 as range (0.00392 = 1/255)
        self.scale = 0.00392
    def processImage(self, image):
        """Run detection on *image* and draw surviving boxes onto it in place.

        Returns the annotated image, or None when *image* is None or no
        detection survives non-maximum suppression.
        """
        if image is None:
            print("Ignoring image")
            return
        image_height, image_width, _ = image.shape
        # swapRB=True: OpenCV frames are BGR while the net expects RGB.
        blob = cv2.dnn.blobFromImage(image, self.scale, (self.modelSize, self.modelSize), (0, 0, 0), True, crop=False)
        self.model.setInput(blob)
        retval = self.model.forward(self.get_output_layers(self.model))
        class_ids = []
        confidences = []
        boxes = []
        conf_threshold = 0.5
        nms_threshold = 0.4
        for out in retval:
            for detection in out:
                # detection layout: [cx, cy, w, h, objectness, class scores...]
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > conf_threshold:
                    # YOLO coordinates are normalized; scale back to pixels.
                    center_x = int(detection[0] * image_width)
                    center_y = int(detection[1] * image_height)
                    w = int(detection[2] * image_width)
                    h = int(detection[3] * image_height)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([x, y, w, h])
        indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
        if len(indices) == 0:
            return
        for i in indices:
            # NOTE(review): i[0] assumes NMSBoxes returns Nx1 rows; OpenCV
            # >= 4.5.4 returns a flat array instead — confirm the cv2 version.
            i = i[0]
            box = boxes[i]
            x = box[0]
            y = box[1]
            w = box[2]
            h = box[3]
            self.output.draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
        return image
    def get_output_layers(self, net):
        """Return the names of the network's unconnected (output) layers."""
        layer_names = net.getLayerNames()
        # NOTE(review): i[0] - 1 likewise assumes Nx1 rows from
        # getUnconnectedOutLayers; flat arrays on newer OpenCV break this.
        output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        return output_layers | objectDetection/YoloObjectDetector.py | import cv2
import numpy as np
import os
import sys
from Output import Output
class YoloObjectDetector:
    """YOLOv3 object detector wrapper around the OpenCV DNN module."""
    def __init__(self, modelSize):
        # Both weight files must be present even though only one net is loaded.
        if not (os.path.exists("models/yolo-tiny/yolov3-tiny.weights") and os.path.exists("models/yolo/yolov3.weights")):
            sys.exit("Please download the YOLOv3 weight files first (using downloadYOLO.sh)")
        if modelSize == "tiny":
            self.modelSize = 416
            self.model = cv2.dnn.readNet("models/yolo-tiny/yolov3-tiny.weights",
                                         "models/yolo-tiny/yolov3-tiny.cfg")
        else:
            # Any other value is interpreted as the square input size in pixels.
            self.modelSize = int(modelSize)
            self.model = cv2.dnn.readNet("models/yolo/yolov3.weights",
                                         "models/yolo/yolov3.cfg")
        with open("models/yolo/yolov3.txt", 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        self.output = Output(self.classes)
        # scale is 0.00392 for YOLO as it does not use 0..255 but 0..1 as range (0.00392 = 1/255)
        self.scale = 0.00392
    def processImage(self, image):
        """Detect objects in *image*, draw boxes in place, return the image
        (or None when nothing was detected or the input is None)."""
        if image is None:
            print("Ignoring image")
            return
        image_height, image_width, _ = image.shape
        blob = cv2.dnn.blobFromImage(image, self.scale, (self.modelSize, self.modelSize), (0, 0, 0), True, crop=False)
        self.model.setInput(blob)
        retval = self.model.forward(self.get_output_layers(self.model))
        class_ids = []
        confidences = []
        boxes = []
        conf_threshold = 0.5
        nms_threshold = 0.4
        for out in retval:
            for detection in out:
                # detection layout: [cx, cy, w, h, objectness, class scores...]
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > conf_threshold:
                    center_x = int(detection[0] * image_width)
                    center_y = int(detection[1] * image_height)
                    w = int(detection[2] * image_width)
                    h = int(detection[3] * image_height)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([x, y, w, h])
        indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
        if len(indices) == 0:
            return
        for i in indices:
            # NOTE(review): i[0] assumes Nx1 rows from NMSBoxes; OpenCV
            # >= 4.5.4 returns a flat array — confirm the cv2 version.
            i = i[0]
            box = boxes[i]
            x = box[0]
            y = box[1]
            w = box[2]
            h = box[3]
            self.output.draw_prediction(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
        return image
    def get_output_layers(self, net):
        """Return the names of the network's unconnected (output) layers."""
        layer_names = net.getLayerNames()
        output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        return output_layers | 0.317744 | 0.212028 |
import logging
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django_prbac.models import Grant, Role
from corehq import privileges
from corehq.apps.accounting.utils import ensure_grants, log_removed_grants
from corehq.apps.accounting.bootstrap import features
logger = logging.getLogger(__name__)
# Slugs of retired privileges; they are removed in Command.handle via
# OLD_PRIVILEGES below.
BULK_CASE_AND_USER_MANAGEMENT = 'bulk_case_and_user_management'
CROSS_PROJECT_REPORTS = 'cross_project_reports'
def cchq_prbac_bootstrap(apps, schema_editor):
    """RunPython hook that shells out to the cchq_prbac_bootstrap command.

    Both arguments are ignored; they exist only to satisfy the
    migrations.RunPython calling convention.
    """
    call_command('cchq_prbac_bootstrap')
class Command(BaseCommand):
    """Idempotently seed django-prbac Role rows and the Grants between them.

    Creates any missing privilege and plan roles, wires plan -> privilege
    grants via ensure_grants, and deletes roles for retired privileges.
    """
    help = 'Populate a fresh database with some sample roles and grants'
    def add_arguments(self, parser):
        """Register --dry-run, --verbose and --fresh-start flags."""
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            # NOTE(review): "log what happen" reads like a typo for "what
            # would happen" — help text only.
            help='Do not actually modify the database, just verbosely log what happen',
        )
        parser.add_argument(
            '--verbose',
            action='store_true',
            default=False,
            help='Enable debug output',
        )
        parser.add_argument(
            '--fresh-start',
            action='store_true',
            default=False,
            help='We changed the core v0 plans, wipe all existing plans and start over. USE CAUTION.',
        )
    def handle(self, dry_run=False, verbose=False, fresh_start=False, **options):
        """Entry point: optionally wipe all roles, then (re)create roles,
        grants, and clean up retired privileges."""
        self.verbose = verbose
        if fresh_start:
            # Destructive path; requires an interactive confirmation.
            confirm_fresh_start = input(
                "Are you sure you want to delete all Roles and start over? You can't do this"
                " if accounting is already set up. Type 'yes' to continue."
            )
            if confirm_fresh_start == 'yes':
                self.flush_roles()
        # Cache of existing roles, kept in sync by ensure_roles below.
        self.roles_by_slug = {role.slug: role for role in Role.objects.all()}
        self.ensure_roles(self.BOOTSTRAP_PRIVILEGES + self.BOOTSTRAP_PLANS, dry_run)
        ensure_grants(
            list(self.BOOTSTRAP_GRANTS.items()),  # py3 iterable
            dry_run=dry_run,
            verbose=self.verbose,
            roles_by_slug=self.roles_by_slug,
        )
        if verbose or dry_run:
            log_removed_grants(self.OLD_PRIVILEGES, dry_run=dry_run)
        if not dry_run:
            Role.objects.filter(slug__in=self.OLD_PRIVILEGES).delete()
    def flush_roles(self):
        """Delete every Role row (used only by --fresh-start)."""
        logger.info('Flushing ALL Roles...')
        Role.objects.all().delete()
    def ensure_roles(self, roles, dry_run=False):
        """
        Add each role if it does not already exist, otherwise skip it.
        """
        dry_run_tag = "[DRY RUN] " if dry_run else ""
        roles_to_save = []
        for role in roles:
            if role.slug not in self.roles_by_slug:
                if self.verbose or dry_run:
                    logger.info('%sCreating role: %s', dry_run_tag, role.name)
                if not dry_run:
                    roles_to_save.append(role)
            else:
                logger.info('Role already exists: %s', role.name)
        if roles_to_save:
            roles = Role.objects.bulk_create(roles_to_save)
            self.roles_by_slug.update((role.slug, role) for role in roles)
    # Individually grantable privilege roles.
    BOOTSTRAP_PRIVILEGES = [
        Role(slug=privileges.API_ACCESS, name='API Access', description=''),
        Role(slug=privileges.LOOKUP_TABLES, name='Lookup Tables', description=''),
        Role(slug=privileges.CLOUDCARE, name='Web-based Applications (CloudCare)', description=''),
        Role(slug=privileges.CUSTOM_BRANDING, name='Custom Branding', description=''),
        Role(slug=privileges.ACTIVE_DATA_MANAGEMENT, name='Active Data Management', description=''),
        Role(slug=privileges.CUSTOM_REPORTS, name='Custom Reports', description=''),
        Role(slug=privileges.ROLE_BASED_ACCESS, name='Role-based Access', description=''),
        Role(slug=privileges.RESTRICT_ACCESS_BY_LOCATION, name='Restrict Access By Location', description=''),
        Role(slug=privileges.OUTBOUND_SMS, name='Outbound SMS',
             description='Use of any outbound messaging / SMS services.',
             ),
        Role(slug=privileges.REMINDERS_FRAMEWORK, name='Rules Engine (Use of Reminders Framework)',
             description='Use of reminders framework for spawning reminders/alerts based on certain criteria.',
             ),
        Role(slug=privileges.CUSTOM_SMS_GATEWAY, name='Custom Telerivet (Android) SMS Gateway',
             description='Ability to set up telerivet gateway on the "SMS Connectivity" page (inbound or outbound).',
             ),
        Role(slug=privileges.INBOUND_SMS, name='Inbound SMS (where available)', description=''),
        Role(slug=privileges.BULK_CASE_MANAGEMENT, name='Bulk Case Management', description=''),
        Role(slug=privileges.BULK_USER_MANAGEMENT, name='Bulk User Management', description=''),
        Role(slug=privileges.DEIDENTIFIED_DATA, name='De-identified Data', description=''),
        Role(slug=privileges.HIPAA_COMPLIANCE_ASSURANCE, name='HIPAA Compliance Assurance', description=''),
        Role(slug=privileges.ALLOW_EXCESS_USERS, name='Can Add Users Above Limit', description=''),
        Role(slug=privileges.COMMCARE_LOGO_UPLOADER, name='Custom CommCare Logo Uploader', description=''),
        Role(slug=privileges.LOCATIONS, name='Locations', description=''),
        Role(slug=privileges.REPORT_BUILDER, name='User Configurable Report Builder', description=''),
        Role(slug=privileges.REPORT_BUILDER_TRIAL, name='Report Builder Trial', description=''),
        Role(slug=privileges.REPORT_BUILDER_5, name='Report Builder, 5 report limit', description=''),
        Role(slug=privileges.REPORT_BUILDER_15, name='Report Builder, 15 report limit', description=''),
        Role(slug=privileges.REPORT_BUILDER_30, name='Report Builder, 30 report limit', description=''),
        Role(slug=privileges.USER_CASE, name='User Case Management', description=''),
        Role(slug=privileges.DATA_CLEANUP, name='Data Management',
             description='Tools for cleaning up data, including editing submissions and archiving forms.'),
        Role(slug=privileges.TEMPLATED_INTENTS, name='Templated Intents',
             description='Provides a dropdown for Android App Callouts'),
        Role(slug=privileges.CUSTOM_INTENTS, name='Custom Intents',
             description='Allows for specifying custom intents'),
        Role(slug=privileges.ADVANCED_DOMAIN_SECURITY, name='Advanced Domain Security',
             description='Allows domains to set security policies for all web users'),
        Role(slug=privileges.PRACTICE_MOBILE_WORKERS, name='Practice mode for mobile workers',
             description='Allows turning on practice mode for mobile workers and link them to applications'),
        Role(slug=privileges.BUILD_PROFILES, name='Application Profiles',
             description='Allows domains to create application profiles to customize app deploys'),
        # NOTE(review): "Dashbord" typo is in the stored role name; fixing it
        # here would change persisted data, so it is left untouched.
        Role(slug=privileges.EXCEL_DASHBOARD, name="Excel Dashbord",
             description="Allows domains to create Excel dashboard html exports"),
        Role(slug=privileges.DAILY_SAVED_EXPORT, name='DAILY_SAVED_EXPORT',
             description="Allows domains to create Daily Saved Exports"),
        Role(slug=privileges.ZAPIER_INTEGRATION, name='Zapier Integration',
             description='Allows domains to use zapier (zapier.com) integration'),
        Role(slug=privileges.LOGIN_AS, name='Login As for App Preview',
             description='Allows domains to use the login as feature of app preview'),
        Role(slug=privileges.CASE_SHARING_GROUPS,
             name='Case Sharing via Groups',
             description='Allows turning on case sharing between members in a group.'),
        Role(slug=privileges.CHILD_CASES,
             name='Child Cases',
             description='Allows for use of child cases / subcases in applications.'),
        Role(slug=privileges.ODATA_FEED,
             name='OData Feed - Tableau / BI Integration',
             description='Allows usage of Tableau / BI Integration (OData Feeds)'),
        Role(slug=privileges.DATA_FORWARDING,
             name='Data Forwarding',
             description='Allows use of Data Forwarding'),
        Role(slug=privileges.PROJECT_ACCESS,
             name='Project Access',
             description='Allows access to core project functionality.'),
        Role(slug=privileges.APP_USER_PROFILES,
             name='App User Profiles',
             description='Allows use of App User Profiles.'),
        Role(slug=privileges.GEOCODER, name='Geocoder', description='Address widget in Web Apps.'),
    ]
    # Software-plan roles; each slug maps to a grant bundle in BOOTSTRAP_GRANTS.
    BOOTSTRAP_PLANS = [
        Role(slug='paused_plan_v0', name='Paused Plan', description=''),
        Role(slug='community_plan_v0', name='Community Plan', description=''),
        Role(slug='community_plan_v1', name='Community Plan', description=''),
        Role(slug='community_plan_v2', name='Community Plan', description=''),
        Role(slug='standard_plan_v0', name='Standard Plan', description=''),
        Role(slug='standard_plan_v1', name='Standard Plan', description=''),
        Role(slug='pro_plan_v0', name='Pro Plan', description=''),
        Role(slug='pro_plan_v1', name='Pro Plan', description=''),
        Role(slug='advanced_plan_v0', name='Advanced Plan', description=''),
        Role(slug='enterprise_plan_v0', name='Enterprise Plan', description=''),
    ] + [
        Role(slug='standard_plan_report_builder_v0', name='Standard Plan - 5 Reports', description=''),
        Role(slug='pro_plan_report_builder_v0', name='Pro Plan - 5 Reports', description=''),
        Role(slug='advanced_plan_report_builder_v0', name='Advanced Plan - 5 Reports', description=''),
    ]
    # Retired privilege slugs removed by handle().
    OLD_PRIVILEGES = [
        BULK_CASE_AND_USER_MANAGEMENT,
        CROSS_PROJECT_REPORTS,
    ]
    # Plan slug -> grant bundle (defined in corehq.apps.accounting.bootstrap.features).
    BOOTSTRAP_GRANTS = {
        'paused_plan_v0': features.paused_v0,
        'community_plan_v0': features.community_v0,
        'community_plan_v1': features.community_v1,
        'community_plan_v2': features.community_v2,
        'standard_plan_v0': features.standard_v0,
        'standard_plan_v1': features.standard_v1,
        'pro_plan_v0': features.pro_v0,
        'pro_plan_v1': features.pro_v1,
        'advanced_plan_v0': features.advanced_v0,
        'enterprise_plan_v0': features.enterprise_v0,
    } | corehq/apps/hqadmin/management/commands/cchq_prbac_bootstrap.py | import logging
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django_prbac.models import Grant, Role
from corehq import privileges
from corehq.apps.accounting.utils import ensure_grants, log_removed_grants
from corehq.apps.accounting.bootstrap import features
logger = logging.getLogger(__name__)
# Slugs of retired privileges; they are removed in Command.handle via
# OLD_PRIVILEGES below.
BULK_CASE_AND_USER_MANAGEMENT = 'bulk_case_and_user_management'
CROSS_PROJECT_REPORTS = 'cross_project_reports'
def cchq_prbac_bootstrap(apps, schema_editor):
    """RunPython hook that shells out to the cchq_prbac_bootstrap command.

    Both arguments are ignored; they exist only to satisfy the
    migrations.RunPython calling convention.
    """
    call_command('cchq_prbac_bootstrap')
class Command(BaseCommand):
    """Idempotently seed django-prbac Role rows and the Grants between them."""
    help = 'Populate a fresh database with some sample roles and grants'
    def add_arguments(self, parser):
        """Register --dry-run, --verbose and --fresh-start flags."""
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help='Do not actually modify the database, just verbosely log what happen',
        )
        parser.add_argument(
            '--verbose',
            action='store_true',
            default=False,
            help='Enable debug output',
        )
        parser.add_argument(
            '--fresh-start',
            action='store_true',
            default=False,
            help='We changed the core v0 plans, wipe all existing plans and start over. USE CAUTION.',
        )
    def handle(self, dry_run=False, verbose=False, fresh_start=False, **options):
        """Entry point: optionally wipe all roles, then (re)create roles,
        grants, and clean up retired privileges."""
        self.verbose = verbose
        if fresh_start:
            # Destructive path; requires an interactive confirmation.
            confirm_fresh_start = input(
                "Are you sure you want to delete all Roles and start over? You can't do this"
                " if accounting is already set up. Type 'yes' to continue."
            )
            if confirm_fresh_start == 'yes':
                self.flush_roles()
        self.roles_by_slug = {role.slug: role for role in Role.objects.all()}
        self.ensure_roles(self.BOOTSTRAP_PRIVILEGES + self.BOOTSTRAP_PLANS, dry_run)
        ensure_grants(
            list(self.BOOTSTRAP_GRANTS.items()),  # py3 iterable
            dry_run=dry_run,
            verbose=self.verbose,
            roles_by_slug=self.roles_by_slug,
        )
        if verbose or dry_run:
            log_removed_grants(self.OLD_PRIVILEGES, dry_run=dry_run)
        if not dry_run:
            Role.objects.filter(slug__in=self.OLD_PRIVILEGES).delete()
    def flush_roles(self):
        """Delete every Role row (used only by --fresh-start)."""
        logger.info('Flushing ALL Roles...')
        Role.objects.all().delete()
    def ensure_roles(self, roles, dry_run=False):
        """
        Add each role if it does not already exist, otherwise skip it.
        """
        dry_run_tag = "[DRY RUN] " if dry_run else ""
        roles_to_save = []
        for role in roles:
            if role.slug not in self.roles_by_slug:
                if self.verbose or dry_run:
                    logger.info('%sCreating role: %s', dry_run_tag, role.name)
                if not dry_run:
                    roles_to_save.append(role)
            else:
                logger.info('Role already exists: %s', role.name)
        if roles_to_save:
            roles = Role.objects.bulk_create(roles_to_save)
            self.roles_by_slug.update((role.slug, role) for role in roles)
    # Individually grantable privilege roles.
    BOOTSTRAP_PRIVILEGES = [
        Role(slug=privileges.API_ACCESS, name='API Access', description=''),
        Role(slug=privileges.LOOKUP_TABLES, name='Lookup Tables', description=''),
        Role(slug=privileges.CLOUDCARE, name='Web-based Applications (CloudCare)', description=''),
        Role(slug=privileges.CUSTOM_BRANDING, name='Custom Branding', description=''),
        Role(slug=privileges.ACTIVE_DATA_MANAGEMENT, name='Active Data Management', description=''),
        Role(slug=privileges.CUSTOM_REPORTS, name='Custom Reports', description=''),
        Role(slug=privileges.ROLE_BASED_ACCESS, name='Role-based Access', description=''),
        Role(slug=privileges.RESTRICT_ACCESS_BY_LOCATION, name='Restrict Access By Location', description=''),
        Role(slug=privileges.OUTBOUND_SMS, name='Outbound SMS',
             description='Use of any outbound messaging / SMS services.',
             ),
        Role(slug=privileges.REMINDERS_FRAMEWORK, name='Rules Engine (Use of Reminders Framework)',
             description='Use of reminders framework for spawning reminders/alerts based on certain criteria.',
             ),
        Role(slug=privileges.CUSTOM_SMS_GATEWAY, name='Custom Telerivet (Android) SMS Gateway',
             description='Ability to set up telerivet gateway on the "SMS Connectivity" page (inbound or outbound).',
             ),
        Role(slug=privileges.INBOUND_SMS, name='Inbound SMS (where available)', description=''),
        Role(slug=privileges.BULK_CASE_MANAGEMENT, name='Bulk Case Management', description=''),
        Role(slug=privileges.BULK_USER_MANAGEMENT, name='Bulk User Management', description=''),
        Role(slug=privileges.DEIDENTIFIED_DATA, name='De-identified Data', description=''),
        Role(slug=privileges.HIPAA_COMPLIANCE_ASSURANCE, name='HIPAA Compliance Assurance', description=''),
        Role(slug=privileges.ALLOW_EXCESS_USERS, name='Can Add Users Above Limit', description=''),
        Role(slug=privileges.COMMCARE_LOGO_UPLOADER, name='Custom CommCare Logo Uploader', description=''),
        Role(slug=privileges.LOCATIONS, name='Locations', description=''),
        Role(slug=privileges.REPORT_BUILDER, name='User Configurable Report Builder', description=''),
        Role(slug=privileges.REPORT_BUILDER_TRIAL, name='Report Builder Trial', description=''),
        Role(slug=privileges.REPORT_BUILDER_5, name='Report Builder, 5 report limit', description=''),
        Role(slug=privileges.REPORT_BUILDER_15, name='Report Builder, 15 report limit', description=''),
        Role(slug=privileges.REPORT_BUILDER_30, name='Report Builder, 30 report limit', description=''),
        Role(slug=privileges.USER_CASE, name='User Case Management', description=''),
        Role(slug=privileges.DATA_CLEANUP, name='Data Management',
             description='Tools for cleaning up data, including editing submissions and archiving forms.'),
        Role(slug=privileges.TEMPLATED_INTENTS, name='Templated Intents',
             description='Provides a dropdown for Android App Callouts'),
        Role(slug=privileges.CUSTOM_INTENTS, name='Custom Intents',
             description='Allows for specifying custom intents'),
        Role(slug=privileges.ADVANCED_DOMAIN_SECURITY, name='Advanced Domain Security',
             description='Allows domains to set security policies for all web users'),
        Role(slug=privileges.PRACTICE_MOBILE_WORKERS, name='Practice mode for mobile workers',
             description='Allows turning on practice mode for mobile workers and link them to applications'),
        Role(slug=privileges.BUILD_PROFILES, name='Application Profiles',
             description='Allows domains to create application profiles to customize app deploys'),
        Role(slug=privileges.EXCEL_DASHBOARD, name="Excel Dashbord",
             description="Allows domains to create Excel dashboard html exports"),
        Role(slug=privileges.DAILY_SAVED_EXPORT, name='DAILY_SAVED_EXPORT',
             description="Allows domains to create Daily Saved Exports"),
        Role(slug=privileges.ZAPIER_INTEGRATION, name='Zapier Integration',
             description='Allows domains to use zapier (zapier.com) integration'),
        Role(slug=privileges.LOGIN_AS, name='Login As for App Preview',
             description='Allows domains to use the login as feature of app preview'),
        Role(slug=privileges.CASE_SHARING_GROUPS,
             name='Case Sharing via Groups',
             description='Allows turning on case sharing between members in a group.'),
        Role(slug=privileges.CHILD_CASES,
             name='Child Cases',
             description='Allows for use of child cases / subcases in applications.'),
        Role(slug=privileges.ODATA_FEED,
             name='OData Feed - Tableau / BI Integration',
             description='Allows usage of Tableau / BI Integration (OData Feeds)'),
        Role(slug=privileges.DATA_FORWARDING,
             name='Data Forwarding',
             description='Allows use of Data Forwarding'),
        Role(slug=privileges.PROJECT_ACCESS,
             name='Project Access',
             description='Allows access to core project functionality.'),
        Role(slug=privileges.APP_USER_PROFILES,
             name='App User Profiles',
             description='Allows use of App User Profiles.'),
        Role(slug=privileges.GEOCODER, name='Geocoder', description='Address widget in Web Apps.'),
    ]
    # Software-plan roles; each slug maps to a grant bundle in BOOTSTRAP_GRANTS.
    BOOTSTRAP_PLANS = [
        Role(slug='paused_plan_v0', name='Paused Plan', description=''),
        Role(slug='community_plan_v0', name='Community Plan', description=''),
        Role(slug='community_plan_v1', name='Community Plan', description=''),
        Role(slug='community_plan_v2', name='Community Plan', description=''),
        Role(slug='standard_plan_v0', name='Standard Plan', description=''),
        Role(slug='standard_plan_v1', name='Standard Plan', description=''),
        Role(slug='pro_plan_v0', name='Pro Plan', description=''),
        Role(slug='pro_plan_v1', name='Pro Plan', description=''),
        Role(slug='advanced_plan_v0', name='Advanced Plan', description=''),
        Role(slug='enterprise_plan_v0', name='Enterprise Plan', description=''),
    ] + [
        Role(slug='standard_plan_report_builder_v0', name='Standard Plan - 5 Reports', description=''),
        Role(slug='pro_plan_report_builder_v0', name='Pro Plan - 5 Reports', description=''),
        Role(slug='advanced_plan_report_builder_v0', name='Advanced Plan - 5 Reports', description=''),
    ]
    # Retired privilege slugs removed by handle().
    OLD_PRIVILEGES = [
        BULK_CASE_AND_USER_MANAGEMENT,
        CROSS_PROJECT_REPORTS,
    ]
    # Plan slug -> grant bundle (defined in corehq.apps.accounting.bootstrap.features).
    BOOTSTRAP_GRANTS = {
        'paused_plan_v0': features.paused_v0,
        'community_plan_v0': features.community_v0,
        'community_plan_v1': features.community_v1,
        'community_plan_v2': features.community_v2,
        'standard_plan_v0': features.standard_v0,
        'standard_plan_v1': features.standard_v1,
        'pro_plan_v0': features.pro_v0,
        'pro_plan_v1': features.pro_v1,
        'advanced_plan_v0': features.advanced_v0,
        'enterprise_plan_v0': features.enterprise_v0,
    } | 0.446495 | 0.070464 |
# "Air painting" demo: track a blue object with the webcam, draw its trail on
# the live frame, and record the annotated video to output.avi.
import numpy as np
import cv2
from collections import deque
## Define the upper and lower boundaries for a color to be considered "Blue"
blueLower = np.array([100, 60, 60])
blueUpper = np.array([140, 255, 255])
# Define a 5x5 kernel for erosion and dilation
kernel = np.ones((5, 5), np.uint8)
# Setup deques to store separate colors in separate arrays
bpoints = [deque(maxlen=512)]
bindex = 0
colors = [(255, 0, 0)]
colorIndex = 0
# Load the video
camera = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
# Keep looping
while True:
    # Grab the current paintWindow
    (grabbed, frame) = camera.read()
    # NOTE(review): frame is used before `grabbed` is checked below; a failed
    # read returns frame=None and cv2.flip would crash — confirm.
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame = cv2.rectangle(frame, (40,1), (140,65), (122,122,122), -1)
    cv2.putText(frame, "CLEAR ALL", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
    # Check to see if we have reached the end of the video
    if not grabbed:
        break
    # Determine which pixels fall within the blue boundaries and then blur the binary image
    blueMask = cv2.inRange(hsv, blueLower, blueUpper)
    blueMask = cv2.erode(blueMask, kernel, iterations=2)
    blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)
    blueMask = cv2.dilate(blueMask, kernel, iterations=1)
    # Find contours in the image
    cnts = cv2.findContours(blueMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    center = None
    # Check to see if any contours were found
    if len(cnts) > 0:
        # Sort the contours and find the largest one -- we
        # will assume this contour correspondes to the area of the bottle cap
        cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
        # Get the radius of the enclosing circle around the found contour
        ((x, y), radius) = cv2.minEnclosingCircle(cnt)
        # Draw the circle around the contour
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        # Get the moments to calculate the center of the contour (in this case Circle)
        M = cv2.moments(cnt)
        # NOTE(review): M['m00'] can be zero for a degenerate contour, which
        # would raise ZeroDivisionError — no guard here.
        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        # Needed for drawing lines
        # NOTE(review): the same point is appended again in the
        # colorIndex == 0 branch below, so normal drawing stores each point
        # twice — confirm whether this duplication is intended.
        bpoints[bindex].appendleft(center)
        if center[1] <= 65:
            if 40 <= center[0] <= 140: # Clear All
                bpoints = [deque(maxlen=512)]
                bindex = 0
            elif 160 <= center[0] <= 255:
                colorIndex = 0 # Blue
        else :
            if colorIndex == 0:
                bpoints[bindex].appendleft(center)
    # Append the next deque when no contours are detected
    else:
        bpoints.append(deque(maxlen=512))
        bindex += 1
    # Draw lines
    points = [bpoints]
    for i in range(len(points)):
        for j in range(len(points[i])):
            for k in range(1, len(points[i][j])):
                if points[i][j][k - 1] is None or points[i][j][k] is None:
                    continue
                cv2.line(frame, points[i][j][k - 1], points[i][j][k], colors[0], 2)
    # Show the frame and the paintWindow image
    cv2.imshow("Handpaint", frame)
    out.write(frame)
    # If the 'esc' key is pressed, stop the loop
    if cv2.waitKey(5) == 27:
        break
# Cleanup the camera and close any open windows
camera.release()
out.release()
cv2.destroyAllWindows() | VidHand_Tracker.py |
# "Air painting" demo: track a blue object with the webcam, draw its trail on
# the live frame, and record the annotated video to output.avi.
import numpy as np
import cv2
from collections import deque
## Define the upper and lower boundaries for a color to be considered "Blue"
blueLower = np.array([100, 60, 60])
blueUpper = np.array([140, 255, 255])
# Define a 5x5 kernel for erosion and dilation
kernel = np.ones((5, 5), np.uint8)
# Setup deques to store separate colors in separate arrays
bpoints = [deque(maxlen=512)]
bindex = 0
colors = [(255, 0, 0)]
colorIndex = 0
# Load the video
camera = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
# Keep looping
while True:
    # Grab the current paintWindow
    (grabbed, frame) = camera.read()
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame = cv2.rectangle(frame, (40,1), (140,65), (122,122,122), -1)
    cv2.putText(frame, "CLEAR ALL", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
    # Check to see if we have reached the end of the video
    if not grabbed:
        break
    # Determine which pixels fall within the blue boundaries and then blur the binary image
    blueMask = cv2.inRange(hsv, blueLower, blueUpper)
    blueMask = cv2.erode(blueMask, kernel, iterations=2)
    blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)
    blueMask = cv2.dilate(blueMask, kernel, iterations=1)
    # Find contours in the image
    cnts = cv2.findContours(blueMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    center = None
    # Check to see if any contours were found
    if len(cnts) > 0:
        # Sort the contours and find the largest one -- we
        # will assume this contour correspondes to the area of the bottle cap
        cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
        # Get the radius of the enclosing circle around the found contour
        ((x, y), radius) = cv2.minEnclosingCircle(cnt)
        # Draw the circle around the contour
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        # Get the moments to calculate the center of the contour (in this case Circle)
        M = cv2.moments(cnt)
        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        # Needed for drawing lines
        # NOTE(review): this point is appended again in the colorIndex == 0
        # branch below — duplicate entries per drawn point.
        bpoints[bindex].appendleft(center)
        if center[1] <= 65:
            if 40 <= center[0] <= 140: # Clear All
                bpoints = [deque(maxlen=512)]
                bindex = 0
            elif 160 <= center[0] <= 255:
                colorIndex = 0 # Blue
        else :
            if colorIndex == 0:
                bpoints[bindex].appendleft(center)
    # Append the next deque when no contours are detected
    else:
        bpoints.append(deque(maxlen=512))
        bindex += 1
    # Draw lines
    points = [bpoints]
    for i in range(len(points)):
        for j in range(len(points[i])):
            for k in range(1, len(points[i][j])):
                if points[i][j][k - 1] is None or points[i][j][k] is None:
                    continue
                cv2.line(frame, points[i][j][k - 1], points[i][j][k], colors[0], 2)
    # Show the frame and the paintWindow image
    cv2.imshow("Handpaint", frame)
    out.write(frame)
    # If the 'esc' key is pressed, stop the loop
    if cv2.waitKey(5) == 27:
        break
# Cleanup the camera and close any open windows
camera.release()
out.release()
cv2.destroyAllWindows() | 0.60743 | 0.497681 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public API of this module: the generated pulumi input-type classes below.
__all__ = [
    'ClusterApplicationArgs',
    'ClusterAutoScalingPolicyArgs',
    'ClusterBootstrapActionConfigArgs',
    'ClusterCloudWatchAlarmDefinitionArgs',
    'ClusterComputeLimitsArgs',
    'ClusterConfigurationArgs',
    'ClusterEbsBlockDeviceConfigArgs',
    'ClusterEbsConfigurationArgs',
    'ClusterHadoopJarStepConfigArgs',
    'ClusterInstanceFleetConfigArgs',
    'ClusterInstanceFleetProvisioningSpecificationsArgs',
    'ClusterInstanceGroupConfigArgs',
    'ClusterInstanceTypeConfigArgs',
    'ClusterJobFlowInstancesConfigArgs',
    'ClusterKerberosAttributesArgs',
    'ClusterKeyValueArgs',
    'ClusterManagedScalingPolicyArgs',
    'ClusterMetricDimensionArgs',
    'ClusterOnDemandProvisioningSpecificationArgs',
    'ClusterPlacementTypeArgs',
    'ClusterScalingActionArgs',
    'ClusterScalingConstraintsArgs',
    'ClusterScalingRuleArgs',
    'ClusterScalingTriggerArgs',
    'ClusterScriptBootstrapActionConfigArgs',
    'ClusterSimpleScalingPolicyConfigurationArgs',
    'ClusterSpotProvisioningSpecificationArgs',
    'ClusterStepConfigArgs',
    'ClusterTagArgs',
    'ClusterVolumeSpecificationArgs',
    'InstanceFleetConfigConfigurationArgs',
    'InstanceFleetConfigEbsBlockDeviceConfigArgs',
    'InstanceFleetConfigEbsConfigurationArgs',
    'InstanceFleetConfigInstanceFleetProvisioningSpecificationsArgs',
    'InstanceFleetConfigInstanceTypeConfigArgs',
    'InstanceFleetConfigOnDemandProvisioningSpecificationArgs',
    'InstanceFleetConfigSpotProvisioningSpecificationArgs',
    'InstanceFleetConfigVolumeSpecificationArgs',
    'InstanceGroupConfigAutoScalingPolicyArgs',
    'InstanceGroupConfigCloudWatchAlarmDefinitionArgs',
    'InstanceGroupConfigConfigurationArgs',
    'InstanceGroupConfigEbsBlockDeviceConfigArgs',
    'InstanceGroupConfigEbsConfigurationArgs',
    'InstanceGroupConfigMetricDimensionArgs',
    'InstanceGroupConfigScalingActionArgs',
    'InstanceGroupConfigScalingConstraintsArgs',
    'InstanceGroupConfigScalingRuleArgs',
    'InstanceGroupConfigScalingTriggerArgs',
    'InstanceGroupConfigSimpleScalingPolicyConfigurationArgs',
    'InstanceGroupConfigVolumeSpecificationArgs',
    'StepHadoopJarStepConfigArgs',
    'StepKeyValueArgs',
    'StudioTagArgs',
]
@pulumi.input_type
class ClusterApplicationArgs:
    """Input args describing one application to install on the cluster.

    All fields are optional; only explicitly supplied values are recorded.
    """

    def __init__(__self__, *,
                 additional_info: Optional[Any] = None,
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        # Record only the properties the caller actually provided, in
        # declaration order, so unset fields stay absent from the resource args.
        provided = (
            ("additional_info", additional_info),
            ("args", args),
            ("name", name),
            ("version", version),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Optional[Any]:
        return pulumi.get(self, "additional_info")

    @additional_info.setter
    def additional_info(self, value: Optional[Any]):
        pulumi.set(self, "additional_info", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class ClusterAutoScalingPolicyArgs:
    """Input args for an instance-group auto-scaling policy.

    Both ``constraints`` and ``rules`` are required.
    """

    def __init__(__self__, *,
                 constraints: pulumi.Input['ClusterScalingConstraintsArgs'],
                 rules: pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]):
        # Both fields are mandatory, so they are always recorded.
        for key, value in (("constraints", constraints), ("rules", rules)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def constraints(self) -> pulumi.Input['ClusterScalingConstraintsArgs']:
        return pulumi.get(self, "constraints")

    @constraints.setter
    def constraints(self, value: pulumi.Input['ClusterScalingConstraintsArgs']):
        pulumi.set(self, "constraints", value)

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]:
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class ClusterBootstrapActionConfigArgs:
    """Input args for a named bootstrap action run via a script.

    Both ``name`` and ``script_bootstrap_action`` are required.
    """

    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 script_bootstrap_action: pulumi.Input['ClusterScriptBootstrapActionConfigArgs']):
        # Both fields are mandatory, so they are always recorded.
        for key, value in (("name", name),
                           ("script_bootstrap_action", script_bootstrap_action)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="scriptBootstrapAction")
    def script_bootstrap_action(self) -> pulumi.Input['ClusterScriptBootstrapActionConfigArgs']:
        return pulumi.get(self, "script_bootstrap_action")

    @script_bootstrap_action.setter
    def script_bootstrap_action(self, value: pulumi.Input['ClusterScriptBootstrapActionConfigArgs']):
        pulumi.set(self, "script_bootstrap_action", value)
@pulumi.input_type
class ClusterCloudWatchAlarmDefinitionArgs:
    """Input args defining a CloudWatch alarm used as a scaling trigger.

    ``comparison_operator``, ``metric_name``, ``period`` and ``threshold``
    are required; the remaining fields are optional and only recorded when
    explicitly supplied.
    """

    def __init__(__self__, *,
                 comparison_operator: pulumi.Input[str],
                 metric_name: pulumi.Input[str],
                 period: pulumi.Input[int],
                 threshold: pulumi.Input[float],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        # Mandatory fields first, always recorded.
        for key, value in (("comparison_operator", comparison_operator),
                           ("metric_name", metric_name),
                           ("period", period),
                           ("threshold", threshold)):
            pulumi.set(__self__, key, value)
        # Optional fields are skipped when left unset.
        for key, value in (("dimensions", dimensions),
                           ("evaluation_periods", evaluation_periods),
                           ("namespace", namespace),
                           ("statistic", statistic),
                           ("unit", unit)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> pulumi.Input[str]:
        return pulumi.get(self, "comparison_operator")

    @comparison_operator.setter
    def comparison_operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "comparison_operator", value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def period(self) -> pulumi.Input[int]:
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: pulumi.Input[int]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]]:
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ClusterComputeLimitsArgs:
    """Input args bounding managed-scaling capacity for the cluster.

    ``maximum_capacity_units``, ``minimum_capacity_units`` and ``unit_type``
    are required; the two core/on-demand caps are optional.
    """

    def __init__(__self__, *,
                 maximum_capacity_units: pulumi.Input[int],
                 minimum_capacity_units: pulumi.Input[int],
                 unit_type: pulumi.Input[str],
                 maximum_core_capacity_units: Optional[pulumi.Input[int]] = None,
                 maximum_on_demand_capacity_units: Optional[pulumi.Input[int]] = None):
        # Mandatory fields are always recorded.
        for key, value in (("maximum_capacity_units", maximum_capacity_units),
                           ("minimum_capacity_units", minimum_capacity_units),
                           ("unit_type", unit_type)):
            pulumi.set(__self__, key, value)
        # Optional caps are skipped when left unset.
        for key, value in (("maximum_core_capacity_units", maximum_core_capacity_units),
                           ("maximum_on_demand_capacity_units", maximum_on_demand_capacity_units)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="maximumCapacityUnits")
    def maximum_capacity_units(self) -> pulumi.Input[int]:
        return pulumi.get(self, "maximum_capacity_units")

    @maximum_capacity_units.setter
    def maximum_capacity_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "maximum_capacity_units", value)

    @property
    @pulumi.getter(name="minimumCapacityUnits")
    def minimum_capacity_units(self) -> pulumi.Input[int]:
        return pulumi.get(self, "minimum_capacity_units")

    @minimum_capacity_units.setter
    def minimum_capacity_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "minimum_capacity_units", value)

    @property
    @pulumi.getter(name="unitType")
    def unit_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "unit_type")

    @unit_type.setter
    def unit_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "unit_type", value)

    @property
    @pulumi.getter(name="maximumCoreCapacityUnits")
    def maximum_core_capacity_units(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_core_capacity_units")

    @maximum_core_capacity_units.setter
    def maximum_core_capacity_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_core_capacity_units", value)

    @property
    @pulumi.getter(name="maximumOnDemandCapacityUnits")
    def maximum_on_demand_capacity_units(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_on_demand_capacity_units")

    @maximum_on_demand_capacity_units.setter
    def maximum_on_demand_capacity_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_on_demand_capacity_units", value)
@pulumi.input_type
class ClusterConfigurationArgs:
    """Input args for one application-configuration object.

    Note the recursive ``configurations`` field: a configuration may nest
    further configurations. All fields are optional.
    """

    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None):
        # Record only explicitly supplied fields.
        for key, value in (("classification", classification),
                           ("configuration_properties", configuration_properties),
                           ("configurations", configurations)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "classification")

    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")

    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class ClusterEbsBlockDeviceConfigArgs:
    """Input args pairing an EBS volume spec with a per-instance volume count.

    ``volume_specification`` is required; ``volumes_per_instance`` is optional.
    """

    def __init__(__self__, *,
                 volume_specification: pulumi.Input['ClusterVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "volume_specification", volume_specification)
        # Only record the count when the caller supplied one.
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['ClusterVolumeSpecificationArgs']:
        return pulumi.get(self, "volume_specification")

    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['ClusterVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volumes_per_instance")

    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class ClusterEbsConfigurationArgs:
    """Input args for the EBS configuration of an instance (group/type).

    Both fields are optional and only recorded when explicitly supplied.
    """

    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        for key, value in (("ebs_block_device_configs", ebs_block_device_configs),
                           ("ebs_optimized", ebs_optimized)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]]:
        return pulumi.get(self, "ebs_block_device_configs")

    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ebs_optimized")

    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class ClusterHadoopJarStepConfigArgs:
    """Input args for a Hadoop JAR step: the JAR to run plus optional
    arguments, main class, and step properties.

    ``jar`` is required; everything else is optional.
    """

    def __init__(__self__, *,
                 jar: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 main_class: Optional[pulumi.Input[str]] = None,
                 step_properties: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]] = None):
        pulumi.set(__self__, "jar", jar)
        # Optional fields are skipped when left unset.
        for key, value in (("args", args),
                           ("main_class", main_class),
                           ("step_properties", step_properties)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def jar(self) -> pulumi.Input[str]:
        return pulumi.get(self, "jar")

    @jar.setter
    def jar(self, value: pulumi.Input[str]):
        pulumi.set(self, "jar", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "main_class")

    @main_class.setter
    def main_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "main_class", value)

    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]]:
        return pulumi.get(self, "step_properties")

    @step_properties.setter
    def step_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]]):
        pulumi.set(self, "step_properties", value)
@pulumi.input_type
class ClusterInstanceFleetConfigArgs:
    """Input args describing an instance fleet: candidate instance types,
    launch specifications, and target on-demand/spot capacities.

    All fields are optional; only explicitly supplied values are recorded.
    """

    def __init__(__self__, *,
                 instance_type_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]] = None,
                 launch_specifications: Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 target_on_demand_capacity: Optional[pulumi.Input[int]] = None,
                 target_spot_capacity: Optional[pulumi.Input[int]] = None):
        for key, value in (("instance_type_configs", instance_type_configs),
                           ("launch_specifications", launch_specifications),
                           ("name", name),
                           ("target_on_demand_capacity", target_on_demand_capacity),
                           ("target_spot_capacity", target_spot_capacity)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceTypeConfigs")
    def instance_type_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]]:
        return pulumi.get(self, "instance_type_configs")

    @instance_type_configs.setter
    def instance_type_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]]):
        pulumi.set(self, "instance_type_configs", value)

    @property
    @pulumi.getter(name="launchSpecifications")
    def launch_specifications(self) -> Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']]:
        return pulumi.get(self, "launch_specifications")

    @launch_specifications.setter
    def launch_specifications(self, value: Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']]):
        pulumi.set(self, "launch_specifications", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="targetOnDemandCapacity")
    def target_on_demand_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_on_demand_capacity")

    @target_on_demand_capacity.setter
    def target_on_demand_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_on_demand_capacity", value)

    @property
    @pulumi.getter(name="targetSpotCapacity")
    def target_spot_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_spot_capacity")

    @target_spot_capacity.setter
    def target_spot_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_spot_capacity", value)
@pulumi.input_type
class ClusterInstanceFleetProvisioningSpecificationsArgs:
    """Input args bundling a fleet's on-demand and spot provisioning specs.

    Both fields are optional and only recorded when explicitly supplied.
    """

    def __init__(__self__, *,
                 on_demand_specification: Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']] = None,
                 spot_specification: Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']] = None):
        for key, value in (("on_demand_specification", on_demand_specification),
                           ("spot_specification", spot_specification)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']]:
        return pulumi.get(self, "on_demand_specification")

    @on_demand_specification.setter
    def on_demand_specification(self, value: Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']]):
        pulumi.set(self, "on_demand_specification", value)

    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']]:
        return pulumi.get(self, "spot_specification")

    @spot_specification.setter
    def spot_specification(self, value: Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']]):
        pulumi.set(self, "spot_specification", value)
@pulumi.input_type
class ClusterInstanceGroupConfigArgs:
    """Input args for an instance group: fixed instance type and count plus
    optional scaling policy, pricing, configurations, AMI and EBS settings.

    ``instance_count`` and ``instance_type`` are required.
    """

    def __init__(__self__, *,
                 instance_count: pulumi.Input[int],
                 instance_type: pulumi.Input[str],
                 auto_scaling_policy: Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']] = None,
                 bid_price: Optional[pulumi.Input[str]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['ClusterEbsConfigurationArgs']] = None,
                 market: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        # Mandatory fields are always recorded.
        pulumi.set(__self__, "instance_count", instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        # Optional fields are skipped when left unset.
        for key, value in (("auto_scaling_policy", auto_scaling_policy),
                           ("bid_price", bid_price),
                           ("configurations", configurations),
                           ("custom_ami_id", custom_ami_id),
                           ("ebs_configuration", ebs_configuration),
                           ("market", market),
                           ("name", name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> pulumi.Input[int]:
        return pulumi.get(self, "instance_count")

    @instance_count.setter
    def instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "instance_count", value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="autoScalingPolicy")
    def auto_scaling_policy(self) -> Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']]:
        return pulumi.get(self, "auto_scaling_policy")

    @auto_scaling_policy.setter
    def auto_scaling_policy(self, value: Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']]):
        pulumi.set(self, "auto_scaling_policy", value)

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bid_price")

    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_ami_id")

    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['ClusterEbsConfigurationArgs']]:
        return pulumi.get(self, "ebs_configuration")

    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['ClusterEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)

    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market")

    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ClusterInstanceTypeConfigArgs:
    """Input args for one candidate instance type within an instance fleet.

    ``instance_type`` is required; pricing, configurations, AMI, EBS, and
    weighted capacity are optional.
    """

    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 bid_price: Optional[pulumi.Input[str]] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[pulumi.Input[float]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['ClusterEbsConfigurationArgs']] = None,
                 weighted_capacity: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "instance_type", instance_type)
        # Optional fields are skipped when left unset.
        for key, value in (("bid_price", bid_price),
                           ("bid_price_as_percentage_of_on_demand_price",
                            bid_price_as_percentage_of_on_demand_price),
                           ("configurations", configurations),
                           ("custom_ami_id", custom_ami_id),
                           ("ebs_configuration", ebs_configuration),
                           ("weighted_capacity", weighted_capacity)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bid_price")

    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)

    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")

    @bid_price_as_percentage_of_on_demand_price.setter
    def bid_price_as_percentage_of_on_demand_price(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "bid_price_as_percentage_of_on_demand_price", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_ami_id")

    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['ClusterEbsConfigurationArgs']]:
        return pulumi.get(self, "ebs_configuration")

    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['ClusterEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)

    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "weighted_capacity")

    @weighted_capacity.setter
    def weighted_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class ClusterJobFlowInstancesConfigArgs:
def __init__(__self__, *,
additional_master_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
additional_slave_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
core_instance_fleet: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']] = None,
core_instance_group: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']] = None,
ec2_key_name: Optional[pulumi.Input[str]] = None,
ec2_subnet_id: Optional[pulumi.Input[str]] = None,
ec2_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
emr_managed_master_security_group: Optional[pulumi.Input[str]] = None,
emr_managed_slave_security_group: Optional[pulumi.Input[str]] = None,
hadoop_version: Optional[pulumi.Input[str]] = None,
keep_job_flow_alive_when_no_steps: Optional[pulumi.Input[bool]] = None,
master_instance_fleet: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']] = None,
master_instance_group: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']] = None,
placement: Optional[pulumi.Input['ClusterPlacementTypeArgs']] = None,
service_access_security_group: Optional[pulumi.Input[str]] = None,
termination_protected: Optional[pulumi.Input[bool]] = None):
if additional_master_security_groups is not None:
pulumi.set(__self__, "additional_master_security_groups", additional_master_security_groups)
if additional_slave_security_groups is not None:
pulumi.set(__self__, "additional_slave_security_groups", additional_slave_security_groups)
if core_instance_fleet is not None:
pulumi.set(__self__, "core_instance_fleet", core_instance_fleet)
if core_instance_group is not None:
pulumi.set(__self__, "core_instance_group", core_instance_group)
if ec2_key_name is not None:
pulumi.set(__self__, "ec2_key_name", ec2_key_name)
if ec2_subnet_id is not None:
pulumi.set(__self__, "ec2_subnet_id", ec2_subnet_id)
if ec2_subnet_ids is not None:
pulumi.set(__self__, "ec2_subnet_ids", ec2_subnet_ids)
if emr_managed_master_security_group is not None:
pulumi.set(__self__, "emr_managed_master_security_group", emr_managed_master_security_group)
if emr_managed_slave_security_group is not None:
pulumi.set(__self__, "emr_managed_slave_security_group", emr_managed_slave_security_group)
if hadoop_version is not None:
pulumi.set(__self__, "hadoop_version", hadoop_version)
if keep_job_flow_alive_when_no_steps is not None:
pulumi.set(__self__, "keep_job_flow_alive_when_no_steps", keep_job_flow_alive_when_no_steps)
if master_instance_fleet is not None:
pulumi.set(__self__, "master_instance_fleet", master_instance_fleet)
if master_instance_group is not None:
pulumi.set(__self__, "master_instance_group", master_instance_group)
if placement is not None:
pulumi.set(__self__, "placement", placement)
if service_access_security_group is not None:
pulumi.set(__self__, "service_access_security_group", service_access_security_group)
if termination_protected is not None:
pulumi.set(__self__, "termination_protected", termination_protected)
@property
@pulumi.getter(name="additionalMasterSecurityGroups")
def additional_master_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "additional_master_security_groups")
@additional_master_security_groups.setter
def additional_master_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "additional_master_security_groups", value)
@property
@pulumi.getter(name="additionalSlaveSecurityGroups")
def additional_slave_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "additional_slave_security_groups")
@additional_slave_security_groups.setter
def additional_slave_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "additional_slave_security_groups", value)
@property
@pulumi.getter(name="coreInstanceFleet")
def core_instance_fleet(self) -> Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]:
return pulumi.get(self, "core_instance_fleet")
@core_instance_fleet.setter
def core_instance_fleet(self, value: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]):
pulumi.set(self, "core_instance_fleet", value)
@property
@pulumi.getter(name="coreInstanceGroup")
def core_instance_group(self) -> Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]:
return pulumi.get(self, "core_instance_group")
@core_instance_group.setter
def core_instance_group(self, value: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]):
pulumi.set(self, "core_instance_group", value)
@property
@pulumi.getter(name="ec2KeyName")
def ec2_key_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ec2_key_name")
@ec2_key_name.setter
def ec2_key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ec2_key_name", value)
@property
@pulumi.getter(name="ec2SubnetId")
def ec2_subnet_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ec2_subnet_id")
@ec2_subnet_id.setter
def ec2_subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ec2_subnet_id", value)
@property
@pulumi.getter(name="ec2SubnetIds")
def ec2_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "ec2_subnet_ids")
@ec2_subnet_ids.setter
def ec2_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ec2_subnet_ids", value)
@property
@pulumi.getter(name="emrManagedMasterSecurityGroup")
def emr_managed_master_security_group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "emr_managed_master_security_group")
@emr_managed_master_security_group.setter
def emr_managed_master_security_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "emr_managed_master_security_group", value)
@property
@pulumi.getter(name="emrManagedSlaveSecurityGroup")
def emr_managed_slave_security_group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "emr_managed_slave_security_group")
@emr_managed_slave_security_group.setter
def emr_managed_slave_security_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "emr_managed_slave_security_group", value)
@property
@pulumi.getter(name="hadoopVersion")
def hadoop_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hadoop_version")
@hadoop_version.setter
def hadoop_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hadoop_version", value)
@property
@pulumi.getter(name="keepJobFlowAliveWhenNoSteps")
def keep_job_flow_alive_when_no_steps(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "keep_job_flow_alive_when_no_steps")
@keep_job_flow_alive_when_no_steps.setter
def keep_job_flow_alive_when_no_steps(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "keep_job_flow_alive_when_no_steps", value)
    @property
    @pulumi.getter(name="masterInstanceFleet")
    def master_instance_fleet(self) -> Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]:
        """Optional nested fleet config (wire name ``masterInstanceFleet``)."""
        return pulumi.get(self, "master_instance_fleet")
    @master_instance_fleet.setter
    def master_instance_fleet(self, value: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]):
        """Set the ``master_instance_fleet`` input property."""
        pulumi.set(self, "master_instance_fleet", value)
    @property
    @pulumi.getter(name="masterInstanceGroup")
    def master_instance_group(self) -> Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]:
        """Optional nested group config (wire name ``masterInstanceGroup``)."""
        return pulumi.get(self, "master_instance_group")
    @master_instance_group.setter
    def master_instance_group(self, value: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]):
        """Set the ``master_instance_group`` input property."""
        pulumi.set(self, "master_instance_group", value)
    @property
    @pulumi.getter
    def placement(self) -> Optional[pulumi.Input['ClusterPlacementTypeArgs']]:
        """Optional placement configuration."""
        return pulumi.get(self, "placement")
    @placement.setter
    def placement(self, value: Optional[pulumi.Input['ClusterPlacementTypeArgs']]):
        """Set the ``placement`` input property."""
        pulumi.set(self, "placement", value)
    @property
    @pulumi.getter(name="serviceAccessSecurityGroup")
    def service_access_security_group(self) -> Optional[pulumi.Input[str]]:
        """Optional security group ID (wire name ``serviceAccessSecurityGroup``)."""
        return pulumi.get(self, "service_access_security_group")
    @service_access_security_group.setter
    def service_access_security_group(self, value: Optional[pulumi.Input[str]]):
        """Set the ``service_access_security_group`` input property."""
        pulumi.set(self, "service_access_security_group", value)
    @property
    @pulumi.getter(name="terminationProtected")
    def termination_protected(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean flag (wire name ``terminationProtected``)."""
        return pulumi.get(self, "termination_protected")
    @termination_protected.setter
    def termination_protected(self, value: Optional[pulumi.Input[bool]]):
        """Set the ``termination_protected`` input property."""
        pulumi.set(self, "termination_protected", value)
@pulumi.input_type
class ClusterKerberosAttributesArgs:
    """Kerberos attributes input type. ``kdc_admin_password`` and ``realm`` are
    required; the remaining fields are optional and only stored when a
    non-None value is supplied.
    """

    def __init__(__self__, *,
                 kdc_admin_password: pulumi.Input[str],
                 realm: pulumi.Input[str],
                 a_d_domain_join_password: Optional[pulumi.Input[str]] = None,
                 a_d_domain_join_user: Optional[pulumi.Input[str]] = None,
                 cross_realm_trust_principal_password: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "kdc_admin_password", kdc_admin_password)
        pulumi.set(__self__, "realm", realm)
        # Optional fields are recorded only when explicitly provided.
        if a_d_domain_join_password is not None:
            pulumi.set(__self__, "a_d_domain_join_password", a_d_domain_join_password)
        if a_d_domain_join_user is not None:
            pulumi.set(__self__, "a_d_domain_join_user", a_d_domain_join_user)
        if cross_realm_trust_principal_password is not None:
            pulumi.set(__self__, "cross_realm_trust_principal_password", cross_realm_trust_principal_password)

    @property
    @pulumi.getter(name="kdcAdminPassword")
    def kdc_admin_password(self) -> pulumi.Input[str]:
        """Required (wire name ``kdcAdminPassword``)."""
        return pulumi.get(self, "kdc_admin_password")

    @kdc_admin_password.setter
    def kdc_admin_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "kdc_admin_password", value)

    @property
    @pulumi.getter
    def realm(self) -> pulumi.Input[str]:
        """Required Kerberos realm."""
        return pulumi.get(self, "realm")

    @realm.setter
    def realm(self, value: pulumi.Input[str]):
        pulumi.set(self, "realm", value)

    @property
    @pulumi.getter(name="aDDomainJoinPassword")
    def a_d_domain_join_password(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``aDDomainJoinPassword``)."""
        return pulumi.get(self, "a_d_domain_join_password")

    @a_d_domain_join_password.setter
    def a_d_domain_join_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "a_d_domain_join_password", value)

    @property
    @pulumi.getter(name="aDDomainJoinUser")
    def a_d_domain_join_user(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``aDDomainJoinUser``)."""
        return pulumi.get(self, "a_d_domain_join_user")

    @a_d_domain_join_user.setter
    def a_d_domain_join_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "a_d_domain_join_user", value)

    @property
    @pulumi.getter(name="crossRealmTrustPrincipalPassword")
    def cross_realm_trust_principal_password(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``crossRealmTrustPrincipalPassword``)."""
        return pulumi.get(self, "cross_realm_trust_principal_password")

    @cross_realm_trust_principal_password.setter
    def cross_realm_trust_principal_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cross_realm_trust_principal_password", value)
@pulumi.input_type
class ClusterKeyValueArgs:
    """Key/value pair input type; both fields are optional and only stored
    when a non-None value is supplied.
    """

    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """Optional key string."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """Optional value string."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterManagedScalingPolicyArgs:
    """Managed scaling policy input type wrapping an optional
    ``compute_limits`` nested config.
    """

    def __init__(__self__, *,
                 compute_limits: Optional[pulumi.Input['ClusterComputeLimitsArgs']] = None):
        if compute_limits is not None:
            pulumi.set(__self__, "compute_limits", compute_limits)

    @property
    @pulumi.getter(name="computeLimits")
    def compute_limits(self) -> Optional[pulumi.Input['ClusterComputeLimitsArgs']]:
        """Optional nested compute limits (wire name ``computeLimits``)."""
        return pulumi.get(self, "compute_limits")

    @compute_limits.setter
    def compute_limits(self, value: Optional[pulumi.Input['ClusterComputeLimitsArgs']]):
        pulumi.set(self, "compute_limits", value)
@pulumi.input_type
class ClusterMetricDimensionArgs:
    """Metric dimension input type; ``key`` and ``value`` are both required."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """Required dimension key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """Required dimension value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterOnDemandProvisioningSpecificationArgs:
    """On-demand provisioning specification; the single field
    ``allocation_strategy`` is required.
    """

    def __init__(__self__, *,
                 allocation_strategy: pulumi.Input[str]):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> pulumi.Input[str]:
        """Required (wire name ``allocationStrategy``)."""
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "allocation_strategy", value)
@pulumi.input_type
class ClusterPlacementTypeArgs:
    """Placement input type; the single field ``availability_zone`` is
    required.
    """

    def __init__(__self__, *,
                 availability_zone: pulumi.Input[str]):
        pulumi.set(__self__, "availability_zone", availability_zone)

    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> pulumi.Input[str]:
        """Required (wire name ``availabilityZone``)."""
        return pulumi.get(self, "availability_zone")

    @availability_zone.setter
    def availability_zone(self, value: pulumi.Input[str]):
        pulumi.set(self, "availability_zone", value)
@pulumi.input_type
class ClusterScalingActionArgs:
    """Scaling action input type; ``simple_scaling_policy_configuration`` is
    required, ``market`` is optional.
    """

    def __init__(__self__, *,
                 simple_scaling_policy_configuration: pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs'],
                 market: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)

    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs']:
        """Required nested config (wire name ``simpleScalingPolicyConfiguration``)."""
        return pulumi.get(self, "simple_scaling_policy_configuration")

    @simple_scaling_policy_configuration.setter
    def simple_scaling_policy_configuration(self, value: pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs']):
        pulumi.set(self, "simple_scaling_policy_configuration", value)

    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        """Optional market string."""
        return pulumi.get(self, "market")

    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)
@pulumi.input_type
class ClusterScalingConstraintsArgs:
    """Scaling constraints input type; ``max_capacity`` and ``min_capacity``
    are both required integers.
    """

    def __init__(__self__, *,
                 max_capacity: pulumi.Input[int],
                 min_capacity: pulumi.Input[int]):
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Input[int]:
        """Required (wire name ``maxCapacity``)."""
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Input[int]:
        """Required (wire name ``minCapacity``)."""
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class ClusterScalingRuleArgs:
    """Scaling rule input type; ``action``, ``name`` and ``trigger`` are
    required, ``description`` is optional.
    """

    def __init__(__self__, *,
                 action: pulumi.Input['ClusterScalingActionArgs'],
                 name: pulumi.Input[str],
                 trigger: pulumi.Input['ClusterScalingTriggerArgs'],
                 description: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['ClusterScalingActionArgs']:
        """Required nested scaling action."""
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['ClusterScalingActionArgs']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Required rule name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def trigger(self) -> pulumi.Input['ClusterScalingTriggerArgs']:
        """Required nested scaling trigger."""
        return pulumi.get(self, "trigger")

    @trigger.setter
    def trigger(self, value: pulumi.Input['ClusterScalingTriggerArgs']):
        pulumi.set(self, "trigger", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Optional description string."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class ClusterScalingTriggerArgs:
    """Scaling trigger input type wrapping a required
    ``cloud_watch_alarm_definition`` nested config.
    """

    def __init__(__self__, *,
                 cloud_watch_alarm_definition: pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)

    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']:
        """Required nested config (wire name ``cloudWatchAlarmDefinition``)."""
        return pulumi.get(self, "cloud_watch_alarm_definition")

    @cloud_watch_alarm_definition.setter
    def cloud_watch_alarm_definition(self, value: pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']):
        pulumi.set(self, "cloud_watch_alarm_definition", value)
@pulumi.input_type
class ClusterScriptBootstrapActionConfigArgs:
    """Script bootstrap action input type; ``path`` is required, ``args`` is
    an optional sequence of strings.
    """

    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        pulumi.set(__self__, "path", path)
        if args is not None:
            pulumi.set(__self__, "args", args)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """Required script path."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Optional script arguments."""
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)
@pulumi.input_type
class ClusterSimpleScalingPolicyConfigurationArgs:
    """Simple scaling policy configuration; ``scaling_adjustment`` is
    required, ``adjustment_type`` and ``cool_down`` are optional.
    """

    def __init__(__self__, *,
                 scaling_adjustment: pulumi.Input[int],
                 adjustment_type: Optional[pulumi.Input[str]] = None,
                 cool_down: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)

    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> pulumi.Input[int]:
        """Required (wire name ``scalingAdjustment``)."""
        return pulumi.get(self, "scaling_adjustment")

    @scaling_adjustment.setter
    def scaling_adjustment(self, value: pulumi.Input[int]):
        pulumi.set(self, "scaling_adjustment", value)

    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``adjustmentType``)."""
        return pulumi.get(self, "adjustment_type")

    @adjustment_type.setter
    def adjustment_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_type", value)

    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``coolDown``)."""
        return pulumi.get(self, "cool_down")

    @cool_down.setter
    def cool_down(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cool_down", value)
@pulumi.input_type
class ClusterSpotProvisioningSpecificationArgs:
    """Spot provisioning specification; ``timeout_action`` and
    ``timeout_duration_minutes`` are required, ``allocation_strategy`` and
    ``block_duration_minutes`` are optional.
    """

    def __init__(__self__, *,
                 timeout_action: pulumi.Input[str],
                 timeout_duration_minutes: pulumi.Input[int],
                 allocation_strategy: Optional[pulumi.Input[str]] = None,
                 block_duration_minutes: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)

    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> pulumi.Input[str]:
        """Required (wire name ``timeoutAction``)."""
        return pulumi.get(self, "timeout_action")

    @timeout_action.setter
    def timeout_action(self, value: pulumi.Input[str]):
        pulumi.set(self, "timeout_action", value)

    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> pulumi.Input[int]:
        """Required (wire name ``timeoutDurationMinutes``)."""
        return pulumi.get(self, "timeout_duration_minutes")

    @timeout_duration_minutes.setter
    def timeout_duration_minutes(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_duration_minutes", value)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``allocationStrategy``)."""
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_strategy", value)

    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``blockDurationMinutes``)."""
        return pulumi.get(self, "block_duration_minutes")

    @block_duration_minutes.setter
    def block_duration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "block_duration_minutes", value)
@pulumi.input_type
class ClusterStepConfigArgs:
    """Step configuration input type; ``hadoop_jar_step`` and ``name`` are
    required, ``action_on_failure`` is optional.
    """

    def __init__(__self__, *,
                 hadoop_jar_step: pulumi.Input['ClusterHadoopJarStepConfigArgs'],
                 name: pulumi.Input[str],
                 action_on_failure: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "hadoop_jar_step", hadoop_jar_step)
        pulumi.set(__self__, "name", name)
        if action_on_failure is not None:
            pulumi.set(__self__, "action_on_failure", action_on_failure)

    @property
    @pulumi.getter(name="hadoopJarStep")
    def hadoop_jar_step(self) -> pulumi.Input['ClusterHadoopJarStepConfigArgs']:
        """Required nested config (wire name ``hadoopJarStep``)."""
        return pulumi.get(self, "hadoop_jar_step")

    @hadoop_jar_step.setter
    def hadoop_jar_step(self, value: pulumi.Input['ClusterHadoopJarStepConfigArgs']):
        pulumi.set(self, "hadoop_jar_step", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Required step name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="actionOnFailure")
    def action_on_failure(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``actionOnFailure``)."""
        return pulumi.get(self, "action_on_failure")

    @action_on_failure.setter
    def action_on_failure(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_on_failure", value)
@pulumi.input_type
class ClusterTagArgs:
    """Tag input type; ``key`` and ``value`` are both required."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """Required tag key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """Required tag value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterVolumeSpecificationArgs:
    """Volume specification input type; ``size_in_gb`` and ``volume_type``
    are required, ``iops`` is optional.
    """

    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        """Required (wire name ``sizeInGB``)."""
        return pulumi.get(self, "size_in_gb")

    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        """Required (wire name ``volumeType``)."""
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """Optional IOPS value."""
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class InstanceFleetConfigConfigurationArgs:
    """Recursive configuration input type: all fields are optional, and
    ``configurations`` nests further instances of this same type.
    """

    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]] = None):
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)

    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        """Optional classification string."""
        return pulumi.get(self, "classification")

    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        """Optional untyped properties (wire name ``configurationProperties``)."""
        return pulumi.get(self, "configuration_properties")

    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]:
        """Optional nested configurations (recursive)."""
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class InstanceFleetConfigEbsBlockDeviceConfigArgs:
    """EBS block device config; ``volume_specification`` is required,
    ``volumes_per_instance`` is optional.
    """

    def __init__(__self__, *,
                 volume_specification: pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs']:
        """Required nested config (wire name ``volumeSpecification``)."""
        return pulumi.get(self, "volume_specification")

    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``volumesPerInstance``)."""
        return pulumi.get(self, "volumes_per_instance")

    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class InstanceFleetConfigEbsConfigurationArgs:
    """EBS configuration input type; both fields are optional and only stored
    when a non-None value is supplied.
    """

    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]]:
        """Optional nested configs (wire name ``ebsBlockDeviceConfigs``)."""
        return pulumi.get(self, "ebs_block_device_configs")

    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        """Optional flag (wire name ``ebsOptimized``)."""
        return pulumi.get(self, "ebs_optimized")

    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class InstanceFleetConfigInstanceFleetProvisioningSpecificationsArgs:
    """Fleet provisioning specifications; both nested specs are optional and
    only stored when a non-None value is supplied.
    """

    def __init__(__self__, *,
                 on_demand_specification: Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']] = None,
                 spot_specification: Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']] = None):
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)

    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']]:
        """Optional nested spec (wire name ``onDemandSpecification``)."""
        return pulumi.get(self, "on_demand_specification")

    @on_demand_specification.setter
    def on_demand_specification(self, value: Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']]):
        pulumi.set(self, "on_demand_specification", value)

    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']]:
        """Optional nested spec (wire name ``spotSpecification``)."""
        return pulumi.get(self, "spot_specification")

    @spot_specification.setter
    def spot_specification(self, value: Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']]):
        pulumi.set(self, "spot_specification", value)
@pulumi.input_type
class InstanceFleetConfigInstanceTypeConfigArgs:
    """Instance type configuration; ``instance_type`` is required, all other
    fields are optional and only stored when a non-None value is supplied.
    """

    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 bid_price: Optional[pulumi.Input[str]] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[pulumi.Input[float]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']] = None,
                 weighted_capacity: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        """Required (wire name ``instanceType``)."""
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``bidPrice``)."""
        return pulumi.get(self, "bid_price")

    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)

    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[pulumi.Input[float]]:
        """Optional (wire name ``bidPriceAsPercentageOfOnDemandPrice``)."""
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")

    @bid_price_as_percentage_of_on_demand_price.setter
    def bid_price_as_percentage_of_on_demand_price(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "bid_price_as_percentage_of_on_demand_price", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]:
        """Optional nested configurations."""
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``customAmiId``)."""
        return pulumi.get(self, "custom_ami_id")

    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']]:
        """Optional nested config (wire name ``ebsConfiguration``)."""
        return pulumi.get(self, "ebs_configuration")

    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)

    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``weightedCapacity``)."""
        return pulumi.get(self, "weighted_capacity")

    @weighted_capacity.setter
    def weighted_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class InstanceFleetConfigOnDemandProvisioningSpecificationArgs:
    """On-demand provisioning specification; the single field
    ``allocation_strategy`` is required.
    """

    def __init__(__self__, *,
                 allocation_strategy: pulumi.Input[str]):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> pulumi.Input[str]:
        """Required (wire name ``allocationStrategy``)."""
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "allocation_strategy", value)
@pulumi.input_type
class InstanceFleetConfigSpotProvisioningSpecificationArgs:
    """Spot provisioning specification; ``timeout_action`` and
    ``timeout_duration_minutes`` are required, ``allocation_strategy`` and
    ``block_duration_minutes`` are optional.
    """

    def __init__(__self__, *,
                 timeout_action: pulumi.Input[str],
                 timeout_duration_minutes: pulumi.Input[int],
                 allocation_strategy: Optional[pulumi.Input[str]] = None,
                 block_duration_minutes: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)

    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> pulumi.Input[str]:
        """Required (wire name ``timeoutAction``)."""
        return pulumi.get(self, "timeout_action")

    @timeout_action.setter
    def timeout_action(self, value: pulumi.Input[str]):
        pulumi.set(self, "timeout_action", value)

    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> pulumi.Input[int]:
        """Required (wire name ``timeoutDurationMinutes``)."""
        return pulumi.get(self, "timeout_duration_minutes")

    @timeout_duration_minutes.setter
    def timeout_duration_minutes(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_duration_minutes", value)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input[str]]:
        """Optional (wire name ``allocationStrategy``)."""
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_strategy", value)

    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``blockDurationMinutes``)."""
        return pulumi.get(self, "block_duration_minutes")

    @block_duration_minutes.setter
    def block_duration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "block_duration_minutes", value)
@pulumi.input_type
class InstanceFleetConfigVolumeSpecificationArgs:
    """Volume specification input type; ``size_in_gb`` and ``volume_type``
    are required, ``iops`` is optional.
    """

    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        """Required (wire name ``sizeInGB``)."""
        return pulumi.get(self, "size_in_gb")

    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        """Required (wire name ``volumeType``)."""
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        """Optional IOPS value."""
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class InstanceGroupConfigAutoScalingPolicyArgs:
    """Auto-scaling policy input type; ``constraints`` and ``rules`` are
    both required.
    """

    def __init__(__self__, *,
                 constraints: pulumi.Input['InstanceGroupConfigScalingConstraintsArgs'],
                 rules: pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]):
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)

    @property
    @pulumi.getter
    def constraints(self) -> pulumi.Input['InstanceGroupConfigScalingConstraintsArgs']:
        """Required nested scaling constraints."""
        return pulumi.get(self, "constraints")

    @constraints.setter
    def constraints(self, value: pulumi.Input['InstanceGroupConfigScalingConstraintsArgs']):
        pulumi.set(self, "constraints", value)

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]:
        """Required sequence of scaling rules."""
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class InstanceGroupConfigCloudWatchAlarmDefinitionArgs:
    """CloudWatch alarm definition; ``comparison_operator``, ``metric_name``,
    ``period`` and ``threshold`` are required, the rest are optional and only
    stored when a non-None value is supplied.
    """

    def __init__(__self__, *,
                 comparison_operator: pulumi.Input[str],
                 metric_name: pulumi.Input[str],
                 period: pulumi.Input[int],
                 threshold: pulumi.Input[float],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> pulumi.Input[str]:
        """Required (wire name ``comparisonOperator``)."""
        return pulumi.get(self, "comparison_operator")

    @comparison_operator.setter
    def comparison_operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "comparison_operator", value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """Required (wire name ``metricName``)."""
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def period(self) -> pulumi.Input[int]:
        """Required period value."""
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: pulumi.Input[int]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """Required alarm threshold."""
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]]:
        """Optional metric dimensions."""
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """Optional (wire name ``evaluationPeriods``)."""
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """Optional metric namespace."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """Optional statistic name."""
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """Optional unit string."""
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class InstanceGroupConfigConfigurationArgs:
    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]] = None):
        """
        Application configuration entry for an instance group: an optional
        ``classification``, free-form ``configuration_properties``, and
        recursively nested child ``configurations`` of the same type.
        All fields are optional and only stored when provided.
        """
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)

    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "classification")

    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")

    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class InstanceGroupConfigEbsBlockDeviceConfigArgs:
    def __init__(__self__, *,
                 volume_specification: pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        EBS block-device configuration for an instance group: a required
        ``volume_specification`` and an optional ``volumes_per_instance``
        count (stored only when provided).
        """
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs']:
        return pulumi.get(self, "volume_specification")

    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volumes_per_instance")

    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class InstanceGroupConfigEbsConfigurationArgs:
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        """
        EBS configuration for an instance group: an optional list of
        block-device configs and an optional ``ebs_optimized`` flag.
        Both fields are stored only when provided.
        """
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]]:
        return pulumi.get(self, "ebs_block_device_configs")

    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ebs_optimized")

    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class InstanceGroupConfigMetricDimensionArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A CloudWatch metric dimension as a required key/value string pair,
        used by the CloudWatch alarm definition's ``dimensions`` list.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class InstanceGroupConfigScalingActionArgs:
    def __init__(__self__, *,
                 simple_scaling_policy_configuration: pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs'],
                 market: Optional[pulumi.Input[str]] = None):
        """
        Scaling action for an instance-group scaling rule: a required
        simple-scaling-policy configuration plus an optional ``market``
        string (stored only when provided).
        """
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)

    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs']:
        return pulumi.get(self, "simple_scaling_policy_configuration")

    @simple_scaling_policy_configuration.setter
    def simple_scaling_policy_configuration(self, value: pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs']):
        pulumi.set(self, "simple_scaling_policy_configuration", value)

    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market")

    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)
@pulumi.input_type
class InstanceGroupConfigScalingConstraintsArgs:
    def __init__(__self__, *,
                 max_capacity: pulumi.Input[int],
                 min_capacity: pulumi.Input[int]):
        """
        Capacity bounds for an instance-group auto-scaling policy:
        required ``max_capacity`` and ``min_capacity`` integers.
        """
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class InstanceGroupConfigScalingRuleArgs:
    def __init__(__self__, *,
                 action: pulumi.Input['InstanceGroupConfigScalingActionArgs'],
                 name: pulumi.Input[str],
                 trigger: pulumi.Input['InstanceGroupConfigScalingTriggerArgs'],
                 description: Optional[pulumi.Input[str]] = None):
        """
        One rule of an instance-group auto-scaling policy: a required
        ``action``, ``name`` and ``trigger``, and an optional
        ``description`` (stored only when provided).
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['InstanceGroupConfigScalingActionArgs']:
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['InstanceGroupConfigScalingActionArgs']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def trigger(self) -> pulumi.Input['InstanceGroupConfigScalingTriggerArgs']:
        return pulumi.get(self, "trigger")

    @trigger.setter
    def trigger(self, value: pulumi.Input['InstanceGroupConfigScalingTriggerArgs']):
        pulumi.set(self, "trigger", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceGroupConfigScalingTriggerArgs:
    def __init__(__self__, *,
                 cloud_watch_alarm_definition: pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']):
        """
        Trigger for an instance-group scaling rule: wraps a single required
        CloudWatch alarm definition.
        """
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)

    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']:
        return pulumi.get(self, "cloud_watch_alarm_definition")

    @cloud_watch_alarm_definition.setter
    def cloud_watch_alarm_definition(self, value: pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']):
        pulumi.set(self, "cloud_watch_alarm_definition", value)
@pulumi.input_type
class InstanceGroupConfigSimpleScalingPolicyConfigurationArgs:
    def __init__(__self__, *,
                 scaling_adjustment: pulumi.Input[int],
                 adjustment_type: Optional[pulumi.Input[str]] = None,
                 cool_down: Optional[pulumi.Input[int]] = None):
        """
        Simple scaling policy for an instance-group scaling action: a
        required ``scaling_adjustment`` plus optional ``adjustment_type``
        and ``cool_down`` (stored only when provided).
        """
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)

    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> pulumi.Input[int]:
        return pulumi.get(self, "scaling_adjustment")

    @scaling_adjustment.setter
    def scaling_adjustment(self, value: pulumi.Input[int]):
        pulumi.set(self, "scaling_adjustment", value)

    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "adjustment_type")

    @adjustment_type.setter
    def adjustment_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_type", value)

    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cool_down")

    @cool_down.setter
    def cool_down(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cool_down", value)
@pulumi.input_type
class InstanceGroupConfigVolumeSpecificationArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        """
        EBS volume specification: required ``size_in_gb`` and
        ``volume_type``, plus optional ``iops`` (stored only when provided).
        Note the wire name for ``size_in_gb`` is ``sizeInGB`` (capital GB).
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        return pulumi.get(self, "size_in_gb")

    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class StepHadoopJarStepConfigArgs:
    def __init__(__self__, *,
                 jar: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 main_class: Optional[pulumi.Input[str]] = None,
                 step_properties: Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]] = None):
        """
        Hadoop JAR step configuration: a required ``jar`` path, with
        optional ``args``, ``main_class`` and ``step_properties``
        (each stored only when provided).
        """
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)

    @property
    @pulumi.getter
    def jar(self) -> pulumi.Input[str]:
        return pulumi.get(self, "jar")

    @jar.setter
    def jar(self, value: pulumi.Input[str]):
        pulumi.set(self, "jar", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "main_class")

    @main_class.setter
    def main_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "main_class", value)

    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]]:
        return pulumi.get(self, "step_properties")

    @step_properties.setter
    def step_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]]):
        pulumi.set(self, "step_properties", value)
@pulumi.input_type
class StepKeyValueArgs:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Optional key/value string pair used by ``step_properties``;
        unlike the metric-dimension pair, both fields are optional and
        stored only when provided.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class StudioTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        An arbitrary set of tags (key-value pairs) for this EMR Studio.
        :param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param pulumi.Input[str] value: The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        # Fix: extraction residue ("| sdk/python/pulumi_aws_native/emr/_inputs.py |")
        # was appended to this statement, making it a syntax error; removed.
        pulumi.set(self, "value", value)
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ClusterApplicationArgs',
'ClusterAutoScalingPolicyArgs',
'ClusterBootstrapActionConfigArgs',
'ClusterCloudWatchAlarmDefinitionArgs',
'ClusterComputeLimitsArgs',
'ClusterConfigurationArgs',
'ClusterEbsBlockDeviceConfigArgs',
'ClusterEbsConfigurationArgs',
'ClusterHadoopJarStepConfigArgs',
'ClusterInstanceFleetConfigArgs',
'ClusterInstanceFleetProvisioningSpecificationsArgs',
'ClusterInstanceGroupConfigArgs',
'ClusterInstanceTypeConfigArgs',
'ClusterJobFlowInstancesConfigArgs',
'ClusterKerberosAttributesArgs',
'ClusterKeyValueArgs',
'ClusterManagedScalingPolicyArgs',
'ClusterMetricDimensionArgs',
'ClusterOnDemandProvisioningSpecificationArgs',
'ClusterPlacementTypeArgs',
'ClusterScalingActionArgs',
'ClusterScalingConstraintsArgs',
'ClusterScalingRuleArgs',
'ClusterScalingTriggerArgs',
'ClusterScriptBootstrapActionConfigArgs',
'ClusterSimpleScalingPolicyConfigurationArgs',
'ClusterSpotProvisioningSpecificationArgs',
'ClusterStepConfigArgs',
'ClusterTagArgs',
'ClusterVolumeSpecificationArgs',
'InstanceFleetConfigConfigurationArgs',
'InstanceFleetConfigEbsBlockDeviceConfigArgs',
'InstanceFleetConfigEbsConfigurationArgs',
'InstanceFleetConfigInstanceFleetProvisioningSpecificationsArgs',
'InstanceFleetConfigInstanceTypeConfigArgs',
'InstanceFleetConfigOnDemandProvisioningSpecificationArgs',
'InstanceFleetConfigSpotProvisioningSpecificationArgs',
'InstanceFleetConfigVolumeSpecificationArgs',
'InstanceGroupConfigAutoScalingPolicyArgs',
'InstanceGroupConfigCloudWatchAlarmDefinitionArgs',
'InstanceGroupConfigConfigurationArgs',
'InstanceGroupConfigEbsBlockDeviceConfigArgs',
'InstanceGroupConfigEbsConfigurationArgs',
'InstanceGroupConfigMetricDimensionArgs',
'InstanceGroupConfigScalingActionArgs',
'InstanceGroupConfigScalingConstraintsArgs',
'InstanceGroupConfigScalingRuleArgs',
'InstanceGroupConfigScalingTriggerArgs',
'InstanceGroupConfigSimpleScalingPolicyConfigurationArgs',
'InstanceGroupConfigVolumeSpecificationArgs',
'StepHadoopJarStepConfigArgs',
'StepKeyValueArgs',
'StudioTagArgs',
]
@pulumi.input_type
class ClusterApplicationArgs:
    def __init__(__self__, *,
                 additional_info: Optional[Any] = None,
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        Application entry for a cluster: optional ``additional_info``,
        ``args``, ``name`` and ``version`` — each stored only when
        explicitly provided.
        """
        if additional_info is not None:
            pulumi.set(__self__, "additional_info", additional_info)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Optional[Any]:
        return pulumi.get(self, "additional_info")

    @additional_info.setter
    def additional_info(self, value: Optional[Any]):
        pulumi.set(self, "additional_info", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class ClusterAutoScalingPolicyArgs:
    def __init__(__self__, *,
                 constraints: pulumi.Input['ClusterScalingConstraintsArgs'],
                 rules: pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]):
        """
        Auto-scaling policy for a cluster: required capacity
        ``constraints`` and a required list of scaling ``rules``.
        """
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)

    @property
    @pulumi.getter
    def constraints(self) -> pulumi.Input['ClusterScalingConstraintsArgs']:
        return pulumi.get(self, "constraints")

    @constraints.setter
    def constraints(self, value: pulumi.Input['ClusterScalingConstraintsArgs']):
        pulumi.set(self, "constraints", value)

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]:
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterScalingRuleArgs']]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class ClusterBootstrapActionConfigArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 script_bootstrap_action: pulumi.Input['ClusterScriptBootstrapActionConfigArgs']):
        """
        Bootstrap action for a cluster: a required ``name`` and the
        required script bootstrap-action configuration it runs.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "script_bootstrap_action", script_bootstrap_action)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="scriptBootstrapAction")
    def script_bootstrap_action(self) -> pulumi.Input['ClusterScriptBootstrapActionConfigArgs']:
        return pulumi.get(self, "script_bootstrap_action")

    @script_bootstrap_action.setter
    def script_bootstrap_action(self, value: pulumi.Input['ClusterScriptBootstrapActionConfigArgs']):
        pulumi.set(self, "script_bootstrap_action", value)
@pulumi.input_type
class ClusterCloudWatchAlarmDefinitionArgs:
    def __init__(__self__, *,
                 comparison_operator: pulumi.Input[str],
                 metric_name: pulumi.Input[str],
                 period: pulumi.Input[int],
                 threshold: pulumi.Input[float],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        CloudWatch alarm definition used by a cluster scaling trigger:
        the named metric is compared against ``threshold`` with
        ``comparison_operator`` over each ``period``.

        ``comparison_operator``, ``metric_name``, ``period`` and
        ``threshold`` are required; the rest are stored only when provided.
        """
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        # Optional fields are set only when given, so unset keys stay absent
        # from the resource inputs rather than being serialized as None.
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> pulumi.Input[str]:
        return pulumi.get(self, "comparison_operator")

    @comparison_operator.setter
    def comparison_operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "comparison_operator", value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def period(self) -> pulumi.Input[int]:
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: pulumi.Input[int]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]]:
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ClusterComputeLimitsArgs:
    def __init__(__self__, *,
                 maximum_capacity_units: pulumi.Input[int],
                 minimum_capacity_units: pulumi.Input[int],
                 unit_type: pulumi.Input[str],
                 maximum_core_capacity_units: Optional[pulumi.Input[int]] = None,
                 maximum_on_demand_capacity_units: Optional[pulumi.Input[int]] = None):
        """
        Compute limits for a cluster's managed scaling policy: required
        minimum/maximum capacity units and their ``unit_type``, plus
        optional core and on-demand caps (stored only when provided).
        """
        pulumi.set(__self__, "maximum_capacity_units", maximum_capacity_units)
        pulumi.set(__self__, "minimum_capacity_units", minimum_capacity_units)
        pulumi.set(__self__, "unit_type", unit_type)
        if maximum_core_capacity_units is not None:
            pulumi.set(__self__, "maximum_core_capacity_units", maximum_core_capacity_units)
        if maximum_on_demand_capacity_units is not None:
            pulumi.set(__self__, "maximum_on_demand_capacity_units", maximum_on_demand_capacity_units)

    @property
    @pulumi.getter(name="maximumCapacityUnits")
    def maximum_capacity_units(self) -> pulumi.Input[int]:
        return pulumi.get(self, "maximum_capacity_units")

    @maximum_capacity_units.setter
    def maximum_capacity_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "maximum_capacity_units", value)

    @property
    @pulumi.getter(name="minimumCapacityUnits")
    def minimum_capacity_units(self) -> pulumi.Input[int]:
        return pulumi.get(self, "minimum_capacity_units")

    @minimum_capacity_units.setter
    def minimum_capacity_units(self, value: pulumi.Input[int]):
        pulumi.set(self, "minimum_capacity_units", value)

    @property
    @pulumi.getter(name="unitType")
    def unit_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "unit_type")

    @unit_type.setter
    def unit_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "unit_type", value)

    @property
    @pulumi.getter(name="maximumCoreCapacityUnits")
    def maximum_core_capacity_units(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_core_capacity_units")

    @maximum_core_capacity_units.setter
    def maximum_core_capacity_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_core_capacity_units", value)

    @property
    @pulumi.getter(name="maximumOnDemandCapacityUnits")
    def maximum_on_demand_capacity_units(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_on_demand_capacity_units")

    @maximum_on_demand_capacity_units.setter
    def maximum_on_demand_capacity_units(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_on_demand_capacity_units", value)
@pulumi.input_type
class ClusterConfigurationArgs:
    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None):
        """
        Application configuration entry for a cluster: an optional
        ``classification``, free-form ``configuration_properties``, and
        recursively nested child ``configurations`` of the same type.
        All fields are optional and only stored when provided.
        """
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)

    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "classification")

    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")

    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)

    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")

    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class ClusterEbsBlockDeviceConfigArgs:
    def __init__(__self__, *,
                 volume_specification: pulumi.Input['ClusterVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        EBS block-device configuration for a cluster: a required
        ``volume_specification`` and an optional ``volumes_per_instance``
        count (stored only when provided).
        """
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['ClusterVolumeSpecificationArgs']:
        return pulumi.get(self, "volume_specification")

    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['ClusterVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volumes_per_instance")

    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class ClusterEbsConfigurationArgs:
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        """
        EBS configuration for a cluster: an optional list of block-device
        configs and an optional ``ebs_optimized`` flag. Both fields are
        stored only when provided.
        """
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]]:
        return pulumi.get(self, "ebs_block_device_configs")

    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ebs_optimized")

    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class ClusterHadoopJarStepConfigArgs:
    def __init__(__self__, *,
                 jar: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 main_class: Optional[pulumi.Input[str]] = None,
                 step_properties: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]] = None):
        """
        Hadoop JAR step configuration for a cluster step: a required
        ``jar`` path, with optional ``args``, ``main_class`` and
        ``step_properties`` (each stored only when provided).
        """
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)

    @property
    @pulumi.getter
    def jar(self) -> pulumi.Input[str]:
        return pulumi.get(self, "jar")

    @jar.setter
    def jar(self, value: pulumi.Input[str]):
        pulumi.set(self, "jar", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "main_class")

    @main_class.setter
    def main_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "main_class", value)

    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]]:
        return pulumi.get(self, "step_properties")

    @step_properties.setter
    def step_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterKeyValueArgs']]]]):
        pulumi.set(self, "step_properties", value)
@pulumi.input_type
class ClusterInstanceFleetConfigArgs:
    """Generated Pulumi input type for an EMR cluster instance-fleet configuration.

    All arguments are optional; a field is recorded only when the caller
    passes a non-``None`` value. Accessors are boilerplate ``pulumi.get`` /
    ``pulumi.set`` pairs consumed by ``@pulumi.input_type``.
    """
    def __init__(__self__, *,
                 instance_type_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]] = None,
                 launch_specifications: Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 target_on_demand_capacity: Optional[pulumi.Input[int]] = None,
                 target_spot_capacity: Optional[pulumi.Input[int]] = None):
        """
        :param instance_type_configs: Optional per-instance-type configurations.
        :param launch_specifications: Optional provisioning specifications.
        :param name: Optional fleet name.
        :param target_on_demand_capacity: Optional on-demand capacity target.
        :param target_spot_capacity: Optional spot capacity target.
        """
        if instance_type_configs is not None:
            pulumi.set(__self__, "instance_type_configs", instance_type_configs)
        if launch_specifications is not None:
            pulumi.set(__self__, "launch_specifications", launch_specifications)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if target_on_demand_capacity is not None:
            pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
        if target_spot_capacity is not None:
            pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
    @property
    @pulumi.getter(name="instanceTypeConfigs")
    def instance_type_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]]:
        return pulumi.get(self, "instance_type_configs")
    @instance_type_configs.setter
    def instance_type_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInstanceTypeConfigArgs']]]]):
        pulumi.set(self, "instance_type_configs", value)
    @property
    @pulumi.getter(name="launchSpecifications")
    def launch_specifications(self) -> Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']]:
        return pulumi.get(self, "launch_specifications")
    @launch_specifications.setter
    def launch_specifications(self, value: Optional[pulumi.Input['ClusterInstanceFleetProvisioningSpecificationsArgs']]):
        pulumi.set(self, "launch_specifications", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="targetOnDemandCapacity")
    def target_on_demand_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_on_demand_capacity")
    @target_on_demand_capacity.setter
    def target_on_demand_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_on_demand_capacity", value)
    @property
    @pulumi.getter(name="targetSpotCapacity")
    def target_spot_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_spot_capacity")
    @target_spot_capacity.setter
    def target_spot_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_spot_capacity", value)
@pulumi.input_type
class ClusterInstanceFleetProvisioningSpecificationsArgs:
    """Generated Pulumi input type pairing the optional on-demand and spot
    provisioning specifications of an EMR instance fleet.

    Both arguments are optional and stored only when non-``None``.
    """
    def __init__(__self__, *,
                 on_demand_specification: Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']] = None,
                 spot_specification: Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']] = None):
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)
    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']]:
        return pulumi.get(self, "on_demand_specification")
    @on_demand_specification.setter
    def on_demand_specification(self, value: Optional[pulumi.Input['ClusterOnDemandProvisioningSpecificationArgs']]):
        pulumi.set(self, "on_demand_specification", value)
    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']]:
        return pulumi.get(self, "spot_specification")
    @spot_specification.setter
    def spot_specification(self, value: Optional[pulumi.Input['ClusterSpotProvisioningSpecificationArgs']]):
        pulumi.set(self, "spot_specification", value)
@pulumi.input_type
class ClusterInstanceGroupConfigArgs:
    """Generated Pulumi input type for an EMR cluster instance-group configuration.

    ``instance_count`` and ``instance_type`` are required; every other
    argument is optional and recorded only when non-``None``. Accessors are
    boilerplate ``pulumi.get`` / ``pulumi.set`` pairs.
    """
    def __init__(__self__, *,
                 instance_count: pulumi.Input[int],
                 instance_type: pulumi.Input[str],
                 auto_scaling_policy: Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']] = None,
                 bid_price: Optional[pulumi.Input[str]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['ClusterEbsConfigurationArgs']] = None,
                 market: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        :param instance_count: Number of instances in the group (required).
        :param instance_type: Instance type of the group (required).
        :param auto_scaling_policy: Optional auto-scaling policy.
        :param bid_price: Optional bid price string.
        :param configurations: Optional application configurations.
        :param custom_ami_id: Optional custom AMI id.
        :param ebs_configuration: Optional EBS configuration.
        :param market: Optional market string.
        :param name: Optional group name.
        """
        pulumi.set(__self__, "instance_count", instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        if auto_scaling_policy is not None:
            pulumi.set(__self__, "auto_scaling_policy", auto_scaling_policy)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> pulumi.Input[int]:
        return pulumi.get(self, "instance_count")
    @instance_count.setter
    def instance_count(self, value: pulumi.Input[int]):
        pulumi.set(self, "instance_count", value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "instance_type")
    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)
    @property
    @pulumi.getter(name="autoScalingPolicy")
    def auto_scaling_policy(self) -> Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']]:
        return pulumi.get(self, "auto_scaling_policy")
    @auto_scaling_policy.setter
    def auto_scaling_policy(self, value: Optional[pulumi.Input['ClusterAutoScalingPolicyArgs']]):
        pulumi.set(self, "auto_scaling_policy", value)
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bid_price")
    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)
    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")
    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_ami_id")
    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['ClusterEbsConfigurationArgs']]:
        return pulumi.get(self, "ebs_configuration")
    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['ClusterEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)
    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market")
    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ClusterInstanceTypeConfigArgs:
    """Generated Pulumi input type for one instance-type entry of an EMR
    instance fleet.

    ``instance_type`` is required; all other arguments are optional and
    stored only when non-``None``.
    """
    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 bid_price: Optional[pulumi.Input[str]] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[pulumi.Input[float]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['ClusterEbsConfigurationArgs']] = None,
                 weighted_capacity: Optional[pulumi.Input[int]] = None):
        """
        :param instance_type: Instance type (required).
        :param bid_price: Optional bid price string.
        :param bid_price_as_percentage_of_on_demand_price: Optional bid price
            expressed as a percentage of the on-demand price.
        :param configurations: Optional application configurations.
        :param custom_ami_id: Optional custom AMI id.
        :param ebs_configuration: Optional EBS configuration.
        :param weighted_capacity: Optional capacity weight.
        """
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "instance_type")
    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bid_price")
    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)
    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
    @bid_price_as_percentage_of_on_demand_price.setter
    def bid_price_as_percentage_of_on_demand_price(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "bid_price_as_percentage_of_on_demand_price", value)
    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")
    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_ami_id")
    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['ClusterEbsConfigurationArgs']]:
        return pulumi.get(self, "ebs_configuration")
    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['ClusterEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "weighted_capacity")
    @weighted_capacity.setter
    def weighted_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class ClusterJobFlowInstancesConfigArgs:
    """Generated Pulumi input type for the EC2 instance layout of an EMR
    cluster (master/core fleets or groups, networking, security groups,
    keep-alive and termination-protection flags).

    Every argument is optional and recorded only when a non-``None`` value
    is supplied. All accessors are boilerplate ``pulumi.get`` /
    ``pulumi.set`` pairs introspected by ``@pulumi.input_type``.
    """
    def __init__(__self__, *,
                 additional_master_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 additional_slave_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 core_instance_fleet: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']] = None,
                 core_instance_group: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']] = None,
                 ec2_key_name: Optional[pulumi.Input[str]] = None,
                 ec2_subnet_id: Optional[pulumi.Input[str]] = None,
                 ec2_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 emr_managed_master_security_group: Optional[pulumi.Input[str]] = None,
                 emr_managed_slave_security_group: Optional[pulumi.Input[str]] = None,
                 hadoop_version: Optional[pulumi.Input[str]] = None,
                 keep_job_flow_alive_when_no_steps: Optional[pulumi.Input[bool]] = None,
                 master_instance_fleet: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']] = None,
                 master_instance_group: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']] = None,
                 placement: Optional[pulumi.Input['ClusterPlacementTypeArgs']] = None,
                 service_access_security_group: Optional[pulumi.Input[str]] = None,
                 termination_protected: Optional[pulumi.Input[bool]] = None):
        if additional_master_security_groups is not None:
            pulumi.set(__self__, "additional_master_security_groups", additional_master_security_groups)
        if additional_slave_security_groups is not None:
            pulumi.set(__self__, "additional_slave_security_groups", additional_slave_security_groups)
        if core_instance_fleet is not None:
            pulumi.set(__self__, "core_instance_fleet", core_instance_fleet)
        if core_instance_group is not None:
            pulumi.set(__self__, "core_instance_group", core_instance_group)
        if ec2_key_name is not None:
            pulumi.set(__self__, "ec2_key_name", ec2_key_name)
        if ec2_subnet_id is not None:
            pulumi.set(__self__, "ec2_subnet_id", ec2_subnet_id)
        if ec2_subnet_ids is not None:
            pulumi.set(__self__, "ec2_subnet_ids", ec2_subnet_ids)
        if emr_managed_master_security_group is not None:
            pulumi.set(__self__, "emr_managed_master_security_group", emr_managed_master_security_group)
        if emr_managed_slave_security_group is not None:
            pulumi.set(__self__, "emr_managed_slave_security_group", emr_managed_slave_security_group)
        if hadoop_version is not None:
            pulumi.set(__self__, "hadoop_version", hadoop_version)
        if keep_job_flow_alive_when_no_steps is not None:
            pulumi.set(__self__, "keep_job_flow_alive_when_no_steps", keep_job_flow_alive_when_no_steps)
        if master_instance_fleet is not None:
            pulumi.set(__self__, "master_instance_fleet", master_instance_fleet)
        if master_instance_group is not None:
            pulumi.set(__self__, "master_instance_group", master_instance_group)
        if placement is not None:
            pulumi.set(__self__, "placement", placement)
        if service_access_security_group is not None:
            pulumi.set(__self__, "service_access_security_group", service_access_security_group)
        if termination_protected is not None:
            pulumi.set(__self__, "termination_protected", termination_protected)
    @property
    @pulumi.getter(name="additionalMasterSecurityGroups")
    def additional_master_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "additional_master_security_groups")
    @additional_master_security_groups.setter
    def additional_master_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "additional_master_security_groups", value)
    @property
    @pulumi.getter(name="additionalSlaveSecurityGroups")
    def additional_slave_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "additional_slave_security_groups")
    @additional_slave_security_groups.setter
    def additional_slave_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "additional_slave_security_groups", value)
    @property
    @pulumi.getter(name="coreInstanceFleet")
    def core_instance_fleet(self) -> Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]:
        return pulumi.get(self, "core_instance_fleet")
    @core_instance_fleet.setter
    def core_instance_fleet(self, value: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]):
        pulumi.set(self, "core_instance_fleet", value)
    @property
    @pulumi.getter(name="coreInstanceGroup")
    def core_instance_group(self) -> Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]:
        return pulumi.get(self, "core_instance_group")
    @core_instance_group.setter
    def core_instance_group(self, value: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]):
        pulumi.set(self, "core_instance_group", value)
    @property
    @pulumi.getter(name="ec2KeyName")
    def ec2_key_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ec2_key_name")
    @ec2_key_name.setter
    def ec2_key_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ec2_key_name", value)
    @property
    @pulumi.getter(name="ec2SubnetId")
    def ec2_subnet_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ec2_subnet_id")
    @ec2_subnet_id.setter
    def ec2_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ec2_subnet_id", value)
    @property
    @pulumi.getter(name="ec2SubnetIds")
    def ec2_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "ec2_subnet_ids")
    @ec2_subnet_ids.setter
    def ec2_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ec2_subnet_ids", value)
    @property
    @pulumi.getter(name="emrManagedMasterSecurityGroup")
    def emr_managed_master_security_group(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "emr_managed_master_security_group")
    @emr_managed_master_security_group.setter
    def emr_managed_master_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "emr_managed_master_security_group", value)
    @property
    @pulumi.getter(name="emrManagedSlaveSecurityGroup")
    def emr_managed_slave_security_group(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "emr_managed_slave_security_group")
    @emr_managed_slave_security_group.setter
    def emr_managed_slave_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "emr_managed_slave_security_group", value)
    @property
    @pulumi.getter(name="hadoopVersion")
    def hadoop_version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hadoop_version")
    @hadoop_version.setter
    def hadoop_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hadoop_version", value)
    @property
    @pulumi.getter(name="keepJobFlowAliveWhenNoSteps")
    def keep_job_flow_alive_when_no_steps(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "keep_job_flow_alive_when_no_steps")
    @keep_job_flow_alive_when_no_steps.setter
    def keep_job_flow_alive_when_no_steps(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "keep_job_flow_alive_when_no_steps", value)
    @property
    @pulumi.getter(name="masterInstanceFleet")
    def master_instance_fleet(self) -> Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]:
        return pulumi.get(self, "master_instance_fleet")
    @master_instance_fleet.setter
    def master_instance_fleet(self, value: Optional[pulumi.Input['ClusterInstanceFleetConfigArgs']]):
        pulumi.set(self, "master_instance_fleet", value)
    @property
    @pulumi.getter(name="masterInstanceGroup")
    def master_instance_group(self) -> Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]:
        return pulumi.get(self, "master_instance_group")
    @master_instance_group.setter
    def master_instance_group(self, value: Optional[pulumi.Input['ClusterInstanceGroupConfigArgs']]):
        pulumi.set(self, "master_instance_group", value)
    @property
    @pulumi.getter
    def placement(self) -> Optional[pulumi.Input['ClusterPlacementTypeArgs']]:
        return pulumi.get(self, "placement")
    @placement.setter
    def placement(self, value: Optional[pulumi.Input['ClusterPlacementTypeArgs']]):
        pulumi.set(self, "placement", value)
    @property
    @pulumi.getter(name="serviceAccessSecurityGroup")
    def service_access_security_group(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "service_access_security_group")
    @service_access_security_group.setter
    def service_access_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_access_security_group", value)
    @property
    @pulumi.getter(name="terminationProtected")
    def termination_protected(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "termination_protected")
    @termination_protected.setter
    def termination_protected(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "termination_protected", value)
@pulumi.input_type
class ClusterKerberosAttributesArgs:
    """Generated Pulumi input type for the Kerberos attributes of an EMR cluster.

    ``kdc_admin_password`` and ``realm`` are required; the Active-Directory
    join credentials and the cross-realm trust password are optional and
    stored only when non-``None``.

    NOTE(review): these fields carry secrets (passwords); callers should
    pass them as Pulumi secret inputs — confirm at the call sites.
    """
    def __init__(__self__, *,
                 kdc_admin_password: pulumi.Input[str],
                 realm: pulumi.Input[str],
                 a_d_domain_join_password: Optional[pulumi.Input[str]] = None,
                 a_d_domain_join_user: Optional[pulumi.Input[str]] = None,
                 cross_realm_trust_principal_password: Optional[pulumi.Input[str]] = None):
        """
        :param kdc_admin_password: KDC admin password (required).
        :param realm: Kerberos realm (required).
        :param a_d_domain_join_password: Optional AD domain-join password.
        :param a_d_domain_join_user: Optional AD domain-join user.
        :param cross_realm_trust_principal_password: Optional cross-realm
            trust principal password.
        """
        pulumi.set(__self__, "kdc_admin_password", kdc_admin_password)
        pulumi.set(__self__, "realm", realm)
        if a_d_domain_join_password is not None:
            pulumi.set(__self__, "a_d_domain_join_password", a_d_domain_join_password)
        if a_d_domain_join_user is not None:
            pulumi.set(__self__, "a_d_domain_join_user", a_d_domain_join_user)
        if cross_realm_trust_principal_password is not None:
            pulumi.set(__self__, "cross_realm_trust_principal_password", cross_realm_trust_principal_password)
    @property
    @pulumi.getter(name="kdcAdminPassword")
    def kdc_admin_password(self) -> pulumi.Input[str]:
        return pulumi.get(self, "kdc_admin_password")
    @kdc_admin_password.setter
    def kdc_admin_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "kdc_admin_password", value)
    @property
    @pulumi.getter
    def realm(self) -> pulumi.Input[str]:
        return pulumi.get(self, "realm")
    @realm.setter
    def realm(self, value: pulumi.Input[str]):
        pulumi.set(self, "realm", value)
    @property
    @pulumi.getter(name="aDDomainJoinPassword")
    def a_d_domain_join_password(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "a_d_domain_join_password")
    @a_d_domain_join_password.setter
    def a_d_domain_join_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "a_d_domain_join_password", value)
    @property
    @pulumi.getter(name="aDDomainJoinUser")
    def a_d_domain_join_user(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "a_d_domain_join_user")
    @a_d_domain_join_user.setter
    def a_d_domain_join_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "a_d_domain_join_user", value)
    @property
    @pulumi.getter(name="crossRealmTrustPrincipalPassword")
    def cross_realm_trust_principal_password(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cross_realm_trust_principal_password")
    @cross_realm_trust_principal_password.setter
    def cross_realm_trust_principal_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cross_realm_trust_principal_password", value)
@pulumi.input_type
class ClusterKeyValueArgs:
    """Generated Pulumi input type for a single optional key/value pair.

    Both ``key`` and ``value`` are optional and stored only when
    non-``None``.
    """
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterManagedScalingPolicyArgs:
    """Generated Pulumi input type wrapping the optional compute limits of an
    EMR managed scaling policy.
    """
    def __init__(__self__, *,
                 compute_limits: Optional[pulumi.Input['ClusterComputeLimitsArgs']] = None):
        # Stored only when provided, so an unset value stays absent.
        if compute_limits is not None:
            pulumi.set(__self__, "compute_limits", compute_limits)
    @property
    @pulumi.getter(name="computeLimits")
    def compute_limits(self) -> Optional[pulumi.Input['ClusterComputeLimitsArgs']]:
        return pulumi.get(self, "compute_limits")
    @compute_limits.setter
    def compute_limits(self, value: Optional[pulumi.Input['ClusterComputeLimitsArgs']]):
        pulumi.set(self, "compute_limits", value)
@pulumi.input_type
class ClusterMetricDimensionArgs:
    """Generated Pulumi input type for a metric dimension: a required
    ``key``/``value`` string pair (used by CloudWatch alarm definitions).
    """
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterOnDemandProvisioningSpecificationArgs:
    """Generated Pulumi input type for an on-demand provisioning
    specification; holds the single required ``allocation_strategy`` string.
    """
    def __init__(__self__, *,
                 allocation_strategy: pulumi.Input[str]):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> pulumi.Input[str]:
        return pulumi.get(self, "allocation_strategy")
    @allocation_strategy.setter
    def allocation_strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "allocation_strategy", value)
@pulumi.input_type
class ClusterPlacementTypeArgs:
    """Generated Pulumi input type for cluster placement; holds the single
    required ``availability_zone`` string.
    """
    def __init__(__self__, *,
                 availability_zone: pulumi.Input[str]):
        pulumi.set(__self__, "availability_zone", availability_zone)
    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> pulumi.Input[str]:
        return pulumi.get(self, "availability_zone")
    @availability_zone.setter
    def availability_zone(self, value: pulumi.Input[str]):
        pulumi.set(self, "availability_zone", value)
@pulumi.input_type
class ClusterScalingActionArgs:
    """Generated Pulumi input type for a scaling action.

    ``simple_scaling_policy_configuration`` is required; ``market`` is
    optional and stored only when non-``None``.
    """
    def __init__(__self__, *,
                 simple_scaling_policy_configuration: pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs'],
                 market: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs']:
        return pulumi.get(self, "simple_scaling_policy_configuration")
    @simple_scaling_policy_configuration.setter
    def simple_scaling_policy_configuration(self, value: pulumi.Input['ClusterSimpleScalingPolicyConfigurationArgs']):
        pulumi.set(self, "simple_scaling_policy_configuration", value)
    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market")
    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)
@pulumi.input_type
class ClusterScalingConstraintsArgs:
    """Generated Pulumi input type for scaling constraints: required
    ``max_capacity`` and ``min_capacity`` integers.

    NOTE(review): no ordering check is performed here (``min`` may exceed
    ``max``); any validation presumably happens service-side.
    """
    def __init__(__self__, *,
                 max_capacity: pulumi.Input[int],
                 min_capacity: pulumi.Input[int]):
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "max_capacity")
    @max_capacity.setter
    def max_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_capacity", value)
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "min_capacity")
    @min_capacity.setter
    def min_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class ClusterScalingRuleArgs:
    """Generated Pulumi input type for one auto-scaling rule.

    ``action``, ``name`` and ``trigger`` are required; ``description`` is
    optional and stored only when non-``None``.
    """
    def __init__(__self__, *,
                 action: pulumi.Input['ClusterScalingActionArgs'],
                 name: pulumi.Input[str],
                 trigger: pulumi.Input['ClusterScalingTriggerArgs'],
                 description: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['ClusterScalingActionArgs']:
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: pulumi.Input['ClusterScalingActionArgs']):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def trigger(self) -> pulumi.Input['ClusterScalingTriggerArgs']:
        return pulumi.get(self, "trigger")
    @trigger.setter
    def trigger(self, value: pulumi.Input['ClusterScalingTriggerArgs']):
        pulumi.set(self, "trigger", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class ClusterScalingTriggerArgs:
    """Generated Pulumi input type for a scaling trigger; wraps the single
    required CloudWatch alarm definition.
    """
    def __init__(__self__, *,
                 cloud_watch_alarm_definition: pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)
    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']:
        return pulumi.get(self, "cloud_watch_alarm_definition")
    @cloud_watch_alarm_definition.setter
    def cloud_watch_alarm_definition(self, value: pulumi.Input['ClusterCloudWatchAlarmDefinitionArgs']):
        pulumi.set(self, "cloud_watch_alarm_definition", value)
@pulumi.input_type
class ClusterScriptBootstrapActionConfigArgs:
    """Generated Pulumi input type for a script bootstrap action.

    ``path`` is required; ``args`` is optional and stored only when
    non-``None``.
    """
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        pulumi.set(__self__, "path", path)
        if args is not None:
            pulumi.set(__self__, "args", args)
    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")
    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)
@pulumi.input_type
class ClusterSimpleScalingPolicyConfigurationArgs:
    """Generated Pulumi input type for a simple scaling policy.

    ``scaling_adjustment`` is required; ``adjustment_type`` and
    ``cool_down`` are optional and stored only when non-``None``.
    """
    def __init__(__self__, *,
                 scaling_adjustment: pulumi.Input[int],
                 adjustment_type: Optional[pulumi.Input[str]] = None,
                 cool_down: Optional[pulumi.Input[int]] = None):
        """
        :param scaling_adjustment: Adjustment amount (required).
        :param adjustment_type: Optional adjustment-type string.
        :param cool_down: Optional cool-down value.
        """
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)
    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> pulumi.Input[int]:
        return pulumi.get(self, "scaling_adjustment")
    @scaling_adjustment.setter
    def scaling_adjustment(self, value: pulumi.Input[int]):
        pulumi.set(self, "scaling_adjustment", value)
    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "adjustment_type")
    @adjustment_type.setter
    def adjustment_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_type", value)
    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cool_down")
    @cool_down.setter
    def cool_down(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cool_down", value)
@pulumi.input_type
class ClusterSpotProvisioningSpecificationArgs:
    """Generated Pulumi input type for a spot provisioning specification.

    ``timeout_action`` and ``timeout_duration_minutes`` are required;
    ``allocation_strategy`` and ``block_duration_minutes`` are optional
    and stored only when non-``None``.
    """
    def __init__(__self__, *,
                 timeout_action: pulumi.Input[str],
                 timeout_duration_minutes: pulumi.Input[int],
                 allocation_strategy: Optional[pulumi.Input[str]] = None,
                 block_duration_minutes: Optional[pulumi.Input[int]] = None):
        """
        :param timeout_action: Action taken on timeout (required).
        :param timeout_duration_minutes: Timeout duration in minutes (required).
        :param allocation_strategy: Optional allocation strategy string.
        :param block_duration_minutes: Optional block duration in minutes.
        """
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> pulumi.Input[str]:
        return pulumi.get(self, "timeout_action")
    @timeout_action.setter
    def timeout_action(self, value: pulumi.Input[str]):
        pulumi.set(self, "timeout_action", value)
    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> pulumi.Input[int]:
        return pulumi.get(self, "timeout_duration_minutes")
    @timeout_duration_minutes.setter
    def timeout_duration_minutes(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_duration_minutes", value)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "allocation_strategy")
    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_strategy", value)
    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "block_duration_minutes")
    @block_duration_minutes.setter
    def block_duration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "block_duration_minutes", value)
@pulumi.input_type
class ClusterStepConfigArgs:
    def __init__(__self__, *,
                 hadoop_jar_step: pulumi.Input['ClusterHadoopJarStepConfigArgs'],
                 name: pulumi.Input[str],
                 action_on_failure: Optional[pulumi.Input[str]] = None):
        """
        Configuration of a single cluster step (a Hadoop JAR to run).

        :param pulumi.Input['ClusterHadoopJarStepConfigArgs'] hadoop_jar_step: JAR, main class and arguments for the step.
        :param pulumi.Input[str] name: Name of the step.
        :param pulumi.Input[str] action_on_failure: Action the cluster takes if the step fails.
        """
        pulumi.set(__self__, "hadoop_jar_step", hadoop_jar_step)
        pulumi.set(__self__, "name", name)
        if action_on_failure is not None:
            pulumi.set(__self__, "action_on_failure", action_on_failure)
    @property
    @pulumi.getter(name="hadoopJarStep")
    def hadoop_jar_step(self) -> pulumi.Input['ClusterHadoopJarStepConfigArgs']:
        return pulumi.get(self, "hadoop_jar_step")
    @hadoop_jar_step.setter
    def hadoop_jar_step(self, value: pulumi.Input['ClusterHadoopJarStepConfigArgs']):
        pulumi.set(self, "hadoop_jar_step", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="actionOnFailure")
    def action_on_failure(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "action_on_failure")
    @action_on_failure.setter
    def action_on_failure(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_on_failure", value)
@pulumi.input_type
class ClusterTagArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A single key-value tag applied to an EMR cluster.

        :param pulumi.Input[str] key: The tag key.
        :param pulumi.Input[str] value: The tag value.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ClusterVolumeSpecificationArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        """
        EBS volume specification for a cluster instance.

        :param pulumi.Input[int] size_in_gb: Volume size in GiB.
        :param pulumi.Input[str] volume_type: EBS volume type (e.g. gp2, io1 — confirm allowed values against the EMR API).
        :param pulumi.Input[int] iops: Provisioned IOPS for the volume, when applicable.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        return pulumi.get(self, "size_in_gb")
    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "volume_type")
    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)
    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")
    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class InstanceFleetConfigConfigurationArgs:
    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]] = None):
        """
        An application configuration for an instance fleet; may nest further
        configurations recursively (the `configurations` field references this same type).

        :param pulumi.Input[str] classification: Classification of the configuration (e.g. an application/file name).
        :param Any configuration_properties: Key-value settings for the classification — untyped here; presumably a string map, confirm against the provider schema.
        :param pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]] configurations: Nested child configurations.
        """
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "classification")
    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)
    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")
    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)
    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")
    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class InstanceFleetConfigEbsBlockDeviceConfigArgs:
    def __init__(__self__, *,
                 volume_specification: pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        EBS block device configuration for an instance fleet.

        :param pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs'] volume_specification: Size/type/IOPS of each volume.
        :param pulumi.Input[int] volumes_per_instance: Number of volumes of this specification to attach per instance.
        """
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs']:
        return pulumi.get(self, "volume_specification")
    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['InstanceFleetConfigVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volumes_per_instance")
    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class InstanceFleetConfigEbsConfigurationArgs:
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        """
        EBS configuration attached to an instance fleet's instance type.

        :param pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]] ebs_block_device_configs: Block device configurations for the instances.
        :param pulumi.Input[bool] ebs_optimized: Whether the instances are EBS-optimized.
        """
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)
    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]]:
        return pulumi.get(self, "ebs_block_device_configs")
    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)
    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ebs_optimized")
    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class InstanceFleetConfigInstanceFleetProvisioningSpecificationsArgs:
    def __init__(__self__, *,
                 on_demand_specification: Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']] = None,
                 spot_specification: Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']] = None):
        """
        Launch specifications for an instance fleet: on-demand and/or spot provisioning.

        :param pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs'] on_demand_specification: On-demand capacity provisioning settings.
        :param pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs'] spot_specification: Spot capacity provisioning settings.
        """
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)
    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']]:
        return pulumi.get(self, "on_demand_specification")
    @on_demand_specification.setter
    def on_demand_specification(self, value: Optional[pulumi.Input['InstanceFleetConfigOnDemandProvisioningSpecificationArgs']]):
        pulumi.set(self, "on_demand_specification", value)
    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']]:
        return pulumi.get(self, "spot_specification")
    @spot_specification.setter
    def spot_specification(self, value: Optional[pulumi.Input['InstanceFleetConfigSpotProvisioningSpecificationArgs']]):
        pulumi.set(self, "spot_specification", value)
@pulumi.input_type
class InstanceFleetConfigInstanceTypeConfigArgs:
    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 bid_price: Optional[pulumi.Input[str]] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[pulumi.Input[float]] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]] = None,
                 custom_ami_id: Optional[pulumi.Input[str]] = None,
                 ebs_configuration: Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']] = None,
                 weighted_capacity: Optional[pulumi.Input[int]] = None):
        """
        One instance type option within an instance fleet, with optional
        spot bid, AMI, EBS and capacity-weight settings.

        :param pulumi.Input[str] instance_type: EC2 instance type (e.g. "m5.xlarge").
        :param pulumi.Input[str] bid_price: Spot bid price, as a string — note the bid is a string while the percentage below is a float.
        :param pulumi.Input[float] bid_price_as_percentage_of_on_demand_price: Spot bid as a percentage of the on-demand price.
        :param pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]] configurations: Application configurations applied to this instance type.
        :param pulumi.Input[str] custom_ami_id: Custom AMI to launch the instances from.
        :param pulumi.Input['InstanceFleetConfigEbsConfigurationArgs'] ebs_configuration: EBS volumes attached to the instances.
        :param pulumi.Input[int] weighted_capacity: Units this instance type counts toward the fleet's target capacity.
        """
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "instance_type")
    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "bid_price")
    @bid_price.setter
    def bid_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bid_price", value)
    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[pulumi.Input[float]]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
    @bid_price_as_percentage_of_on_demand_price.setter
    def bid_price_as_percentage_of_on_demand_price(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "bid_price_as_percentage_of_on_demand_price", value)
    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")
    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceFleetConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "custom_ami_id")
    @custom_ami_id.setter
    def custom_ami_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_ami_id", value)
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']]:
        return pulumi.get(self, "ebs_configuration")
    @ebs_configuration.setter
    def ebs_configuration(self, value: Optional[pulumi.Input['InstanceFleetConfigEbsConfigurationArgs']]):
        pulumi.set(self, "ebs_configuration", value)
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "weighted_capacity")
    @weighted_capacity.setter
    def weighted_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class InstanceFleetConfigOnDemandProvisioningSpecificationArgs:
    def __init__(__self__, *,
                 allocation_strategy: pulumi.Input[str]):
        """
        On-demand provisioning specification for an instance fleet.

        :param pulumi.Input[str] allocation_strategy: Strategy used to allocate on-demand capacity (required, unlike the spot variant where it is optional).
        """
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> pulumi.Input[str]:
        return pulumi.get(self, "allocation_strategy")
    @allocation_strategy.setter
    def allocation_strategy(self, value: pulumi.Input[str]):
        pulumi.set(self, "allocation_strategy", value)
@pulumi.input_type
class InstanceFleetConfigSpotProvisioningSpecificationArgs:
    def __init__(__self__, *,
                 timeout_action: pulumi.Input[str],
                 timeout_duration_minutes: pulumi.Input[int],
                 allocation_strategy: Optional[pulumi.Input[str]] = None,
                 block_duration_minutes: Optional[pulumi.Input[int]] = None):
        """
        Spot provisioning specification for an instance fleet (same shape as
        ClusterSpotProvisioningSpecificationArgs, scoped to the InstanceFleetConfig resource).

        :param pulumi.Input[str] timeout_action: Action to take when the spot provisioning timeout expires.
        :param pulumi.Input[int] timeout_duration_minutes: Spot provisioning timeout, in minutes.
        :param pulumi.Input[str] allocation_strategy: Strategy used to allocate spot capacity.
        :param pulumi.Input[int] block_duration_minutes: Defined duration for the spot capacity, in minutes.
        """
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> pulumi.Input[str]:
        return pulumi.get(self, "timeout_action")
    @timeout_action.setter
    def timeout_action(self, value: pulumi.Input[str]):
        pulumi.set(self, "timeout_action", value)
    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> pulumi.Input[int]:
        return pulumi.get(self, "timeout_duration_minutes")
    @timeout_duration_minutes.setter
    def timeout_duration_minutes(self, value: pulumi.Input[int]):
        pulumi.set(self, "timeout_duration_minutes", value)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "allocation_strategy")
    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_strategy", value)
    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "block_duration_minutes")
    @block_duration_minutes.setter
    def block_duration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "block_duration_minutes", value)
@pulumi.input_type
class InstanceFleetConfigVolumeSpecificationArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        """
        EBS volume specification for an instance fleet (same shape as ClusterVolumeSpecificationArgs).

        :param pulumi.Input[int] size_in_gb: Volume size in GiB.
        :param pulumi.Input[str] volume_type: EBS volume type.
        :param pulumi.Input[int] iops: Provisioned IOPS for the volume, when applicable.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        return pulumi.get(self, "size_in_gb")
    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "volume_type")
    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)
    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")
    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class InstanceGroupConfigAutoScalingPolicyArgs:
    def __init__(__self__, *,
                 constraints: pulumi.Input['InstanceGroupConfigScalingConstraintsArgs'],
                 rules: pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]):
        """
        Automatic scaling policy for an instance group: capacity bounds plus scaling rules.

        :param pulumi.Input['InstanceGroupConfigScalingConstraintsArgs'] constraints: Min/max capacity limits the policy may scale within.
        :param pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]] rules: The scale-in/scale-out rules.
        """
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter
    def constraints(self) -> pulumi.Input['InstanceGroupConfigScalingConstraintsArgs']:
        return pulumi.get(self, "constraints")
    @constraints.setter
    def constraints(self, value: pulumi.Input['InstanceGroupConfigScalingConstraintsArgs']):
        pulumi.set(self, "constraints", value)
    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]:
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigScalingRuleArgs']]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class InstanceGroupConfigCloudWatchAlarmDefinitionArgs:
    def __init__(__self__, *,
                 comparison_operator: pulumi.Input[str],
                 metric_name: pulumi.Input[str],
                 period: pulumi.Input[int],
                 threshold: pulumi.Input[float],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        CloudWatch alarm that triggers an instance-group scaling rule.

        :param pulumi.Input[str] comparison_operator: How the metric is compared to the threshold.
        :param pulumi.Input[str] metric_name: Name of the CloudWatch metric being watched.
        :param pulumi.Input[int] period: Metric evaluation period — presumably in seconds, per the CloudWatch API; confirm against the provider schema.
        :param pulumi.Input[float] threshold: Value the metric is compared against.
        :param pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]] dimensions: CloudWatch dimensions identifying the metric.
        :param pulumi.Input[int] evaluation_periods: Number of periods the condition must hold before the alarm fires.
        :param pulumi.Input[str] namespace: CloudWatch namespace of the metric.
        :param pulumi.Input[str] statistic: Statistic applied to the metric (e.g. Average).
        :param pulumi.Input[str] unit: Unit of the metric.
        """
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)
    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> pulumi.Input[str]:
        return pulumi.get(self, "comparison_operator")
    @comparison_operator.setter
    def comparison_operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "comparison_operator", value)
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "metric_name")
    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)
    @property
    @pulumi.getter
    def period(self) -> pulumi.Input[int]:
        return pulumi.get(self, "period")
    @period.setter
    def period(self, value: pulumi.Input[int]):
        pulumi.set(self, "period", value)
    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        return pulumi.get(self, "threshold")
    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)
    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]]:
        return pulumi.get(self, "dimensions")
    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigMetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "evaluation_periods")
    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "statistic")
    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)
    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "unit")
    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class InstanceGroupConfigConfigurationArgs:
    def __init__(__self__, *,
                 classification: Optional[pulumi.Input[str]] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]] = None):
        """
        An application configuration for an instance group; may nest further
        configurations recursively (the `configurations` field references this same type).

        :param pulumi.Input[str] classification: Classification of the configuration.
        :param Any configuration_properties: Key-value settings for the classification — untyped here; presumably a string map, confirm against the provider schema.
        :param pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]] configurations: Nested child configurations.
        """
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
    @property
    @pulumi.getter
    def classification(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "classification")
    @classification.setter
    def classification(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "classification", value)
    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")
    @configuration_properties.setter
    def configuration_properties(self, value: Optional[Any]):
        pulumi.set(self, "configuration_properties", value)
    @property
    @pulumi.getter
    def configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]]:
        return pulumi.get(self, "configurations")
    @configurations.setter
    def configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigConfigurationArgs']]]]):
        pulumi.set(self, "configurations", value)
@pulumi.input_type
class InstanceGroupConfigEbsBlockDeviceConfigArgs:
    def __init__(__self__, *,
                 volume_specification: pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs'],
                 volumes_per_instance: Optional[pulumi.Input[int]] = None):
        """
        EBS block device configuration for an instance group.

        :param pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs'] volume_specification: Size/type/IOPS of each volume.
        :param pulumi.Input[int] volumes_per_instance: Number of volumes of this specification to attach per instance.
        """
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs']:
        return pulumi.get(self, "volume_specification")
    @volume_specification.setter
    def volume_specification(self, value: pulumi.Input['InstanceGroupConfigVolumeSpecificationArgs']):
        pulumi.set(self, "volume_specification", value)
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volumes_per_instance")
    @volumes_per_instance.setter
    def volumes_per_instance(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volumes_per_instance", value)
@pulumi.input_type
class InstanceGroupConfigEbsConfigurationArgs:
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]] = None,
                 ebs_optimized: Optional[pulumi.Input[bool]] = None):
        """
        EBS configuration for an instance group.

        :param pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]] ebs_block_device_configs: Block device configurations for the instances.
        :param pulumi.Input[bool] ebs_optimized: Whether the instances are EBS-optimized.
        """
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)
    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]]:
        return pulumi.get(self, "ebs_block_device_configs")
    @ebs_block_device_configs.setter
    def ebs_block_device_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceGroupConfigEbsBlockDeviceConfigArgs']]]]):
        pulumi.set(self, "ebs_block_device_configs", value)
    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ebs_optimized")
    @ebs_optimized.setter
    def ebs_optimized(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ebs_optimized", value)
@pulumi.input_type
class InstanceGroupConfigMetricDimensionArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        A single CloudWatch metric dimension (name/value pair) used by an alarm definition.

        :param pulumi.Input[str] key: The dimension name.
        :param pulumi.Input[str] value: The dimension value.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class InstanceGroupConfigScalingActionArgs:
    def __init__(__self__, *,
                 simple_scaling_policy_configuration: pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs'],
                 market: Optional[pulumi.Input[str]] = None):
        """
        The action taken when a scaling rule's trigger fires.

        :param pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs'] simple_scaling_policy_configuration: How capacity is adjusted when the action runs.
        :param pulumi.Input[str] market: Market type of the instances affected — presumably ON_DEMAND or SPOT; confirm against the EMR API.
        """
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs']:
        return pulumi.get(self, "simple_scaling_policy_configuration")
    @simple_scaling_policy_configuration.setter
    def simple_scaling_policy_configuration(self, value: pulumi.Input['InstanceGroupConfigSimpleScalingPolicyConfigurationArgs']):
        pulumi.set(self, "simple_scaling_policy_configuration", value)
    @property
    @pulumi.getter
    def market(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "market")
    @market.setter
    def market(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "market", value)
@pulumi.input_type
class InstanceGroupConfigScalingConstraintsArgs:
    def __init__(__self__, *,
                 max_capacity: pulumi.Input[int],
                 min_capacity: pulumi.Input[int]):
        """
        Upper and lower capacity bounds for an instance group's auto-scaling policy.

        :param pulumi.Input[int] max_capacity: Maximum number of instances the policy may scale out to.
        :param pulumi.Input[int] min_capacity: Minimum number of instances the policy may scale in to.
        """
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "max_capacity")
    @max_capacity.setter
    def max_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_capacity", value)
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "min_capacity")
    @min_capacity.setter
    def min_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class InstanceGroupConfigScalingRuleArgs:
    def __init__(__self__, *,
                 action: pulumi.Input['InstanceGroupConfigScalingActionArgs'],
                 name: pulumi.Input[str],
                 trigger: pulumi.Input['InstanceGroupConfigScalingTriggerArgs'],
                 description: Optional[pulumi.Input[str]] = None):
        """
        A single auto-scaling rule: a named trigger/action pair.

        :param pulumi.Input['InstanceGroupConfigScalingActionArgs'] action: The scaling adjustment applied when the rule fires.
        :param pulumi.Input[str] name: Name of the rule.
        :param pulumi.Input['InstanceGroupConfigScalingTriggerArgs'] trigger: CloudWatch alarm condition that fires the rule.
        :param pulumi.Input[str] description: Human-readable description of the rule.
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['InstanceGroupConfigScalingActionArgs']:
        return pulumi.get(self, "action")
    @action.setter
    def action(self, value: pulumi.Input['InstanceGroupConfigScalingActionArgs']):
        pulumi.set(self, "action", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def trigger(self) -> pulumi.Input['InstanceGroupConfigScalingTriggerArgs']:
        return pulumi.get(self, "trigger")
    @trigger.setter
    def trigger(self, value: pulumi.Input['InstanceGroupConfigScalingTriggerArgs']):
        pulumi.set(self, "trigger", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceGroupConfigScalingTriggerArgs:
    def __init__(__self__, *,
                 cloud_watch_alarm_definition: pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']):
        """
        Trigger for a scaling rule — a single CloudWatch alarm definition.

        :param pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs'] cloud_watch_alarm_definition: The alarm that fires the rule.
        """
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)
    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']:
        return pulumi.get(self, "cloud_watch_alarm_definition")
    @cloud_watch_alarm_definition.setter
    def cloud_watch_alarm_definition(self, value: pulumi.Input['InstanceGroupConfigCloudWatchAlarmDefinitionArgs']):
        pulumi.set(self, "cloud_watch_alarm_definition", value)
@pulumi.input_type
class InstanceGroupConfigSimpleScalingPolicyConfigurationArgs:
    def __init__(__self__, *,
                 scaling_adjustment: pulumi.Input[int],
                 adjustment_type: Optional[pulumi.Input[str]] = None,
                 cool_down: Optional[pulumi.Input[int]] = None):
        """
        How a scaling action adjusts instance-group capacity.

        :param pulumi.Input[int] scaling_adjustment: Amount to scale by; how it is interpreted depends on `adjustment_type`.
        :param pulumi.Input[str] adjustment_type: How `scaling_adjustment` is applied (absolute change vs. percentage — confirm values against the EMR API).
        :param pulumi.Input[int] cool_down: Cooldown after a scaling activity — presumably seconds; confirm against the EMR API.
        """
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)
    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> pulumi.Input[int]:
        return pulumi.get(self, "scaling_adjustment")
    @scaling_adjustment.setter
    def scaling_adjustment(self, value: pulumi.Input[int]):
        pulumi.set(self, "scaling_adjustment", value)
    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "adjustment_type")
    @adjustment_type.setter
    def adjustment_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment_type", value)
    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cool_down")
    @cool_down.setter
    def cool_down(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cool_down", value)
@pulumi.input_type
class InstanceGroupConfigVolumeSpecificationArgs:
    def __init__(__self__, *,
                 size_in_gb: pulumi.Input[int],
                 volume_type: pulumi.Input[str],
                 iops: Optional[pulumi.Input[int]] = None):
        """
        EBS volume specification for an instance group (same shape as ClusterVolumeSpecificationArgs).

        :param pulumi.Input[int] size_in_gb: Volume size in GiB.
        :param pulumi.Input[str] volume_type: EBS volume type.
        :param pulumi.Input[int] iops: Provisioned IOPS for the volume, when applicable.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> pulumi.Input[int]:
        return pulumi.get(self, "size_in_gb")
    @size_in_gb.setter
    def size_in_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_gb", value)
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "volume_type")
    @volume_type.setter
    def volume_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_type", value)
    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")
    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)
@pulumi.input_type
class StepHadoopJarStepConfigArgs:
    def __init__(__self__, *,
                 jar: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 main_class: Optional[pulumi.Input[str]] = None,
                 step_properties: Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]] = None):
        """
        Hadoop JAR step configuration for the Step resource.

        :param pulumi.Input[str] jar: Path/URI of the JAR to run.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] args: Command-line arguments passed to the JAR.
        :param pulumi.Input[str] main_class: Main class to invoke; if omitted, presumably the JAR's manifest main class is used — confirm against the EMR API.
        :param pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]] step_properties: Key-value properties supplied to the step.
        """
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)
    @property
    @pulumi.getter
    def jar(self) -> pulumi.Input[str]:
        return pulumi.get(self, "jar")
    @jar.setter
    def jar(self, value: pulumi.Input[str]):
        pulumi.set(self, "jar", value)
    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "args")
    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)
    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "main_class")
    @main_class.setter
    def main_class(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "main_class", value)
    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]]:
        return pulumi.get(self, "step_properties")
    @step_properties.setter
    def step_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StepKeyValueArgs']]]]):
        pulumi.set(self, "step_properties", value)
@pulumi.input_type
class StepKeyValueArgs:
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        A key-value property for a Hadoop JAR step; both fields are optional
        (unlike the tag types, where both are required).

        :param pulumi.Input[str] key: The property key.
        :param pulumi.Input[str] value: The property value.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class StudioTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
An arbitrary set of tags (key-value pairs) for this EMR Studio.
:param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param pulumi.Input[str] value: The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value) | 0.838779 | 0.049797 |
from django.contrib.gis.utils.wkt import precision_wkt
from appPonto.models import *
from appPortas.models import *
from datetime import datetime
def consultar_pessoa_acesso_porta(id_digital, id_porta, img_digital):
try:
grupopessoa = GrupoPessoa.objects.filter(pessoa__id_digital=id_digital)
grupoporta = GrupoPorta.objects.filter(porta_id=id_porta)
except GrupoPessoa.DoesNotExist:
return False
except GrupoPorta.DoesNotExist:
return False
acesso = False
for grupo in grupopessoa:
for porta in grupoporta:
if grupo.grupo == porta.grupo:
acesso = True
return acesso
def registra_acesso_porta(id_digital, id_porta, img_digital):
hora = datetime.now()
if consultar_pessoa_acesso_porta(id_digital, id_porta, img_digital) == True:
pessoa = Pessoa.objects.get(id_digital=id_digital)
porta = Porta.objects.get(id=id_porta)
registro_acesso = RegistroPorta.objects.create(data=datetime.now().date(),
hora_acesso=hora.strftime("%H:%M:%S"), porta=porta,
pessoa=pessoa)
registro_acesso.save()
return True
else:
return False
def registra_frequencia_entrada(id_digital, img_digital, local):
not_frequencia = True
try:
Frequencia.objects.get(pessoa__id_digital=32, data=datetime.now().date())
not_frequencia = False
except Frequencia.DoesNotExist:
not_frequencia = True
hora = datetime.now()
if not_frequencia:
try:
pessoa = Pessoa.objects.get(id_digital=id_digital)
except Pessoa.DoesNotExist:
return False
frequencia = Frequencia.objects.create(data=datetime.now().date(), hora_entrada=hora.strftime("%H:%M:%S"),
hora_saida=None, pessoa=pessoa, local=local)
frequencia.save()
return True
else:
return False
def registra_frequencia_saida(id_digital, img_digital, local):
hora = datetime.now()
try:
frequencia = Frequencia.objects.get(pessoa__id_digital=id_digital, data=datetime.now().date())
except Pessoa.DoesNotExist:
return False
frequencia.hora_saida = hora.strftime("%H:%M:%S")
frequencia.local = local
frequencia.save()
return True
def adicionar_biometria(id_digital, img_digital, matricula):
try:
pessoa = Pessoa.objects.get(username=matricula)
except Pessoa.DoesNotExist:
return False
pessoa.id_digital = id_digital
pessoa.img_dital = img_digital
pessoa.save()
return True
print(adicionar_biometria(88, 'img', 251)) | appPortas/consultas.py | from django.contrib.gis.utils.wkt import precision_wkt
from appPonto.models import *
from appPortas.models import *
from datetime import datetime
def consultar_pessoa_acesso_porta(id_digital, id_porta, img_digital):
try:
grupopessoa = GrupoPessoa.objects.filter(pessoa__id_digital=id_digital)
grupoporta = GrupoPorta.objects.filter(porta_id=id_porta)
except GrupoPessoa.DoesNotExist:
return False
except GrupoPorta.DoesNotExist:
return False
acesso = False
for grupo in grupopessoa:
for porta in grupoporta:
if grupo.grupo == porta.grupo:
acesso = True
return acesso
def registra_acesso_porta(id_digital, id_porta, img_digital):
hora = datetime.now()
if consultar_pessoa_acesso_porta(id_digital, id_porta, img_digital) == True:
pessoa = Pessoa.objects.get(id_digital=id_digital)
porta = Porta.objects.get(id=id_porta)
registro_acesso = RegistroPorta.objects.create(data=datetime.now().date(),
hora_acesso=hora.strftime("%H:%M:%S"), porta=porta,
pessoa=pessoa)
registro_acesso.save()
return True
else:
return False
def registra_frequencia_entrada(id_digital, img_digital, local):
not_frequencia = True
try:
Frequencia.objects.get(pessoa__id_digital=32, data=datetime.now().date())
not_frequencia = False
except Frequencia.DoesNotExist:
not_frequencia = True
hora = datetime.now()
if not_frequencia:
try:
pessoa = Pessoa.objects.get(id_digital=id_digital)
except Pessoa.DoesNotExist:
return False
frequencia = Frequencia.objects.create(data=datetime.now().date(), hora_entrada=hora.strftime("%H:%M:%S"),
hora_saida=None, pessoa=pessoa, local=local)
frequencia.save()
return True
else:
return False
def registra_frequencia_saida(id_digital, img_digital, local):
hora = datetime.now()
try:
frequencia = Frequencia.objects.get(pessoa__id_digital=id_digital, data=datetime.now().date())
except Pessoa.DoesNotExist:
return False
frequencia.hora_saida = hora.strftime("%H:%M:%S")
frequencia.local = local
frequencia.save()
return True
def adicionar_biometria(id_digital, img_digital, matricula):
try:
pessoa = Pessoa.objects.get(username=matricula)
except Pessoa.DoesNotExist:
return False
pessoa.id_digital = id_digital
pessoa.img_dital = img_digital
pessoa.save()
return True
print(adicionar_biometria(88, 'img', 251)) | 0.305697 | 0.099077 |
from __future__ import print_function, unicode_literals
import os
import sys
from beanbag_tools.utils.builds import build_checksums
from beanbag_tools.utils.builds_python import (python_build_releases,
python_check_can_release)
from beanbag_tools.utils.git import (git_get_tag_sha, git_tag_release,
git_use_clone)
from beanbag_tools.utils.pypi import pypi_register_release
from beanbag_tools.utils.rbwebsite import (rbwebsite_load_config,
rbwebsite_register_release)
from beanbag_tools.utils.s3 import s3_upload_files
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from reviewboard import __version__, __version_info__, is_release
PY_VERSIONS = ["2.6", "2.7"]
PACKAGE_NAME = 'ReviewBoard'
RELEASES_BUCKET_NAME = 'downloads.reviewboard.org'
RELEASES_BUCKET_KEY = '/releases/%s/%s.%s/' % (PACKAGE_NAME,
__version_info__[0],
__version_info__[1])
def build_settings():
with open('settings_local.py', 'w') as f:
f.write('DATABASES = {\n')
f.write(' "default": {\n')
f.write(' "ENGINE": "django.db.backends.sqlite3",\n')
f.write(' "NAME": "reviewboard.db",\n')
f.write(' }\n')
f.write('}\n\n')
f.write('PRODUCTION = True\n')
f.write('DEBUG = False\n')
def build_targets():
built_files = python_build_releases(PACKAGE_NAME, __version__, PY_VERSIONS)
built_files += build_checksums(PACKAGE_NAME, __version__, built_files)
return built_files
def register_release():
if __version_info__[4] == 'final':
pypi_register_release()
scm_revision = git_get_tag_sha('release-%s' % __version__)
rbwebsite_register_release(__version_info__, scm_revision)
def main():
python_check_can_release(is_release())
rbwebsite_load_config()
with git_use_clone('.'):
build_settings()
built_files = build_targets()
s3_upload_files(RELEASES_BUCKET_NAME, RELEASES_BUCKET_KEY, built_files,
build_index=True)
git_tag_release(__version__)
register_release()
if __name__ == "__main__":
main() | contrib/internal/release.py |
from __future__ import print_function, unicode_literals
import os
import sys
from beanbag_tools.utils.builds import build_checksums
from beanbag_tools.utils.builds_python import (python_build_releases,
python_check_can_release)
from beanbag_tools.utils.git import (git_get_tag_sha, git_tag_release,
git_use_clone)
from beanbag_tools.utils.pypi import pypi_register_release
from beanbag_tools.utils.rbwebsite import (rbwebsite_load_config,
rbwebsite_register_release)
from beanbag_tools.utils.s3 import s3_upload_files
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from reviewboard import __version__, __version_info__, is_release
PY_VERSIONS = ["2.6", "2.7"]
PACKAGE_NAME = 'ReviewBoard'
RELEASES_BUCKET_NAME = 'downloads.reviewboard.org'
RELEASES_BUCKET_KEY = '/releases/%s/%s.%s/' % (PACKAGE_NAME,
__version_info__[0],
__version_info__[1])
def build_settings():
with open('settings_local.py', 'w') as f:
f.write('DATABASES = {\n')
f.write(' "default": {\n')
f.write(' "ENGINE": "django.db.backends.sqlite3",\n')
f.write(' "NAME": "reviewboard.db",\n')
f.write(' }\n')
f.write('}\n\n')
f.write('PRODUCTION = True\n')
f.write('DEBUG = False\n')
def build_targets():
built_files = python_build_releases(PACKAGE_NAME, __version__, PY_VERSIONS)
built_files += build_checksums(PACKAGE_NAME, __version__, built_files)
return built_files
def register_release():
if __version_info__[4] == 'final':
pypi_register_release()
scm_revision = git_get_tag_sha('release-%s' % __version__)
rbwebsite_register_release(__version_info__, scm_revision)
def main():
python_check_can_release(is_release())
rbwebsite_load_config()
with git_use_clone('.'):
build_settings()
built_files = build_targets()
s3_upload_files(RELEASES_BUCKET_NAME, RELEASES_BUCKET_KEY, built_files,
build_index=True)
git_tag_release(__version__)
register_release()
if __name__ == "__main__":
main() | 0.360714 | 0.064683 |
__all__ = ['SparkConf']
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
C{SparkConf()}, which will load values from C{spark.*} Java system
properties as well. In this case, any parameters you set directly on
the C{SparkConf} object take priority over system properties.
For unit tests, you can also call C{SparkConf(false)} to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write C{conf.setMain("local").setAppName("My app")}.
Note that once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None, _jconf=None):
"""
Create a new Spark configuration.
:param loadDefaults: whether to load values from Java system
properties (True by default)
:param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
:param _jconf: Optionally pass in an existing SparkConf handle
to use its parameters
"""
if _jconf:
self._jconf = _jconf
else:
from pyspark.context import SparkContext
SparkContext._ensure_initialized()
_jvm = _jvm or SparkContext._jvm
self._jconf = _jvm.SparkConf(loadDefaults)
def set(self, key, value):
"""Set a configuration property."""
self._jconf.set(key, unicode(value))
return self
def setIfMissing(self, key, value):
"""Set a configuration property, if not already set."""
if self.get(key) is None:
self.set(key, value)
return self
def setMain(self, value):
"""Set main URL to connect to."""
self._jconf.setMain(value)
return self
def setAppName(self, value):
"""Set application name."""
self._jconf.setAppName(value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self._jconf.setSparkHome(value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
self._jconf.setExecutorEnv(key, value)
elif pairs is not None:
for (k, v) in pairs:
self._jconf.setExecutorEnv(k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
:param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self._jconf.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
return self._jconf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
pairs = []
for elem in self._jconf.getAll():
pairs.append((elem._1(), elem._2()))
return pairs
def contains(self, key):
"""Does this configuration contain a given key?"""
return self._jconf.contains(key)
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
return self._jconf.toDebugString()
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test() | python/pyspark/conf.py | __all__ = ['SparkConf']
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
C{SparkConf()}, which will load values from C{spark.*} Java system
properties as well. In this case, any parameters you set directly on
the C{SparkConf} object take priority over system properties.
For unit tests, you can also call C{SparkConf(false)} to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write C{conf.setMain("local").setAppName("My app")}.
Note that once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None, _jconf=None):
"""
Create a new Spark configuration.
:param loadDefaults: whether to load values from Java system
properties (True by default)
:param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
:param _jconf: Optionally pass in an existing SparkConf handle
to use its parameters
"""
if _jconf:
self._jconf = _jconf
else:
from pyspark.context import SparkContext
SparkContext._ensure_initialized()
_jvm = _jvm or SparkContext._jvm
self._jconf = _jvm.SparkConf(loadDefaults)
def set(self, key, value):
"""Set a configuration property."""
self._jconf.set(key, unicode(value))
return self
def setIfMissing(self, key, value):
"""Set a configuration property, if not already set."""
if self.get(key) is None:
self.set(key, value)
return self
def setMain(self, value):
"""Set main URL to connect to."""
self._jconf.setMain(value)
return self
def setAppName(self, value):
"""Set application name."""
self._jconf.setAppName(value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self._jconf.setSparkHome(value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
self._jconf.setExecutorEnv(key, value)
elif pairs is not None:
for (k, v) in pairs:
self._jconf.setExecutorEnv(k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
:param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self._jconf.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
return self._jconf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
pairs = []
for elem in self._jconf.getAll():
pairs.append((elem._1(), elem._2()))
return pairs
def contains(self, key):
"""Does this configuration contain a given key?"""
return self._jconf.contains(key)
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
return self._jconf.toDebugString()
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test() | 0.905227 | 0.493103 |
import unittest
from definitions import ROOT_DIR
from main.org.core.fitness.objectives2D import Objective2D
from main.org.core.utility.Chromosome_Generator import *
class Test(unittest.TestCase):
def test_constructor(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
obj2_d = Objective2D(chrom_gen)
self.assertEqual(len(obj2_d.get_messages()), 15)
def test_zero_frequency(self):
# create a chromosome with only one template with maximum specificity but that
# does not match any message
chromosome = Chromosome({})
template = ["A", "b", "c"]
t = Template(template)
chromosome.add_template(t)
# let's read the messages
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
obj2_d = Objective2D(chrom_gen)
# assertion section
self.assertEqual(obj2_d.compute_objective(chromosome), [1.0, 0])
def test_star_template(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
t = Template(["*", "*", "*"])
compute_matched_lines(chrom_gen.messages, t)
chromosome = Chromosome({})
chromosome.add_template(t)
obj = Objective2D(chrom_gen)
self.assertEqual(obj.compute_objective(chromosome), [0, 0])
def test_compute_objective(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', ["'[\w\d\$\-:,\./_ ><\|]*'"])
template1 = ["Message", "sent", "by", "*", ",", "at", "port", "*"]
template2 = ["generating", "reading", "files"]
t1 = Template(template1)
t2 = Template(template2)
ch = Chromosome({})
ch.add_template(t1)
ch.add_template(t2)
# update matched info
compute_matched_lines(chrom_gen.messages, t1)
print(t1.matched_lines)
compute_matched_lines(chrom_gen.messages, t2)
print(t2.matched_lines)
ch.coverage = 9.0 / 15
obj = Objective2D(chrom_gen)
scores = obj.compute_objective(ch)
self.assertEqual(scores[0], (((6/8)+1)/2))
self.assertEqual(scores[1], (8/7)/2)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
unittest.TextTestRunner(verbosity=2).run(suite) | executable/logpai/logparser/MoLFI/test/org/core/fitness/test_objectives2D.py | import unittest
from definitions import ROOT_DIR
from main.org.core.fitness.objectives2D import Objective2D
from main.org.core.utility.Chromosome_Generator import *
class Test(unittest.TestCase):
def test_constructor(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
obj2_d = Objective2D(chrom_gen)
self.assertEqual(len(obj2_d.get_messages()), 15)
def test_zero_frequency(self):
# create a chromosome with only one template with maximum specificity but that
# does not match any message
chromosome = Chromosome({})
template = ["A", "b", "c"]
t = Template(template)
chromosome.add_template(t)
# let's read the messages
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
obj2_d = Objective2D(chrom_gen)
# assertion section
self.assertEqual(obj2_d.compute_objective(chromosome), [1.0, 0])
def test_star_template(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', [])
t = Template(["*", "*", "*"])
compute_matched_lines(chrom_gen.messages, t)
chromosome = Chromosome({})
chromosome.add_template(t)
obj = Objective2D(chrom_gen)
self.assertEqual(obj.compute_objective(chromosome), [0, 0])
def test_compute_objective(self):
logfile = ROOT_DIR + '/test/resources/File.log'
chrom_gen = ChromosomeGenerator(logfile, 0, '\n', ["'[\w\d\$\-:,\./_ ><\|]*'"])
template1 = ["Message", "sent", "by", "*", ",", "at", "port", "*"]
template2 = ["generating", "reading", "files"]
t1 = Template(template1)
t2 = Template(template2)
ch = Chromosome({})
ch.add_template(t1)
ch.add_template(t2)
# update matched info
compute_matched_lines(chrom_gen.messages, t1)
print(t1.matched_lines)
compute_matched_lines(chrom_gen.messages, t2)
print(t2.matched_lines)
ch.coverage = 9.0 / 15
obj = Objective2D(chrom_gen)
scores = obj.compute_objective(ch)
self.assertEqual(scores[0], (((6/8)+1)/2))
self.assertEqual(scores[1], (8/7)/2)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
unittest.TextTestRunner(verbosity=2).run(suite) | 0.452294 | 0.362405 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_boston(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regresssion():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
# The actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
# This test checks that the column shuffling logic has the same behavior
# both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
# Stich an aribtrary index to the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
# Now check that importances computed on dataframe matche the values
# of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
assert X.nbytes > 1e6 # trigger joblib memmaping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
# permutating feature should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances) | venv/Lib/site-packages/sklearn/inspection/tests/test_permutation_importance.py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
    # Make sure that feature highly correlated to the target have a higher
    # importance
    # NOTE(review): load_boston is deprecated/removed in recent scikit-learn
    # releases; this test assumes an older version where it still imports.
    rng = np.random.RandomState(42)
    n_repeats = 5
    X, y = load_boston(return_X_y=True)
    # Append a near-copy of the target as an extra (leaky) feature column.
    y_with_little_noise = (
        y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
    X = np.hstack([X, y_with_little_noise])
    clf = RandomForestRegressor(n_estimators=10, random_state=42)
    clf.fit(X, y)
    result = permutation_importance(clf, X, y, n_repeats=n_repeats,
                                    random_state=rng, n_jobs=n_jobs)
    # One importance sample per feature per shuffle repetition.
    assert result.importances.shape == (X.shape[1], n_repeats)
    # the correlated feature with y was added as the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] >
                  result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
    pd = pytest.importorskip("pandas")
    # Make sure that feature highly correlated to the target have a higher
    # importance
    rng = np.random.RandomState(42)
    n_repeats = 5
    dataset = load_iris()
    X, y = dataset.data, dataset.target
    # Near-copy of the target, used as an artificial leaky feature.
    y_with_little_noise = (
        y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
    # Adds feature correlated with y as the last column
    X = pd.DataFrame(X, columns=dataset.feature_names)
    X['correlated_feature'] = y_with_little_noise
    clf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf.fit(X, y)
    result = permutation_importance(clf, X, y, n_repeats=n_repeats,
                                    random_state=rng, n_jobs=n_jobs)
    # One importance sample per feature per shuffle repetition.
    assert result.importances.shape == (X.shape[1], n_repeats)
    # the correlated feature with y was added as the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
    rng = np.random.RandomState(42)
    n_repeats = 4
    # Last column is correlated with y
    # (the NaN exercises the SimpleImputer step of the pipeline)
    X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
    y = np.array([0, 1, 0, 1])
    clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
    clf.fit(X, y)
    result = permutation_importance(clf, X, y, n_repeats=n_repeats,
                                    random_state=rng)
    assert result.importances.shape == (X.shape[1], n_repeats)
    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
    # use another random state
    rng = np.random.RandomState(0)
    result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
                                     random_state=rng)
    assert result2.importances.shape == (X.shape[1], n_repeats)
    # A different seed yields different permutations, hence different raw
    # importance samples.
    assert not np.allclose(result.importances, result2.importances)
    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(42)
    n_repeats = 5
    # Last column is correlated with y
    # (numeric column has a NaN for the imputer; string column exercises the
    # OneHotEncoder branch of the ColumnTransformer)
    X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
                      'col2': ['a', 'b', 'a', 'b']})
    y = np.array([0, 1, 0, 1])
    num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
    preprocess = ColumnTransformer([
        ('num', num_preprocess, ['col1']),
        ('cat', OneHotEncoder(), ['col2'])
    ])
    clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
    clf.fit(X, y)
    result = permutation_importance(clf, X, y, n_repeats=n_repeats,
                                    random_state=rng)
    assert result.importances.shape == (X.shape[1], n_repeats)
    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regresssion():
    # (function name typo is kept on purpose: renaming would change the
    # pytest test id, i.e. the block's external interface)
    X, y = make_regression(n_samples=500, n_features=10, random_state=0)
    X, y = scale(X), scale(y)
    model = LinearRegression().fit(X, y)
    importances = permutation_importance(
        model, X, y, n_repeats=50, scoring='neg_mean_squared_error')
    # For standardized data scored with MSE, the expected importance of each
    # feature has the closed form 2 * coef**2.
    assert_allclose(2 * model.coef_ ** 2, importances.importances_mean,
                    rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
    # regression test to make sure that sequential and parallel calls will
    # output the same results.
    X, y = make_regression(n_samples=500, n_features=10, random_state=0)
    lr = LinearRegression().fit(X, y)
    importance_sequential = permutation_importance(
        lr, X, y, n_repeats=5, random_state=0, n_jobs=1
    )
    # First check that the problem is structured enough and that the model is
    # complex enough to not yield trivial, constant importances:
    imp_min = importance_sequential['importances'].min()
    imp_max = importance_sequential['importances'].max()
    assert imp_max - imp_min > 0.3
    # Then actually check that parallelism does not impact the results
    # either with shared memory (threading) or without isolated memory
    # via process-based parallelism using the default backend
    # ('loky' or 'multiprocessing') depending on the joblib version:
    # process-based parallelism (by default):
    importance_processes = permutation_importance(
        lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
    assert_allclose(
        importance_processes['importances'],
        importance_sequential['importances']
    )
    # thread-based parallelism:
    with parallel_backend("threading"):
        importance_threading = permutation_importance(
            lr, X, y, n_repeats=5, random_state=0, n_jobs=2
        )
    assert_allclose(
        importance_threading['importances'],
        importance_sequential['importances']
    )
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
    # This test checks that the column shuffling logic has the same behavior
    # both a dataframe and a simple numpy array.
    pd = pytest.importorskip('pandas')
    # regression test to make sure that sequential and parallel calls will
    # output the same results.
    X, y = make_regression(n_samples=100, n_features=5, random_state=0)
    X_df = pd.DataFrame(X)
    # Add a categorical feature that is statistically linked to y:
    binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
    cat_column = binner.fit_transform(y.reshape(-1, 1))
    # Concatenate the extra column to the numpy array: integers will be
    # cast to float values
    X = np.hstack([X, cat_column])
    assert X.dtype.kind == "f"
    # Insert extra column as a non-numpy-native dtype (while keeping backward
    # compat for old pandas versions):
    if hasattr(pd, "Categorical"):
        cat_column = pd.Categorical(cat_column.ravel())
    else:
        cat_column = cat_column.ravel()
    new_col_idx = len(X_df.columns)
    X_df[new_col_idx] = cat_column
    assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index to the dataframe:
    X_df.index = np.arange(len(X_df)).astype(str)
    rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
    rf.fit(X, y)
    n_repeats = 3
    importance_array = permutation_importance(
        rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
    )
    # First check that the problem is structured enough and that the model is
    # complex enough to not yield trivial, constant importances:
    imp_min = importance_array['importances'].min()
    imp_max = importance_array['importances'].max()
    assert imp_max - imp_min > 0.3
    # Now check that importances computed on dataframe match the values
    # of those computed on the array with the same data.
    importance_dataframe = permutation_importance(
        rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
    )
    assert_allclose(
        importance_array['importances'],
        importance_dataframe['importances']
    )
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
    # Smoke, non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/15810
    n_samples, n_features = int(5e4), 4
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)
    assert X.nbytes > 1e6  # trigger joblib memmaping
    X = _convert_container(X, input_type)
    clf = DummyClassifier(strategy='prior').fit(X, y)
    # Actual smoke test: should not raise any error:
    n_repeats = 5
    r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
    # Auxiliary check: DummyClassifier is feature independent:
    # permutating feature should not change the predictions
    expected_importances = np.zeros((n_features, n_repeats))
    assert_allclose(expected_importances, r.importances)
import os
import subprocess as sp
import re
import sys
import threading
import logging
import signal
from math import ceil
from queue import Queue, Empty
from gevent import idle
from config import config
from imageProcess import clean
from procedure import genProcess
from progress import Node, initialETA
from worker import context, begin
from runSlomo import RefTime as SlomoRefs
from videoSR import RefTime as VSRRefs
from ESTRNN import para as ESTRNNpara
log = logging.getLogger('Moe')
ffmpegPath = os.path.realpath('ffmpeg/bin/ffmpeg') # require full path to spawn in shell
# Shared queue that daemon reader threads push subprocess output lines into.
qOut = Queue(256)
stepVideo = [dict(op='buffer', bitDepth=16)]
pix_fmt = 'bgr48le'
# 6 bytes per pixel: 3 channels x 16 bits (matches bgr48le above).
pixBytes = 6
bufsize = 10 ** 8
isWindows = sys.platform[:3] == 'win'
# Patterns for scraping stream info and progress out of ffmpeg's stderr.
reMatchInfo = re.compile(r'Stream #.*: Video:')
reSearchInfo = re.compile(r',[\s]*([\d]+)x([\d]+)[\s]*.+,[\s]*([.\d]+)[\s]*(fps|tbr)')
reMatchFrame = re.compile(r'frame=')
reSearchFrame = re.compile(r'frame=[\s]*([\d]+) ')
reMatchAudio = re.compile(r'Stream #0:1')
reMatchOutput = re.compile(r'Output #0,')
# Container formats written as-is; anything else is remuxed to .mkv (fixExt).
formats = {'.mp4', '.ts', '.mkv'}
creationflag = sp.CREATE_NEW_PROCESS_GROUP if isWindows else 0
# Signal used to ask ffmpeg to stop gracefully, per platform.
sigint = signal.CTRL_BREAK_EVENT if isWindows else signal.SIGINT
# Reference frames each temporal model needs before/after the current frame.
lookback = dict(slomo=SlomoRefs >> 1, VSR=VSRRefs >> 1, demob=ESTRNNpara.past_frames)
lookahead = dict(slomo=(SlomoRefs - 1) >> 1, VSR=(VSRRefs - 1) >> 1, demob=ESTRNNpara.future_frames)
resizeOp = {'SR', 'resize', 'VSR'}
padOp = {'VSR', 'demob'}
popen = lambda command: sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=bufsize, creationflags=creationflag)
popenText = lambda command: sp.Popen(command, stderr=sp.PIPE, encoding='utf_8', errors='ignore')
# insert1(('a', '.mkv'), '-v') -> 'a-v.mkv' (used to suffix file stems).
insert1 = lambda t, s: ''.join((t[0], s, *t[1:]))
splitext = lambda p: os.path.splitext(p)
fixExt = lambda t: ''.join((*t[:-1], t[-1] if t[-1] in formats else '.mkv'))
suffix = lambda p, s: insert1(splitext(p), s)
# Remove l[start:end] from a list (used to strip optional ffmpeg arguments).
clipList = lambda l, start, end: l[:start] + l[end:]
# Indices 15..24 match the second-input/mapping options of the commandVideo
# list built in prepare(); keep in sync if that list changes.
commandVideoSkip = lambda command: clipList(command, 15, 25)
def removeFile(path):
  """Best-effort delete: a missing file is fine, permission errors are logged."""
  try:
    os.remove(path)
  except OSError as err:
    if isinstance(err, PermissionError):
      log.error(str(err))
    elif not isinstance(err, FileNotFoundError):
      raise
def getVideoInfo(videoPath, by, width, height, frameRate):
  """Probe *videoPath* with ffmpeg and return
  (width, height, frameRate, totalFrames, videoOnly).

  Caller-supplied non-zero width/height/frameRate are kept; missing values
  are parsed from ffmpeg's stderr.  *by* selects the input mode: 'cmd'
  keeps the lavfi demuxer options, and a falsy value also makes ffmpeg scan
  the whole stream so the frame count can be read.  videoOnly is False when
  a second (audio) stream is seen.  Raises RuntimeError when the required
  information cannot be found.
  """
  commandIn = [
    ffmpegPath,
    '-hide_banner',
    '-t', '1',
    '-f', 'lavfi',
    '-i', videoPath,
    '-map', '0:v:0',
    '-c', 'copy',
    '-f', 'null',
    '-'
  ]
  matchInfo = not (width and height and frameRate)
  matchFrame = not by  # need to count the frames ourselves
  matchOutput = True
  error = RuntimeError('Video info not found')
  videoOnly = True
  if by != 'cmd':
    commandIn = clipList(commandIn, 4, 6)  # drop '-f lavfi'
  if matchFrame:
    commandIn = clipList(commandIn, 2, 4)  # drop '-t 1' so every frame is counted
  # Bug fix: spawn the process *before* entering the try block.  Previously
  # procIn was assigned inside try, so a failed spawn (e.g. missing ffmpeg
  # binary) raised NameError from the finally clause, masking the real error.
  procIn = popenText(commandIn)
  totalFrames = 0
  try:
    while matchInfo or matchOutput or matchFrame:
      line = procIn.stderr.readline()
      if type(line) != str:
        line = str(line, 'utf-8', errors='ignore')
      sys.stdout.write(line)
      if not line:
        break  # ffmpeg exited
      line = line.lstrip()
      if reMatchOutput.match(line):
        matchOutput = False
      elif reMatchAudio.match(line):
        videoOnly = False
      if matchInfo and reMatchInfo.match(line):
        try:
          videoInfo = reSearchInfo.search(line).groups()
          if not width:
            width = int(videoInfo[0])
          if not height:
            height = int(videoInfo[1])
          if not frameRate:
            frameRate = float(videoInfo[2])
        except Exception:
          log.error(line)
          raise error
        matchInfo = False
      if matchFrame and reMatchFrame.match(line):
        # Progress lines repeat; the last successful parse wins.
        try:
          totalFrames = int(reSearchFrame.search(line).groups()[0])
        except Exception:
          log.error(line)
    procIn.stderr.flush()
    procIn.stderr.close()
  finally:
    procIn.terminate()
  if matchInfo or (matchFrame and not totalFrames):
    raise error
  log.info('Info of video {}: {}x{}@{}fps, {} frames'.format(videoPath, width, height, frameRate, totalFrames))
  return width, height, frameRate, totalFrames, videoOnly
def enqueueOutput(out, queue):
  """Drain *out* line by line into *queue*; errors from a dying pipe are ignored."""
  try:
    while True:
      line = out.readline()
      if line == b'':
        break
      queue.put(line)
    out.flush()
  except Exception:
    pass
def createEnqueueThread(pipe, *args):
  """Start a daemon thread that forwards *pipe* output into the shared qOut queue."""
  worker = threading.Thread(
    target=enqueueOutput, args=(pipe, qOut, *args), daemon=True)
  worker.start()
def readSubprocess(q):
  """Flush every pending line in *q* to stdout, decoding bytes as UTF-8."""
  while True:
    try:
      line = q.get_nowait()
    except Empty:
      return
    if not isinstance(line, str):
      line = str(line, encoding='utf_8', errors='replace')
    sys.stdout.write(line)
def prepare(video, by, steps):
  """Parse the job *steps* and build everything SR_vid needs.

  steps layout: [decode options, frame range, processing steps..., encode
  options].  Returns (outputPath, process, start, stop, ahead, root,
  commandIn, commandVideo, commandOut, slomos, sizes, width, height,
  frameRate).

  NOTE(review): the reference-frame bookkeeping below mirrors the padding
  logic at the end of SR_vid; keep the two in sync.
  """
  optEncode = steps[-1]
  encodec = optEncode.get('codec', config.defaultEncodec) # pylint: disable=E1101
  optDecode = steps[0]
  decodec = optDecode.get('codec', config.defaultDecodec) # pylint: disable=E1101
  optRange = steps[1]
  start = int(optRange.get('start', 0))
  outDir = config.outDir # pylint: disable=E1101
  procSteps = stepVideo + list(steps[2:-1])
  diagnose = optEncode.get('diagnose', {})
  bench = diagnose.get('bench', False)
  clear = diagnose.get('clear', False)
  process, nodes = genProcess(procSteps)
  traceDetail = config.progressDetail or bench # pylint: disable=E1101
  root = begin(Node({'op': 'video'}, 1, 2, 0), nodes, traceDetail, bench, clear)
  context.root = root
  slomos = [step for step in procSteps if step['op'] == 'slomo']
  refs, ahead = 0, 0
  if start < 0:
    start = 0
  # Walk the steps back-to-front accumulating how many reference frames
  # (refs) and look-ahead frames (ahead) the temporal models require.
  for i in range(len(procSteps) - 1, -1, -1): # gather some reference frames before start point for video models
    step = procSteps[i]
    if step['op'] == 'slomo':
      step['opt'].outStart = -refs % step['sf'] if refs else 1
      step['opt'].outEnd = -(-ahead % step['sf'])
      refs = max(ceil(refs / step['sf']), lookback[step['op']])
      ahead = max(ceil(ahead / step['sf']), lookahead[step['op']])
    elif step['op'] in padOp:
      step['opt'].start = 0
      step['opt'].end = 0
      refs += lookback[step['op']]
      ahead += lookahead[step['op']]
  if start < refs: # no enough reference frames
    # The clip begins too close to the start of the video: tell the models
    # to pad instead of consuming real frames that do not exist.
    arefs = start
    for step in procSteps:
      if arefs >= refs:
        break
      if step['op'] == 'slomo':
        refs = refs * step['sf'] - step['opt'].outStart
        step['opt'].outStart = 0
        arefs = arefs * step['sf']
      elif step['op'] in padOp:
        step['opt'].start = min(refs - arefs, lookback[step['op']])
        refs -= step['opt'].start
    start = 0
  else:
    # Shift the start point back so the reference frames get decoded too.
    start -= refs
  stop = int(optRange.get('stop', -1))
  if stop <= start:
    stop = -1
  root.total = -1 if stop < 0 else stop - start
  outputPath = fixExt(splitext(optEncode.get('file', '') or outDir + '/' + config.getPath()))
  # dataPath holds the demuxed non-video (audio/subtitle) tracks.
  dataPath = suffix(outputPath, '-a')
  commandIn = [
    ffmpegPath,
    '-hide_banner',
    '-f', 'lavfi',
    '-i', video,
    '-vn',
    '-c', 'copy',
    '-y',
    dataPath,
    '-map', '0:v',
    '-f', 'rawvideo',
    '-pix_fmt', pix_fmt]
  if by != 'cmd':
    commandIn = clipList(commandIn, 2, 4)  # drop '-f lavfi'
  if len(decodec):
    commandIn.extend(decodec.split(' '))
  commandIn.append('-')
  metadata = ['-metadata', 'service_provider="MoePhoto {}"'.format(config.version)] # pylint: disable=E1101
  # NOTE(review): setupInfo and commandVideoSkip index into this list by
  # position (8 = '-s' value, 10 = '-r' value, 15..24 = second input and
  # mappings, 16 = data input path); keep the layout stable.
  commandVideo = [
    ffmpegPath,
    '-hide_banner', '-y',
    '-f', 'rawvideo',
    '-pix_fmt', pix_fmt,
    '-s', '',
    '-r', '',
    '-thread_queue_size', '64',
    '-i', '-',
    '-i', dataPath,
    '-map', '0:v',
    '-map', '1?',
    '-map', '-1:v',
    '-c:1', 'copy',
    *metadata,
    '-c:v:0'
  ] + encodec.split(' ') + ['']
  commandOut = None
  if by:
    # Two-pass output: encode video to a temp file, then merge with dataPath.
    commandVideo[-1] = suffix(outputPath, '-v')
    commandOut = [
      ffmpegPath,
      '-hide_banner', '-y',
      '-i', commandVideo[-1],
      '-i', dataPath,
      '-map', '0:v',
      '-map', '1?',
      '-c:0', 'copy',
      '-c:1', 'copy',
      *metadata,
      outputPath
    ]
  else:
    commandVideo[16] = video  # take the extra tracks straight from the source
  frameRate = optEncode.get('frameRate', 0)
  width = optDecode.get('width', 0)
  height = optDecode.get('height', 0)
  sizes = [step for step in procSteps if step['op'] in resizeOp]
  return outputPath, process, start, stop, ahead, root, commandIn, commandVideo, commandOut, slomos, sizes, width, height, frameRate
def setupInfo(by, outputPath, root, commandIn, commandVideo, commandOut, slomos, sizes, start, width, height, frameRate, totalFrames, videoOnly):
  """Finalize the ffmpeg commands once the real video info is known.

  Fills in the output size/frame-rate placeholders of commandVideo, drops
  the audio-extraction part of commandIn when there is nothing to merge,
  and primes the progress tree.  Returns the adjusted
  (commandIn, commandVideo, commandOut).
  """
  if root.total < 0 and totalFrames > 0:
    root.total = totalFrames - start
  if frameRate:
    # Every slomo step multiplies the output frame rate by its factor.
    for opt in slomos:
      frameRate *= opt['sf']
  # Propagate each resizing step to get the final output dimensions.
  outWidth, outHeight = (width, height)
  for opt in sizes:
    if opt['op'] == 'SR':
      outWidth *= opt['scale']
      outHeight *= opt['scale']
    elif opt['op'] == 'VSR':
      outWidth *= 4
      outHeight *= 4
    else: # resize
      outWidth = round(outWidth * opt['scaleW']) if 'scaleW' in opt else opt['width']
      outHeight = round(outHeight * opt['scaleH']) if 'scaleH' in opt else opt['height']
  # Positions 8 and 10 are the '-s' and '-r' placeholders from prepare().
  commandVideo[8] = f'{outWidth}x{outHeight}'
  commandVideo[10] = str(frameRate)
  # When trimming from a nonzero start the extra tracks would be out of
  # sync, so treat the job as video-only.
  videoOnly |= start > 0
  if videoOnly or by:
    commandVideo = commandVideoSkip(commandVideo)
  if videoOnly or not by:
    # Single-pass encode straight to the final path; no merge step needed.
    commandVideo[-1] = outputPath
    i = commandIn.index('-vn')
    commandIn = clipList(commandIn, i, i + 5)  # drop '-vn -c copy -y dataPath'
    commandOut = None
  root.multipleLoad(width * height * 3)
  initialETA(root)
  root.reset().trace(0)
  return commandIn, commandVideo, commandOut
def cleanAV(command, path):
  """After a merge attempt, drop the temporary track files.

  Returns the merged *path* when it exists on disk; otherwise falls back to
  the intermediate video-only file so the caller still gets a usable result.
  When there was no merge command, *path* is returned unchanged.
  """
  if not command:
    return path
  try:
    merged = os.stat(path)
  except Exception:
    merged = False
  removeFile(command[6])  # temporary non-video track file
  partialVideo = command[4]  # video-only intermediate file
  if not merged:
    return partialVideo
  removeFile(partialVideo)
  return path
def mergeAV(command):
  """Run the ffmpeg merge *command*.

  Returns (process, err) for a real merge, or (0, 0) when *command* is
  falsy and there is nothing to merge.
  """
  if not command:
    return 0, 0
  procMerge = popenText(command)
  createEnqueueThread(procMerge.stderr)
  err, msg = procMerge.communicate()
  sys.stdout.write(msg)
  return procMerge, err
def SR_vid(video, by, *steps):
  """Run the whole video pipeline: decode with ffmpeg, process frame by
  frame, re-encode, and optionally merge back the non-video tracks.

  Returns (outputPath, number of frames actually processed).
  """
  def p(raw_image=None):
    # Feed one raw frame (or a flush request when None) through the
    # processing pipeline and write any produced frames to the encoder.
    bufs = process((raw_image, height, width))
    if (not bufs is None) and len(bufs):
      for buffer in bufs:
        if buffer:
          procOut.stdin.write(buffer)
    if raw_image:
      root.trace()
    return 0 if bufs is None else len(bufs)
  context.stopFlag.clear()
  outputPath, process, *args = prepare(video, by, steps)
  start, stop, refs, root = args[:4]
  width, height, *more = getVideoInfo(video, by, *args[-3:])
  root.callback(root, dict(shape=[height, width], fps=more[0]))
  commandIn, commandVideo, commandOut = setupInfo(by, outputPath, *args[3:9], start, width, height, *more)
  procIn = popen(commandIn)
  procOut = sp.Popen(commandVideo, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=0)
  procMerge = 0
  err = 0
  try:
    createEnqueueThread(procOut.stdout)
    createEnqueueThread(procIn.stderr)
    createEnqueueThread(procOut.stderr)
    i = 0
    frameBytes = width * height * pixBytes # read 1 frame
    while (stop < 0 or i <= stop + refs) and not context.stopFlag.is_set():
      raw_image = procIn.stdout.read(frameBytes)
      if len(raw_image) == 0:
        break  # decoder reached end of stream
      readSubprocess(qOut)
      if i >= start:
        p(raw_image)
      elif (i + 1) % 10 == 0:
        # Still skipping up to the requested start frame; report progress.
        root.callback(root, dict(skip=i + 1))
      i += 1
      idle()
    # NOTE(review): if stopFlag is set before the first iteration,
    # raw_image below is unbound and raises NameError — confirm whether
    # that path is reachable in practice.
    os.kill(procIn.pid, sigint)
    if len(raw_image) == 0: # tell VSR to pad frames
      # End of stream: mirror prepare()'s reference-frame bookkeeping so
      # the temporal models pad their trailing look-ahead frames.
      arefs = 0 if stop <= 0 or i < stop else i - stop
      for step in steps:
        if arefs >= refs:
          break
        if step['op'] == 'slomo':
          refs = refs * step['sf'] + step['opt'].outEnd # outEnd is negative
          step['opt'].outEnd = 0
          arefs = arefs * step['sf']
        elif step['op'] in padOp:
          step['opt'].end = -min(refs - arefs, lookahead[step['op']])
          refs += step['opt'].end
    p()  # flush the pipeline
    procOut.communicate(timeout=300)
    procIn.terminate()
    readSubprocess(qOut)
    procMerge, err = mergeAV(commandOut)
  finally:
    log.info('Video processing end at frame #{}.'.format(i - refs))
    procIn.terminate()
    procOut.terminate()
    if procMerge:
      procMerge.terminate()
    clean()
    try:
      if not by:
        removeFile(video)
    except Exception:
      log.warning('Timed out waiting ffmpeg to terminate, need to remove {} manually.'.format(video))
    if err:
      log.warning('Unable to merge video and other tracks with exit code {}.'.format(err))
    else:
      outputPath = cleanAV(commandOut, outputPath)
    readSubprocess(qOut)
  return outputPath, i - refs
import subprocess as sp
import re
import sys
import threading
import logging
import signal
from math import ceil
from queue import Queue, Empty
from gevent import idle
from config import config
from imageProcess import clean
from procedure import genProcess
from progress import Node, initialETA
from worker import context, begin
from runSlomo import RefTime as SlomoRefs
from videoSR import RefTime as VSRRefs
from ESTRNN import para as ESTRNNpara
log = logging.getLogger('Moe')
ffmpegPath = os.path.realpath('ffmpeg/bin/ffmpeg') # require full path to spawn in shell
qOut = Queue(256)
stepVideo = [dict(op='buffer', bitDepth=16)]
pix_fmt = 'bgr48le'
pixBytes = 6
bufsize = 10 ** 8
isWindows = sys.platform[:3] == 'win'
reMatchInfo = re.compile(r'Stream #.*: Video:')
reSearchInfo = re.compile(r',[\s]*([\d]+)x([\d]+)[\s]*.+,[\s]*([.\d]+)[\s]*(fps|tbr)')
reMatchFrame = re.compile(r'frame=')
reSearchFrame = re.compile(r'frame=[\s]*([\d]+) ')
reMatchAudio = re.compile(r'Stream #0:1')
reMatchOutput = re.compile(r'Output #0,')
formats = {'.mp4', '.ts', '.mkv'}
creationflag = sp.CREATE_NEW_PROCESS_GROUP if isWindows else 0
sigint = signal.CTRL_BREAK_EVENT if isWindows else signal.SIGINT
lookback = dict(slomo=SlomoRefs >> 1, VSR=VSRRefs >> 1, demob=ESTRNNpara.past_frames)
lookahead = dict(slomo=(SlomoRefs - 1) >> 1, VSR=(VSRRefs - 1) >> 1, demob=ESTRNNpara.future_frames)
resizeOp = {'SR', 'resize', 'VSR'}
padOp = {'VSR', 'demob'}
popen = lambda command: sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=bufsize, creationflags=creationflag)
popenText = lambda command: sp.Popen(command, stderr=sp.PIPE, encoding='utf_8', errors='ignore')
insert1 = lambda t, s: ''.join((t[0], s, *t[1:]))
splitext = lambda p: os.path.splitext(p)
fixExt = lambda t: ''.join((*t[:-1], t[-1] if t[-1] in formats else '.mkv'))
suffix = lambda p, s: insert1(splitext(p), s)
clipList = lambda l, start, end: l[:start] + l[end:]
commandVideoSkip = lambda command: clipList(command, 15, 25)
def removeFile(path):
try:
os.remove(path)
except FileNotFoundError: pass
except PermissionError as e:
log.error(str(e))
def getVideoInfo(videoPath, by, width, height, frameRate):
commandIn = [
ffmpegPath,
'-hide_banner',
'-t', '1',
'-f', 'lavfi',
'-i', videoPath,
'-map', '0:v:0',
'-c', 'copy',
'-f', 'null',
'-'
]
matchInfo = not (width and height and frameRate)
matchFrame = not by
matchOutput = True
error = RuntimeError('Video info not found')
videoOnly = True
if by != 'cmd':
commandIn = clipList(commandIn, 4, 6)
if matchFrame:
commandIn = clipList(commandIn, 2, 4)
try:
procIn = popenText(commandIn)
totalFrames = 0
while matchInfo or matchOutput or matchFrame:
line = procIn.stderr.readline()
if type(line) != str:
line = str(line, 'utf-8', errors='ignore')
sys.stdout.write(line)
if not line:
break
line = line.lstrip()
if reMatchOutput.match(line):
matchOutput = False
elif reMatchAudio.match(line):
videoOnly = False
if matchInfo and reMatchInfo.match(line):
try:
videoInfo = reSearchInfo.search(line).groups()
if not width:
width = int(videoInfo[0])
if not height:
height = int(videoInfo[1])
if not frameRate:
frameRate = float(videoInfo[2])
except Exception:
log.error(line)
raise error
matchInfo = False
if matchFrame and reMatchFrame.match(line):
try:
totalFrames = int(reSearchFrame.search(line).groups()[0])
except Exception:
log.error(line)
procIn.stderr.flush()
procIn.stderr.close()
finally:
procIn.terminate()
if matchInfo or (matchFrame and not totalFrames):
raise error
log.info('Info of video {}: {}x{}@{}fps, {} frames'.format(videoPath, width, height, frameRate, totalFrames))
return width, height, frameRate, totalFrames, videoOnly
def enqueueOutput(out, queue):
try:
for line in iter(out.readline, b''):
queue.put(line)
out.flush()
except Exception: pass
def createEnqueueThread(pipe, *args):
t = threading.Thread(target=enqueueOutput, args=(pipe, qOut, *args))
t.daemon = True # thread dies with the program
t.start()
def readSubprocess(q):
while True:
try:
line = q.get_nowait()
if not type(line) == str:
line = str(line, encoding='utf_8', errors='replace')
except Empty:
break
else:
sys.stdout.write(line)
def prepare(video, by, steps):
optEncode = steps[-1]
encodec = optEncode.get('codec', config.defaultEncodec) # pylint: disable=E1101
optDecode = steps[0]
decodec = optDecode.get('codec', config.defaultDecodec) # pylint: disable=E1101
optRange = steps[1]
start = int(optRange.get('start', 0))
outDir = config.outDir # pylint: disable=E1101
procSteps = stepVideo + list(steps[2:-1])
diagnose = optEncode.get('diagnose', {})
bench = diagnose.get('bench', False)
clear = diagnose.get('clear', False)
process, nodes = genProcess(procSteps)
traceDetail = config.progressDetail or bench # pylint: disable=E1101
root = begin(Node({'op': 'video'}, 1, 2, 0), nodes, traceDetail, bench, clear)
context.root = root
slomos = [step for step in procSteps if step['op'] == 'slomo']
refs, ahead = 0, 0
if start < 0:
start = 0
for i in range(len(procSteps) - 1, -1, -1): # gather some reference frames before start point for video models
step = procSteps[i]
if step['op'] == 'slomo':
step['opt'].outStart = -refs % step['sf'] if refs else 1
step['opt'].outEnd = -(-ahead % step['sf'])
refs = max(ceil(refs / step['sf']), lookback[step['op']])
ahead = max(ceil(ahead / step['sf']), lookahead[step['op']])
elif step['op'] in padOp:
step['opt'].start = 0
step['opt'].end = 0
refs += lookback[step['op']]
ahead += lookahead[step['op']]
if start < refs: # no enough reference frames
arefs = start
for step in procSteps:
if arefs >= refs:
break
if step['op'] == 'slomo':
refs = refs * step['sf'] - step['opt'].outStart
step['opt'].outStart = 0
arefs = arefs * step['sf']
elif step['op'] in padOp:
step['opt'].start = min(refs - arefs, lookback[step['op']])
refs -= step['opt'].start
start = 0
else:
start -= refs
stop = int(optRange.get('stop', -1))
if stop <= start:
stop = -1
root.total = -1 if stop < 0 else stop - start
outputPath = fixExt(splitext(optEncode.get('file', '') or outDir + '/' + config.getPath()))
dataPath = suffix(outputPath, '-a')
commandIn = [
ffmpegPath,
'-hide_banner',
'-f', 'lavfi',
'-i', video,
'-vn',
'-c', 'copy',
'-y',
dataPath,
'-map', '0:v',
'-f', 'rawvideo',
'-pix_fmt', pix_fmt]
if by != 'cmd':
commandIn = clipList(commandIn, 2, 4)
if len(decodec):
commandIn.extend(decodec.split(' '))
commandIn.append('-')
metadata = ['-metadata', 'service_provider="MoePhoto {}"'.format(config.version)] # pylint: disable=E1101
commandVideo = [
ffmpegPath,
'-hide_banner', '-y',
'-f', 'rawvideo',
'-pix_fmt', pix_fmt,
'-s', '',
'-r', '',
'-thread_queue_size', '64',
'-i', '-',
'-i', dataPath,
'-map', '0:v',
'-map', '1?',
'-map', '-1:v',
'-c:1', 'copy',
*metadata,
'-c:v:0'
] + encodec.split(' ') + ['']
commandOut = None
if by:
commandVideo[-1] = suffix(outputPath, '-v')
commandOut = [
ffmpegPath,
'-hide_banner', '-y',
'-i', commandVideo[-1],
'-i', dataPath,
'-map', '0:v',
'-map', '1?',
'-c:0', 'copy',
'-c:1', 'copy',
*metadata,
outputPath
]
else:
commandVideo[16] = video
frameRate = optEncode.get('frameRate', 0)
width = optDecode.get('width', 0)
height = optDecode.get('height', 0)
sizes = [step for step in procSteps if step['op'] in resizeOp]
return outputPath, process, start, stop, ahead, root, commandIn, commandVideo, commandOut, slomos, sizes, width, height, frameRate
def setupInfo(by, outputPath, root, commandIn, commandVideo, commandOut, slomos, sizes, start, width, height, frameRate, totalFrames, videoOnly):
if root.total < 0 and totalFrames > 0:
root.total = totalFrames - start
if frameRate:
for opt in slomos:
frameRate *= opt['sf']
outWidth, outHeight = (width, height)
for opt in sizes:
if opt['op'] == 'SR':
outWidth *= opt['scale']
outHeight *= opt['scale']
elif opt['op'] == 'VSR':
outWidth *= 4
outHeight *= 4
else: # resize
outWidth = round(outWidth * opt['scaleW']) if 'scaleW' in opt else opt['width']
outHeight = round(outHeight * opt['scaleH']) if 'scaleH' in opt else opt['height']
commandVideo[8] = f'{outWidth}x{outHeight}'
commandVideo[10] = str(frameRate)
videoOnly |= start > 0
if videoOnly or by:
commandVideo = commandVideoSkip(commandVideo)
if videoOnly or not by:
commandVideo[-1] = outputPath
i = commandIn.index('-vn')
commandIn = clipList(commandIn, i, i + 5)
commandOut = None
root.multipleLoad(width * height * 3)
initialETA(root)
root.reset().trace(0)
return commandIn, commandVideo, commandOut
def cleanAV(command, path):
if command:
try:
stat = os.stat(path)
except Exception:
stat = False
removeFile(command[6])
video = command[4]
if stat:
removeFile(video)
else:
return video
return path
def mergeAV(command):
if command:
err = True
procMerge = popenText(command)
createEnqueueThread(procMerge.stderr)
err, msg = procMerge.communicate()
sys.stdout.write(msg)
return procMerge, err
else:
return 0, 0
def SR_vid(video, by, *steps):
def p(raw_image=None):
bufs = process((raw_image, height, width))
if (not bufs is None) and len(bufs):
for buffer in bufs:
if buffer:
procOut.stdin.write(buffer)
if raw_image:
root.trace()
return 0 if bufs is None else len(bufs)
context.stopFlag.clear()
outputPath, process, *args = prepare(video, by, steps)
start, stop, refs, root = args[:4]
width, height, *more = getVideoInfo(video, by, *args[-3:])
root.callback(root, dict(shape=[height, width], fps=more[0]))
commandIn, commandVideo, commandOut = setupInfo(by, outputPath, *args[3:9], start, width, height, *more)
procIn = popen(commandIn)
procOut = sp.Popen(commandVideo, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=0)
procMerge = 0
err = 0
try:
createEnqueueThread(procOut.stdout)
createEnqueueThread(procIn.stderr)
createEnqueueThread(procOut.stderr)
i = 0
frameBytes = width * height * pixBytes # read 1 frame
while (stop < 0 or i <= stop + refs) and not context.stopFlag.is_set():
raw_image = procIn.stdout.read(frameBytes)
if len(raw_image) == 0:
break
readSubprocess(qOut)
if i >= start:
p(raw_image)
elif (i + 1) % 10 == 0:
root.callback(root, dict(skip=i + 1))
i += 1
idle()
os.kill(procIn.pid, sigint)
if len(raw_image) == 0: # tell VSR to pad frames
arefs = 0 if stop <= 0 or i < stop else i - stop
for step in steps:
if arefs >= refs:
break
if step['op'] == 'slomo':
refs = refs * step['sf'] + step['opt'].outEnd # outEnd is negative
step['opt'].outEnd = 0
arefs = arefs * step['sf']
elif step['op'] in padOp:
step['opt'].end = -min(refs - arefs, lookahead[step['op']])
refs += step['opt'].end
p()
procOut.communicate(timeout=300)
procIn.terminate()
readSubprocess(qOut)
procMerge, err = mergeAV(commandOut)
finally:
log.info('Video processing end at frame #{}.'.format(i - refs))
procIn.terminate()
procOut.terminate()
if procMerge:
procMerge.terminate()
clean()
try:
if not by:
removeFile(video)
except Exception:
log.warning('Timed out waiting ffmpeg to terminate, need to remove {} manually.'.format(video))
if err:
log.warning('Unable to merge video and other tracks with exit code {}.'.format(err))
else:
outputPath = cleanAV(commandOut, outputPath)
readSubprocess(qOut)
return outputPath, i - refs | 0.147095 | 0.123868 |
import json
import multiprocessing
import os
import sys
import os.path as op
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import pandas as pd
import numpy as np
import pickle
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import filenames
from utils import deps_from_tsv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cpu = torch.device("cpu")
# NOTE(review): this overrides the CUDA selection above and forces all
# computation onto the CPU — confirm this is intentional.
device = cpu
class BatchedDataset(Dataset):
    """Wrap pre-built training tensors so a DataLoader can batch them.

    ``x_train`` and ``y_train`` must agree on their first dimension;
    indexing yields the matching (input, target) pair.
    """

    def __init__(self, x_train, y_train):
        super(BatchedDataset, self).__init__()
        assert x_train.shape[0] == y_train.shape[0]
        self.x_train = x_train
        self.y_train = y_train
        # Number of examples, fixed at construction time.
        self.length = x_train.shape[0]

    def __getitem__(self, index):
        return self.x_train[index], self.y_train[index]

    def __len__(self):
        return self.length
class DECAY_RNN_Model(object):
def input_to_string(self, x_input):
#x_input is the example we want to convert to the string
#x_input should be in the form of 1D list.
example_string = ""
for token in x_input:
if token == 0:
continue
str_tok = self.ints_to_vocab[token]
example_string+=str_tok+" "
return example_string
def demark_testing(self):
X_test=self.X_test
Y_test=self.Y_test
deps_test=self.deps_test
testing_dict={}
assert len(X_test)==len(Y_test) and len(Y_test)==len(deps_test)
for i in (range(len(X_test))):
key = deps_test[i]['n_diff_intervening']
if not key in testing_dict.keys():
testing_dict[key]=[]
testing_dict[key].append((X_test[i], Y_test[i]))
self.testing_dict=testing_dict
serialized_attributes = ['vocab_to_ints', 'ints_to_vocab', 'filename',
'X_train', 'Y_train', 'deps_train',
'X_test', 'Y_test', 'deps_test']
    def __init__(self, filename=None, serialization_dir=None,
                 batch_size=1, embedding_size=50, hidden_dim = 50,
                 maxlen=50, prop_train=0.9, rnn_output_size=10,
                 mode='infreq_pos', vocab_file=filenames.vocab_file,
                 equalize_classes=False, criterion=None, len_after_verb=0,
                 verbose=1, output_filename='default.txt'):
        '''
        filename: TSV file with positive examples, or None if unserializing
        criterion: dependencies that don't meet this criterion are excluded
        (set to None to keep all dependencies)
        verbose: passed to Keras (0 = no, 1 = progress bar, 2 = line per epoch)
        batch_size / embedding_size / hidden_dim / rnn_output_size: model
        hyper-parameters stored for later use by create_model*()
        maxlen: sentences longer than this many tokens are discarded
        prop_train: fraction of examples used for training
        output_filename: suffix of the log files written under logs/
        '''
        self.filename = filename
        self.vocab_file = vocab_file
        self.batch_size = batch_size
        self.embedding_size = embedding_size
        self.hidden_dim = hidden_dim
        self.prop_train = prop_train
        self.mode = mode
        self.rnn_output_size = rnn_output_size
        self.maxlen = maxlen
        self.equalize_classes = equalize_classes
        # Default criterion accepts every dependency.
        self.criterion = (lambda x: True) if criterion is None else criterion
        self.len_after_verb = len_after_verb
        self.verbose = verbose
        self.output_filename = output_filename
        # self.set_serialization_dir(serialization_dir)
def log(self, message):
with open('logs/' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_grad(self, message):
with open('logs/grad_' + self.output_filename, 'a') as file:
file.write(message + '\n')
def log_alpha(self,message):
with open('logs/alpha_' + self.output_filename, 'a') as file:
file.write(message + '\n')
    def pipeline(self, train = True, batched=False, batch_size = 32, shuffle = True, num_workers= 0,
                 load = False, model = '', test_size=7000,
                 train_size=None, model_prefix='__', epochs=20, data_name='Not',
                 activation=False, df_name='_verbose_.pkl', load_data=False,
                 save_data=False):
        """Top-level driver: prepare the data split, build/load a model, then
        either train it or evaluate it.

        train: when True run training; otherwise only evaluate.
        batched: select the batched model/training path vs. the online one.
        load / model: optionally restore a saved model from path *model*.
        load_data: reuse a previously serialized split instead of rebuilding.
        activation: evaluate via results_verbose (dumping to *df_name*).
        """
        self.batched= batched
        # Either reload a previously prepared split or build one from the TSV.
        if (load_data):
            self.load_train_and_test(test_size, data_name)
        else :
            self.log('creating data')
            # Loads up to 10x train_size raw examples so enough survive the
            # split -- NOTE(review): confirm the 10x heuristic is sufficient.
            examples = self.load_examples(data_name, save_data, None if train_size is None else train_size*10)
            self.create_train_and_test(examples, test_size, data_name, save_data)
        if batched:
            self.create_model_batched(batch_size=batch_size)
        else:
            self.create_model()
        if (load) :
            self.load_model(model)
        if (train) :
            if(batched):
                self.train_batched(epochs, model_prefix, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
            else:
                self.train(epochs, model_prefix)
        else:
            # Evaluation-only path.
            result_dict= self.test_model()
            print(result_dict)
            print('Data : ', data_name)
            self.log(data_name)
            if (activation) :
                acc = self.results_verbose(df_name)
            else :
                if self.batched:
                    acc= self.results_batched()
                else:
                    acc = self.results()
            # NOTE(review): -2 looks like a sentinel that also triggers a
            # training-set evaluation -- confirm intended semantics.
            if (test_size == -2):
                acctrain = self.results_train()
    def load_examples(self,data_name='Not',save_data=False, n_examples=None):
        '''
        Set n_examples to some positive integer to only load (up to) that
        number of examples.

        Builds self.vocab_to_ints / self.ints_to_vocab as a side effect
        (token ids start at 1; id 0 is reserved for padding) and returns a
        list of (label_code, token_ids, dep_dict) tuples.
        '''
        self.log('Loading examples')
        if self.filename is None:
            raise ValueError('Filename argument to constructor can\'t be None')
        self.vocab_to_ints = {}
        self.ints_to_vocab = {}
        examples = []
        n = 0
        deps = deps_from_tsv(self.filename, limit=n_examples)
        for dep in deps:
            tokens = dep['sentence'].split()
            # Skip overlong sentences and anything rejected by the criterion.
            if len(tokens) > self.maxlen or not self.criterion(dep):
                continue
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])
            examples.append((self.class_to_code[dep['label']], ints, dep))
            n += 1
            if n_examples is not None and n >= n_examples:
                break
        if (save_data) :
            # NOTE(review): the vocab maps are always written to these fixed
            # filenames, so concurrent runs would overwrite each other.
            with open('plus5_v2i.pkl', 'wb') as f:
                pickle.dump(self.vocab_to_ints, f)
            with open('plus5_i2v.pkl', 'wb') as f:
                pickle.dump(self.ints_to_vocab, f)
        return examples
    def load_model(self, model) :
        """Restore a previously saved model from the file path *model*.

        NOTE(review): ``torch.load`` unpickles arbitrary objects -- only load
        checkpoints from trusted sources.
        """
        self.model = torch.load(model)
    def train(self, n_epochs=10, model_prefix='__'):
        """Online (one example at a time) training loop.

        Periodically evaluates via self.results() and checkpoints the model
        to '<model_prefix>.pkl' whenever accuracy matches or beats the best
        seen so far.  Gradient norms and the cell_0.rgate parameter are
        logged every few steps.
        """
        self.log('Training')
        if not hasattr(self, 'model'):
            self.create_model()
        loss_function = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.model.parameters(), lr = 0.001)
        prev_param = list(self.model.parameters())[0].clone()
        max_acc = 0
        self.log(len(self.X_train))
        x_train = torch.tensor(self.X_train, dtype=torch.long, requires_grad=False)#.to(device)
        y_train = self.Y_train #torch.tensor(self.Y_train, requires_grad=False)#.to(device)
        self.log('cpu to gpu')
        # acc = self.results()
        print(n_epochs)
        fffstart = 0
        for epoch in range(n_epochs) :
            self.log('epoch : ' + str(epoch))
            self.log_grad('epoch : ' + str(epoch))
            self.log_alpha('epoch : ' + str(epoch))
            for index in range(fffstart, len(x_train)) :
                # self.log(index)
                if ((index+1) % 1000 == 0) :
                    self.log(index+1)
                # Every 3000 examples: evaluate and checkpoint on improvement.
                if ((index+1) % 3000 == 0):
                    acc = self.results()
                    # result_dict = self.result_demarcated()
                    if (acc >= max_acc) :
                        model_name = model_prefix + '.pkl'
                        torch.save(self.model, model_name)
                        max_acc = acc
                # _ = self.test_model()
                self.model.zero_grad()
                output, hidden, out = self.model(x_train[index])
                # Binary target: label 0 vs. anything else.
                if (y_train[index] == 0) :
                    actual = torch.autograd.Variable(torch.tensor([0]), requires_grad=False)#.to(device)
                else :
                    actual = torch.autograd.Variable(torch.tensor([1]), requires_grad=False)#.to(device)
                loss = loss_function(output, actual)
                # NOTE(review): retain_graph=True on every step keeps graph
                # buffers alive; verify the model actually requires it.
                loss.backward(retain_graph=True)
                for name,param in self.model.named_parameters():
                    if(name=="cell_0.rgate"):
                        self.log_alpha(str(param))
                optimizer.step()
                # Log per-parameter gradient norms every 10 examples.
                if ((index) % 10 == 0) :
                    counter = 0
                    self.log_grad('index : ' + str(index))
                    for param in self.model.parameters():
                        if param.grad is not None:
                            # print(counter, param.shape)
                            self.log_grad(str(counter) + ' : ' + str(param.grad.norm().item()))
                            counter += 1
            fffstart = 0
        # Final evaluation; only checkpoint on a strict improvement here.
        acc = self.results()
        if (acc > max_acc) :
            model_name = model_prefix + '.pkl'
            torch.save(self.model, model_name)
            max_acc = acc
# self.results_train() | Ab-DRNN/decay_rnn_model.py | import json
import multiprocessing
import os
import sys
import os.path as op
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import pandas as pd
import numpy as np
import pickle
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import filenames
from utils import deps_from_tsv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cpu = torch.device("cpu")
# NOTE(review): this override forces all computation onto the CPU even when
# CUDA is available, making the conditional assignment above dead code.
# Remove one of the two assignments once the intended device policy is
# confirmed.
device = cpu
class BatchedDataset(Dataset):
    """Pairs input and target arrays for consumption by a ``DataLoader``.

    Both arrays must agree on their first (sample) dimension; indexing
    yields the matching ``(x, y)`` pair and ``len()`` reports the sample
    count.
    """
    def __init__(self, x_train, y_train):
        super(BatchedDataset, self).__init__()
        # Refuse mismatched inputs up front rather than failing mid-epoch.
        assert x_train.shape[0] == y_train.shape[0]
        self.x_train = x_train
        self.y_train = y_train
        self.length = x_train.shape[0]
    def __getitem__(self, index):
        return (self.x_train[index], self.y_train[index])
    def __len__(self):
        return self.length
class DECAY_RNN_Model(object):
def input_to_string(self, x_input):
#x_input is the example we want to convert to the string
#x_input should be in the form of 1D list.
example_string = ""
for token in x_input:
if token == 0:
continue
str_tok = self.ints_to_vocab[token]
example_string+=str_tok+" "
return example_string
def demark_testing(self):
X_test=self.X_test
Y_test=self.Y_test
deps_test=self.deps_test
testing_dict={}
assert len(X_test)==len(Y_test) and len(Y_test)==len(deps_test)
for i in (range(len(X_test))):
key = deps_test[i]['n_diff_intervening']
if not key in testing_dict.keys():
testing_dict[key]=[]
testing_dict[key].append((X_test[i], Y_test[i]))
self.testing_dict=testing_dict
serialized_attributes = ['vocab_to_ints', 'ints_to_vocab', 'filename',
'X_train', 'Y_train', 'deps_train',
'X_test', 'Y_test', 'deps_test']
    def __init__(self, filename=None, serialization_dir=None,
                 batch_size=1, embedding_size=50, hidden_dim = 50,
                 maxlen=50, prop_train=0.9, rnn_output_size=10,
                 mode='infreq_pos', vocab_file=filenames.vocab_file,
                 equalize_classes=False, criterion=None, len_after_verb=0,
                 verbose=1, output_filename='default.txt'):
        '''
        filename: TSV file with positive examples, or None if unserializing
        criterion: dependencies that don't meet this criterion are excluded
        (set to None to keep all dependencies)
        verbose: passed to Keras (0 = no, 1 = progress bar, 2 = line per epoch)
        batch_size / embedding_size / hidden_dim / rnn_output_size: model
        hyper-parameters stored for later use by create_model*()
        maxlen: sentences longer than this many tokens are discarded
        prop_train: fraction of examples used for training
        output_filename: suffix of the log files written under logs/
        '''
        self.filename = filename
        self.vocab_file = vocab_file
        self.batch_size = batch_size
        self.embedding_size = embedding_size
        self.hidden_dim = hidden_dim
        self.prop_train = prop_train
        self.mode = mode
        self.rnn_output_size = rnn_output_size
        self.maxlen = maxlen
        self.equalize_classes = equalize_classes
        # Default criterion accepts every dependency.
        self.criterion = (lambda x: True) if criterion is None else criterion
        self.len_after_verb = len_after_verb
        self.verbose = verbose
        self.output_filename = output_filename
        # self.set_serialization_dir(serialization_dir)
def log(self, message):
with open('logs/' + self.output_filename, 'a') as file:
file.write(str(message) + '\n')
def log_grad(self, message):
with open('logs/grad_' + self.output_filename, 'a') as file:
file.write(message + '\n')
def log_alpha(self,message):
with open('logs/alpha_' + self.output_filename, 'a') as file:
file.write(message + '\n')
    def pipeline(self, train = True, batched=False, batch_size = 32, shuffle = True, num_workers= 0,
                 load = False, model = '', test_size=7000,
                 train_size=None, model_prefix='__', epochs=20, data_name='Not',
                 activation=False, df_name='_verbose_.pkl', load_data=False,
                 save_data=False):
        """Top-level driver: prepare the data split, build/load a model, then
        either train it or evaluate it.

        train: when True run training; otherwise only evaluate.
        batched: select the batched model/training path vs. the online one.
        load / model: optionally restore a saved model from path *model*.
        load_data: reuse a previously serialized split instead of rebuilding.
        activation: evaluate via results_verbose (dumping to *df_name*).
        """
        self.batched= batched
        # Either reload a previously prepared split or build one from the TSV.
        if (load_data):
            self.load_train_and_test(test_size, data_name)
        else :
            self.log('creating data')
            # Loads up to 10x train_size raw examples so enough survive the
            # split -- NOTE(review): confirm the 10x heuristic is sufficient.
            examples = self.load_examples(data_name, save_data, None if train_size is None else train_size*10)
            self.create_train_and_test(examples, test_size, data_name, save_data)
        if batched:
            self.create_model_batched(batch_size=batch_size)
        else:
            self.create_model()
        if (load) :
            self.load_model(model)
        if (train) :
            if(batched):
                self.train_batched(epochs, model_prefix, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
            else:
                self.train(epochs, model_prefix)
        else:
            # Evaluation-only path.
            result_dict= self.test_model()
            print(result_dict)
            print('Data : ', data_name)
            self.log(data_name)
            if (activation) :
                acc = self.results_verbose(df_name)
            else :
                if self.batched:
                    acc= self.results_batched()
                else:
                    acc = self.results()
            # NOTE(review): -2 looks like a sentinel that also triggers a
            # training-set evaluation -- confirm intended semantics.
            if (test_size == -2):
                acctrain = self.results_train()
    def load_examples(self,data_name='Not',save_data=False, n_examples=None):
        '''
        Set n_examples to some positive integer to only load (up to) that
        number of examples.

        Builds self.vocab_to_ints / self.ints_to_vocab as a side effect
        (token ids start at 1; id 0 is reserved for padding) and returns a
        list of (label_code, token_ids, dep_dict) tuples.
        '''
        self.log('Loading examples')
        if self.filename is None:
            raise ValueError('Filename argument to constructor can\'t be None')
        self.vocab_to_ints = {}
        self.ints_to_vocab = {}
        examples = []
        n = 0
        deps = deps_from_tsv(self.filename, limit=n_examples)
        for dep in deps:
            tokens = dep['sentence'].split()
            # Skip overlong sentences and anything rejected by the criterion.
            if len(tokens) > self.maxlen or not self.criterion(dep):
                continue
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])
            examples.append((self.class_to_code[dep['label']], ints, dep))
            n += 1
            if n_examples is not None and n >= n_examples:
                break
        if (save_data) :
            # NOTE(review): the vocab maps are always written to these fixed
            # filenames, so concurrent runs would overwrite each other.
            with open('plus5_v2i.pkl', 'wb') as f:
                pickle.dump(self.vocab_to_ints, f)
            with open('plus5_i2v.pkl', 'wb') as f:
                pickle.dump(self.ints_to_vocab, f)
        return examples
    def load_model(self, model) :
        """Restore a previously saved model from the file path *model*.

        NOTE(review): ``torch.load`` unpickles arbitrary objects -- only load
        checkpoints from trusted sources.
        """
        self.model = torch.load(model)
    def train(self, n_epochs=10, model_prefix='__'):
        """Online (one example at a time) training loop.

        Periodically evaluates via self.results() and checkpoints the model
        to '<model_prefix>.pkl' whenever accuracy matches or beats the best
        seen so far.  Gradient norms and the cell_0.rgate parameter are
        logged every few steps.
        """
        self.log('Training')
        if not hasattr(self, 'model'):
            self.create_model()
        loss_function = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.model.parameters(), lr = 0.001)
        prev_param = list(self.model.parameters())[0].clone()
        max_acc = 0
        self.log(len(self.X_train))
        x_train = torch.tensor(self.X_train, dtype=torch.long, requires_grad=False)#.to(device)
        y_train = self.Y_train #torch.tensor(self.Y_train, requires_grad=False)#.to(device)
        self.log('cpu to gpu')
        # acc = self.results()
        print(n_epochs)
        fffstart = 0
        for epoch in range(n_epochs) :
            self.log('epoch : ' + str(epoch))
            self.log_grad('epoch : ' + str(epoch))
            self.log_alpha('epoch : ' + str(epoch))
            for index in range(fffstart, len(x_train)) :
                # self.log(index)
                if ((index+1) % 1000 == 0) :
                    self.log(index+1)
                # Every 3000 examples: evaluate and checkpoint on improvement.
                if ((index+1) % 3000 == 0):
                    acc = self.results()
                    # result_dict = self.result_demarcated()
                    if (acc >= max_acc) :
                        model_name = model_prefix + '.pkl'
                        torch.save(self.model, model_name)
                        max_acc = acc
                # _ = self.test_model()
                self.model.zero_grad()
                output, hidden, out = self.model(x_train[index])
                # Binary target: label 0 vs. anything else.
                if (y_train[index] == 0) :
                    actual = torch.autograd.Variable(torch.tensor([0]), requires_grad=False)#.to(device)
                else :
                    actual = torch.autograd.Variable(torch.tensor([1]), requires_grad=False)#.to(device)
                loss = loss_function(output, actual)
                # NOTE(review): retain_graph=True on every step keeps graph
                # buffers alive; verify the model actually requires it.
                loss.backward(retain_graph=True)
                for name,param in self.model.named_parameters():
                    if(name=="cell_0.rgate"):
                        self.log_alpha(str(param))
                optimizer.step()
                # Log per-parameter gradient norms every 10 examples.
                if ((index) % 10 == 0) :
                    counter = 0
                    self.log_grad('index : ' + str(index))
                    for param in self.model.parameters():
                        if param.grad is not None:
                            # print(counter, param.shape)
                            self.log_grad(str(counter) + ' : ' + str(param.grad.norm().item()))
                            counter += 1
            fffstart = 0
        # Final evaluation; only checkpoint on a strict improvement here.
        acc = self.results()
        if (acc > max_acc) :
            model_name = model_prefix + '.pkl'
            torch.save(self.model, model_name)
            max_acc = acc
# self.results_train() | 0.369998 | 0.298338 |
import argparse
import json
import multiprocessing
import time
import asyncio
from multiprocessing import Process
import os
import functools
import sys
from indy import pool
# Tallies updated by read_cb as child client processes report back.
count_of_connected = 0
count_of_not_connected = 0
def run_client(genesis_path, pipe_conn, client_number):
    """Child-process entry point: open an Indy pool ledger and report back.

    Sends ``(0, client_number)`` through *pipe_conn* on success or
    ``(1, client_number)`` on failure; an unexpected error from the event
    loop itself is sent as the raw exception object.
    """
    async def run_test(genesis_path, loop, pipe_conn):
        try:
            pool_cfg = json.dumps({"genesis_txn": genesis_path})
            # TODO: remove after latest changes committed
            # Unique per-run name so repeated runs don't collide on config.
            pool_name = "pool_{}_{}".format(int(time.time()), os.getpid())
            await pool.set_protocol_version(2)
            await pool.create_pool_ledger_config(pool_name, pool_cfg)
            await pool.open_pool_ledger(pool_name, None)
            pipe_conn.send((0, client_number))
            # NOTE(review): blocking sleep freezes this process's event loop,
            # holding the pool connection open "forever" (it also stops the
            # periodic progress printer below).
            time.sleep(100000)
        except Exception:
            pipe_conn.send((1, client_number))
            loop.call_soon(loop.stop)
            return
    async def periodically_print():
        # Progress heartbeat while the connection attempt is in flight.
        while True:
            print("Client with number: {}. Trying to connect ....".format(client_number))
            await asyncio.sleep(5)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(asyncio.gather(
            periodically_print(),
            run_test(genesis_path, loop, pipe_conn)
        ))
    except Exception as e:
        pipe_conn.send(e)
def read_cb(pipe_conn):
    """Pipe-reader callback: tally connect/fail reports from child clients.

    A child sends ``(code, client_number)`` where code 0 means connected and
    1 means not connected; anything that is not a tuple (e.g. an exception
    object) is printed verbatim.
    """
    global count_of_connected
    global count_of_not_connected
    res = pipe_conn.recv()
    if not isinstance(res, tuple):
        print(res)
        return
    code, cl_number = res
    if code == 0:
        print("Client with number {} is connected".format(cl_number))
        count_of_connected += 1
    elif code == 1:
        print("Client with number {} is not connected".format(cl_number))
        count_of_not_connected += 1
    print("Count of connected clients: {}".format(count_of_connected))
    print("Count of not connected clients: {}".format(count_of_not_connected))
async def start_all_procs(args, wr):
    """Spawn one client process per requested client, all sharing pipe *wr*.

    NOTE(review): the ``processes`` list is collected but never joined or
    returned, so the children are effectively fire-and-forget.
    """
    processes = []
    for client_number in range(args.clients):
        process = Process(target=run_client, args=(args.genesis_path, wr, client_number))
        processes.append(process)
        process.start()
# ---- CLI entry point -------------------------------------------------------
parser = argparse.ArgumentParser(description="Create N simultaneous connection to pool ")
parser.add_argument('-c', '--clients', default=100, type=int, required=False, dest='clients',
                    help='Number of client you want to create. ')
parser.add_argument('-g', '--genesis', required=True, dest='genesis_path', type=str,
                    help='Path to genesis txns file. '
                         'Default value is ~/.indy-cli/networks/sandbox/pool_transactions_genesis')
args = parser.parse_args()
count_failed_clients = 0
# Children report back over one shared pipe; the main loop invokes read_cb
# as soon as any child writes to it.
rd, wr = multiprocessing.Pipe()
main_loop = asyncio.get_event_loop()
main_loop.add_reader(rd, functools.partial(read_cb, rd))
asyncio.run_coroutine_threadsafe(start_all_procs(args, wr), loop=main_loop)
print("All the processes are started")
try:
    main_loop.run_forever()
except KeyboardInterrupt:
    sys.exit(0)
import json
import multiprocessing
import time
import asyncio
from multiprocessing import Process
import os
import functools
import sys
from indy import pool
# Tallies updated by read_cb as child client processes report back.
count_of_connected = 0
count_of_not_connected = 0
def run_client(genesis_path, pipe_conn, client_number):
    """Child-process entry point: open an Indy pool ledger and report back.

    Sends ``(0, client_number)`` through *pipe_conn* on success or
    ``(1, client_number)`` on failure; an unexpected error from the event
    loop itself is sent as the raw exception object.
    """
    async def run_test(genesis_path, loop, pipe_conn):
        try:
            pool_cfg = json.dumps({"genesis_txn": genesis_path})
            # TODO: remove after latest changes committed
            # Unique per-run name so repeated runs don't collide on config.
            pool_name = "pool_{}_{}".format(int(time.time()), os.getpid())
            await pool.set_protocol_version(2)
            await pool.create_pool_ledger_config(pool_name, pool_cfg)
            await pool.open_pool_ledger(pool_name, None)
            pipe_conn.send((0, client_number))
            # NOTE(review): blocking sleep freezes this process's event loop,
            # holding the pool connection open "forever" (it also stops the
            # periodic progress printer below).
            time.sleep(100000)
        except Exception:
            pipe_conn.send((1, client_number))
            loop.call_soon(loop.stop)
            return
    async def periodically_print():
        # Progress heartbeat while the connection attempt is in flight.
        while True:
            print("Client with number: {}. Trying to connect ....".format(client_number))
            await asyncio.sleep(5)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(asyncio.gather(
            periodically_print(),
            run_test(genesis_path, loop, pipe_conn)
        ))
    except Exception as e:
        pipe_conn.send(e)
def read_cb(pipe_conn):
    """Pipe-reader callback: tally connect/fail reports from child clients.

    A child sends ``(code, client_number)`` where code 0 means connected and
    1 means not connected; anything that is not a tuple (e.g. an exception
    object) is printed verbatim.
    """
    global count_of_connected
    global count_of_not_connected
    res = pipe_conn.recv()
    if not isinstance(res, tuple):
        print(res)
        return
    code, cl_number = res
    if code == 0:
        print("Client with number {} is connected".format(cl_number))
        count_of_connected += 1
    elif code == 1:
        print("Client with number {} is not connected".format(cl_number))
        count_of_not_connected += 1
    print("Count of connected clients: {}".format(count_of_connected))
    print("Count of not connected clients: {}".format(count_of_not_connected))
async def start_all_procs(args, wr):
    """Spawn one client process per requested client, all sharing pipe *wr*.

    NOTE(review): the ``processes`` list is collected but never joined or
    returned, so the children are effectively fire-and-forget.
    """
    processes = []
    for client_number in range(args.clients):
        process = Process(target=run_client, args=(args.genesis_path, wr, client_number))
        processes.append(process)
        process.start()
# ---- CLI entry point -------------------------------------------------------
parser = argparse.ArgumentParser(description="Create N simultaneous connection to pool ")
parser.add_argument('-c', '--clients', default=100, type=int, required=False, dest='clients',
                    help='Number of client you want to create. ')
parser.add_argument('-g', '--genesis', required=True, dest='genesis_path', type=str,
                    help='Path to genesis txns file. '
                         'Default value is ~/.indy-cli/networks/sandbox/pool_transactions_genesis')
args = parser.parse_args()
count_failed_clients = 0
# Children report back over one shared pipe; the main loop invokes read_cb
# as soon as any child writes to it.
rd, wr = multiprocessing.Pipe()
main_loop = asyncio.get_event_loop()
main_loop.add_reader(rd, functools.partial(read_cb, rd))
asyncio.run_coroutine_threadsafe(start_all_procs(args, wr), loop=main_loop)
print("All the processes are started")
try:
    main_loop.run_forever()
except KeyboardInterrupt:
    sys.exit(0)
import unittest
from acme.wrappers import atari_wrapper
from dm_env import specs
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
# Tests are skipped (rather than failed) when the optional gym / atari_py
# dependencies or the Atari ROMs are missing.
SKIP_GYM_TESTS = False
SKIP_GYM_MESSAGE = 'gym not installed.'
SKIP_ATARI_TESTS = False
SKIP_ATARI_MESSAGE = ''
try:
    # pylint: disable=g-import-not-at-top
    from acme.wrappers import gym_wrapper
    import gym
    # pylint: enable=g-import-not-at-top
except ModuleNotFoundError:
    SKIP_GYM_TESTS = True
try:
    import atari_py  # pylint: disable=g-import-not-at-top
    atari_py.get_game_path('pong')
except ModuleNotFoundError as e:
    SKIP_ATARI_TESTS = True
    SKIP_ATARI_MESSAGE = str(e)
except Exception as e:  # pylint: disable=broad-except
    # This exception is raised by atari_py.get_game_path('pong') if the Atari ROM
    # file has not been installed.
    SKIP_ATARI_TESTS = True
    SKIP_ATARI_MESSAGE = str(e)
    del atari_py
else:
    # Probe succeeded; the module reference is no longer needed either way.
    del atari_py
@unittest.skipIf(SKIP_ATARI_TESTS, SKIP_ATARI_MESSAGE)
@unittest.skipIf(SKIP_GYM_TESTS, SKIP_GYM_MESSAGE)
class AtariWrapperTest(parameterized.TestCase):
    """Smoke tests for AtariWrapper wrapped around the Gym Pong env."""
    @parameterized.parameters(True, False)
    def test_pong(self, zero_discount_on_life_loss: bool):
        """Checks converted specs, render delegation and a reset/step cycle."""
        env = gym.make('PongNoFrameskip-v4', full_action_space=True)
        env = gym_wrapper.GymAtariAdapter(env)
        env = atari_wrapper.AtariWrapper(
            env, zero_discount_on_life_loss=zero_discount_on_life_loss)
        # Test converted observation spec.
        observation_spec = env.observation_spec()
        self.assertEqual(type(observation_spec), specs.Array)
        # Test converted action spec.
        action_spec: specs.DiscreteArray = env.action_spec()
        self.assertEqual(type(action_spec), specs.DiscreteArray)
        self.assertEqual(action_spec.shape, ())
        self.assertEqual(action_spec.minimum, 0)
        self.assertEqual(action_spec.maximum, 17)
        self.assertEqual(action_spec.num_values, 18)
        self.assertEqual(action_spec.dtype, np.dtype('int32'))
        # Check that the `render` call gets delegated to the underlying Gym env.
        env.render('rgb_array')
        # Test step.
        timestep = env.reset()
        self.assertTrue(timestep.first())
        _ = env.step(0)
        env.close()
if __name__ == '__main__':
absltest.main() | acme/wrappers/atari_wrapper_test.py | import unittest
from acme.wrappers import atari_wrapper
from dm_env import specs
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
# Tests are skipped (rather than failed) when the optional gym / atari_py
# dependencies or the Atari ROMs are missing.
SKIP_GYM_TESTS = False
SKIP_GYM_MESSAGE = 'gym not installed.'
SKIP_ATARI_TESTS = False
SKIP_ATARI_MESSAGE = ''
try:
    # pylint: disable=g-import-not-at-top
    from acme.wrappers import gym_wrapper
    import gym
    # pylint: enable=g-import-not-at-top
except ModuleNotFoundError:
    SKIP_GYM_TESTS = True
try:
    import atari_py  # pylint: disable=g-import-not-at-top
    atari_py.get_game_path('pong')
except ModuleNotFoundError as e:
    SKIP_ATARI_TESTS = True
    SKIP_ATARI_MESSAGE = str(e)
except Exception as e:  # pylint: disable=broad-except
    # This exception is raised by atari_py.get_game_path('pong') if the Atari ROM
    # file has not been installed.
    SKIP_ATARI_TESTS = True
    SKIP_ATARI_MESSAGE = str(e)
    del atari_py
else:
    # Probe succeeded; the module reference is no longer needed either way.
    del atari_py
@unittest.skipIf(SKIP_ATARI_TESTS, SKIP_ATARI_MESSAGE)
@unittest.skipIf(SKIP_GYM_TESTS, SKIP_GYM_MESSAGE)
class AtariWrapperTest(parameterized.TestCase):
    """Smoke tests for AtariWrapper wrapped around the Gym Pong env."""
    @parameterized.parameters(True, False)
    def test_pong(self, zero_discount_on_life_loss: bool):
        """Checks converted specs, render delegation and a reset/step cycle."""
        env = gym.make('PongNoFrameskip-v4', full_action_space=True)
        env = gym_wrapper.GymAtariAdapter(env)
        env = atari_wrapper.AtariWrapper(
            env, zero_discount_on_life_loss=zero_discount_on_life_loss)
        # Test converted observation spec.
        observation_spec = env.observation_spec()
        self.assertEqual(type(observation_spec), specs.Array)
        # Test converted action spec.
        action_spec: specs.DiscreteArray = env.action_spec()
        self.assertEqual(type(action_spec), specs.DiscreteArray)
        self.assertEqual(action_spec.shape, ())
        self.assertEqual(action_spec.minimum, 0)
        self.assertEqual(action_spec.maximum, 17)
        self.assertEqual(action_spec.num_values, 18)
        self.assertEqual(action_spec.dtype, np.dtype('int32'))
        # Check that the `render` call gets delegated to the underlying Gym env.
        env.render('rgb_array')
        # Test step.
        timestep = env.reset()
        self.assertTrue(timestep.first())
        _ = env.step(0)
        env.close()
if __name__ == '__main__':
absltest.main() | 0.57332 | 0.4165 |
import time
from flask import request
from flask_restful import abort
from redash import models
from redash.permissions import require_admin, require_permission
from redash.handlers.base import BaseResource, get_object_or_404
class GroupListResource(BaseResource):
    @require_admin
    def post(self):
        """Create a new group in the current org; admin only."""
        group = models.Group(name=request.json['name'], org=self.current_org)
        models.db.session.add(group)
        models.db.session.commit()
        self.record_event({
            'action': 'create',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
        })
        return group.to_dict()

    def get(self):
        """List every group (admins) or just the caller's own groups."""
        if self.current_user.has_permission('admin'):
            groups = models.Group.all(self.current_org)
        else:
            groups = models.Group.query.filter(
                models.Group.id.in_(self.current_user.group_ids))
        return [group.to_dict() for group in groups]
class GroupResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Rename a (non-builtin) group; admin only."""
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        if group.type == models.Group.BUILTIN_GROUP:
            abort(400, message="Can't modify built-in groups.")
        group.name = request.json['name']
        models.db.session.commit()
        self.record_event({
            'action': 'edit',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
        })
        return group.to_dict()

    def get(self, group_id):
        """Fetch a single group; admins or members of that group only."""
        is_admin = self.current_user.has_permission('admin')
        if not (is_admin or int(group_id) in self.current_user.group_ids):
            abort(403)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        return group.to_dict()

    @require_admin
    def delete(self, group_id):
        """Delete a (non-builtin) group and detach all of its members."""
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        if group.type == models.Group.BUILTIN_GROUP:
            abort(400, message="Can't delete built-in groups.")
        for member in models.Group.members(group_id):
            member.group_ids.remove(int(group_id))
            models.db.session.add(member)
        models.db.session.delete(group)
        models.db.session.commit()
class GroupMemberListResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Add a user to a group and record the event; admin only."""
        user_id = request.json['user_id']
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        user.group_ids.append(group.id)
        models.db.session.commit()
        self.record_event({
            'action': 'add_member',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
            'member_id': user.id
        })
        return user.to_dict()

    @require_permission('list_users')
    def get(self, group_id):
        """List group members sorted by name.

        Each member dict's 'groups' field is replaced by a readable string
        such as "[admins] [default] ".  Accessible to admins and to members
        of the group itself.
        """
        if not (self.current_user.has_permission('admin') or int(group_id) in self.current_user.group_ids):
            abort(403)
        members = [m.to_dict() for m in models.Group.members(group_id)]
        # Build an id -> name lookup once; the previous version rescanned the
        # full group list for every membership of every member (O(M*G)) and
        # clobbered the group_id parameter with its inner loop variable.
        group_names = {g['id']: g['name']
                       for g in (grp.to_dict() for grp in models.Group.all(self.current_org))}
        for member in members:
            member['groups'] = "".join(
                "[" + str(group_names[gid]) + "] "
                for gid in member['groups'] if gid in group_names)
        return sorted(members, key=lambda m: m['name'].encode('utf-8').lower())
class GroupMemberResource(BaseResource):
    @require_admin
    def delete(self, group_id, user_id):
        """Remove a user from a group and record the event; admin only."""
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        user.group_ids.remove(int(group_id))
        models.db.session.commit()
        self.record_event({
            'action': 'remove_member',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': user.id,
        })
def serialize_data_source_with_group(data_source, data_source_group):
    """Serialize *data_source* and attach the association's view_only flag."""
    serialized = data_source.to_dict()
    serialized['view_only'] = data_source_group.view_only
    return serialized
class GroupDataSourceListResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Grant a group access to a data source; admin only."""
        ds_id = request.json['data_source_id']
        data_source = models.DataSource.get_by_id_and_org(ds_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        data_source_group = data_source.add_group(group)
        models.db.session.commit()
        self.record_event({
            'action': 'add_data_source',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id,
        })
        return serialize_data_source_with_group(data_source, data_source_group)

    @require_admin
    def get(self, group_id):
        """List the data sources a group can access; admin only."""
        group = get_object_or_404(models.Group.get_by_id_and_org, group_id,
                                  self.current_org)
        # TODO: move this query into the models layer.
        query = (models.DataSource.query
                 .join(models.DataSourceGroup)
                 .filter(models.DataSourceGroup.group == group))
        return [ds.to_dict(with_permissions_for=group) for ds in query]
class GroupDataSourceResource(BaseResource):
    @require_admin
    def post(self, group_id, data_source_id):
        """Update a group's view_only permission on a data source; admin only."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        view_only = request.json['view_only']
        data_source_group = data_source.update_group_permission(group, view_only)
        models.db.session.commit()
        self.record_event({
            'action': 'change_data_source_permission',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id,
            'view_only': view_only,
        })
        return serialize_data_source_with_group(data_source, data_source_group)

    @require_admin
    def delete(self, group_id, data_source_id):
        """Revoke a group's access to a data source; admin only."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        data_source.remove_group(group)
        models.db.session.commit()
        self.record_event({
            'action': 'remove_data_source',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id,
        })
from flask import request
from flask_restful import abort
from redash import models
from redash.permissions import require_admin, require_permission
from redash.handlers.base import BaseResource, get_object_or_404
class GroupListResource(BaseResource):
    @require_admin
    def post(self):
        """Create a new group in the current org; admin only."""
        group = models.Group(name=request.json['name'], org=self.current_org)
        models.db.session.add(group)
        models.db.session.commit()
        self.record_event({
            'action': 'create',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
        })
        return group.to_dict()

    def get(self):
        """List every group (admins) or just the caller's own groups."""
        if self.current_user.has_permission('admin'):
            groups = models.Group.all(self.current_org)
        else:
            groups = models.Group.query.filter(
                models.Group.id.in_(self.current_user.group_ids))
        return [group.to_dict() for group in groups]
class GroupResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Rename a (non-builtin) group; admin only."""
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        if group.type == models.Group.BUILTIN_GROUP:
            abort(400, message="Can't modify built-in groups.")
        group.name = request.json['name']
        models.db.session.commit()
        self.record_event({
            'action': 'edit',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
        })
        return group.to_dict()

    def get(self, group_id):
        """Fetch a single group; admins or members of that group only."""
        is_admin = self.current_user.has_permission('admin')
        if not (is_admin or int(group_id) in self.current_user.group_ids):
            abort(403)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        return group.to_dict()

    @require_admin
    def delete(self, group_id):
        """Delete a (non-builtin) group and detach all of its members."""
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        if group.type == models.Group.BUILTIN_GROUP:
            abort(400, message="Can't delete built-in groups.")
        for member in models.Group.members(group_id):
            member.group_ids.remove(int(group_id))
            models.db.session.add(member)
        models.db.session.delete(group)
        models.db.session.commit()
class GroupMemberListResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Add a user to a group and record the event; admin only."""
        user_id = request.json['user_id']
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        user.group_ids.append(group.id)
        models.db.session.commit()
        self.record_event({
            'action': 'add_member',
            'timestamp': int(time.time()),
            'object_id': group.id,
            'object_type': 'group',
            'member_id': user.id
        })
        return user.to_dict()

    @require_permission('list_users')
    def get(self, group_id):
        """List group members sorted by name.

        Each member dict's 'groups' field is replaced by a readable string
        such as "[admins] [default] ".  Accessible to admins and to members
        of the group itself.
        """
        if not (self.current_user.has_permission('admin') or int(group_id) in self.current_user.group_ids):
            abort(403)
        members = [m.to_dict() for m in models.Group.members(group_id)]
        # Build an id -> name lookup once; the previous version rescanned the
        # full group list for every membership of every member (O(M*G)) and
        # clobbered the group_id parameter with its inner loop variable.
        group_names = {g['id']: g['name']
                       for g in (grp.to_dict() for grp in models.Group.all(self.current_org))}
        for member in members:
            member['groups'] = "".join(
                "[" + str(group_names[gid]) + "] "
                for gid in member['groups'] if gid in group_names)
        return sorted(members, key=lambda m: m['name'].encode('utf-8').lower())
class GroupMemberResource(BaseResource):
    @require_admin
    def delete(self, group_id, user_id):
        """Remove a user from a group and record the event; admin only."""
        user = models.User.get_by_id_and_org(user_id, self.current_org)
        user.group_ids.remove(int(group_id))
        models.db.session.commit()
        self.record_event({
            'action': 'remove_member',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': user.id,
        })
def serialize_data_source_with_group(data_source, data_source_group):
    """Serialize *data_source* and attach the association's view_only flag."""
    serialized = data_source.to_dict()
    serialized['view_only'] = data_source_group.view_only
    return serialized
class GroupDataSourceListResource(BaseResource):
    @require_admin
    def post(self, group_id):
        """Grant a group access to a data source; admin only."""
        ds_id = request.json['data_source_id']
        data_source = models.DataSource.get_by_id_and_org(ds_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        data_source_group = data_source.add_group(group)
        models.db.session.commit()
        self.record_event({
            'action': 'add_data_source',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id,
        })
        return serialize_data_source_with_group(data_source, data_source_group)

    @require_admin
    def get(self, group_id):
        """List the data sources a group can access; admin only."""
        group = get_object_or_404(models.Group.get_by_id_and_org, group_id,
                                  self.current_org)
        # TODO: move this query into the models layer.
        query = (models.DataSource.query
                 .join(models.DataSourceGroup)
                 .filter(models.DataSourceGroup.group == group))
        return [ds.to_dict(with_permissions_for=group) for ds in query]
class GroupDataSourceResource(BaseResource):
    """Operations on a single group/data-source association."""

    @require_admin
    def post(self, group_id, data_source_id):
        """Update the ``view_only`` permission flag for the association."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        view_only = request.json['view_only']
        membership = data_source.update_group_permission(group, view_only)
        models.db.session.commit()
        self.record_event({
            'action': 'change_data_source_permission',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id,
            'view_only': view_only
        })
        return serialize_data_source_with_group(data_source, membership)

    @require_admin
    def delete(self, group_id, data_source_id):
        """Detach the data source from the group."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        group = models.Group.get_by_id_and_org(group_id, self.current_org)
        data_source.remove_group(group)
        models.db.session.commit()
        self.record_event({
            'action': 'remove_data_source',
            'timestamp': int(time.time()),
            'object_id': group_id,
            'object_type': 'group',
            'member_id': data_source.id
        })
from .. import models
from .generic import AllMixin, GetByIdMixin, Manager, SyncMixin
class LabelsManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
    """Sync-API manager for Todoist labels."""

    state_name = "labels"
    object_type = "label"

    def add(self, name, **kwargs):
        """Create a local label object and queue a ``label_add`` command.

        Returns the local :class:`models.Label` with a temporary id.
        """
        obj = models.Label({"name": name}, self.api)
        obj.temp_id = obj["id"] = self.api.generate_uuid()
        obj.data.update(kwargs)
        self.state[self.state_name].append(obj)
        cmd = {
            "type": "label_add",
            "temp_id": obj.temp_id,
            "uuid": self.api.generate_uuid(),
            # The server assigns the real id, so the temporary one is stripped.
            "args": {key: obj.data[key] for key in obj.data if key != "id"},
        }
        self.queue.append(cmd)
        return obj

    def update(self, label_id, **kwargs):
        """Queue a ``label_update`` command updating a label remotely."""
        args = {"id": label_id}
        args.update(kwargs)
        cmd = {
            "type": "label_update",
            "uuid": self.api.generate_uuid(),
            "args": args,
        }
        self.queue.append(cmd)

    def delete(self, label_id):
        """Queue a ``label_delete`` command deleting a label remotely."""
        cmd = {
            "type": "label_delete",
            "uuid": self.api.generate_uuid(),
            "args": {"id": label_id},
        }
        self.queue.append(cmd)

    def update_orders(self, id_order_mapping):
        """Queue a ``label_update_orders`` command for multiple labels."""
        cmd = {
            "type": "label_update_orders",
            "uuid": self.api.generate_uuid(),
            "args": {"id_order_mapping": id_order_mapping},
        }
        self.queue.append(cmd)

    def get(self, label_id):
        """Fetch an existing label and merge it into local state.

        Returns the raw server response, or ``None`` when the request
        failed or the server reported an error.
        """
        params = {"token": self.token, "label_id": label_id}
        obj = self.api._get("labels/get", params=params)
        # Bug fix: a failed request may return None; the old code only
        # checked "error" on a truthy response and then crashed on
        # obj.get() with an AttributeError.
        if obj is None or "error" in obj:
            return None
        data = {"labels": []}
        if obj.get("label"):
            data["labels"].append(obj.get("label"))
        self.api._update_state(data)
        return obj
from .generic import AllMixin, GetByIdMixin, Manager, SyncMixin
class LabelsManager(Manager, AllMixin, GetByIdMixin, SyncMixin):
state_name = "labels"
object_type = "label"
def add(self, name, **kwargs):
"""
Creates a local label object.
"""
obj = models.Label({"name": name}, self.api)
obj.temp_id = obj["id"] = self.api.generate_uuid()
obj.data.update(kwargs)
self.state[self.state_name].append(obj)
cmd = {
"type": "label_add",
"temp_id": obj.temp_id,
"uuid": self.api.generate_uuid(),
"args": {key: obj.data[key] for key in obj.data if key != "id"},
}
self.queue.append(cmd)
return obj
def update(self, label_id, **kwargs):
"""
Updates a label remotely.
"""
args = {"id": label_id}
args.update(kwargs)
cmd = {
"type": "label_update",
"uuid": self.api.generate_uuid(),
"args": args,
}
self.queue.append(cmd)
def delete(self, label_id):
"""
Deletes a label remotely.
"""
cmd = {
"type": "label_delete",
"uuid": self.api.generate_uuid(),
"args": {"id": label_id},
}
self.queue.append(cmd)
def update_orders(self, id_order_mapping):
"""
Updates the orders of multiple labels remotely.
"""
cmd = {
"type": "label_update_orders",
"uuid": self.api.generate_uuid(),
"args": {"id_order_mapping": id_order_mapping},
}
self.queue.append(cmd)
def get(self, label_id):
"""
Gets an existing label.
"""
params = {"token": self.token, "label_id": label_id}
obj = self.api._get("labels/get", params=params)
if obj and "error" in obj:
return None
data = {"labels": []}
if obj.get("label"):
data["labels"].append(obj.get("label"))
self.api._update_state(data)
return obj | 0.389663 | 0.167355 |
import asyncio
import inspect
from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, Iterable
import toolz
from toolz import curried
from gamla import functional
async def to_awaitable(value):
    """Return *value*, awaiting it first when it is awaitable."""
    if not inspect.isawaitable(value):
        return value
    return await value
async def apipe(val, *funcs):
    """Thread *val* through *funcs* left to right, awaiting async results."""
    for func in funcs:
        val = func(val)
        if inspect.isawaitable(val):
            val = await val
    return val
def acompose(*funcs):
    """Compose *funcs* right to left into a single async callable."""
    async def composed(*args, **kwargs):
        for func in reversed(funcs):
            result = func(*args, **kwargs)
            if inspect.isawaitable(result):
                result = await result
            # Subsequent functions receive exactly one positional argument.
            args = [result]
            kwargs = {}
        return args[0]
    return composed
def acompose_left(*funcs):
    """Compose *funcs* so they run left to right."""
    return acompose(*funcs[::-1])
def run_sync(f):
    """Run a coroutine in a synchronous context, blocking until the result arrives.

    Bug fix: the event loop created here was never closed, leaking the loop's
    file descriptors on every call.
    """
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(asyncio.ensure_future(f, loop=loop))
    finally:
        loop.close()
@toolz.curry
async def amap(f, it):
    """Apply async *f* to every element of *it*, running them concurrently."""
    return await asyncio.gather(*(f(element) for element in it))
@toolz.curry
async def amap_ascompleted(
    f: Callable[[Any], Awaitable[Any]], it: Iterable
) -> AsyncGenerator[Any, None]:
    """Yield the results of *f* over *it* in completion order, not input order."""
    pending = map(f, it)
    for future in asyncio.as_completed(pending):
        yield await future
@toolz.curry
async def aexcepts(exception_type, func, handler, x):
    """Await ``func(x)``; on *exception_type*, return ``handler(error)`` instead."""
    try:
        result = await func(x)
    except exception_type as error:
        return handler(error)
    return result
@toolz.curry
async def mapa(f, it):
    """Map the synchronous function *f* over the async iterable *it*."""
    async for item in it:
        yield f(item)
async def aconcat(async_generators):
    """Flatten an async iterable of (sync) iterables into one async stream."""
    async for iterable in async_generators:
        for item in iterable:
            yield item
def ajuxt(*funcs):
    """Return an async function applying each of *funcs* to its input.

    The result is a tuple with one entry per function, in order.
    """
    async def ajuxt_inner(x):
        applied = amap(acompose_left(functional.apply(x), to_awaitable))
        return await apipe(funcs, applied, tuple)
    return ajuxt_inner
@toolz.curry
async def afilter(func, it):
    """Filter *it* by the async predicate *func*, evaluating predicates concurrently."""
    elements = tuple(it)
    keep = await amap(func, elements)
    return toolz.pipe(
        zip(elements, keep),
        curried.filter(toolz.second),
        curried.map(toolz.first),
    )
def afirst(*funcs, exception_type):
    """Return an async function trying each of *funcs* in turn on its input.

    The first result that does not raise *exception_type* wins; if every
    function raises, *exception_type* itself is raised.
    """
    async def afirst_inner(x):
        for func in funcs:
            try:
                result = func(x)
                if inspect.isawaitable(result):
                    result = await result
                return result
            except exception_type:
                continue
        raise exception_type
    return afirst_inner
@toolz.curry
async def apair_with(f, element):
    """Return ``(await f(element), element)``."""
    result = await f(element)
    return result, element
@toolz.curry
async def apair_right(f, element):
    """Return ``(element, await f(element))``."""
    result = await f(element)
    return element, result
@toolz.curry
async def akeymap(f, d: Dict):
    """Apply the (possibly async) function *f* to every key of *d*."""
    transform = ajuxt(acompose_left(toolz.first, f), toolz.second)
    return await aitemmap(transform, d)
@toolz.curry
async def avalmap(f, d: Dict):
    """Apply the (possibly async) function *f* to every value of *d*."""
    transform = ajuxt(toolz.first, acompose_left(toolz.second, f))
    return await aitemmap(transform, d)
@toolz.curry
async def aitemmap(f, d: Dict):
    """Apply *f* to every ``(key, value)`` pair of *d*, rebuilding a dict."""
    return await apipe(d, dict.items, amap(f), dict)
@toolz.curry
def aternary(condition, f_true, f_false):
    """Async if/else combinator: await *condition*, then run only the chosen branch."""
    async def aternary_inner(*args, **kwargs):
        branch_taken = await to_awaitable(condition(*args, **kwargs))
        chosen = f_true if branch_taken else f_false
        return await to_awaitable(chosen(*args, **kwargs))
    return aternary_inner
import inspect
from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, Iterable
import toolz
from toolz import curried
from gamla import functional
async def to_awaitable(value):
if inspect.isawaitable(value):
return await value
return value
async def apipe(val, *funcs):
for f in funcs:
val = await to_awaitable(f(val))
return val
def acompose(*funcs):
async def composed(*args, **kwargs):
for f in reversed(funcs):
args = [await to_awaitable(f(*args, **kwargs))]
kwargs = {}
return toolz.first(args)
return composed
def acompose_left(*funcs):
return acompose(*reversed(funcs))
def run_sync(f):
"""Runs a coroutine in a synchronous context, blocking until result arrives."""
loop = asyncio.new_event_loop()
return loop.run_until_complete(asyncio.ensure_future(f, loop=loop))
@toolz.curry
async def amap(f, it):
return await asyncio.gather(*map(f, it))
@toolz.curry
async def amap_ascompleted(
f: Callable[[Any], Awaitable[Any]], it: Iterable
) -> AsyncGenerator[Any, None]:
for future in asyncio.as_completed(map(f, it)):
yield await future
@toolz.curry
async def aexcepts(exception_type, func, handler, x):
try:
return await func(x)
except exception_type as error:
return handler(error)
@toolz.curry
async def mapa(f, it):
async for element in it:
yield f(element)
async def aconcat(async_generators):
async for g in async_generators:
for x in g:
yield x
def ajuxt(*funcs):
async def ajuxt_inner(x):
return await apipe(
funcs, amap(acompose_left(functional.apply(x), to_awaitable)), tuple
)
return ajuxt_inner
@toolz.curry
async def afilter(func, it):
it = tuple(it)
results = await amap(func, it)
return toolz.pipe(
zip(it, results), curried.filter(toolz.second), curried.map(toolz.first)
)
def afirst(*funcs, exception_type):
async def afirst_inner(x):
for f in funcs:
try:
return await to_awaitable(f(x))
except exception_type:
pass
raise exception_type
return afirst_inner
@toolz.curry
async def apair_with(f, element):
return await f(element), element
@toolz.curry
async def apair_right(f, element):
return element, await f(element)
@toolz.curry
async def akeymap(f, d: Dict):
return await aitemmap(ajuxt(acompose_left(toolz.first, f), toolz.second), d)
@toolz.curry
async def avalmap(f, d: Dict):
return await aitemmap(ajuxt(toolz.first, acompose_left(toolz.second, f)), d)
@toolz.curry
async def aitemmap(f, d: Dict):
return await apipe(d, dict.items, amap(f), dict)
@toolz.curry
def aternary(condition, f_true, f_false):
async def aternary_inner(*args, **kwargs):
return (
await to_awaitable(f_true(*args, **kwargs))
if await to_awaitable(condition(*args, **kwargs))
else await to_awaitable(f_false(*args, **kwargs))
)
return aternary_inner | 0.713132 | 0.302314 |
from behave import *
from itunes_app_scraper.scraper import AppStoreScraper
@given('we have itunes scraper installed')
def step_impl(context):
    """Create the AppStoreScraper under test and stash it on the context."""
    context.scraper = AppStoreScraper()
@when('we search for "{search_term}"')
def step_impl(context, search_term):
    """Run a query against the GB store and keep the returned app ids."""
    context.results = context.scraper.get_app_ids_for_query(search_term, country="gb", lang="en")
@then('the scraper will return "{text}" results')
def step_impl(context, text):
    """Assert the result count matches the expectation from the feature file."""
    expected = int(text)
    assert len(context.results) == expected
@then('the results length is "{json_len}"')
def step_impl(context, json_len):
    """Print the results for debugging, then assert their length."""
    print(context.results)
    expected = int(json_len)
    assert len(context.results) == expected
@when('we search for result from mindful')
def step_impl(context):
    """Fetch apps similar to the first hit of a "mindful" query."""
    hits = context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en")
    context.results = context.scraper.get_similar_app_ids_for_app(hits[0])
@when('we search for the topic topfreeapplications')
def step_impl(context):
    """Fetch the top-free-applications collection for the GB store."""
    context.results = context.scraper.get_app_ids_for_collection(collection="topfreeapplications", category="", num=50, country="gb")
@when('we search for the developer 384434796')
def step_impl(context):
    """Fetch all app ids published by developer 384434796."""
    context.results = context.scraper.get_app_ids_for_developer("384434796", country="gb")
@when('we search for the app with id "{app_id}"')
def step_impl(context, app_id):
    """Fetch the full details of a single app by its id."""
    context.results = context.scraper.get_app_details(app_id, country="gb")
@when(u'we search for "{num_apps}" apps')
def step_impl(context, num_apps):
    """Fetch details for the first *num_apps* hits of a "mindful" query.

    Bug fix: behave captures step parameters as strings, but the scraper
    uses ``num`` as a result-count limit, so it must be an int.
    """
    apps = context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en", num=int(num_apps))
    context.results = list(context.scraper.get_multiple_app_details(apps, country="gb"))
@when(u'we search for another "{num_apps}" apps')
def step_impl(context, num_apps):
    """Prepend the previously stored app id(s), then fetch details for all.

    Bug fix: behave captures step parameters as strings, but the scraper
    uses ``num`` as a result-count limit, so it must be an int.
    """
    apps = context.app_id + context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en", num=int(num_apps))
    context.results = list(context.scraper.get_multiple_app_details(apps, country="gb"))
@when(u'we define an incorrect app id "{app_id}"')
def step_impl(context, app_id):
    """Store a deliberately invalid app id (as a one-element list) on the context."""
    context.app_id = [int(app_id)]
from itunes_app_scraper.scraper import AppStoreScraper
@given('we have itunes scraper installed')
def step_impl(context):
context.scraper = AppStoreScraper()
@when('we search for "{search_term}"')
def step_impl(context, search_term):
context.results = context.scraper.get_app_ids_for_query(search_term, country="gb", lang="en")
@then('the scraper will return "{text}" results')
def step_impl(context, text):
assert len(context.results) == int(text)
@then('the results length is "{json_len}"')
def step_impl(context, json_len):
print(context.results)
assert len(context.results) == int(json_len)
@when('we search for result from mindful')
def step_impl(context):
results = context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en")
context.results = context.scraper.get_similar_app_ids_for_app(results[0])
@when('we search for the topic topfreeapplications')
def step_impl(context):
context.results = context.scraper.get_app_ids_for_collection(collection="topfreeapplications", category="", num=50, country="gb")
@when('we search for the developer 384434796')
def step_impl(context):
context.results = context.scraper.get_app_ids_for_developer("384434796", country="gb")
@when('we search for the app with id "{app_id}"')
def step_impl(context, app_id):
context.results = context.scraper.get_app_details(app_id, country="gb")
@when(u'we search for "{num_apps}" apps')
def step_impl(context, num_apps):
apps = context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en", num=num_apps)
context.results = list(context.scraper.get_multiple_app_details(apps, country="gb"))
@when(u'we search for another "{num_apps}" apps')
def step_impl(context, num_apps):
apps = context.app_id + context.scraper.get_app_ids_for_query("mindful", country="gb", lang="en", num=num_apps)
context.results = list(context.scraper.get_multiple_app_details(apps, country="gb"))
@when(u'we define an incorrect app id "{app_id}"')
def step_impl(context, app_id):
context.app_id = [int(app_id)] | 0.311846 | 0.12692 |
from datetime import datetime, timedelta
from odoo import fields
from odoo.tests.common import TransactionCase
USER_DEMO = "base.user_demo"
class TestCase(TransactionCase):
    """Security checks for attendee data on event registrations."""

    at_install = True
    post_install = True

    def setUp(self):
        super(TestCase, self).setUp()
        # Signup-enabled test event running from tomorrow for two weeks.
        start = datetime.today() + timedelta(days=1)
        stop = datetime.today() + timedelta(days=15)
        self.event = self.env["event.event"].create(
            {
                "name": "TestEvent",
                "attendee_signup": True,
                "create_partner": True,
                "date_begin": fields.Datetime.to_string(start),
                "date_end": fields.Datetime.to_string(stop),
            }
        )

    def test_self_registration(self):
        """demouser creates registration for himself"""
        agent = self.env.ref(USER_DEMO).partner_id
        NEW_NAME = "<NAME>Name"
        registration = self.env["event.registration"].create(
            {
                "partner_id": agent.id,
                "event_id": self.event.id,
                "name": NEW_NAME,
                "email": agent.email,
            }
        )
        self.assertEqual(
            registration.partner_id.id, agent.id, "Wrong Agent value",
        )
        self.assertEqual(
            registration.attendee_partner_id.id, agent.id, "Wrong Attendee value",
        )
        # Buying a ticket for yourself lets you edit your own attendee data.
        self.assertEqual(
            registration.attendee_partner_id.name,
            NEW_NAME,
            "User has a right to change attendee values, if he buy ticket for himself",
        )

    def test_registration_for_existing_user(self):
        """superuser creates registration for another user"""
        agent = self.env.user.partner_id
        NEW_NAME = "<NAME> Name"
        attendee = self.env.ref(USER_DEMO)
        registration = self.env["event.registration"].create(
            {
                "partner_id": agent.id,
                "event_id": self.event.id,
                "name": NEW_NAME,
                "email": attendee.email,
            }
        )
        # Registering someone else must not let the agent rename them.
        self.assertNotEqual(
            registration.attendee_partner_id.name,
            NEW_NAME,
            "Attendee's name must not be changed for security reasons",
        )
from odoo import fields
from odoo.tests.common import TransactionCase
USER_DEMO = "base.user_demo"
class TestCase(TransactionCase):
at_install = True
post_install = True
def setUp(self):
super(TestCase, self).setUp()
self.event = self.env["event.event"].create(
{
"name": "TestEvent",
"attendee_signup": True,
"create_partner": True,
"date_begin": fields.Datetime.to_string(
datetime.today() + timedelta(days=1)
),
"date_end": fields.Datetime.to_string(
datetime.today() + timedelta(days=15)
),
}
)
def test_self_registration(self):
"""demouser creates registration for himself"""
agent = self.env.ref(USER_DEMO).partner_id
NEW_NAME = "<NAME>Name"
registration = self.env["event.registration"].create(
{
"partner_id": agent.id,
"event_id": self.event.id,
"name": NEW_NAME,
"email": agent.email,
}
)
self.assertEqual(
registration.partner_id.id, agent.id, "Wrong Agent value",
)
self.assertEqual(
registration.attendee_partner_id.id, agent.id, "Wrong Attendee value",
)
self.assertEqual(
registration.attendee_partner_id.name,
NEW_NAME,
"User has a right to change attendee values, if he buy ticket for himself",
)
def test_registration_for_existing_user(self):
"""superuser creates registration for another user"""
agent = self.env.user.partner_id
NEW_NAME = "<NAME> Name"
attendee = self.env.ref(USER_DEMO)
registration = self.env["event.registration"].create(
{
"partner_id": agent.id,
"event_id": self.event.id,
"name": NEW_NAME,
"email": attendee.email,
}
)
self.assertNotEqual(
registration.attendee_partner_id.name,
NEW_NAME,
"Attendee's name must not be changed for security reasons",
) | 0.618435 | 0.178347 |
import hashlib
import json
from unittest import TestCase
from blockchain import Blockchain
class BlockchainTestCase(TestCase):
    """Shared fixture: a fresh Blockchain plus helpers to add blocks/transactions."""

    def setUp(self):
        self.blockchain = Blockchain()

    def create_block(self, proof=123, previous_hash='abc'):
        """Forge a block with fixed, recognizable proof/previous_hash defaults."""
        self.blockchain.new_block(proof, previous_hash)

    def create_transaction(self, sender='a', recipient='b', amount=1):
        """Queue a simple one-unit transaction between two dummy parties."""
        self.blockchain.new_transaction(sender=sender, recipient=recipient, amount=amount)
class TestRegisterNodes(BlockchainTestCase):
    """Node registration: URL parsing and de-duplication."""

    def test_valid_nodes(self):
        chain = Blockchain()
        chain.register_node('http://192.168.0.1:5000')
        self.assertIn('192.168.0.1:5000', chain.nodes)

    def test_malformed_nodes(self):
        chain = Blockchain()
        # Missing "://" - netloc parsing fails, so nothing should be registered.
        chain.register_node('http//192.168.0.1:5000')
        self.assertNotIn('192.168.0.1:5000', chain.nodes)

    def test_idempotency(self):
        chain = Blockchain()
        chain.register_node('http://192.168.0.1:5000')
        chain.register_node('http://192.168.0.1:5000')
        assert len(chain.nodes) == 1
class TestBlocksAndTransactions(BlockchainTestCase):
    """Block forging, transaction bookkeeping, and last-block access."""

    def test_block_creation(self):
        self.create_block()
        newest = self.blockchain.last_block
        # Genesis block + the one we just created.
        assert len(self.blockchain.chain) == 2
        assert newest['index'] == 2
        assert newest['timestamp'] is not None
        assert newest['proof'] == 123
        assert newest['previous_hash'] == 'abc'

    def test_create_transaction(self):
        self.create_transaction()
        txn = self.blockchain.current_transactions[-1]
        assert txn
        assert txn['sender'] == 'a'
        assert txn['recipient'] == 'b'
        assert txn['amount'] == 1

    def test_block_resets_transactions(self):
        self.create_transaction()
        before = len(self.blockchain.current_transactions)
        self.create_block()
        after = len(self.blockchain.current_transactions)
        assert before == 1
        assert after == 0

    def test_return_last_block(self):
        self.create_block()
        newest = self.blockchain.last_block
        assert len(self.blockchain.chain) == 2
        assert newest is self.blockchain.chain[-1]
class TestHashingAndProofs(BlockchainTestCase):
    """The block hash must be SHA-256 over the sorted-key JSON encoding."""

    def test_hash_is_correct(self):
        self.create_block()
        newest = self.blockchain.last_block
        encoded = json.dumps(newest, sort_keys=True).encode()
        expected_hash = hashlib.sha256(encoded).hexdigest()
        assert len(expected_hash) == 64
        assert expected_hash == self.blockchain.hash(newest)
import json
from unittest import TestCase
from blockchain import Blockchain
class BlockchainTestCase(TestCase):
def setUp(self):
self.blockchain = Blockchain()
def create_block(self, proof=123, previous_hash='abc'):
self.blockchain.new_block(proof, previous_hash)
def create_transaction(self, sender='a', recipient='b', amount=1):
self.blockchain.new_transaction(
sender=sender,
recipient=recipient,
amount=amount
)
class TestRegisterNodes(BlockchainTestCase):
def test_valid_nodes(self):
blockchain = Blockchain()
blockchain.register_node('http://192.168.0.1:5000')
self.assertIn('192.168.0.1:5000', blockchain.nodes)
def test_malformed_nodes(self):
blockchain = Blockchain()
blockchain.register_node('http//192.168.0.1:5000')
self.assertNotIn('192.168.0.1:5000', blockchain.nodes)
def test_idempotency(self):
blockchain = Blockchain()
blockchain.register_node('http://192.168.0.1:5000')
blockchain.register_node('http://192.168.0.1:5000')
assert len(blockchain.nodes) == 1
class TestBlocksAndTransactions(BlockchainTestCase):
def test_block_creation(self):
self.create_block()
latest_block = self.blockchain.last_block
assert len(self.blockchain.chain) == 2
assert latest_block['index'] == 2
assert latest_block['timestamp'] is not None
assert latest_block['proof'] == 123
assert latest_block['previous_hash'] == 'abc'
def test_create_transaction(self):
self.create_transaction()
transaction = self.blockchain.current_transactions[-1]
assert transaction
assert transaction['sender'] == 'a'
assert transaction['recipient'] == 'b'
assert transaction['amount'] == 1
def test_block_resets_transactions(self):
self.create_transaction()
initial_length = len(self.blockchain.current_transactions)
self.create_block()
current_length = len(self.blockchain.current_transactions)
assert initial_length == 1
assert current_length == 0
def test_return_last_block(self):
self.create_block()
created_block = self.blockchain.last_block
assert len(self.blockchain.chain) == 2
assert created_block is self.blockchain.chain[-1]
class TestHashingAndProofs(BlockchainTestCase):
def test_hash_is_correct(self):
self.create_block()
new_block = self.blockchain.last_block
new_block_json = json.dumps(self.blockchain.last_block, sort_keys=True).encode()
new_hash = hashlib.sha256(new_block_json).hexdigest()
assert len(new_hash) == 64
assert new_hash == self.blockchain.hash(new_block) | 0.710025 | 0.500854 |
from DB_mgmt import *
from protobuffs.code_pb2 import *
from model.objects.User import *
class AuthDAO():
# Load the permission database into an in-memory dictionary.
# This method also initializes objects for users with no previous permissions.
@staticmethod
def loadDbMemory(memory_db):
# Get every switch id and every user id stored in the database.
queryCursor.execute("SELECT device_id FROM switches")
switch_ids = queryCursor.fetchall()
queryCursor.execute("SELECT user_id FROM users")
user_ids = queryCursor.fetchall()
# Initialize the in-memory dictionary.
# It skips previously loaded permissions by addPermission() to spare database accesses.
# TODO (maybe): refactor addPermission() so it won't initialize the dict on switch load.
for user_id in user_ids:
for switch_id in switch_ids:
key = (user_id[0], switch_id[0])
if key not in memory_db:
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=? AND device_id=?", key)
permission = queryCursor.fetchone()
memory_db[key] = 0x0000000 if permission is None else permission[0]
print "Successfully loaded permission database into memory"
return
@staticmethod
def addUser(user_name, password):
try:
queryCursor.execute("INSERT INTO users (name, password) VALUES (?,?)", (user_name, password))
dbVar.commit()
return True
except:
print "Failed adding user '{}': could not commit to the database".format(user_name)
return False
@staticmethod
def getUserByName(user_name):
queryCursor.execute("SELECT user_id, name FROM users WHERE name=?", (user_name,))
user_data = queryCursor.fetchone()
if user_data is None:
return False
else:
return User(user_data[0], user_data[1])
@staticmethod
def getUserPermissions(user_name):
user = AuthDAO.getUserByName(user_name)
if user is False:
print "Failed getting permissions: user '{}' not found".format(user_name)
return False
else:
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=?", (user.user_id,))
permissions = queryCursor.fetchall()
return permissions
@staticmethod
def getUserPassword(user_name):
queryCursor.execute("SELECT password FROM users WHERE name=?", (user_name,))
password = queryCursor.fetchone()
if password is None:
print "Failed getting password: user '{}' not found".format(user_name)
return password[0]
@staticmethod
def addPermission(user_id, switch_id, permission):
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=? AND device_id=?", (user_id, switch_id))
stored_permission = queryCursor.fetchone()
if stored_permission is None:
try:
queryCursor.execute("INSERT INTO permissions (user_id, device_id, perms) VALUES (?,?,?)", (user_id, switch_id, permission))
dbVar.commit()
return True
except:
print "Failed adding permission: could not commit to the database"
return False
else:
permission = permission | stored_permission[0]
try:
queryCursor.execute("UPDATE permissions SET perms=? WHERE user_id=? AND device_id=?", (permission, user_id, switch_id))
dbVar.commit()
return True
except:
print "Failed adding permission: could not commit to the database"
return False
@staticmethod
def removePermission(user_id, switch_id, permission):
try:
queryCursor.execute("UPDATE permissions SET perms=? WHERE user_id=? AND device_id=?", (permission, user_id, switch_id))
dbVar.commit()
return True
except:
print "Failed removing permission: could not commit to the database"
return False | pvs-build/model/AuthDAO.py | from DB_mgmt import *
from protobuffs.code_pb2 import *
from model.objects.User import *
class AuthDAO():
# Load the permission database into an in-memory dictionary.
# This method also initializes objects for users with no previous permissions.
@staticmethod
def loadDbMemory(memory_db):
# Get every switch id and every user id stored in the database.
queryCursor.execute("SELECT device_id FROM switches")
switch_ids = queryCursor.fetchall()
queryCursor.execute("SELECT user_id FROM users")
user_ids = queryCursor.fetchall()
# Initialize the in-memory dictionary.
# It skips previously loaded permissions by addPermission() to spare database accesses.
# TODO (maybe): refactor addPermission() so it won't initialize the dict on switch load.
for user_id in user_ids:
for switch_id in switch_ids:
key = (user_id[0], switch_id[0])
if key not in memory_db:
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=? AND device_id=?", key)
permission = queryCursor.fetchone()
memory_db[key] = 0x0000000 if permission is None else permission[0]
print "Successfully loaded permission database into memory"
return
@staticmethod
def addUser(user_name, password):
try:
queryCursor.execute("INSERT INTO users (name, password) VALUES (?,?)", (user_name, password))
dbVar.commit()
return True
except:
print "Failed adding user '{}': could not commit to the database".format(user_name)
return False
@staticmethod
def getUserByName(user_name):
queryCursor.execute("SELECT user_id, name FROM users WHERE name=?", (user_name,))
user_data = queryCursor.fetchone()
if user_data is None:
return False
else:
return User(user_data[0], user_data[1])
@staticmethod
def getUserPermissions(user_name):
user = AuthDAO.getUserByName(user_name)
if user is False:
print "Failed getting permissions: user '{}' not found".format(user_name)
return False
else:
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=?", (user.user_id,))
permissions = queryCursor.fetchall()
return permissions
@staticmethod
def getUserPassword(user_name):
queryCursor.execute("SELECT password FROM users WHERE name=?", (user_name,))
password = queryCursor.fetchone()
if password is None:
print "Failed getting password: user '{}' not found".format(user_name)
return password[0]
@staticmethod
def addPermission(user_id, switch_id, permission):
queryCursor.execute("SELECT perms FROM permissions WHERE user_id=? AND device_id=?", (user_id, switch_id))
stored_permission = queryCursor.fetchone()
if stored_permission is None:
try:
queryCursor.execute("INSERT INTO permissions (user_id, device_id, perms) VALUES (?,?,?)", (user_id, switch_id, permission))
dbVar.commit()
return True
except:
print "Failed adding permission: could not commit to the database"
return False
else:
permission = permission | stored_permission[0]
try:
queryCursor.execute("UPDATE permissions SET perms=? WHERE user_id=? AND device_id=?", (permission, user_id, switch_id))
dbVar.commit()
return True
except:
print "Failed adding permission: could not commit to the database"
return False
@staticmethod
def removePermission(user_id, switch_id, permission):
try:
queryCursor.execute("UPDATE permissions SET perms=? WHERE user_id=? AND device_id=?", (permission, user_id, switch_id))
dbVar.commit()
return True
except:
print "Failed removing permission: could not commit to the database"
return False | 0.419172 | 0.07373 |
import json
import difflib
import unittest
from responseobjects.api_response import KeywordSearchResponse, \
FileSearchResponse
import os
base_path = os.path.dirname(os.path.abspath(__file__))
class MyTestCase(unittest.TestCase):
def test_key_search_response(self):
"""
This method tests the KeywordSearchResponse object.
It will make sure the functionality works as
appropriate by asserting the apiResponse attribute
is the same as expected.
:return:
"""
with open('{}/test_mapping_config.json'.format(
base_path)) as json_mapping:
test_mapping = json.load(json_mapping)
json_mapping.close()
with open('{}/test1index.json'.format(base_path)) as json_test:
test_json = json.load(json_test)
json_test.close()
with open('{}/keyword_test1.json'.format(base_path)) as test1:
keyword_test = json.load(test1)
test1.close()
# Still need a way to test the response.
keyword_response = KeywordSearchResponse(
test_mapping, test_json).return_response().to_json()
# Transform both json objects to a string
json_response = json.dumps(keyword_test, sort_keys=True)
json_test = json.dumps(keyword_response, sort_keys=True)
# Now show differences so message is helpful
print "Comparing the two dictionaries built."
print('{}... => {}...'.format(json_test[:20], json_response[:20]))
for i, s in enumerate(difflib.ndiff(json_test, json_response)):
if s[0] == ' ':
continue
elif s[0] == '-':
print(u'Delete "{}" from position {}'.format(s[-1], i))
elif s[0] == '+':
print(u'Add "{}" to position {}'.format(s[-1], i))
self.assertEqual(json_test, json_response)
def test_null_key_search_response(self):
"""
This method tests the KeywordSearchResponse object.
It will make sure the functionality works as
appropriate by asserting the apiResponse attribute is
the same as expected.
:return:
"""
with open('{}/test_null_mapping_config.json'.format(
base_path)) as json_mapping:
test_mapping = json.load(json_mapping)
json_mapping.close()
with open('{}/test1index.json'.format(base_path)) as json_test:
test_json = json.load(json_test)
json_test.close()
with open('{}/keyword_null_test1.json'.format(base_path)) as test1:
keyword_test = json.load(test1)
test1.close()
# Still need a way to test the response.
keyword_response = KeywordSearchResponse(
test_mapping, test_json).return_response().to_json()
# Transform both json objects to a string
json_response = json.dumps(keyword_test, sort_keys=True)
json_test = json.dumps(keyword_response, sort_keys=True)
# Now show differences so message is helpful
print "Comparing the two dictionaries built."
print('{}... => {}...'.format(json_test[:20], json_response[:20]))
for i, s in enumerate(difflib.ndiff(json_test, json_response)):
if s[0] == ' ':
continue
elif s[0] == '-':
print(u'Delete "{}" from position {}'.format(s[-1], i))
elif s[0] == '+':
print(u'Add "{}" to position {}'.format(s[-1], i))
self.assertEqual(json_test, json_response)
def test_file_search_response(self):
    """
    Test the FileSearchResponse object.

    Builds a FileSearchResponse from the mapping config, the test
    index, and the pagination/facet fixtures, then compares its
    serialized apiResponse against the expected fixture, printing a
    character-level diff on the way to make failures easier to debug.
    :return:
    """
    # 'with' closes each file automatically; the original also called
    # close() inside the with-block, which was redundant.
    with open('{}/test_mapping_config.json'.format(
            base_path)) as json_mapping:
        test_mapping = json.load(json_mapping)
    with open('{}/test1index.json'.format(base_path)) as json_test:
        test_json = json.load(json_test)
    with open('{}/filesearch_test1.json'.format(base_path)) as test1:
        file_search_test = json.load(test1)
    # This is what will be used as the comparing standard
    with open('{}/facets_test_input1.json'.format(
            base_path)) as facet_input:
        facet_test = json.load(facet_input)
    with open('{}/pagination_test_input1.json'.format(
            base_path)) as pagination_input:
        pagination_test = json.load(pagination_input)
    # Build the response under test.
    file_search_response = FileSearchResponse(
        test_mapping,
        test_json,
        pagination_test,
        facet_test
    ).return_response().to_json()
    # Serialize with sorted keys so key ordering cannot cause a
    # spurious mismatch.
    json_response = json.dumps(file_search_test, sort_keys=True)
    json_test = json.dumps(file_search_response, sort_keys=True)
    # BUG FIX: 'print "..."' is Python 2 statement syntax and a
    # SyntaxError under Python 3; the surrounding code already uses
    # the print() function.
    print("Comparing the two dictionaries built.")
    print('{}... => {}...'.format(json_test[:20], json_response[:20]))
    for i, s in enumerate(difflib.ndiff(json_test, json_response)):
        if s[0] == ' ':
            continue
        elif s[0] == '-':
            print(u'Delete "{}" from position {}'.format(s[-1], i))
        elif s[0] == '+':
            print(u'Add "{}" to position {}'.format(s[-1], i))
    self.assertEqual(json_test, json_response)
if __name__ == '__main__':
unittest.main() | test/test_response.py |
import json
import difflib
import unittest
from responseobjects.api_response import KeywordSearchResponse, \
FileSearchResponse
import os
base_path = os.path.dirname(os.path.abspath(__file__))
class MyTestCase(unittest.TestCase):
    """Tests for KeywordSearchResponse and FileSearchResponse.

    Each test loads JSON fixtures from ``base_path``, builds the
    response object under test, and compares its serialized
    apiResponse with the expected fixture via
    :meth:`_assert_json_equal`.
    """

    @staticmethod
    def _load_json(file_name):
        """Load and return the parsed JSON fixture *file_name* from base_path."""
        with open('{}/{}'.format(base_path, file_name)) as json_file:
            return json.load(json_file)

    def _assert_json_equal(self, actual, expected):
        """Serialize both objects canonically, print a diff, and compare.

        The character-level diff makes assertion failures far easier
        to read than the raw assertEqual output.  This helper replaces
        the identical comparison logic that was duplicated in all
        three test methods.
        """
        json_test = json.dumps(actual, sort_keys=True)
        json_response = json.dumps(expected, sort_keys=True)
        # BUG FIX: the original used the Python 2 statement form
        # 'print "..."', which is a SyntaxError under Python 3; the
        # rest of the module already uses the print() function.
        print("Comparing the two dictionaries built.")
        print('{}... => {}...'.format(json_test[:20], json_response[:20]))
        for i, s in enumerate(difflib.ndiff(json_test, json_response)):
            if s[0] == ' ':
                continue
            elif s[0] == '-':
                print(u'Delete "{}" from position {}'.format(s[-1], i))
            elif s[0] == '+':
                print(u'Add "{}" to position {}'.format(s[-1], i))
        self.assertEqual(json_test, json_response)

    def test_key_search_response(self):
        """
        This method tests the KeywordSearchResponse object.
        It asserts that the apiResponse built from the standard
        mapping config matches the keyword_test1.json fixture.
        :return:
        """
        test_mapping = self._load_json('test_mapping_config.json')
        test_json = self._load_json('test1index.json')
        keyword_test = self._load_json('keyword_test1.json')
        keyword_response = KeywordSearchResponse(
            test_mapping, test_json).return_response().to_json()
        self._assert_json_equal(keyword_response, keyword_test)

    def test_null_key_search_response(self):
        """
        This method tests the KeywordSearchResponse object.
        It asserts that the apiResponse built from the null mapping
        config matches the keyword_null_test1.json fixture.
        :return:
        """
        test_mapping = self._load_json('test_null_mapping_config.json')
        test_json = self._load_json('test1index.json')
        keyword_test = self._load_json('keyword_null_test1.json')
        keyword_response = KeywordSearchResponse(
            test_mapping, test_json).return_response().to_json()
        self._assert_json_equal(keyword_response, keyword_test)

    def test_file_search_response(self):
        """
        This method tests the FileSearchResponse object.
        It asserts that the apiResponse built with the pagination and
        facet fixtures matches the filesearch_test1.json fixture.
        :return:
        """
        test_mapping = self._load_json('test_mapping_config.json')
        test_json = self._load_json('test1index.json')
        file_search_test = self._load_json('filesearch_test1.json')
        # These fixtures are used as the comparing standard.
        facet_test = self._load_json('facets_test_input1.json')
        pagination_test = self._load_json('pagination_test_input1.json')
        file_search_response = FileSearchResponse(
            test_mapping,
            test_json,
            pagination_test,
            facet_test
        ).return_response().to_json()
        self._assert_json_equal(file_search_response, file_search_test)
if __name__ == '__main__':
unittest.main() | 0.347648 | 0.387574 |
import unittest
from core.models import GitlabUser
from core.tests.test_models import GitlabUserModelMethod
from django.test import TestCase
class GitlabUserSignalsTests(TestCase, GitlabUserModelMethod):
def equal(self, gitlab_user, user, user_social_auth):
    """Assert *gitlab_user* is wired to *user* and *user_social_auth*."""
    checks = (
        (gitlab_user.user, user),
        (gitlab_user.user_social_auth, user_social_auth),
        (gitlab_user.gitlab_id, user_social_auth.uid),
    )
    for actual, expected in checks:
        self.assertEqual(actual, expected)

def test_after_creating_user_social_auth_with_provider_not_gitlab_do_nothing(self):
    """A social auth with a foreign provider must not create a GitlabUser."""
    _user, social_auth = self.create_user_and_user_social_auth(provider='test')
    with self.assertRaises(GitlabUser.DoesNotExist):
        GitlabUser.objects.get(user_social_auth=social_auth)

def test_after_creating_user_social_auth_create_user(self):
    """Creating a gitlab social auth also creates a linked GitlabUser."""
    user, social_auth = self.create_user_and_user_social_auth()
    created = GitlabUser.objects.get(user_social_auth=social_auth)
    self.equal(created, user, social_auth)

def test_after_saving_social_auth_create_user_if_not_exist(self):
    """Re-saving a social auth recreates a deleted GitlabUser."""
    user, social_auth = self.create_user_and_user_social_auth()
    GitlabUser.objects.get(user_social_auth=social_auth).delete()
    social_auth.save()
    recreated = GitlabUser.objects.get(user_social_auth=social_auth)
    self.equal(recreated, user, social_auth)

def test_after_creating_social_auth_connect_him_to_existing_user(self):
    """A new social auth attaches to the existing GitlabUser with its uid."""
    gitlab_id = 10
    existing = self.create_gitlab_user(gitlab_id=gitlab_id)
    user, social_auth = self.create_user_and_user_social_auth(uid=gitlab_id)
    existing.refresh_from_db()
    self.equal(existing, user, social_auth)
@unittest.skip("GiLabUser does not contain mutable attributes")
def test_after_saving_social_auth_update_user(self):
user, user_social_auth = self.create_user_and_user_social_auth()
user_social_auth.uid = 400
user_social_auth.save()
gitlab_user = GitlabUser.objects.get(user_social_auth=user_social_auth)
self.equal(gitlab_user, user, user_social_auth) | SZR/apps/core/tests/test_signals.py | import unittest
from core.models import GitlabUser
from core.tests.test_models import GitlabUserModelMethod
from django.test import TestCase
class GitlabUserSignalsTests(TestCase, GitlabUserModelMethod):
def equal(self, gitlab_user, user, user_social_auth):
    """Verify the GitlabUser record mirrors the given user and social auth."""
    self.assertEqual(gitlab_user.user, user)
    self.assertEqual(gitlab_user.user_social_auth, user_social_auth)
    self.assertEqual(gitlab_user.gitlab_id, user_social_auth.uid)

def test_after_creating_user_social_auth_with_provider_not_gitlab_do_nothing(self):
    """Foreign-provider social auths must not spawn a GitlabUser."""
    user, user_social_auth = self.create_user_and_user_social_auth(provider='test')
    with self.assertRaises(GitlabUser.DoesNotExist):
        GitlabUser.objects.get(user_social_auth=user_social_auth)

def test_after_creating_user_social_auth_create_user(self):
    """A gitlab social auth creates a matching GitlabUser."""
    user, user_social_auth = self.create_user_and_user_social_auth()
    gitlab_user = GitlabUser.objects.get(user_social_auth=user_social_auth)
    self.equal(gitlab_user, user, user_social_auth)

def test_after_saving_social_auth_create_user_if_not_exist(self):
    """Saving a social auth recreates its GitlabUser when it is missing."""
    user, user_social_auth = self.create_user_and_user_social_auth()
    GitlabUser.objects.get(user_social_auth=user_social_auth).delete()
    user_social_auth.save()
    gitlab_user = GitlabUser.objects.get(user_social_auth=user_social_auth)
    self.equal(gitlab_user, user, user_social_auth)

def test_after_creating_social_auth_connect_him_to_existing_user(self):
    """A new social auth connects to an existing GitlabUser with the same uid."""
    gitlab_id = 10
    gitlab_user = self.create_gitlab_user(gitlab_id=gitlab_id)
    user, user_social_auth = self.create_user_and_user_social_auth(uid=gitlab_id)
    gitlab_user.refresh_from_db()
    self.equal(gitlab_user, user, user_social_auth)
@unittest.skip("GiLabUser does not contain mutable attributes")
def test_after_saving_social_auth_update_user(self):
user, user_social_auth = self.create_user_and_user_social_auth()
user_social_auth.uid = 400
user_social_auth.save()
gitlab_user = GitlabUser.objects.get(user_social_auth=user_social_auth)
self.equal(gitlab_user, user, user_social_auth) | 0.344223 | 0.213193 |
import unittest
from unittest.mock import MagicMock, patch
import functools
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QSignalSpy, QTest
from PyQt5.QtWidgets import QWidget
from extra_foam.pipeline.tests import _RawDataMixin
from extra_foam.special_suite import logger, mkQApp
from extra_foam.gui.plot_widgets import ImageViewF, PlotWidgetF
from extra_foam.special_suite.special_analysis_base import (
_BaseAnalysisCtrlWidgetS, _SpecialAnalysisBase, create_special,
ClientType, QThreadWorker, QThreadFoamClient, QThreadKbClient
)
app = mkQApp()
logger.setLevel('CRITICAL')
class testSpecialAnalysisBase(_RawDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Build a dummy special-suite window (ctrl widget, worker, views) once."""

    class DummyCtrlWidget(_BaseAnalysisCtrlWidgetS):
        """Minimal ctrl widget with one non-reconfigurable child."""
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.dummy_widget = QWidget()
            self._non_reconfigurable_widgets = [self.dummy_widget]

    class DummyProcessor(QThreadWorker):
        """Worker stub that records dark removal and declares two sources."""
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._dark_removed = False

        def process(self, data):
            """Override."""
            pass

        def onRemoveDark(self):
            """Override."""
            self._dark_removed = True

        def sources(self):
            return [
                ("device1:output", "property1", 1),
                ("device2", "property2", 0)
            ]

    class DummyImageView(ImageViewF):
        def __init__(self, *, parent=None):
            super().__init__(parent=parent)

        def updateF(self, data):
            """Override."""
            pass

    class DummyImageViewWithRoi(ImageViewF):
        def __init__(self, *, parent=None):
            super().__init__(has_roi=True, parent=parent)

        def updateF(self, data):
            """Override."""
            pass

    class DummyPlotWidget(PlotWidgetF):
        def __init__(self, *, parent=None):
            super().__init__(parent=parent)
            self._plot = self.plotCurve(name="dummy")

        def updateF(self, data):
            """Override."""
            pass

    @create_special(DummyCtrlWidget, DummyProcessor)
    class DummyWindow(_SpecialAnalysisBase):
        _title = "Dummy"
        _long_title = "Dummy analysis"
        _client_support = ClientType.BOTH

        def __init__(self, topic):
            super().__init__(topic)
            self._line = DummyPlotWidget(parent=self)
            self._view = DummyImageView(parent=self)
            self._view_with_roi = DummyImageViewWithRoi(parent=self)
            self.initUI()
            self.initConnections()
            self.startWorker()

        def initUI(self):
            """Override."""
            pass

        def initConnections(self):
            """Override."""
            pass

    # Note: startWorker is not patched as it is in other tests of
    # concrete windows
    cls._win = DummyWindow('DET')
def testGeneral(self):
    """The topic handed to the window must reach the ctrl widget."""
    self.assertEqual('DET', self._win._ctrl_widget_st.topic)
def testPlotWidgets(self):
    """Plot and image widgets are registered and refreshed correctly."""
    win = self._win

    self.assertEqual(3, len(win._plot_widgets_st))
    self.assertIn(win._line, win._plot_widgets_st)
    self.assertIn(win._view, win._plot_widgets_st)

    self.assertEqual(2, len(win._image_views_st))
    self.assertIn(win._view, win._image_views_st)
    self.assertIn(win._view_with_roi, win._image_views_st)

    # Auto-level button triggers an image refresh.
    with patch.object(win._view, "updateImage") as update_image:
        QTest.mouseClick(win._com_ctrl_st.auto_level_btn, Qt.LeftButton)
        update_image.assert_called_once()

    with patch.object(win._view, "updateF") as update_view, \
            patch.object(win._line, "updateF") as update_line:
        win.updateWidgetsST()
        # win._data is empty, so nothing should be refreshed.
        update_line.assert_not_called()
        update_view.assert_not_called()

        # With output data available both widgets must be refreshed.
        with patch.object(win._worker_st, "getOutputDataST"):
            win.updateWidgetsST()
            update_line.assert_called_once()
            update_view.assert_called_once()
def testCommonStartStopReset(self):
    """Exercise the common start / stop / reset plumbing of the window."""
    win = self._win
    common_ctrl = win._com_ctrl_st
    analysis_ctrl = win._ctrl_widget_st
    client = win._client_st
    worker = win._worker_st

    self.assertFalse(common_ctrl.stop_btn.isEnabled())
    self.assertIsNone(client._endpoint_st)

    # Starting connects the client, fills the catalog, freezes the
    # ctrl widgets and starts the plot timer.
    with patch.object(client, "start") as client_start, \
            patch.object(win._plot_timer_st, "start") as timer_start:
        spy = QSignalSpy(win.started_sgn)
        QTest.mouseClick(common_ctrl.start_btn, Qt.LeftButton)
        self.assertEqual(f"tcp://{common_ctrl._hostname_le.text()}:"
                         f"{common_ctrl._port_le.text()}", client._endpoint_st)
        self.assertEqual(2, len(client._catalog_st))
        self.assertIn("device1:output property1", client._catalog_st)
        self.assertIn("device2 property2", client._catalog_st)
        self.assertEqual(1, len(spy))
        self.assertTrue(common_ctrl.stop_btn.isEnabled())
        self.assertFalse(common_ctrl.start_btn.isEnabled())
        self.assertFalse(common_ctrl.load_dark_run_btn.isEnabled())
        self.assertFalse(analysis_ctrl.dummy_widget.isEnabled())
        client_start.assert_called_once()
        timer_start.assert_called_once()

    # Stopping terminates the run and re-enables the widgets.
    with patch.object(client, "terminateRunST") as client_stop, \
            patch.object(win._plot_timer_st, "stop") as timer_stop:
        spy = QSignalSpy(win.stopped_sgn)
        QTest.mouseClick(common_ctrl.stop_btn, Qt.LeftButton)
        self.assertEqual(1, len(spy))
        self.assertFalse(common_ctrl.stop_btn.isEnabled())
        self.assertTrue(common_ctrl.start_btn.isEnabled())
        self.assertTrue(common_ctrl.load_dark_run_btn.isEnabled())
        self.assertTrue(analysis_ctrl.dummy_widget.isEnabled())
        client_stop.assert_called_once()
        timer_stop.assert_called_once()

    # Invalid source definitions must refuse to start and log an error.
    with patch.object(client, "start") as client_start, \
            patch.object(win._plot_timer_st, "start") as timer_start, \
            patch.object(worker, "sources") as mocked_sources:
        bad_cases = (
            ([("", "property1", 1)], "Empty source"),
            ([("device", "", 0)], "Empty property"),
            ([("device", "property", 2)], "Not understandable data type"),
        )
        for sources, message in bad_cases:
            with self.assertLogs(logger, level="ERROR") as cm:
                mocked_sources.return_value = sources
                QTest.mouseClick(common_ctrl.start_btn, Qt.LeftButton)
            client_start.assert_not_called()
            timer_start.assert_not_called()
            self.assertIn(message, cm.output[0])

    # Reset propagates to the client, the worker and every plot widget.
    with patch.object(client, "onResetST") as client_reset, \
            patch.object(worker, "onResetST") as worker_reset, \
            patch.object(win._line, "reset") as line_reset, \
            patch.object(win._view, "reset") as view_reset:
        QTest.mouseClick(common_ctrl.reset_btn, Qt.LeftButton)
        client_reset.assert_called_once()
        worker_reset.assert_called_once()
        line_reset.assert_called_once()
        view_reset.assert_called_once()

    # Worker reset clears both of its queues.
    with patch.object(worker._input_st, "clear") as input_clear, \
            patch.object(worker._output_st, "clear") as output_clear:
        worker._reset_st = False
        worker.onResetST()
        input_clear.assert_called_once()
        output_clear.assert_called_once()
        worker._reset_st = True

    # Client reset resets the transformer and clears its output queue.
    with patch.object(client._transformer_st, "reset") as transformer_reset, \
            patch.object(client._output_st, "clear") as output_clear:
        client.onResetST()
        transformer_reset.assert_called_once()
        output_clear.assert_called_once()
def testProcessFlow(self):
    """_processImpST runs pre/process/post and honours the reset flag."""
    worker = self._win._worker_st
    data = object()
    with patch.object(worker, "preprocess") as mocked_preprocess, \
            patch.object(worker, "process") as mocked_process, \
            patch.object(worker, "postprocess") as mocked_postprocess, \
            patch.object(worker, "reset") as mocked_reset:
        worker._reset_st = False
        worker._processImpST(data)
        mocked_preprocess.assert_called_once()
        mocked_process.assert_called_once_with(data)
        mocked_postprocess.assert_called_once()
        mocked_reset.assert_not_called()

        # A pending reset flag triggers reset() and is then cleared.
        worker._reset_st = True
        worker._processImpST(data)
        mocked_reset.assert_called_once()
        self.assertFalse(worker._reset_st)
def testCommonDarkOperation(self):
    """Record / load / remove / subtract-dark controls drive the worker."""
    win = self._win
    widget = win._com_ctrl_st
    worker = win._worker_st

    # Recording dark: the button toggles the worker state.
    self.assertFalse(worker.recordingDark())  # default value
    QTest.mouseClick(widget.record_dark_btn, Qt.LeftButton)
    self.assertTrue(worker.recordingDark())
    self.assertTrue(widget.record_dark_btn.isChecked())
    QTest.mouseClick(widget.record_dark_btn, Qt.LeftButton)
    self.assertFalse(worker.recordingDark())
    self.assertFalse(widget.record_dark_btn.isChecked())

    # Loading a dark run: cancelling the dialog must not trigger a load.
    with patch.object(worker, "onLoadDarkRun") as load_dark_run:
        with patch('extra_foam.special_suite.special_analysis_base.QFileDialog.getExistingDirectory',
                   return_value=""):
            QTest.mouseClick(widget.load_dark_run_btn, Qt.LeftButton)
            load_dark_run.assert_not_called()

        with patch('extra_foam.special_suite.special_analysis_base.QFileDialog.getExistingDirectory',
                   return_value="/run/directory"):
            QTest.mouseClick(widget.load_dark_run_btn, Qt.LeftButton)
            load_dark_run.assert_called_with("/run/directory")

    # Removing dark (patch.object does not work here, so use the flag).
    self.assertFalse(worker._dark_removed)
    QTest.mouseClick(widget.remove_dark_btn, Qt.LeftButton)
    self.assertTrue(worker._dark_removed)

    # Subtracting dark follows the checkbox.
    self.assertTrue(worker.subtractDark())  # default value
    widget.dark_subtraction_cb.setChecked(False)
    self.assertFalse(worker.subtractDark())
def testRoiCtrl(self):
    """Placeholder: ROI control behaviour is not covered here yet."""
    pass
def testSqueezeToVector(self):
    """squeezeToVector squeezes (n, 1) arrays and rejects higher ranks.

    FIX: the two squeeze tests had swapped names — the test named
    'testSqueezeCameraImage' exercised squeezeToVector and vice
    versa.  Each test now carries the name of the function it
    actually tests.
    """
    a1d = np.ones((4, ))
    a2d = np.ones((2, 1))
    a3d = np.ones((3, 3, 1))

    func = functools.partial(self._win._worker_st.squeezeToVector, 1234)

    # None and 3D input are rejected.
    assert func(None) is None
    assert func(a3d) is None

    # 1D passes through; (n, 1) loses its trailing axis.
    ret_1d = func(a1d)
    np.testing.assert_array_equal(a1d, ret_1d)
    ret_2d = func(a2d)
    np.testing.assert_array_equal(a2d.squeeze(axis=-1), ret_2d)

def testSqueezeCameraImage(self):
    """squeezeToImage squeezes a trailing singleton axis into a float32 image.

    (See testSqueezeToVector for the name-swap fix note.)
    """
    a1d = np.ones((4, ))
    a2d = np.ones((2, 2))
    a3d = np.ones((3, 3, 1))
    a3d_f = np.ones((3, 3, 2))
    a4d = np.ones((2, 2, 2, 2))

    func = functools.partial(self._win._worker_st.squeezeToImage, 1234)

    # None, 1D and 4D input are rejected.
    assert func(None) is None
    assert func(a1d) is None
    assert func(a4d) is None

    # 2D passes through as float32.
    ret_2d = func(a2d)
    np.testing.assert_array_equal(a2d, ret_2d)
    assert np.float32 == ret_2d.dtype

    # (h, w, 1) loses its trailing axis; (h, w, 2) is rejected.
    ret_3d = func(a3d)
    np.testing.assert_array_equal(a3d.squeeze(axis=-1), ret_3d)
    assert np.float32 == ret_3d.dtype

    assert func(a3d_f) is None
def testGetRoiData(self):
    """getRoiData crops 2D and 3D arrays by the ROI geometry."""
    worker = self._win._worker_st

    # --- 2D array ---
    img = np.ones((4, 6))

    # No ROI geometry: the input is returned as-is (optionally copied).
    worker._roi_geom_st = None
    roi = worker.getRoiData(img)
    assert img is roi
    roi = worker.getRoiData(img, copy=True)
    assert img is not roi
    np.testing.assert_array_equal(img, roi)

    # ROI intersecting the image.
    worker._roi_geom_st = (1, 2, 2, 3)
    roi = worker.getRoiData(img)
    np.testing.assert_array_equal(img[2:5, 1:3], roi)

    # ROI entirely outside the image.
    worker._roi_geom_st = (-5, -6, 2, 3)
    roi = worker.getRoiData(img)
    np.testing.assert_array_equal(np.empty((0, 0)), roi)

    # --- 3D array ---
    img = np.ones((3, 4, 6))

    worker._roi_geom_st = (1, 2, 2, 3)
    roi = worker.getRoiData(img)
    np.testing.assert_array_equal(img[:, 2:5, 1:3], roi)

    worker._roi_geom_st = (-5, -6, 2, 3)
    roi = worker.getRoiData(img)
    np.testing.assert_array_equal(np.empty((3, 0, 0)), roi)
def testClientChange(self):
win = self._win
ctrl_widget = win._com_ctrl_st
worker = win._worker_st
# Helper function to get the appropriate class for a client type
def client_class(client_type):
if client_type == ClientType.EXTRA_FOAM:
return QThreadFoamClient
elif client_type == ClientType.KARABO_BRIDGE:
return QThreadKbClient
else:
raise RuntimeError("Unrecognized client type")
# Check the client is initialized properly
client_type = ClientType(ctrl_widget._client_type_cb.currentText())
assert client_type == ctrl_widget.selected_client
assert client_class(client_type) == type(win._client_st)
assert worker.client_type == client_type
# Which client is selected by default doesn't actually matter for
# operation, but it's simpler to test if we know that this is the
# default.
assert client_type == ClientType.EXTRA_FOAM
# Change the client type
ctrl_widget._client_type_cb.setCurrentText(ClientType.KARABO_BRIDGE.value)
client_type = ctrl_widget.selected_client
assert client_class(client_type) == type(win._client_st)
assert worker.client_type == client_type | extra_foam/special_suite/tests/test_special_analysis_base.py | import unittest
from unittest.mock import MagicMock, patch
import functools
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QSignalSpy, QTest
from PyQt5.QtWidgets import QWidget
from extra_foam.pipeline.tests import _RawDataMixin
from extra_foam.special_suite import logger, mkQApp
from extra_foam.gui.plot_widgets import ImageViewF, PlotWidgetF
from extra_foam.special_suite.special_analysis_base import (
_BaseAnalysisCtrlWidgetS, _SpecialAnalysisBase, create_special,
ClientType, QThreadWorker, QThreadFoamClient, QThreadKbClient
)
app = mkQApp()
logger.setLevel('CRITICAL')
class testSpecialAnalysisBase(_RawDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Create the dummy special-suite window used by all tests."""

    class DummyCtrlWidget(_BaseAnalysisCtrlWidgetS):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.dummy_widget = QWidget()
            self._non_reconfigurable_widgets = [self.dummy_widget]

    class DummyProcessor(QThreadWorker):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._dark_removed = False

        def process(self, data):
            """Override."""
            pass

        def onRemoveDark(self):
            """Override."""
            self._dark_removed = True

        def sources(self):
            return [
                ("device1:output", "property1", 1),
                ("device2", "property2", 0)
            ]

    class DummyImageView(ImageViewF):
        def __init__(self, *, parent=None):
            super().__init__(parent=parent)

        def updateF(self, data):
            """Override."""
            pass

    class DummyImageViewWithRoi(ImageViewF):
        def __init__(self, *, parent=None):
            super().__init__(has_roi=True, parent=parent)

        def updateF(self, data):
            """Override."""
            pass

    class DummyPlotWidget(PlotWidgetF):
        def __init__(self, *, parent=None):
            super().__init__(parent=parent)
            self._plot = self.plotCurve(name="dummy")

        def updateF(self, data):
            """Override."""
            pass

    @create_special(DummyCtrlWidget, DummyProcessor)
    class DummyWindow(_SpecialAnalysisBase):
        _title = "Dummy"
        _long_title = "Dummy analysis"
        _client_support = ClientType.BOTH

        def __init__(self, topic):
            super().__init__(topic)
            self._line = DummyPlotWidget(parent=self)
            self._view = DummyImageView(parent=self)
            self._view_with_roi = DummyImageViewWithRoi(parent=self)
            self.initUI()
            self.initConnections()
            self.startWorker()

        def initUI(self):
            """Override."""
            pass

        def initConnections(self):
            """Override."""
            pass

    # Note: startWorker is not patched as it is in other tests of
    # concrete windows
    cls._win = DummyWindow('DET')
def testGeneral(self):
    """The ctrl widget receives the topic passed to the window."""
    self.assertEqual('DET', self._win._ctrl_widget_st.topic)
def testPlotWidgets(self):
    """Widget registration and refresh behaviour."""
    win = self._win

    self.assertEqual(3, len(win._plot_widgets_st))
    self.assertIn(win._line, win._plot_widgets_st)
    self.assertIn(win._view, win._plot_widgets_st)

    self.assertEqual(2, len(win._image_views_st))
    self.assertIn(win._view, win._image_views_st)
    self.assertIn(win._view_with_roi, win._image_views_st)

    with patch.object(win._view, "updateImage") as update_image:
        QTest.mouseClick(win._com_ctrl_st.auto_level_btn, Qt.LeftButton)
        update_image.assert_called_once()

    with patch.object(win._view, "updateF") as update_view, \
            patch.object(win._line, "updateF") as update_line:
        win.updateWidgetsST()
        # win._data is empty
        update_line.assert_not_called()
        update_view.assert_not_called()

        # patch win._worker_st.get()
        with patch.object(win._worker_st, "getOutputDataST"):
            win.updateWidgetsST()
            update_line.assert_called_once()
            update_view.assert_called_once()
def testCommonStartStopReset(self):
    """Start / stop / reset behaviour shared by all special windows."""
    win = self._win
    com_ctrl_widget = win._com_ctrl_st
    ctrl_widget = win._ctrl_widget_st
    client = win._client_st
    worker = win._worker_st

    self.assertFalse(com_ctrl_widget.stop_btn.isEnabled())
    self.assertIsNone(client._endpoint_st)

    # Start: client connects and widgets freeze.
    with patch.object(client, "start") as client_start, \
            patch.object(win._plot_timer_st, "start") as timer_start:
        spy = QSignalSpy(win.started_sgn)
        QTest.mouseClick(com_ctrl_widget.start_btn, Qt.LeftButton)
        self.assertEqual(f"tcp://{com_ctrl_widget._hostname_le.text()}:"
                         f"{com_ctrl_widget._port_le.text()}", client._endpoint_st)
        self.assertEqual(2, len(client._catalog_st))
        self.assertIn("device1:output property1", client._catalog_st)
        self.assertIn("device2 property2", client._catalog_st)
        self.assertEqual(1, len(spy))
        self.assertTrue(com_ctrl_widget.stop_btn.isEnabled())
        self.assertFalse(com_ctrl_widget.start_btn.isEnabled())
        self.assertFalse(com_ctrl_widget.load_dark_run_btn.isEnabled())
        self.assertFalse(ctrl_widget.dummy_widget.isEnabled())
        client_start.assert_called_once()
        timer_start.assert_called_once()

    # Stop: run terminates and widgets thaw.
    with patch.object(client, "terminateRunST") as client_stop, \
            patch.object(win._plot_timer_st, "stop") as timer_stop:
        spy = QSignalSpy(win.stopped_sgn)
        QTest.mouseClick(com_ctrl_widget.stop_btn, Qt.LeftButton)
        self.assertEqual(1, len(spy))
        self.assertFalse(com_ctrl_widget.stop_btn.isEnabled())
        self.assertTrue(com_ctrl_widget.start_btn.isEnabled())
        self.assertTrue(com_ctrl_widget.load_dark_run_btn.isEnabled())
        self.assertTrue(ctrl_widget.dummy_widget.isEnabled())
        client_stop.assert_called_once()
        timer_stop.assert_called_once()

    # Bad source definitions: refuse to start and log an error.
    with patch.object(client, "start") as client_start, \
            patch.object(win._plot_timer_st, "start") as timer_start, \
            patch.object(worker, "sources") as mocked_sources:
        for sources, message in (
                ([("", "property1", 1)], "Empty source"),
                ([("device", "", 0)], "Empty property"),
                ([("device", "property", 2)], "Not understandable data type")):
            with self.assertLogs(logger, level="ERROR") as cm:
                mocked_sources.return_value = sources
                QTest.mouseClick(com_ctrl_widget.start_btn, Qt.LeftButton)
            client_start.assert_not_called()
            timer_start.assert_not_called()
            self.assertIn(message, cm.output[0])

    # Reset fans out to client, worker and plot widgets.
    with patch.object(client, "onResetST") as client_reset, \
            patch.object(worker, "onResetST") as worker_reset, \
            patch.object(win._line, "reset") as line_reset, \
            patch.object(win._view, "reset") as view_reset:
        QTest.mouseClick(com_ctrl_widget.reset_btn, Qt.LeftButton)
        client_reset.assert_called_once()
        worker_reset.assert_called_once()
        line_reset.assert_called_once()
        view_reset.assert_called_once()

    # Worker reset clears its queues.
    with patch.object(worker._input_st, "clear") as input_clear, \
            patch.object(worker._output_st, "clear") as output_clear:
        worker._reset_st = False
        worker.onResetST()
        input_clear.assert_called_once()
        output_clear.assert_called_once()
        worker._reset_st = True

    # Client reset resets the transformer and clears its output.
    with patch.object(client._transformer_st, "reset") as transformer_reset, \
            patch.object(client._output_st, "clear") as output_clear:
        client.onResetST()
        transformer_reset.assert_called_once()
        output_clear.assert_called_once()
def testProcessFlow(self):
    """The processing pipeline order and reset-flag handling."""
    worker = self._win._worker_st
    data = object()
    with patch.object(worker, "preprocess") as mocked_preprocess, \
            patch.object(worker, "process") as mocked_process, \
            patch.object(worker, "postprocess") as mocked_postprocess, \
            patch.object(worker, "reset") as mocked_reset:
        worker._reset_st = False
        worker._processImpST(data)
        mocked_preprocess.assert_called_once()
        mocked_process.assert_called_once_with(data)
        mocked_postprocess.assert_called_once()
        mocked_reset.assert_not_called()

        worker._reset_st = True
        worker._processImpST(data)
        mocked_reset.assert_called_once()
        self.assertFalse(worker._reset_st)
def testCommonDarkOperation(self):
    """Dark-frame recording, loading, removal and subtraction."""
    win = self._win
    widget = win._com_ctrl_st
    worker = win._worker_st

    # recording dark
    self.assertFalse(worker.recordingDark())  # default value
    QTest.mouseClick(widget.record_dark_btn, Qt.LeftButton)
    self.assertTrue(worker.recordingDark())
    self.assertTrue(widget.record_dark_btn.isChecked())
    QTest.mouseClick(widget.record_dark_btn, Qt.LeftButton)
    self.assertFalse(worker.recordingDark())
    self.assertFalse(widget.record_dark_btn.isChecked())

    # load dark run
    with patch.object(worker, "onLoadDarkRun") as load_dark_run:
        with patch('extra_foam.special_suite.special_analysis_base.QFileDialog.getExistingDirectory',
                   return_value=""):
            QTest.mouseClick(widget.load_dark_run_btn, Qt.LeftButton)
            load_dark_run.assert_not_called()

        with patch('extra_foam.special_suite.special_analysis_base.QFileDialog.getExistingDirectory',
                   return_value="/run/directory"):
            QTest.mouseClick(widget.load_dark_run_btn, Qt.LeftButton)
            load_dark_run.assert_called_with("/run/directory")

    # remove dark (patch.object does not work)
    self.assertFalse(worker._dark_removed)
    QTest.mouseClick(widget.remove_dark_btn, Qt.LeftButton)
    self.assertTrue(worker._dark_removed)

    # subtract dark
    self.assertTrue(worker.subtractDark())  # default value
    widget.dark_subtraction_cb.setChecked(False)
    self.assertFalse(worker.subtractDark())
def testRoiCtrl(self):
    """Placeholder for future ROI-control coverage."""
    pass
def testSqueezeToVector(self):
    """squeezeToVector squeezes (n, 1) arrays and rejects higher ranks.

    FIX: the two squeeze tests had swapped names — the test named
    'testSqueezeCameraImage' exercised squeezeToVector and vice
    versa.  Each test now carries the name of the function it
    actually tests.
    """
    a1d = np.ones((4, ))
    a2d = np.ones((2, 1))
    a3d = np.ones((3, 3, 1))

    func = functools.partial(self._win._worker_st.squeezeToVector, 1234)

    # None and 3D input are rejected.
    assert func(None) is None
    assert func(a3d) is None

    # 1D passes through; (n, 1) loses its trailing axis.
    ret_1d = func(a1d)
    np.testing.assert_array_equal(a1d, ret_1d)
    ret_2d = func(a2d)
    np.testing.assert_array_equal(a2d.squeeze(axis=-1), ret_2d)

def testSqueezeCameraImage(self):
    """squeezeToImage squeezes a trailing singleton axis into a float32 image.

    (See testSqueezeToVector for the name-swap fix note.)
    """
    a1d = np.ones((4, ))
    a2d = np.ones((2, 2))
    a3d = np.ones((3, 3, 1))
    a3d_f = np.ones((3, 3, 2))
    a4d = np.ones((2, 2, 2, 2))

    func = functools.partial(self._win._worker_st.squeezeToImage, 1234)

    # None, 1D and 4D input are rejected.
    assert func(None) is None
    assert func(a1d) is None
    assert func(a4d) is None

    # 2D passes through as float32.
    ret_2d = func(a2d)
    np.testing.assert_array_equal(a2d, ret_2d)
    assert np.float32 == ret_2d.dtype

    # (h, w, 1) loses its trailing axis; (h, w, 2) is rejected.
    ret_3d = func(a3d)
    np.testing.assert_array_equal(a3d.squeeze(axis=-1), ret_3d)
    assert np.float32 == ret_3d.dtype

    assert func(a3d_f) is None
def testGetRoiData(self):
    """Check ROI extraction on 2D and 3D images, with and without a geometry."""
    worker = self._win._worker_st

    # --- 2D image ---
    img = np.ones((4, 6))
    # Without an ROI geometry the full image is returned (optionally copied).
    worker._roi_geom_st = None
    assert worker.getRoiData(img) is img
    copied = worker.getRoiData(img, copy=True)
    assert copied is not img
    np.testing.assert_array_equal(img, copied)
    # A geometry intersecting the image yields the corresponding slice.
    worker._roi_geom_st = (1, 2, 2, 3)
    np.testing.assert_array_equal(img[2:5, 1:3], worker.getRoiData(img))
    # A geometry entirely outside the image yields an empty array.
    worker._roi_geom_st = (-5, -6, 2, 3)
    np.testing.assert_array_equal(np.empty((0, 0)), worker.getRoiData(img))

    # --- 3D stack: the ROI applies to the last two axes ---
    img = np.ones((3, 4, 6))
    worker._roi_geom_st = (1, 2, 2, 3)
    np.testing.assert_array_equal(img[:, 2:5, 1:3], worker.getRoiData(img))
    worker._roi_geom_st = (-5, -6, 2, 3)
    np.testing.assert_array_equal(np.empty((3, 0, 0)), worker.getRoiData(img))
def testClientChange(self):
    """Check that selecting a client type installs the matching client class."""
    win = self._win
    ctrl_widget = win._com_ctrl_st
    worker = win._worker_st

    def client_class(client_type):
        # Map each client type onto the QThread client class backing it.
        mapping = {ClientType.EXTRA_FOAM: QThreadFoamClient,
                   ClientType.KARABO_BRIDGE: QThreadKbClient}
        try:
            return mapping[client_type]
        except KeyError:
            raise RuntimeError("Unrecognized client type")

    # The widget, window and worker must agree on the initial client.
    client_type = ClientType(ctrl_widget._client_type_cb.currentText())
    assert client_type == ctrl_widget.selected_client
    assert client_class(client_type) == type(win._client_st)
    assert worker.client_type == client_type
    # Which client is selected by default doesn't matter for operation, but
    # the remainder of this test assumes this particular default.
    assert client_type == ClientType.EXTRA_FOAM

    # Switching the combo box must swap in the other client class.
    ctrl_widget._client_type_cb.setCurrentText(ClientType.KARABO_BRIDGE.value)
    client_type = ctrl_widget.selected_client
    assert client_class(client_type) == type(win._client_st)
    assert worker.client_type == client_type
import numpy as np
import pandas as pd
from scipy.stats import norm, moment
from mlfinlab.bet_sizing.ch10_snippets import get_signal, avg_active_signals, discrete_signal
from mlfinlab.bet_sizing.ch10_snippets import get_w, get_target_pos, limit_price, bet_size
from mlfinlab.bet_sizing.ef3m import M2N, raw_moment, most_likely_parameters
def bet_size_probability(events, prob, num_classes, pred=None, step_size=0.0, average_active=False, num_threads=1):
    """
    Compute bet sizes from predicted probabilities. When 'average_active' is True the result is
    averaged over all concurrently active bets, so the returned series covers each bet's open
    and close times (roughly doubling its length).

    :param events: (pandas.DataFrame) Must contain a 't1' column holding the product expiry
     datetime, indexed by the datetime the position was taken.
    :param prob: (pandas.Series) Predicted probabilities.
    :param num_classes: (int) Number of predicted bet sides.
    :param pred: (pd.Series) Predicted bet side; None (default) yields a relative bet size,
     i.e. one not multiplied by the side.
    :param step_size: (float) Grid step for discretizing the bet size; 0.0 (default) disables
     discretization.
    :param average_active: (bool) Average the sizes of concurrently active bets. Default False.
    :param num_threads: (int) Number of processing threads for multiprocessing. Default 1.
    :return: (pandas.Series) Bet sizes, indexed by time.
    """
    signal = get_signal(prob, num_classes, pred)
    df_signal = signal.to_frame('signal').join(events['t1'], how='left')
    # Optionally average the sizes of all bets active at each point in time.
    signal_out = avg_active_signals(df_signal, num_threads) if average_active else df_signal.signal
    # A non-zero step size snaps the bet sizes onto a discrete grid.
    if abs(step_size) > 0:
        signal_out = discrete_signal(signal0=signal_out, step_size=abs(step_size))
    return signal_out
def bet_size_dynamic(current_pos, max_pos, market_price, forecast_price, cal_divergence=10, cal_bet_size=0.95,
                     func='sigmoid'):
    """
    Compute bet size, target position and break-even limit price as market and forecast prices
    move. Each argument may be a pandas.Series (sharing a common index) or a plain number; if any
    is a Series, the scalars are broadcast to Series of matching length and index.

    :param current_pos: (pandas.Series, int) Current position.
    :param max_pos: (pandas.Series, int) Maximum position
    :param market_price: (pandas.Series, float) Market price.
    :param forecast_price: (pandas.Series, float) Forecast price.
    :param cal_divergence: (float) The divergence to use in calibration.
    :param cal_bet_size: (float) The bet size to use in calibration.
    :param func: (string) Function for the dynamic calculation: 'sigmoid' or 'power'.
    :return: (pandas.DataFrame) Bet size (bet_size), target position (t_pos), and limit price (l_p).
    """
    # Collect the inputs, broadcasting scalars against any Series arguments.
    events = confirm_and_cast_to_df({'pos': current_pos, 'max_pos': max_pos,
                                     'm_p': market_price, 'f': forecast_price})
    # Calibrate the sizing function's width parameter.
    w_param = get_w(cal_divergence, cal_bet_size, func)
    # Target position, break-even limit price, then bet size, row by row.
    events['t_pos'] = events.apply(lambda row: get_target_pos(w_param, row.f, row.m_p, row.max_pos, func), axis=1)
    events['l_p'] = events.apply(lambda row: limit_price(row.t_pos, row.pos, row.f, w_param, row.max_pos, func), axis=1)
    events['bet_size'] = events.apply(lambda row: bet_size(w_param, row.f - row.m_p, func), axis=1)
    return events[['bet_size', 't_pos', 'l_p']]
def bet_size_budget(events_t1, sides):
    """
    Derive strategy-independent bet sizes from bet sides and start/end times. The number of
    concurrent long and short bets is computed at each timestamp; the bet size is the difference
    between each side's concurrency normalized by its maximum (AFML section 10.2), giving a
    linear sizing scheme aligned to the expected number of concurrent bets.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime; the index must
     match 'events_t1' exactly. Sides < 0 are short, sides > 0 are long.
    :return: (pandas.DataFrame) The inputs as columns plus concurrent active long/short counts
     and the resulting 'bet_size'.
    """
    events = get_concurrent_sides(events_t1, sides)
    # Normalize each side's concurrency by its historical maximum; a side
    # that never traded contributes zero.
    max_long = events['active_long'].max()
    max_short = events['active_short'].max()
    frac_long = events['active_long'] / max_long if max_long > 0 else 0
    frac_short = events['active_short'] / max_short if max_short > 0 else 0
    events['bet_size'] = frac_long - frac_short
    return events
def bet_size_reserve(events_t1, sides, fit_runs=100, epsilon=1e-5, factor=5, variant=2, max_iter=10_000,
                     num_workers=1, return_parameters=False):
    """
    Calculate bet sizes from bet sides and start/end times. The difference c_t between concurrent
    long and short bets is computed at each time step, a mixture of two Gaussians is fit to the
    distribution of c_t, and the fitted mixture yields a sigmoid-shaped bet-size response aligned
    to the expected number of concurrent bets.

    Creates an <mlfinlab.bet_sizing.ef3m.M2N> object and forwards the fitting parameters to its
    parallel mp_fit() method.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime; must share the
     index of 'events_t1'. Sides < 0 are short, sides > 0 are long.
    :param fit_runs: (int) Number of runs when trying to fit the distribution.
    :param epsilon: (float) Error tolerance.
    :param factor: (float) Lambda factor from the EF3M equations.
    :param variant: (int) Algorithm variant to use, 1 or 2.
    :param max_iter: (int) Maximum iterations before terminating the fitting loop.
    :param num_workers: (int) CPU cores for multiprocessing, -1 for all cores. Default 1.
    :param return_parameters: (bool) If True, also return a dict of fitted mixture parameters.
    :return: (pandas.DataFrame) The inputs as columns plus concurrent long/short counts, their
     difference 'c_t', and 'bet_size'; the fitted parameter dict is appended to the return when
     'return_parameters' is True.
    """
    events = get_concurrent_sides(events_t1, sides)
    # c_t = <current active long> - <current active short> at each time step.
    events['c_t'] = events['active_long'] - events['active_short']
    # First five central moments of c_t, converted to raw moments for EF3M.
    c_t_values = events['c_t'].to_numpy()
    central_moments = [moment(c_t_values, moment=order) for order in range(1, 6)]
    raw_moments = raw_moment(central_moments=central_moments, dist_mean=events['c_t'].mean())
    # Fit the two-Gaussian mixture and keep the most likely parameter set.
    mixture_fit = M2N(raw_moments, epsilon=epsilon, factor=factor, n_runs=fit_runs,
                      variant=variant, max_iter=max_iter, num_workers=num_workers)
    fit_params = most_likely_parameters(mixture_fit.mp_fit())
    params = [fit_params[name] for name in ('mu_1', 'mu_2', 'sigma_1', 'sigma_2', 'p_1')]
    events['bet_size'] = events['c_t'].apply(lambda c: single_bet_size_mixed(c, params))
    return (events, fit_params) if return_parameters else events
def confirm_and_cast_to_df(d_vars):
    """
    Accepts either pandas.Series (with a common index) or scalar int/float values, casts every
    non-Series value to a Series, and returns a pandas.DataFrame for further calculations. This
    is a helper function to the 'bet_size_dynamic' function.

    Unlike a naive implementation, the input dictionary is NOT mutated; a new mapping is built
    so the caller's dict is left untouched.

    :param d_vars: (dict) Values are either pandas.Series or single int/float values. All Series
     passed are assumed to share the same index. The dict keys become column names in the
     returned pandas.DataFrame.
    :return: (pandas.DataFrame) The input values as columns, with dictionary keys as column names.
    """
    series_items = {k: v for k, v in d_vars.items() if isinstance(v, pd.Series)}
    if series_items:
        # Broadcast scalars to Series matching the (assumed common) index of
        # the Series arguments.
        template = next(iter(series_items.values()))
        idx, ser_len = template.index, template.size
        cast = {k: (v if isinstance(v, pd.Series)
                    else pd.Series(data=np.full(ser_len, v), index=idx))
                for k, v in d_vars.items()}
    else:
        # All scalars: wrap each in a length-1 Series with a trivial index.
        cast = {k: pd.Series(data=[v], index=[0]) for k, v in d_vars.items()}
    # Combine the Series to form a DataFrame with the original key order.
    events = pd.concat(list(cast.values()), axis=1)
    events.columns = list(cast.keys())
    return events
def get_concurrent_sides(events_t1, sides):
    """
    Given the side of the position along with its start and end timestamps, return the inputs as
    a DataFrame together with the number of concurrent long and short bets active at each
    timestamp.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime. Index must match
     'events_t1' exactly. Sides < 0 are short, sides > 0 are long.
    :return: (pandas.DataFrame) Columns 't1' and 'side' plus 'active_long' and 'active_short'
     concurrency counts at each timestamp.
    """
    events_0 = pd.DataFrame({'t1': events_t1, 'side': sides})
    events_0['active_long'] = 0
    events_0['active_short'] = 0
    # Hoist the loop-invariant side masks out of the loop.
    starts = events_0.index
    is_long = (events_0['side'] > 0).to_numpy()
    is_short = (events_0['side'] < 0).to_numpy()
    for idx in starts:
        # A bet is active at 'idx' if it opened at or before 'idx' and has not yet expired;
        # compute this shared mask once instead of once per side.
        active = (starts <= idx) & (events_0['t1'] > idx).to_numpy()
        # Count unique start times (matches the original set() semantics in case
        # of duplicate index labels).
        events_0.loc[idx, 'active_long'] = starts[active & is_long].unique().size
        events_0.loc[idx, 'active_short'] = starts[active & is_short].unique().size
    return events_0
def cdf_mixture(x_val, parameters):
    """
    Evaluate, at ``x_val``, the CDF of a mixture of two normal distributions.

    :param x_val: (float) Value at which to evaluate the CDF.
    :param parameters: (list) Mixture parameters, [mu_1, mu_2, sigma_1, sigma_2, p_1].
    :return: (float) CDF of the mixture.
    """
    mu_1, mu_2, sigma_1, sigma_2, p_1 = parameters
    # Probability-weighted combination of the two component CDFs.
    component_1 = norm.cdf(x_val, mu_1, sigma_1)
    component_2 = norm.cdf(x_val, mu_2, sigma_2)
    return p_1 * component_1 + (1 - p_1) * component_2
def single_bet_size_mixed(c_t, parameters):
    """
    Derive a single bet size from the difference in concurrent long and short positions, c_t,
    using the fitted parameters of a mixture of two Gaussian distributions (AFML question
    10.4(c)).

    Note: the trailing dataset-extraction residue that had been appended to the original return
    line (and broke the syntax) has been removed; the cdf_mixture(0, ...) value is also computed
    once instead of twice.

    :param c_t: (int) Number of concurrent long bets minus concurrent short bets.
    :param parameters: (list) Mixture parameters, [mu_1, mu_2, sigma_1, sigma_2, p_1].
    :return: (float) Bet size.
    """
    cdf_zero = cdf_mixture(0, parameters)
    numerator = cdf_mixture(c_t, parameters) - cdf_zero
    # Normalize by the mixture's probability mass on the side of zero that c_t falls on.
    denominator = (1 - cdf_zero) if c_t >= 0 else cdf_zero
    return numerator / denominator
import pandas as pd
from scipy.stats import norm, moment
from mlfinlab.bet_sizing.ch10_snippets import get_signal, avg_active_signals, discrete_signal
from mlfinlab.bet_sizing.ch10_snippets import get_w, get_target_pos, limit_price, bet_size
from mlfinlab.bet_sizing.ef3m import M2N, raw_moment, most_likely_parameters
def bet_size_probability(events, prob, num_classes, pred=None, step_size=0.0, average_active=False, num_threads=1):
    """
    Compute bet sizes from predicted probabilities. When 'average_active' is True the result is
    averaged over all concurrently active bets, so the returned series covers each bet's open
    and close times (roughly doubling its length).

    :param events: (pandas.DataFrame) Must contain a 't1' column holding the product expiry
     datetime, indexed by the datetime the position was taken.
    :param prob: (pandas.Series) Predicted probabilities.
    :param num_classes: (int) Number of predicted bet sides.
    :param pred: (pd.Series) Predicted bet side; None (default) yields a relative bet size,
     i.e. one not multiplied by the side.
    :param step_size: (float) Grid step for discretizing the bet size; 0.0 (default) disables
     discretization.
    :param average_active: (bool) Average the sizes of concurrently active bets. Default False.
    :param num_threads: (int) Number of processing threads for multiprocessing. Default 1.
    :return: (pandas.Series) Bet sizes, indexed by time.
    """
    signal = get_signal(prob, num_classes, pred)
    df_signal = signal.to_frame('signal').join(events['t1'], how='left')
    # Optionally average the sizes of all bets active at each point in time.
    signal_out = avg_active_signals(df_signal, num_threads) if average_active else df_signal.signal
    # A non-zero step size snaps the bet sizes onto a discrete grid.
    if abs(step_size) > 0:
        signal_out = discrete_signal(signal0=signal_out, step_size=abs(step_size))
    return signal_out
def bet_size_dynamic(current_pos, max_pos, market_price, forecast_price, cal_divergence=10, cal_bet_size=0.95,
                     func='sigmoid'):
    """
    Compute bet size, target position and break-even limit price as market and forecast prices
    move. Each argument may be a pandas.Series (sharing a common index) or a plain number; if any
    is a Series, the scalars are broadcast to Series of matching length and index.

    :param current_pos: (pandas.Series, int) Current position.
    :param max_pos: (pandas.Series, int) Maximum position
    :param market_price: (pandas.Series, float) Market price.
    :param forecast_price: (pandas.Series, float) Forecast price.
    :param cal_divergence: (float) The divergence to use in calibration.
    :param cal_bet_size: (float) The bet size to use in calibration.
    :param func: (string) Function for the dynamic calculation: 'sigmoid' or 'power'.
    :return: (pandas.DataFrame) Bet size (bet_size), target position (t_pos), and limit price (l_p).
    """
    # Collect the inputs, broadcasting scalars against any Series arguments.
    events = confirm_and_cast_to_df({'pos': current_pos, 'max_pos': max_pos,
                                     'm_p': market_price, 'f': forecast_price})
    # Calibrate the sizing function's width parameter.
    w_param = get_w(cal_divergence, cal_bet_size, func)
    # Target position, break-even limit price, then bet size, row by row.
    events['t_pos'] = events.apply(lambda row: get_target_pos(w_param, row.f, row.m_p, row.max_pos, func), axis=1)
    events['l_p'] = events.apply(lambda row: limit_price(row.t_pos, row.pos, row.f, w_param, row.max_pos, func), axis=1)
    events['bet_size'] = events.apply(lambda row: bet_size(w_param, row.f - row.m_p, func), axis=1)
    return events[['bet_size', 't_pos', 'l_p']]
def bet_size_budget(events_t1, sides):
    """
    Derive strategy-independent bet sizes from bet sides and start/end times. The number of
    concurrent long and short bets is computed at each timestamp; the bet size is the difference
    between each side's concurrency normalized by its maximum (AFML section 10.2), giving a
    linear sizing scheme aligned to the expected number of concurrent bets.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime; the index must
     match 'events_t1' exactly. Sides < 0 are short, sides > 0 are long.
    :return: (pandas.DataFrame) The inputs as columns plus concurrent active long/short counts
     and the resulting 'bet_size'.
    """
    events = get_concurrent_sides(events_t1, sides)
    # Normalize each side's concurrency by its historical maximum; a side
    # that never traded contributes zero.
    max_long = events['active_long'].max()
    max_short = events['active_short'].max()
    frac_long = events['active_long'] / max_long if max_long > 0 else 0
    frac_short = events['active_short'] / max_short if max_short > 0 else 0
    events['bet_size'] = frac_long - frac_short
    return events
def bet_size_reserve(events_t1, sides, fit_runs=100, epsilon=1e-5, factor=5, variant=2, max_iter=10_000,
                     num_workers=1, return_parameters=False):
    """
    Calculate bet sizes from bet sides and start/end times. The difference c_t between concurrent
    long and short bets is computed at each time step, a mixture of two Gaussians is fit to the
    distribution of c_t, and the fitted mixture yields a sigmoid-shaped bet-size response aligned
    to the expected number of concurrent bets.

    Creates an <mlfinlab.bet_sizing.ef3m.M2N> object and forwards the fitting parameters to its
    parallel mp_fit() method.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime; must share the
     index of 'events_t1'. Sides < 0 are short, sides > 0 are long.
    :param fit_runs: (int) Number of runs when trying to fit the distribution.
    :param epsilon: (float) Error tolerance.
    :param factor: (float) Lambda factor from the EF3M equations.
    :param variant: (int) Algorithm variant to use, 1 or 2.
    :param max_iter: (int) Maximum iterations before terminating the fitting loop.
    :param num_workers: (int) CPU cores for multiprocessing, -1 for all cores. Default 1.
    :param return_parameters: (bool) If True, also return a dict of fitted mixture parameters.
    :return: (pandas.DataFrame) The inputs as columns plus concurrent long/short counts, their
     difference 'c_t', and 'bet_size'; the fitted parameter dict is appended to the return when
     'return_parameters' is True.
    """
    events = get_concurrent_sides(events_t1, sides)
    # c_t = <current active long> - <current active short> at each time step.
    events['c_t'] = events['active_long'] - events['active_short']
    # First five central moments of c_t, converted to raw moments for EF3M.
    c_t_values = events['c_t'].to_numpy()
    central_moments = [moment(c_t_values, moment=order) for order in range(1, 6)]
    raw_moments = raw_moment(central_moments=central_moments, dist_mean=events['c_t'].mean())
    # Fit the two-Gaussian mixture and keep the most likely parameter set.
    mixture_fit = M2N(raw_moments, epsilon=epsilon, factor=factor, n_runs=fit_runs,
                      variant=variant, max_iter=max_iter, num_workers=num_workers)
    fit_params = most_likely_parameters(mixture_fit.mp_fit())
    params = [fit_params[name] for name in ('mu_1', 'mu_2', 'sigma_1', 'sigma_2', 'p_1')]
    events['bet_size'] = events['c_t'].apply(lambda c: single_bet_size_mixed(c, params))
    return (events, fit_params) if return_parameters else events
def confirm_and_cast_to_df(d_vars):
    """
    Accepts either pandas.Series (with a common index) or scalar int/float values, casts every
    non-Series value to a Series, and returns a pandas.DataFrame for further calculations. This
    is a helper function to the 'bet_size_dynamic' function.

    Unlike a naive implementation, the input dictionary is NOT mutated; a new mapping is built
    so the caller's dict is left untouched.

    :param d_vars: (dict) Values are either pandas.Series or single int/float values. All Series
     passed are assumed to share the same index. The dict keys become column names in the
     returned pandas.DataFrame.
    :return: (pandas.DataFrame) The input values as columns, with dictionary keys as column names.
    """
    series_items = {k: v for k, v in d_vars.items() if isinstance(v, pd.Series)}
    if series_items:
        # Broadcast scalars to Series matching the (assumed common) index of
        # the Series arguments.
        template = next(iter(series_items.values()))
        idx, ser_len = template.index, template.size
        cast = {k: (v if isinstance(v, pd.Series)
                    else pd.Series(data=np.full(ser_len, v), index=idx))
                for k, v in d_vars.items()}
    else:
        # All scalars: wrap each in a length-1 Series with a trivial index.
        cast = {k: pd.Series(data=[v], index=[0]) for k, v in d_vars.items()}
    # Combine the Series to form a DataFrame with the original key order.
    events = pd.concat(list(cast.values()), axis=1)
    events.columns = list(cast.keys())
    return events
def get_concurrent_sides(events_t1, sides):
    """
    Given the side of the position along with its start and end timestamps, return the inputs as
    a DataFrame together with the number of concurrent long and short bets active at each
    timestamp.

    :param events_t1: (pandas.Series) End datetime of the position, indexed by start datetime.
    :param sides: (pandas.Series) Side of the bet, indexed by start datetime. Index must match
     'events_t1' exactly. Sides < 0 are short, sides > 0 are long.
    :return: (pandas.DataFrame) Columns 't1' and 'side' plus 'active_long' and 'active_short'
     concurrency counts at each timestamp.
    """
    events_0 = pd.DataFrame({'t1': events_t1, 'side': sides})
    events_0['active_long'] = 0
    events_0['active_short'] = 0
    # Hoist the loop-invariant side masks out of the loop.
    starts = events_0.index
    is_long = (events_0['side'] > 0).to_numpy()
    is_short = (events_0['side'] < 0).to_numpy()
    for idx in starts:
        # A bet is active at 'idx' if it opened at or before 'idx' and has not yet expired;
        # compute this shared mask once instead of once per side.
        active = (starts <= idx) & (events_0['t1'] > idx).to_numpy()
        # Count unique start times (matches the original set() semantics in case
        # of duplicate index labels).
        events_0.loc[idx, 'active_long'] = starts[active & is_long].unique().size
        events_0.loc[idx, 'active_short'] = starts[active & is_short].unique().size
    return events_0
def cdf_mixture(x_val, parameters):
    """
    Evaluate, at ``x_val``, the CDF of a mixture of two normal distributions.

    :param x_val: (float) Value at which to evaluate the CDF.
    :param parameters: (list) Mixture parameters, [mu_1, mu_2, sigma_1, sigma_2, p_1].
    :return: (float) CDF of the mixture.
    """
    mu_1, mu_2, sigma_1, sigma_2, p_1 = parameters
    # Probability-weighted combination of the two component CDFs.
    component_1 = norm.cdf(x_val, mu_1, sigma_1)
    component_2 = norm.cdf(x_val, mu_2, sigma_2)
    return p_1 * component_1 + (1 - p_1) * component_2
def single_bet_size_mixed(c_t, parameters):
    """
    Derive a single bet size from the difference in concurrent long and short positions, c_t,
    using the fitted parameters of a mixture of two Gaussian distributions (AFML question
    10.4(c)).

    :param c_t: (int) Number of concurrent long bets minus concurrent short bets.
    :param parameters: (list) Mixture parameters, [mu_1, mu_2, sigma_1, sigma_2, p_1].
    :return: (float) Bet size.
    """
    cdf_zero = cdf_mixture(0, parameters)
    numerator = cdf_mixture(c_t, parameters) - cdf_zero
    # Normalize by the mixture's probability mass on the side of zero that c_t falls on.
    denominator = (1 - cdf_zero) if c_t >= 0 else cdf_zero
    return numerator / denominator
import inspect
import json
import numbers
import stringcase
from enum import Enum, auto
from typing import Sequence, Optional, Union, Any, Dict
class SchemaType(Enum):
    """Enumerates the JSON-schema primitive 'type' keyword values.

    NONE is a sentinel meaning "emit no 'type' keyword at all" — Schema.__init__
    skips the 'type' entry when given SchemaType.NONE.
    """
    OBJECT = auto()
    ARRAY = auto()
    STRING = auto()
    INTEGER = auto()
    NUMBER = auto()
    BOOLEAN = auto()
    NULL = auto()
    NONE = auto()
class Schema(object):
def __init__(self, of_type: SchemaType, default_value=None):
self._spec = {}
if of_type is not SchemaType.NONE:
self._spec['type'] = of_type.name.lower()
if default_value is not None:
self.default(default_value)
def enum(self, *values):
return self._set(values)
def const(self, value: Any):
return self._set(value)
def if_then_else(self, if_schema: Union[dict, 'Schema'], then_schema: Union[dict, 'Schema', None] = None,
else_schema: Union[dict, 'Schema', None] = None):
self._set(if_schema, name='if')
if then_schema is not None:
self._set(then_schema, name='then')
if else_schema is not None:
self._set(else_schema, name='else')
return self
def ref(self, reference: str):
return self._set(reference, name='$ref')
def add_definition(self, name: str, schema: Union[dict, 'Schema']):
definitions = self._ensure_dict('definitions')
definitions[name] = self._to_spec(schema)
return self
def default(self, value):
return self._set(value)
def schema(self, schema: str):
return self._set(schema, name='$schema')
def comment(self, comment: str):
return self._set(comment, name='$comment')
def id(self, id_str: str):
return self._set(id_str, name='$id')
def title(self, title: str):
return self._set(title, name='title')
def description(self, description: str):
return self._set(description, name='description')
def spec(self):
return self._spec
def _ensure_dict(self, name: str) -> dict:
result = self._spec[name] if name in self._spec else {}
self._spec[name] = result
return result
def _set(self, value, name=None):
if name is None:
name = stringcase.camelcase(inspect.stack()[1].function)
self._spec[name] = Schema._to_spec(value)
return self
def as_json_text(self):
return json.dumps(self._spec, indent=2)
@staticmethod
def _to_spec(value: Any) -> Any:
if isinstance(value, Schema):
value = value.spec()
if isinstance(value, (list, tuple)):
value = [Schema._to_spec(value) for value in value]
if isinstance(value, dict):
value = {key: Schema._to_spec(value) for key, value in value.items()}
return value
class StringSchema(Schema):
    """Schema for JSON strings, with optional length, pattern and format constraints."""

    def __init__(self, min_length: Optional[int] = None, max_length: Optional[int] = None,
                 pattern: Optional[str] = None, str_format: Optional[str] = None, default_value: str = None):
        super().__init__(of_type=SchemaType.STRING, default_value=default_value)
        # Apply only the constraints that were actually supplied, in declaration order.
        optional_constraints = ((self.min_length, min_length), (self.max_length, max_length),
                                (self.pattern, pattern), (self.format, str_format))
        for setter, argument in optional_constraints:
            if argument is not None:
                setter(argument)

    def min_length(self, value: int):
        """Set the minimum string length constraint."""
        return self._set(value)

    def max_length(self, value: int):
        """Set the maximum string length constraint."""
        return self._set(value)

    def pattern(self, value: str):
        """Set the regular-expression pattern constraint."""
        return self._set(value)

    def format(self, value: str):
        """Set the string format constraint."""
        return self._set(value)
class IntegerSchema(Schema):
    """Schema for JSON integers, with optional range and multiple-of constraints."""

    def __init__(self, minimum: Optional[int] = None, exclusive_minimum: Union[bool, int, None] = None,
                 maximum: Optional[int] = None, exclusive_maximum: Union[bool, int, None] = None,
                 multiple_of: Optional[numbers.Number] = None, default_value: int = None):
        super().__init__(of_type=SchemaType.INTEGER, default_value=default_value)
        # Apply only the constraints that were actually supplied, in declaration order.
        optional_constraints = ((self.minimum, minimum), (self.exclusive_minimum, exclusive_minimum),
                                (self.maximum, maximum), (self.exclusive_maximum, exclusive_maximum),
                                (self.multiple_of, multiple_of))
        for setter, argument in optional_constraints:
            if argument is not None:
                setter(argument)

    def minimum(self, value: int):
        """Set the inclusive lower bound."""
        return self._set(value)

    def exclusive_minimum(self, value: Union[bool, int]):
        """Set the exclusive lower bound."""
        return self._set(value)

    def maximum(self, value: int):
        """Set the inclusive upper bound."""
        return self._set(value)

    def exclusive_maximum(self, value: Union[bool, int]):
        """Set the exclusive upper bound."""
        return self._set(value)

    def multiple_of(self, value: numbers.Number):
        """Require the value to be a multiple of the given number."""
        return self._set(value)
class NumberSchema(Schema):
    """Schema for JSON numbers, with optional range and multiple-of constraints."""

    def __init__(self, minimum: Optional[numbers.Number] = None,
                 exclusive_minimum: Union[bool, numbers.Number, None] = None,
                 maximum: Optional[numbers.Number] = None,
                 exclusive_maximum: Union[bool, numbers.Number, None] = None,
                 multiple_of: Optional[numbers.Number] = None, default_value: int = None):
        super().__init__(of_type=SchemaType.NUMBER, default_value=default_value)
        # Apply only the constraints that were actually supplied, in declaration order.
        optional_constraints = ((self.minimum, minimum), (self.exclusive_minimum, exclusive_minimum),
                                (self.maximum, maximum), (self.exclusive_maximum, exclusive_maximum),
                                (self.multiple_of, multiple_of))
        for setter, argument in optional_constraints:
            if argument is not None:
                setter(argument)

    def minimum(self, value: numbers.Number):
        """Set the inclusive lower bound."""
        return self._set(value)

    def exclusive_minimum(self, value: Union[bool, numbers.Number]):
        """Set the exclusive lower bound."""
        return self._set(value)

    def maximum(self, value: numbers.Number):
        """Set the inclusive upper bound."""
        return self._set(value)

    def exclusive_maximum(self, value: Union[bool, numbers.Number]):
        """Set the exclusive upper bound."""
        return self._set(value)

    def multiple_of(self, value: numbers.Number):
        """Require the value to be a multiple of the given number."""
        return self._set(value)
class BooleanSchema(Schema):
    """Fluent builder for a JSON Schema of type ``boolean``."""
    def __init__(self, default_value: bool = None):
        super().__init__(of_type=SchemaType.BOOLEAN, default_value=default_value)
class ObjectSchema(Schema):
    """Fluent builder for a JSON Schema of type ``object``.

    Constraints may be supplied through the constructor or via the
    chainable setter methods; every setter returns ``self``.
    """

    def __init__(self, properties: Optional[dict] = None, pattern_properties: Optional[dict] = None,
                 property_names: Optional[Union[dict, Schema]] = None,
                 min_properties: Optional[int] = None, max_properties: Optional[int] = None,
                 additional_properties: Union[bool, dict, Schema, None] = None, default_value=None):
        super().__init__(of_type=SchemaType.OBJECT, default_value=default_value)
        # 'properties'/'patternProperties' are set with explicit names here
        # because their keys need not be valid Python identifiers (so they
        # cannot be routed through the **kwargs-based properties() method).
        if properties is not None:
            self._set(properties, name='properties')
        if pattern_properties is not None:
            self._set(pattern_properties, name='patternProperties')
        if property_names is not None:
            self.property_names(property_names)
        if min_properties is not None:
            self.min_properties(min_properties)
        if max_properties is not None:
            self.max_properties(max_properties)
        if additional_properties is not None:
            self.additional_properties(additional_properties)

    def properties(self, **kwargs):
        """Set the named property schemas (property name -> schema)."""
        # dict(kwargs) replaces the manual key-by-key copy loop; it still
        # stores a fresh dict, so later mutation of kwargs cannot leak in.
        return self._set(dict(kwargs))

    def add_pattern_property(self, pattern: str, schema: Union[dict, Schema]):
        """Add one ``patternProperties`` entry keyed by regex *pattern*."""
        properties = self._ensure_dict('patternProperties')
        properties[pattern] = self._to_spec(schema)
        return self

    def required(self, *names: str):
        """Set the list of required property names."""
        return self._set(names)

    def property_names(self, schema: Union[dict, Schema]):
        """Constrain the property names themselves with *schema*."""
        return self._set(schema)

    def min_properties(self, value: int):
        """Set the minimum number of properties an instance may have."""
        return self._set(value)

    def max_properties(self, value: int):
        """Set the maximum number of properties an instance may have."""
        return self._set(value)

    def dependencies(self, value: Dict[str, Union[Sequence[str], dict, Schema]]):
        """Set property dependencies (name -> required names or schema)."""
        return self._set(value)

    def additional_properties(self, value: Union[bool, dict, Schema]):
        """Allow/forbid (bool) or constrain (schema) unmatched properties."""
        return self._set(value)
class ArraySchema(Schema):
    """Fluent builder for a JSON Schema of type ``array``.

    Item constraints may be supplied up front or chained afterwards;
    every setter returns ``self``.
    """

    def __init__(self, items: Union[Sequence[Union[dict, Schema]], dict, Schema, None] = None,
                 contains: Optional[Union[dict, Schema]] = None,
                 min_items: Optional[int] = None, max_items: Optional[int] = None,
                 unique_items: Optional[bool] = None, additional_items: Union[bool, dict, Schema, None] = None,
                 default_value=None):
        super().__init__(of_type=SchemaType.ARRAY, default_value=default_value)
        # Apply only the constraints that were supplied, preserving order.
        constraints = (
            (items, self.items),
            (contains, self.contains),
            (min_items, self.min_items),
            (max_items, self.max_items),
            (unique_items, self.unique_items),
            (additional_items, self.additional_items),
        )
        for supplied, setter in constraints:
            if supplied is not None:
                setter(supplied)

    def items(self, value: Union[Sequence[Union[dict, Schema]], dict, Schema]):
        """Set the item schema (or positional list of schemas)."""
        return self._set(value, name='items')

    def contains(self, schema: Union[dict, Schema]):
        """Require at least one element to match *schema*."""
        return self._set(schema, name='contains')

    def min_items(self, value: int):
        """Set the minimum array length."""
        return self._set(value, name='minItems')

    def max_items(self, value: int):
        """Set the maximum array length."""
        return self._set(value, name='maxItems')

    def unique_items(self, value: bool):
        """Require all elements to be unique when *value* is true."""
        return self._set(value, name='uniqueItems')

    def additional_items(self, value: Union[bool, dict, Schema]):
        """Allow/forbid or constrain items beyond a positional 'items' list."""
        return self._set(value, name='additionalItems')
class CombinerSchema(Schema):
    """Base class for the allOf/anyOf/oneOf combiners.

    *tag* is the JSON Schema keyword under which the member schemas are
    stored; subclasses fix it.
    """

    def __init__(self, *schemas: Union[dict, Schema], tag: str):
        super().__init__(of_type=SchemaType.NONE)
        self._tag = tag
        self._set(schemas, name=tag)

    def add_schema(self, schema: Union[dict, Schema]):
        """Append one more schema to the combiner list and return self."""
        # _to_spec turned the constructor tuple into a list, so the stored
        # value is mutable and can be appended to in place.
        self._spec[self._tag].append(Schema._to_spec(schema))
        return self

    def __add__(self, other):
        """``combiner + schema`` appends in place and returns the combiner."""
        if isinstance(other, (dict, Schema)):
            return self.add_schema(other)
        return NotImplemented
class AllOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *all* of the given schemas."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='allOf')
class AnyOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *at least one* given schema."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='anyOf')
class OneOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *exactly one* given schema."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='oneOf')
class NotSchema(Schema):
    """Schema matching any instance that does *not* match the given schema."""
    def __init__(self, schema: Union[dict, Schema]):
        super().__init__(of_type=SchemaType.NONE)
        self._set(schema, name='not')
class RefSchema(Schema):
    """Schema consisting solely of a ``$ref`` to another schema."""
    def __init__(self, reference: str):
        super().__init__(of_type=SchemaType.NONE)
        self.ref(reference)
class EmptySchema(Schema):
def __init__(self):
super().__init__(of_type=SchemaType.NONE) | builder/schema.py | import inspect
import json
import numbers
import stringcase
from enum import Enum, auto
from typing import Sequence, Optional, Union, Any, Dict
class SchemaType(Enum):
    """JSON Schema primitive types.

    NONE means the builder emits no explicit ``type`` keyword at all
    (used by combiners, $ref wrappers and the empty schema).
    """
    OBJECT = auto()
    ARRAY = auto()
    STRING = auto()
    INTEGER = auto()
    NUMBER = auto()
    BOOLEAN = auto()
    NULL = auto()
    NONE = auto()
class Schema(object):
    """Base fluent builder that accumulates a JSON Schema spec dict.

    Setter methods return ``self`` so calls can be chained. NOTE: ``_set``
    derives the JSON keyword from the *calling method's name* via
    ``inspect.stack()`` unless ``name`` is given explicitly, so subclass
    setter names must camel-case to valid schema keywords and must call
    ``_set`` directly (no intermediate helper frames).
    """

    def __init__(self, of_type: SchemaType, default_value=None):
        self._spec = {}  # the JSON-serializable schema being built
        # NONE suppresses the 'type' keyword entirely (combiners, $ref, ...).
        if of_type is not SchemaType.NONE:
            self._spec['type'] = of_type.name.lower()
        if default_value is not None:
            self.default(default_value)

    def enum(self, *values):
        """Restrict instances to the given fixed set of values."""
        return self._set(values)

    def const(self, value: Any):
        """Restrict instances to the single given value."""
        return self._set(value)

    def if_then_else(self, if_schema: Union[dict, 'Schema'], then_schema: Union[dict, 'Schema', None] = None,
                     else_schema: Union[dict, 'Schema', None] = None):
        """Set the conditional if/then/else keywords; then/else are optional."""
        self._set(if_schema, name='if')
        if then_schema is not None:
            self._set(then_schema, name='then')
        if else_schema is not None:
            self._set(else_schema, name='else')
        return self

    def ref(self, reference: str):
        """Set a ``$ref`` to another schema."""
        return self._set(reference, name='$ref')

    def add_definition(self, name: str, schema: Union[dict, 'Schema']):
        """Register *schema* under ``definitions[name]`` for later $ref use."""
        definitions = self._ensure_dict('definitions')
        definitions[name] = self._to_spec(schema)
        return self

    def default(self, value):
        """Set the ``default`` value for instances."""
        return self._set(value)

    def schema(self, schema: str):
        """Set the ``$schema`` meta-schema URI."""
        return self._set(schema, name='$schema')

    def comment(self, comment: str):
        """Set the ``$comment`` annotation."""
        return self._set(comment, name='$comment')

    def id(self, id_str: str):
        """Set the ``$id`` of this schema."""
        return self._set(id_str, name='$id')

    def title(self, title: str):
        """Set the human-readable ``title``."""
        return self._set(title, name='title')

    def description(self, description: str):
        """Set the human-readable ``description``."""
        return self._set(description, name='description')

    def spec(self):
        """Return the underlying spec dict (not a copy)."""
        return self._spec

    def _ensure_dict(self, name: str) -> dict:
        # Return the dict stored under *name*, creating (and storing) an
        # empty one on first use.
        result = self._spec[name] if name in self._spec else {}
        self._spec[name] = result
        return result

    def _set(self, value, name=None):
        # When no explicit key is given, use the immediate caller's function
        # name, camel-cased (e.g. min_length -> minLength). This is why all
        # public setters must call _set directly from the method frame.
        if name is None:
            name = stringcase.camelcase(inspect.stack()[1].function)
        self._spec[name] = Schema._to_spec(value)
        return self

    def as_json_text(self):
        """Render the accumulated spec as pretty-printed JSON."""
        return json.dumps(self._spec, indent=2)

    @staticmethod
    def _to_spec(value: Any) -> Any:
        # Recursively convert Schema objects (and containers of them) into
        # plain JSON-serializable structures; tuples become lists.
        if isinstance(value, Schema):
            value = value.spec()
        if isinstance(value, (list, tuple)):
            value = [Schema._to_spec(value) for value in value]
        if isinstance(value, dict):
            value = {key: Schema._to_spec(value) for key, value in value.items()}
        return value
class StringSchema(Schema):
    """Fluent builder for a JSON Schema of type ``string``.

    Length/pattern/format constraints may be supplied up front or
    chained afterwards; every setter returns ``self``.
    """

    def __init__(self, min_length: Optional[int] = None, max_length: Optional[int] = None,
                 pattern: Optional[str] = None, str_format: Optional[str] = None, default_value: str = None):
        super().__init__(of_type=SchemaType.STRING, default_value=default_value)
        # Apply only the constraints that were supplied, preserving order.
        constraints = (
            (min_length, self.min_length),
            (max_length, self.max_length),
            (pattern, self.pattern),
            (str_format, self.format),
        )
        for supplied, setter in constraints:
            if supplied is not None:
                setter(supplied)

    def min_length(self, value: int):
        """Set the minimum string length."""
        return self._set(value, name='minLength')

    def max_length(self, value: int):
        """Set the maximum string length."""
        return self._set(value, name='maxLength')

    def pattern(self, value: str):
        """Set the ``pattern`` (regular-expression) constraint."""
        return self._set(value, name='pattern')

    def format(self, value: str):
        """Set the ``format`` constraint (e.g. ``date-time``)."""
        return self._set(value, name='format')
class IntegerSchema(Schema):
    """Fluent builder for a JSON Schema of type ``integer``.

    Numeric constraints may be supplied up front via the constructor or
    chained afterwards; every setter returns ``self``.
    """

    def __init__(self, minimum: Optional[int] = None, exclusive_minimum: Union[bool, int, None] = None,
                 maximum: Optional[int] = None, exclusive_maximum: Union[bool, int, None] = None,
                 multiple_of: Optional[numbers.Number] = None, default_value: int = None):
        super().__init__(of_type=SchemaType.INTEGER, default_value=default_value)
        # Apply only the constraints that were supplied, in a fixed order so
        # the spec dict's key order stays deterministic.
        constraints = (
            (minimum, self.minimum),
            (exclusive_minimum, self.exclusive_minimum),
            (maximum, self.maximum),
            (exclusive_maximum, self.exclusive_maximum),
            (multiple_of, self.multiple_of),
        )
        for supplied, setter in constraints:
            if supplied is not None:
                setter(supplied)

    def minimum(self, value: int):
        """Set the inclusive lower bound."""
        return self._set(value, name='minimum')

    def exclusive_minimum(self, value: Union[bool, int]):
        """Set the exclusive lower bound (bool or number per schema draft)."""
        return self._set(value, name='exclusiveMinimum')

    def maximum(self, value: int):
        """Set the inclusive upper bound."""
        return self._set(value, name='maximum')

    def exclusive_maximum(self, value: Union[bool, int]):
        """Set the exclusive upper bound (bool or number per schema draft)."""
        return self._set(value, name='exclusiveMaximum')

    def multiple_of(self, value: numbers.Number):
        """Require instances to be a multiple of *value*."""
        return self._set(value, name='multipleOf')
class NumberSchema(Schema):
    """Fluent builder for a JSON Schema of type ``number``.

    Same constraint set as :class:`IntegerSchema` but for arbitrary
    numeric instances; every setter returns ``self``.
    """

    def __init__(self, minimum: Optional[numbers.Number] = None,
                 exclusive_minimum: Union[bool, numbers.Number, None] = None,
                 maximum: Optional[numbers.Number] = None,
                 exclusive_maximum: Union[bool, numbers.Number, None] = None,
                 multiple_of: Optional[numbers.Number] = None, default_value: int = None):
        super().__init__(of_type=SchemaType.NUMBER, default_value=default_value)
        # Apply only the constraints that were supplied, preserving order.
        constraints = (
            (minimum, self.minimum),
            (exclusive_minimum, self.exclusive_minimum),
            (maximum, self.maximum),
            (exclusive_maximum, self.exclusive_maximum),
            (multiple_of, self.multiple_of),
        )
        for supplied, setter in constraints:
            if supplied is not None:
                setter(supplied)

    def minimum(self, value: numbers.Number):
        """Set the inclusive lower bound."""
        return self._set(value, name='minimum')

    def exclusive_minimum(self, value: Union[bool, numbers.Number]):
        """Set the exclusive lower bound (bool or number per schema draft)."""
        return self._set(value, name='exclusiveMinimum')

    def maximum(self, value: numbers.Number):
        """Set the inclusive upper bound."""
        return self._set(value, name='maximum')

    def exclusive_maximum(self, value: Union[bool, numbers.Number]):
        """Set the exclusive upper bound (bool or number per schema draft)."""
        return self._set(value, name='exclusiveMaximum')

    def multiple_of(self, value: numbers.Number):
        """Require instances to be a multiple of *value*."""
        return self._set(value, name='multipleOf')
class BooleanSchema(Schema):
    """Fluent builder for a JSON Schema of type ``boolean``."""
    def __init__(self, default_value: bool = None):
        super().__init__(of_type=SchemaType.BOOLEAN, default_value=default_value)
class ObjectSchema(Schema):
    """Fluent builder for a JSON Schema of type ``object``.

    Constraints may be supplied through the constructor or via the
    chainable setter methods; every setter returns ``self``.
    """

    def __init__(self, properties: Optional[dict] = None, pattern_properties: Optional[dict] = None,
                 property_names: Optional[Union[dict, Schema]] = None,
                 min_properties: Optional[int] = None, max_properties: Optional[int] = None,
                 additional_properties: Union[bool, dict, Schema, None] = None, default_value=None):
        super().__init__(of_type=SchemaType.OBJECT, default_value=default_value)
        # 'properties'/'patternProperties' are set with explicit names here
        # because their keys need not be valid Python identifiers (so they
        # cannot be routed through the **kwargs-based properties() method).
        if properties is not None:
            self._set(properties, name='properties')
        if pattern_properties is not None:
            self._set(pattern_properties, name='patternProperties')
        if property_names is not None:
            self.property_names(property_names)
        if min_properties is not None:
            self.min_properties(min_properties)
        if max_properties is not None:
            self.max_properties(max_properties)
        if additional_properties is not None:
            self.additional_properties(additional_properties)

    def properties(self, **kwargs):
        """Set the named property schemas (property name -> schema)."""
        # dict(kwargs) replaces the manual key-by-key copy loop; it still
        # stores a fresh dict, so later mutation of kwargs cannot leak in.
        return self._set(dict(kwargs))

    def add_pattern_property(self, pattern: str, schema: Union[dict, Schema]):
        """Add one ``patternProperties`` entry keyed by regex *pattern*."""
        properties = self._ensure_dict('patternProperties')
        properties[pattern] = self._to_spec(schema)
        return self

    def required(self, *names: str):
        """Set the list of required property names."""
        return self._set(names)

    def property_names(self, schema: Union[dict, Schema]):
        """Constrain the property names themselves with *schema*."""
        return self._set(schema)

    def min_properties(self, value: int):
        """Set the minimum number of properties an instance may have."""
        return self._set(value)

    def max_properties(self, value: int):
        """Set the maximum number of properties an instance may have."""
        return self._set(value)

    def dependencies(self, value: Dict[str, Union[Sequence[str], dict, Schema]]):
        """Set property dependencies (name -> required names or schema)."""
        return self._set(value)

    def additional_properties(self, value: Union[bool, dict, Schema]):
        """Allow/forbid (bool) or constrain (schema) unmatched properties."""
        return self._set(value)
class ArraySchema(Schema):
    """Fluent builder for a JSON Schema of type ``array``.

    Item constraints may be supplied up front or chained afterwards;
    every setter returns ``self``.
    """

    def __init__(self, items: Union[Sequence[Union[dict, Schema]], dict, Schema, None] = None,
                 contains: Optional[Union[dict, Schema]] = None,
                 min_items: Optional[int] = None, max_items: Optional[int] = None,
                 unique_items: Optional[bool] = None, additional_items: Union[bool, dict, Schema, None] = None,
                 default_value=None):
        super().__init__(of_type=SchemaType.ARRAY, default_value=default_value)
        # Apply only the constraints that were supplied, preserving order.
        constraints = (
            (items, self.items),
            (contains, self.contains),
            (min_items, self.min_items),
            (max_items, self.max_items),
            (unique_items, self.unique_items),
            (additional_items, self.additional_items),
        )
        for supplied, setter in constraints:
            if supplied is not None:
                setter(supplied)

    def items(self, value: Union[Sequence[Union[dict, Schema]], dict, Schema]):
        """Set the item schema (or positional list of schemas)."""
        return self._set(value, name='items')

    def contains(self, schema: Union[dict, Schema]):
        """Require at least one element to match *schema*."""
        return self._set(schema, name='contains')

    def min_items(self, value: int):
        """Set the minimum array length."""
        return self._set(value, name='minItems')

    def max_items(self, value: int):
        """Set the maximum array length."""
        return self._set(value, name='maxItems')

    def unique_items(self, value: bool):
        """Require all elements to be unique when *value* is true."""
        return self._set(value, name='uniqueItems')

    def additional_items(self, value: Union[bool, dict, Schema]):
        """Allow/forbid or constrain items beyond a positional 'items' list."""
        return self._set(value, name='additionalItems')
class CombinerSchema(Schema):
    """Base class for the allOf/anyOf/oneOf combiners.

    *tag* is the JSON Schema keyword under which the member schemas are
    stored; subclasses fix it.
    """

    def __init__(self, *schemas: Union[dict, Schema], tag: str):
        super().__init__(of_type=SchemaType.NONE)
        self._tag = tag
        self._set(schemas, name=tag)

    def add_schema(self, schema: Union[dict, Schema]):
        """Append one more schema to the combiner list and return self."""
        # _to_spec turned the constructor tuple into a list, so the stored
        # value is mutable and can be appended to in place.
        self._spec[self._tag].append(Schema._to_spec(schema))
        return self

    def __add__(self, other):
        """``combiner + schema`` appends in place and returns the combiner."""
        if isinstance(other, (dict, Schema)):
            return self.add_schema(other)
        return NotImplemented
class AllOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *all* of the given schemas."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='allOf')
class AnyOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *at least one* given schema."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='anyOf')
class OneOfSchema(CombinerSchema):
    """Combiner requiring an instance to match *exactly one* given schema."""
    def __init__(self, *schemas: Union[dict, Schema]):
        super().__init__(*schemas, tag='oneOf')
class NotSchema(Schema):
    """Schema matching any instance that does *not* match the given schema."""
    def __init__(self, schema: Union[dict, Schema]):
        super().__init__(of_type=SchemaType.NONE)
        self._set(schema, name='not')
class RefSchema(Schema):
    """Schema consisting solely of a ``$ref`` to another schema."""
    def __init__(self, reference: str):
        super().__init__(of_type=SchemaType.NONE)
        self.ref(reference)
class EmptySchema(Schema):
def __init__(self):
super().__init__(of_type=SchemaType.NONE) | 0.871639 | 0.139719 |
if __name__ == '__main__':
    """GAN training entry point with optional in-training validation."""
    import os
    import torch
    from torch.utils.data import DataLoader
    from networks import Discriminator, Generator, Loss
    from options import TrainOption
    from pipeline import CustomDataset
    from utils import binning_and_cal_pixel_cc, Manager, update_lr, weights_init
    import numpy as np
    from tqdm import tqdm
    import datetime

    # Let cuDNN autotune kernels for the (fixed-size) inputs.
    torch.backends.cudnn.benchmark = True

    opt = TrainOption().parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_ids)
    device = torch.device('cuda:0' if opt.gpu_ids != -1 else 'cpu:0')
    dtype = torch.float16 if opt.data_type == 16 else torch.float32

    image_height = opt.image_height
    # Solar-disk mask parameters. FIX: the validation loop previously
    # hard-coded centre 511 and radius 392, which was only correct for
    # 1024-px images; both are now derived from the configured resolution.
    radius = 392 if image_height == 1024 else 196
    center = image_height // 2 - 1  # 511 for 1024-px images, as before

    if opt.val_during_train:
        from options import TestOption
        test_opt = TestOption().parse()
        val_freq = opt.val_freq

    init_lr = opt.lr
    lr = opt.lr

    dataset = CustomDataset(opt)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=opt.batch_size,
                             num_workers=opt.n_workers,
                             shuffle=not opt.no_shuffle)

    G = Generator(opt).apply(weights_init).to(device=device, dtype=dtype)
    D = Discriminator(opt).apply(weights_init).to(device=device, dtype=dtype)
    criterion = Loss(opt)

    G_optim = torch.optim.Adam(G.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)
    D_optim = torch.optim.Adam(D.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)

    # Resume from the checkpoint named by --latest, if one exists.
    if opt.latest and os.path.isfile(opt.model_dir + '/' + str(opt.latest) + '_dict.pt'):
        pt_file = torch.load(opt.model_dir + '/' + str(opt.latest) + '_dict.pt')
        init_epoch = pt_file['Epoch']
        print("Resume at epoch: ", init_epoch)
        G.load_state_dict(pt_file['G_state_dict'])
        D.load_state_dict(pt_file['D_state_dict'])
        G_optim.load_state_dict(pt_file['G_optim_state_dict'])
        D_optim.load_state_dict(pt_file['D_optim_state_dict'])
        current_step = init_epoch * len(dataset)
        # Recover the (possibly decayed) learning rate from the optimizer.
        for param_group in G_optim.param_groups:
            lr = param_group['lr']
    else:
        init_epoch = 1
        current_step = 0

    manager = Manager(opt)

    total_step = opt.n_epochs * len(data_loader)
    start_time = datetime.datetime.now()
    for epoch in range(init_epoch, opt.n_epochs + 1):
        # 'inputs'/'targets' instead of 'input' to avoid shadowing a builtin.
        for inputs, targets, _, _ in tqdm(data_loader):
            G.train()
            current_step += 1
            inputs = inputs.to(device=device, dtype=dtype)
            targets = targets.to(device, dtype=dtype)

            D_loss, G_loss, target_tensor, generated_tensor = criterion(D, G, inputs, targets)

            G_optim.zero_grad()
            G_loss.backward()
            G_optim.step()

            D_optim.zero_grad()
            D_loss.backward()
            D_optim.step()

            package = {'Epoch': epoch,
                       'current_step': current_step,
                       'total_step': total_step,
                       'D_loss': D_loss.detach().item(),
                       'G_loss': G_loss.detach().item(),
                       'D_state_dict': D.state_dict(),
                       'G_state_dict': G.state_dict(),
                       'D_optim_state_dict': D_optim.state_dict(),
                       'G_optim_state_dict': G_optim.state_dict(),
                       'target_tensor': target_tensor,
                       'generated_tensor': generated_tensor.detach()}
            manager(package)

            if opt.val_during_train and (current_step % val_freq == 0):
                G.eval()
                test_image_dir = os.path.join(test_opt.image_dir, str(current_step))
                os.makedirs(test_image_dir, exist_ok=True)
                test_model_dir = test_opt.model_dir

                test_dataset = CustomDataset(test_opt)
                test_data_loader = DataLoader(dataset=test_dataset,
                                              batch_size=test_opt.batch_size,
                                              num_workers=test_opt.n_workers,
                                              shuffle=not test_opt.no_shuffle)

                # Freeze the generator while validating.
                for p in G.parameters():
                    p.requires_grad_(False)

                list_TUMF_fake = list()
                list_TUMF_real = list()
                list_cc_1x1_fake = list()
                list_cc_1x1_real = list()
                list_cc_1x1 = list()
                list_cc_bin_2x2 = list()
                list_cc_bin_4x4 = list()
                list_cc_bin_8x8 = list()
                list_R1 = list()
                list_R2 = list()

                for inputs, targets, _, name in tqdm(test_data_loader):
                    inputs = inputs.to(device=device, dtype=dtype)
                    targets = targets.to(device, dtype=dtype)
                    fake = G(inputs)
                    manager.save_image(fake, path=os.path.join(test_image_dir, name[0] + '_fake.png'))
                    manager.save_image(targets, path=os.path.join(test_image_dir, name[0] + '_real.png'))

                    # Model measurements: rescale [-1, 1] network outputs by
                    # x100 (presumably to Gauss — confirm against pipeline).
                    np_fake = fake.cpu().numpy().squeeze() * 100.
                    np_real = targets.cpu().numpy().squeeze() * 100.
                    carrier_fake = list()
                    carrier_real = list()
                    for i in range(image_height):
                        for j in range(image_height):
                            # Only pixels inside the solar disk contribute.
                            if (i - center) ** 2 + (j - center) ** 2 <= radius ** 2:
                                list_cc_1x1_fake.append(np_fake[i, j])
                                list_cc_1x1_real.append(np_real[i, j])
                                if abs(np_fake[i, j]) >= 10:
                                    carrier_fake.append(abs(np_fake[i, j]))
                                if abs(np_real[i, j]) >= 10:
                                    carrier_real.append(abs(np_real[i, j]))

                    TUMF_fake, TUMF_real = np.array(carrier_fake).sum(), np.array(carrier_real).sum()
                    list_TUMF_fake.append(TUMF_fake)
                    list_TUMF_real.append(TUMF_real)
                    list_R1.append((TUMF_fake - TUMF_real) / TUMF_real)
                    # NOTE(review): list_cc_1x1_fake/real accumulate over ALL
                    # samples seen so far, so the per-sample cc/R2 values are
                    # cumulative — confirm this is intentional.
                    list_cc_1x1.append(np.corrcoef(list_cc_1x1_fake, list_cc_1x1_real)[0][1])
                    list_R2.append(((np.array(list_cc_1x1_fake) - np.array(list_cc_1x1_real)) ** 2).sum() / (np.array(list_cc_1x1_real) ** 2).sum())
                    list_cc_bin_2x2.append(binning_and_cal_pixel_cc(np_fake, np_real, 2))
                    list_cc_bin_4x4.append(binning_and_cal_pixel_cc(np_fake, np_real, 4))
                    list_cc_bin_8x8.append(binning_and_cal_pixel_cc(np_fake, np_real, 8))

                cc_TUMF = np.corrcoef(np.array(list_TUMF_fake), np.array(list_TUMF_real))
                cc_1x1 = np.mean(list_cc_1x1)
                cc_bin_2x2 = np.mean(list_cc_bin_2x2)
                cc_bin_4x4 = np.mean(list_cc_bin_4x4)
                cc_bin_8x8 = np.mean(list_cc_bin_8x8)
                R1_mean = np.mean(list_R1)
                R1_std = np.std(list_R1)
                R2_mean = np.mean(list_R2)
                R2_std = np.std(list_R2)

                # 'with' closes the file; the old explicit close() was redundant.
                with open(os.path.join(test_model_dir, 'Analysis.txt'), 'a') as analysis:
                    analysis.write(str(current_step) + ', ' + str(cc_TUMF[0][1]) + ', ' + str(cc_1x1) + ', ' +
                                   str(cc_bin_2x2) + ', ' + str(cc_bin_4x4) + ', ' + str(cc_bin_8x8) + ', ' +
                                   str(R1_mean) + ', ' + str(R1_std) + ', ' + str(R2_mean) + ', ' + str(R2_std) + '\n')

                # Unfreeze the generator and resume training.
                for p in G.parameters():
                    p.requires_grad_(True)

            if opt.debug:
                break

        # Linear LR decay after epoch_decay (pix2pixHD-style schedule).
        if epoch > opt.epoch_decay and opt.HD:
            lr = update_lr(lr, init_lr, opt.n_epochs - opt.epoch_decay, D_optim, G_optim)
print("Total time taken: ", datetime.datetime.now() - start_time) | train.py | if __name__ == '__main__':
import os
import torch
from torch.utils.data import DataLoader
from networks import Discriminator, Generator, Loss
from options import TrainOption
from pipeline import CustomDataset
from utils import binning_and_cal_pixel_cc, Manager, update_lr, weights_init
import numpy as np
from tqdm import tqdm
import datetime
torch.backends.cudnn.benchmark = True
opt = TrainOption().parse()
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_ids)
device = torch.device('cuda:0' if opt.gpu_ids != -1 else 'cpu:0')
dtype = torch.float16 if opt.data_type == 16 else torch.float32
image_height = opt.image_height
radius = 392 if image_height == 1024 else 196
if opt.val_during_train:
from options import TestOption
test_opt = TestOption().parse()
val_freq = opt.val_freq
init_lr = opt.lr
lr = opt.lr
dataset = CustomDataset(opt)
data_loader = DataLoader(dataset=dataset,
batch_size=opt.batch_size,
num_workers=opt.n_workers,
shuffle=not opt.no_shuffle)
G = Generator(opt).apply(weights_init).to(device=device, dtype=dtype)
D = Discriminator(opt).apply(weights_init).to(device=device, dtype=dtype)
criterion = Loss(opt)
G_optim = torch.optim.Adam(G.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)
D_optim = torch.optim.Adam(D.parameters(), lr=lr, betas=(opt.beta1, opt.beta2), eps=opt.eps)
if opt.latest and os.path.isfile(opt.model_dir + '/' + str(opt.latest) + '_dict.pt'):
pt_file = torch.load(opt.model_dir + '/' + str(opt.latest) + '_dict.pt')
init_epoch = pt_file['Epoch']
print("Resume at epoch: ", init_epoch)
G.load_state_dict(pt_file['G_state_dict'])
D.load_state_dict(pt_file['D_state_dict'])
G_optim.load_state_dict(pt_file['G_optim_state_dict'])
D_optim.load_state_dict(pt_file['D_optim_state_dict'])
current_step = init_epoch * len(dataset)
for param_group in G_optim.param_groups:
lr = param_group['lr']
else:
init_epoch = 1
current_step = 0
manager = Manager(opt)
total_step = opt.n_epochs * len(data_loader)
start_time = datetime.datetime.now()
for epoch in range(init_epoch, opt.n_epochs + 1):
for input, target, _, _ in tqdm(data_loader):
G.train()
current_step += 1
input, target = input.to(device=device, dtype=dtype), target.to(device, dtype=dtype)
D_loss, G_loss, target_tensor, generated_tensor = criterion(D, G, input, target)
G_optim.zero_grad()
G_loss.backward()
G_optim.step()
D_optim.zero_grad()
D_loss.backward()
D_optim.step()
package = {'Epoch': epoch,
'current_step': current_step,
'total_step': total_step,
'D_loss': D_loss.detach().item(),
'G_loss': G_loss.detach().item(),
'D_state_dict': D.state_dict(),
'G_state_dict': G.state_dict(),
'D_optim_state_dict': D_optim.state_dict(),
'G_optim_state_dict': G_optim.state_dict(),
'target_tensor': target_tensor,
'generated_tensor': generated_tensor.detach()}
manager(package)
if opt.val_during_train and (current_step % val_freq == 0):
G.eval()
test_image_dir = os.path.join(test_opt.image_dir, str(current_step))
os.makedirs(test_image_dir, exist_ok=True)
test_model_dir = test_opt.model_dir
test_dataset = CustomDataset(test_opt)
test_data_loader = DataLoader(dataset=test_dataset,
batch_size=test_opt.batch_size,
num_workers=test_opt.n_workers,
shuffle=not test_opt.no_shuffle)
for p in G.parameters():
p.requires_grad_(False)
list_TUMF_fake = list()
list_TUMF_real = list()
list_cc_1x1_fake = list()
list_cc_1x1_real = list()
list_cc_1x1 = list()
list_cc_bin_2x2 = list()
list_cc_bin_4x4 = list()
list_cc_bin_8x8 = list()
list_R1 = list()
list_R2 = list()
for input, target, _, name in tqdm(test_data_loader):
input, target = input.to(device=device, dtype=dtype), target.to(device, dtype=dtype)
fake = G(input)
manager.save_image(fake, path=os.path.join(test_image_dir, name[0] + '_fake.png'))
manager.save_image(target, path=os.path.join(test_image_dir, name[0] + '_real.png'))
# Model measurements
bin_size = 8
np_fake, np_real = fake.cpu().numpy().squeeze() * 100., target.cpu().numpy().squeeze() * 100.
# rearrange [-100, 100]
carrier_fake = list()
carrier_real = list()
for i in range(image_height):
for j in range(image_height):
if (i - 511) ** 2 + (j - 511) ** 2 <= 392 ** 2:
list_cc_1x1_fake.append(np_fake[i, j])
list_cc_1x1_real.append(np_real[i, j])
if abs(np_fake[i, j]) >= 10:
carrier_fake.append(abs(np_fake[i, j]))
if abs(np_real[i, j]) >= 10:
carrier_real.append(abs(np_real[i, j]))
TUMF_fake, TUMF_real = np.array(carrier_fake).sum(), np.array(carrier_real).sum()
list_TUMF_fake.append(TUMF_fake)
list_TUMF_real.append(TUMF_real)
list_R1.append((TUMF_fake - TUMF_real) / TUMF_real)
list_cc_1x1.append(np.corrcoef(list_cc_1x1_fake, list_cc_1x1_real)[0][1])
list_R2.append(((np.array(list_cc_1x1_fake) - np.array(list_cc_1x1_real)) ** 2).sum() / (np.array(list_cc_1x1_real) ** 2).sum())
list_cc_bin_2x2.append(binning_and_cal_pixel_cc(np_fake, np_real, 2))
list_cc_bin_4x4.append(binning_and_cal_pixel_cc(np_fake, np_real, 4))
list_cc_bin_8x8.append(binning_and_cal_pixel_cc(np_fake, np_real, 8))
cc_TUMF = np.corrcoef(np.array(list_TUMF_fake), np.array(list_TUMF_real))
cc_1x1 = np.mean(list_cc_1x1)
cc_bin_2x2 = np.mean(list_cc_bin_2x2)
cc_bin_4x4 = np.mean(list_cc_bin_4x4)
cc_bin_8x8 = np.mean(list_cc_bin_8x8)
R1_mean = np.mean(list_R1)
R1_std = np.std(list_R1)
R2_mean = np.mean(list_R2)
R2_std = np.std(list_R2)
with open(os.path.join(test_model_dir, 'Analysis.txt'), 'a') as analysis:
analysis.write(str(current_step) + ', ' + str(cc_TUMF[0][1]) + ', ' + str(cc_1x1) + ', ' +
str(cc_bin_2x2) + ', ' + str(cc_bin_4x4) + ', ' + str(cc_bin_8x8) + ', ' +
str(R1_mean) + ', ' + str(R1_std) + ', ' + str(R2_mean) + ', ' + str(R2_std) + '\n')
analysis.close()
for p in G.parameters():
p.requires_grad_(True)
if opt.debug:
break
if epoch > opt.epoch_decay and opt.HD:
lr = update_lr(lr, init_lr, opt.n_epochs - opt.epoch_decay, D_optim, G_optim)
print("Total time taken: ", datetime.datetime.now() - start_time) | 0.55097 | 0.244115 |
from __future__ import division
import numpy
import pytest
from mpilot.commands import Argument
from mpilot.exceptions import ResultNotFuzzy, ResultIsFuzzy
from mpilot.libraries.eems.exceptions import MismatchedWeights
from mpilot.libraries.eems.fuzzy import (
CvtToFuzzy,
CvtToFuzzyZScore,
CvtToFuzzyCat,
CvtToFuzzyCurve,
CvtToFuzzyCurveZScore,
CvtToBinary,
FuzzyUnion,
FuzzyWeightedUnion,
FuzzySelectedUnion,
FuzzyOr,
FuzzyAnd,
FuzzyXOr,
FuzzyNot,
CvtFromFuzzy,
CvtToFuzzyMeanToMid,
)
from ..utils import create_command_with_result
def test_convert_to_fuzzy():
    """CvtToFuzzy rescales raw values into [-1, 1], with optional thresholds."""
    arr = numpy.ma.arange(10)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    result = CvtToFuzzy("ConvertResult").execute(InFieldName=command)
    assert (result.round(2) == answer).all()
    # With explicit thresholds, values beyond TrueThreshold clamp to 1.
    answer = numpy.ma.array([-1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    result = CvtToFuzzy("ConvertResult").execute(
        InFieldName=command, TrueThreshold=2, FalseThreshold=0
    )
    assert (result == answer).all()


def test_convert_to_fuzzy_z_score():
    """CvtToFuzzyZScore maps z-score thresholds to the fuzzy extremes."""
    arr = numpy.ma.arange(10, dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array(
        [-1.0, -1.0, -0.87, -0.52, -0.17, 0.17, 0.52, 0.87, 1.0, 1.0]
    )
    result = CvtToFuzzyZScore("ConvertResult").execute(
        InFieldName=command, TrueThresholdZScore=1, FalseThresholdZScore=-1
    )
    assert (result.round(2) == answer).all()


def test_convert_to_fuzzy_cat():
    """CvtToFuzzyCat maps categorical raw values; unmatched get the default."""
    arr = numpy.ma.array([1, 1, 5, 4, 4, 8, 8, 9], dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array([-1.0, -1.0, 0.1, 0, 0, 0.9, 0.9, 1.0], dtype=float)
    result = CvtToFuzzyCat("ConvertResult").execute(
        InFieldName=command,
        RawValues=[1, 4, 5, 8, 9],
        FuzzyValues=[-1.0, 0.0, 0.1, 0.9, 1.0],
        DefaultFuzzyValue=0,
    )
    assert (result == answer).all()
    # Same answer when 5 is unmatched and covered by the default value.
    result = CvtToFuzzyCat("ConvertResult").execute(
        InFieldName=command,
        RawValues=[1, 4, 8, 9],
        FuzzyValues=[-1.0, 0.0, 0.9, 1.0],
        DefaultFuzzyValue=0.1,
    )
    assert (result == answer).all()


def test_convert_to_fuzzy_curve():
    """CvtToFuzzyCurve interpolates piecewise between raw/fuzzy anchors."""
    arr = numpy.ma.arange(10, dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array(
        [-1.0, -1.0, -0.5, 0.0, 0.17, 0.33, 0.5, 0.67, 0.83, 1.0], dtype=float
    )
    result = CvtToFuzzyCurve("ConvertResult").execute(
        InFieldName=command, RawValues=[1.0, 3.0, 9.0], FuzzyValues=[-1.0, 0.0, 1.0]
    )
    assert (result.round(2) == answer).all()


def test_mean_to_mid():
    """CvtToFuzzyMeanToMid anchors the data mean to the middle fuzzy value."""
    arr = numpy.ma.arange(10, dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array(
        [-1.0, -0.6, -0.2, -0.12, -0.04, 0.08, 0.24, 0.4, 0.7, 1.0], dtype=float
    )
    result = CvtToFuzzyMeanToMid("ConvertResult").execute(
        InFieldName=command, IgnoreZeros=False, FuzzyValues=[-1.0, -0.2, 0.0, 0.4, 1.0]
    )
    assert (result.round(2) == answer).all()


def test_mean_to_mid_with_uneven_distribution():
    """Tests that the CvtToFuzzyMeanToMid command works when the largest raw value is much larger than other values"""
    arr = numpy.ma.array([0, 25, 88, 999], dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array([-1.0, -0.47, -0.16, 1.0], dtype=float)
    result = CvtToFuzzyMeanToMid("ConvertResult").execute(
        InFieldName=command, IgnoreZeros=False, FuzzyValues=[-1.0, -0.2, 0.0, 0.4, 1.0]
    )
    assert (result.round(2) == answer).all()


def test_convert_to_fuzzy_curve_z_score():
    """CvtToFuzzyCurveZScore: all inputs exceed the last z anchor here."""
    arr = numpy.ma.arange(10, dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array(
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=float
    )
    result = CvtToFuzzyCurveZScore("ConvertResult").execute(
        InFieldName=command, ZScoreValues=[-0.1, 0.0, 0.1], FuzzyValues=[1.0, 5.0, 9.0]
    )
    assert (result == answer).all()


def test_convert_to_binary():
    """CvtToBinary thresholds in either direction."""
    arr = numpy.ma.arange(10, dtype=float)
    command = create_command_with_result("Result", arr)
    answer = numpy.ma.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    result = CvtToBinary("ConvertResult").execute(
        InFieldName=command, Threshold=5, Direction="LowToHigh"
    )
    assert (result == answer).all()
    # Reversed direction inverts the binary output.
    answer = numpy.ma.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    result = CvtToBinary("ConvertResult").execute(
        InFieldName=command, Threshold=5, Direction="HighToLow"
    )
    assert (result == answer).all()


def test_fuzzy_union():
    """FuzzyUnion averages the input fuzzy layers elementwise."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([0, 0.125, 0.75, 0.75, 0.375])
    result = FuzzyUnion("UnionResult").execute(InFieldNames=[command_1, command_2])
    assert (result == answer).all()


def test_fuzzy_weighted_union():
    """FuzzyWeightedUnion: weighted mean, and weight-count validation."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([-0.33, -0.08, 0.83, 0.67, 0.33])
    result = FuzzyWeightedUnion("UnionResult").execute(
        InFieldNames=[command_1, command_2], Weights=[1, 0.5]
    )
    assert (result.round(2) == answer).all()
    # Too few weights raises MismatchedWeights with both lengths reported.
    with pytest.raises(MismatchedWeights) as ex:
        FuzzyWeightedUnion("UnionResult").execute(
            InFieldNames=[command_1, command_2], Weights=[1]
        )
    assert ex.value.target_length == 2
    assert ex.value.length == 1
    # Too many weights raises as well.
    with pytest.raises(MismatchedWeights) as ex:
        FuzzyWeightedUnion("UnionResult").execute(
            InFieldNames=[command_1, command_2], Weights=[1, 2, 3]
        )
    assert ex.value.target_length == 2
    assert ex.value.length == 3


def test_fuzzy_selected_union():
    """FuzzySelectedUnion averages the N truest/falsest layers per cell."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([1.0, 0.75, 1.0, 1.0, 0.5])
    result = FuzzySelectedUnion("UnionResult").execute(
        InFieldNames=[command_1, command_2],
        TruestOrFalsest="Truest",
        NumberToConsider=1,
    )
    assert (result == answer).all()
    answer = numpy.ma.array([-1.0, -0.5, 0.5, 0.5, 0.25])
    result = FuzzySelectedUnion("UnionResult").execute(
        InFieldNames=[command_1, command_2],
        TruestOrFalsest="Falsest",
        NumberToConsider=1,
    )
    assert (result == answer).all()
    answer = numpy.ma.array([0.0, 0.125, 0.75, 0.75, 0.375])
    result = FuzzySelectedUnion("UnionResult").execute(
        InFieldNames=[command_1, command_2],
        TruestOrFalsest="Truest",
        NumberToConsider=2,
    )
    assert (result == answer).all()
    # Masked cells must stay masked; compressed() drops them for comparison.
    arr_1 = numpy.ma.array(
        [-1, -0.5, 1, 0.5, 0.25, 0], mask=[False, False, False, False, False, True]
    )
    arr_2 = numpy.ma.array(
        [1, 0.75, 0.5, 1, 0.5, 0], mask=[False, False, False, False, False, True]
    )
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    result = FuzzySelectedUnion("UnionResult").execute(
        InFieldNames=[command_1, command_2],
        TruestOrFalsest="Truest",
        NumberToConsider=2,
    )
    assert (result.compressed() == answer).all()


def test_fuzzy_or():
    """FuzzyOr takes the elementwise maximum (truest) value."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([1, 0.75, 1, 1, 0.5])
    result = FuzzyOr("OrResult").execute(InFieldNames=[command_1, command_2])
    assert (result == answer).all()


def test_fuzzy_and():
    """FuzzyAnd takes the elementwise minimum (falsest) value."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([-1, -0.5, 0.5, 0.5, 0.25])
    result = FuzzyAnd("AndResult").execute(InFieldNames=[command_1, command_2])
    assert (result == answer).all()


def test_fuzzy_xor():
    """FuzzyXOr: expected values taken from the library's XOR formula."""
    arr_1 = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([1.0, 0.393, 0.625, 0.625, 0.292])
    result = FuzzyXOr("XOrResult").execute(InFieldNames=[command_1, command_2])
    assert (result.round(3) == answer).all()


def test_fuzzy_not():
    """FuzzyNot negates the fuzzy values."""
    arr = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    command = create_command_with_result("Result", arr, fuzzy=True)
    answer = numpy.ma.array([1, 0.5, -1, -0.5, -0.25])
    result = FuzzyNot("NotResult").execute(InFieldName=command)
    assert (result == answer).all()


def test_convert_from_fuzzy():
    """CvtFromFuzzy is the inverse of CvtToFuzzy for the given thresholds."""
    arr = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    command = create_command_with_result("Result", arr, fuzzy=True)
    answer = numpy.ma.arange(10, dtype=float)
    result = CvtFromFuzzy("ConvertResult").execute(
        InFieldName=command, TrueThreshold=9.0, FalseThreshold=0.0
    )
    assert (result.round() == answer).all()
def test_fuzzy_validation():
arr = numpy.ma.array(
[-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
)
command = create_command_with_result("Result", arr, fuzzy=False)
with pytest.raises(ResultNotFuzzy):
FuzzyNot("NotResul").validate_params({"InFieldName": command})
def test_nonfuzzy_validation():
arr = numpy.ma.array(
[-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
)
command = create_command_with_result("Result", arr, fuzzy=True)
with pytest.raises(ResultIsFuzzy):
CvtToFuzzy("ConvertResult").validate_params({"InFieldName": command})
def test_unmasked_array():
arr_1 = numpy.array([-1, -0.5, 1, 0.5, 0.25])
arr_2 = numpy.array([1, 0.75, 0.5, 1, 0.5])
command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
answer = numpy.ma.array([1.0, 0.75, 1.0, 1.0, 0.5])
command = FuzzySelectedUnion(
"UnionResult",
arguments=[
Argument("InFieldNames", [command_1, command_2], 1),
Argument("TruestOrFalsest", "Truest", 1),
Argument("NumberToConsider", 1, 1),
],
)
result = command.result
assert (result == answer).all()
import numpy
import pytest
from mpilot.commands import Argument
from mpilot.exceptions import ResultNotFuzzy, ResultIsFuzzy
from mpilot.libraries.eems.exceptions import MismatchedWeights
from mpilot.libraries.eems.fuzzy import (
CvtToFuzzy,
CvtToFuzzyZScore,
CvtToFuzzyCat,
CvtToFuzzyCurve,
CvtToFuzzyCurveZScore,
CvtToBinary,
FuzzyUnion,
FuzzyWeightedUnion,
FuzzySelectedUnion,
FuzzyOr,
FuzzyAnd,
FuzzyXOr,
FuzzyNot,
CvtFromFuzzy,
CvtToFuzzyMeanToMid,
)
from ..utils import create_command_with_result
def test_convert_to_fuzzy():
    """CvtToFuzzy rescales raw values into the [-1, 1] fuzzy range."""
    raw = numpy.ma.arange(10)
    source = create_command_with_result("Result", raw)
    # Default thresholds: the input's own extremes map onto [-1, 1].
    expected = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    converted = CvtToFuzzy("ConvertResult").execute(InFieldName=source)
    assert (converted.round(2) == expected).all()
    # Explicit thresholds: values at or beyond TrueThreshold clamp to 1.
    expected = numpy.ma.array([-1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    converted = CvtToFuzzy("ConvertResult").execute(
        InFieldName=source, TrueThreshold=2, FalseThreshold=0
    )
    assert (converted == expected).all()
def test_convert_to_fuzzy_z_score():
    """CvtToFuzzyZScore converts via z-scores, clamping beyond the thresholds."""
    source = create_command_with_result("Result", numpy.ma.arange(10, dtype=float))
    expected = numpy.ma.array(
        [-1.0, -1.0, -0.87, -0.52, -0.17, 0.17, 0.52, 0.87, 1.0, 1.0]
    )
    converted = CvtToFuzzyZScore("ConvertResult").execute(
        InFieldName=source, TrueThresholdZScore=1, FalseThresholdZScore=-1
    )
    assert (converted.round(2) == expected).all()
def test_convert_to_fuzzy_cat():
    """CvtToFuzzyCat maps categorical raw values to explicit fuzzy values."""
    raw = numpy.ma.array([1, 1, 5, 4, 4, 8, 8, 9], dtype=float)
    source = create_command_with_result("Result", raw)
    expected = numpy.ma.array([-1.0, -1.0, 0.1, 0, 0, 0.9, 0.9, 1.0], dtype=float)
    # Every category listed explicitly; DefaultFuzzyValue is never used here.
    converted = CvtToFuzzyCat("ConvertResult").execute(
        InFieldName=source,
        RawValues=[1, 4, 5, 8, 9],
        FuzzyValues=[-1.0, 0.0, 0.1, 0.9, 1.0],
        DefaultFuzzyValue=0,
    )
    assert (converted == expected).all()
    # Category 5 omitted from RawValues, so it falls back to DefaultFuzzyValue.
    converted = CvtToFuzzyCat("ConvertResult").execute(
        InFieldName=source,
        RawValues=[1, 4, 8, 9],
        FuzzyValues=[-1.0, 0.0, 0.9, 1.0],
        DefaultFuzzyValue=0.1,
    )
    assert (converted == expected).all()
def test_convert_to_fuzzy_curve():
    """CvtToFuzzyCurve interpolates between (raw, fuzzy) control points."""
    source = create_command_with_result("Result", numpy.ma.arange(10, dtype=float))
    expected = numpy.ma.array(
        [-1.0, -1.0, -0.5, 0.0, 0.17, 0.33, 0.5, 0.67, 0.83, 1.0], dtype=float
    )
    converted = CvtToFuzzyCurve("ConvertResult").execute(
        InFieldName=source, RawValues=[1.0, 3.0, 9.0], FuzzyValues=[-1.0, 0.0, 1.0]
    )
    assert (converted.round(2) == expected).all()
def test_mean_to_mid():
    """Checks CvtToFuzzyMeanToMid against precomputed values for a ramp."""
    source = create_command_with_result("Result", numpy.ma.arange(10, dtype=float))
    expected = numpy.ma.array(
        [-1.0, -0.6, -0.2, -0.12, -0.04, 0.08, 0.24, 0.4, 0.7, 1.0], dtype=float
    )
    converted = CvtToFuzzyMeanToMid("ConvertResult").execute(
        InFieldName=source, IgnoreZeros=False, FuzzyValues=[-1.0, -0.2, 0.0, 0.4, 1.0]
    )
    assert (converted.round(2) == expected).all()
def test_mean_to_mid_with_uneven_distribution():
    """Tests that the CvtToFuzzyMeanToMid command works when the largest raw value is much larger than other values"""
    source = create_command_with_result(
        "Result", numpy.ma.array([0, 25, 88, 999], dtype=float)
    )
    expected = numpy.ma.array([-1.0, -0.47, -0.16, 1.0], dtype=float)
    converted = CvtToFuzzyMeanToMid("ConvertResult").execute(
        InFieldName=source, IgnoreZeros=False, FuzzyValues=[-1.0, -0.2, 0.0, 0.4, 1.0]
    )
    assert (converted.round(2) == expected).all()
def test_convert_to_fuzzy_curve_z_score():
    """Checks CvtToFuzzyCurveZScore for a case whose output is uniformly 1."""
    source = create_command_with_result("Result", numpy.ma.arange(10, dtype=float))
    expected = numpy.ma.array([1.0] * 10, dtype=float)
    converted = CvtToFuzzyCurveZScore("ConvertResult").execute(
        InFieldName=source, ZScoreValues=[-0.1, 0.0, 0.1], FuzzyValues=[1.0, 5.0, 9.0]
    )
    assert (converted == expected).all()
def test_convert_to_binary():
    """CvtToBinary thresholds values to 0/1 in either direction."""
    source = create_command_with_result("Result", numpy.ma.arange(10, dtype=float))
    # LowToHigh: values from the threshold upward become 1.
    converted = CvtToBinary("ConvertResult").execute(
        InFieldName=source, Threshold=5, Direction="LowToHigh"
    )
    assert (converted == numpy.ma.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])).all()
    # HighToLow: the mapping is inverted.
    converted = CvtToBinary("ConvertResult").execute(
        InFieldName=source, Threshold=5, Direction="HighToLow"
    )
    assert (converted == numpy.ma.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])).all()
def test_fuzzy_union():
    """FuzzyUnion of two inputs matches their elementwise mean."""
    command_a = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    command_b = create_command_with_result(
        "Result", numpy.ma.array([1, 0.75, 0.5, 1, 0.5]), fuzzy=True
    )
    expected = numpy.ma.array([0, 0.125, 0.75, 0.75, 0.375])
    result = FuzzyUnion("UnionResult").execute(InFieldNames=[command_a, command_b])
    assert (result == expected).all()
def test_fuzzy_weighted_union():
    """FuzzyWeightedUnion combines inputs by weight and rejects bad weight counts."""
    command_a = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    command_b = create_command_with_result(
        "Result", numpy.ma.array([1, 0.75, 0.5, 1, 0.5]), fuzzy=True
    )
    expected = numpy.ma.array([-0.33, -0.08, 0.83, 0.67, 0.33])
    result = FuzzyWeightedUnion("UnionResult").execute(
        InFieldNames=[command_a, command_b], Weights=[1, 0.5]
    )
    assert (result.round(2) == expected).all()
    # Too few weights: the exception reports both expected and actual counts.
    with pytest.raises(MismatchedWeights) as ex:
        FuzzyWeightedUnion("UnionResult").execute(
            InFieldNames=[command_a, command_b], Weights=[1]
        )
    assert ex.value.target_length == 2
    assert ex.value.length == 1
    # Too many weights.
    with pytest.raises(MismatchedWeights) as ex:
        FuzzyWeightedUnion("UnionResult").execute(
            InFieldNames=[command_a, command_b], Weights=[1, 2, 3]
        )
    assert ex.value.target_length == 2
    assert ex.value.length == 3
def test_fuzzy_selected_union():
    """FuzzySelectedUnion combines the N truest/falsest values per cell."""
    arr_a = numpy.ma.array([-1, -0.5, 1, 0.5, 0.25])
    arr_b = numpy.ma.array([1, 0.75, 0.5, 1, 0.5])
    command_a = create_command_with_result("Result", arr_a, fuzzy=True)
    command_b = create_command_with_result("Result", arr_b, fuzzy=True)

    def run(direction, count, first=command_a, second=command_b):
        # Small local helper to keep each case to one line of intent.
        return FuzzySelectedUnion("UnionResult").execute(
            InFieldNames=[first, second],
            TruestOrFalsest=direction,
            NumberToConsider=count,
        )

    # Truest single value: the per-cell maximum of the two inputs.
    assert (run("Truest", 1) == numpy.ma.array([1.0, 0.75, 1.0, 1.0, 0.5])).all()
    # Falsest single value: the per-cell minimum.
    assert (run("Falsest", 1) == numpy.ma.array([-1.0, -0.5, 0.5, 0.5, 0.25])).all()
    # Considering both values: the per-cell mean.
    expected = numpy.ma.array([0.0, 0.125, 0.75, 0.75, 0.375])
    assert (run("Truest", 2) == expected).all()
    # Masked cells stay masked; the unmasked cells keep the same means.
    masked_a = numpy.ma.array(
        [-1, -0.5, 1, 0.5, 0.25, 0], mask=[False, False, False, False, False, True]
    )
    masked_b = numpy.ma.array(
        [1, 0.75, 0.5, 1, 0.5, 0], mask=[False, False, False, False, False, True]
    )
    command_ma = create_command_with_result("Result", masked_a, fuzzy=True)
    command_mb = create_command_with_result("Result", masked_b, fuzzy=True)
    result = run("Truest", 2, command_ma, command_mb)
    assert (result.compressed() == expected).all()
def test_fuzzy_or():
    """FuzzyOr keeps the truest (per-cell maximum) value."""
    command_a = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    command_b = create_command_with_result(
        "Result", numpy.ma.array([1, 0.75, 0.5, 1, 0.5]), fuzzy=True
    )
    result = FuzzyOr("OrResult").execute(InFieldNames=[command_a, command_b])
    assert (result == numpy.ma.array([1, 0.75, 1, 1, 0.5])).all()
def test_fuzzy_and():
    """FuzzyAnd keeps the falsest (per-cell minimum) value."""
    command_a = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    command_b = create_command_with_result(
        "Result", numpy.ma.array([1, 0.75, 0.5, 1, 0.5]), fuzzy=True
    )
    result = FuzzyAnd("AndResult").execute(InFieldNames=[command_a, command_b])
    assert (result == numpy.ma.array([-1, -0.5, 0.5, 0.5, 0.25])).all()
def test_fuzzy_xor():
    """Spot-checks FuzzyXOr against precomputed values (3 decimal places)."""
    command_a = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    command_b = create_command_with_result(
        "Result", numpy.ma.array([1, 0.75, 0.5, 1, 0.5]), fuzzy=True
    )
    expected = numpy.ma.array([1.0, 0.393, 0.625, 0.625, 0.292])
    result = FuzzyXOr("XOrResult").execute(InFieldNames=[command_a, command_b])
    assert (result.round(3) == expected).all()
def test_fuzzy_not():
    """FuzzyNot negates every fuzzy value."""
    source = create_command_with_result(
        "Result", numpy.ma.array([-1, -0.5, 1, 0.5, 0.25]), fuzzy=True
    )
    expected = numpy.ma.array([1, 0.5, -1, -0.5, -0.25])
    result = FuzzyNot("NotResult").execute(InFieldName=source)
    assert (result == expected).all()
def test_convert_from_fuzzy():
    """CvtFromFuzzy maps fuzzy values back into the raw threshold range."""
    fuzzy_values = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    source = create_command_with_result("Result", fuzzy_values, fuzzy=True)
    expected = numpy.ma.arange(10, dtype=float)
    result = CvtFromFuzzy("ConvertResult").execute(
        InFieldName=source, TrueThreshold=9.0, FalseThreshold=0.0
    )
    assert (result.round() == expected).all()
def test_fuzzy_validation():
    """Fuzzy-only commands must reject an input whose result is not fuzzy.

    Fix: the result name passed to FuzzyNot had a typo ("NotResul"); it is
    now spelled consistently with the other tests in this module.
    """
    arr = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    # fuzzy=False: the input result is raw, so validation must fail.
    command = create_command_with_result("Result", arr, fuzzy=False)
    with pytest.raises(ResultNotFuzzy):
        FuzzyNot("NotResult").validate_params({"InFieldName": command})
def test_nonfuzzy_validation():
    """Raw-input commands must reject an input whose result is already fuzzy."""
    fuzzy_arr = numpy.ma.array(
        [-1.00, -0.78, -0.56, -0.33, -0.11, 0.11, 0.33, 0.56, 0.78, 1.00]
    )
    source = create_command_with_result("Result", fuzzy_arr, fuzzy=True)
    with pytest.raises(ResultIsFuzzy):
        CvtToFuzzy("ConvertResult").validate_params({"InFieldName": source})
def test_unmasked_array():
    """Commands accept plain (unmasked) numpy arrays as input results.

    Fix: stray dataset residue ("| 0.716615 | 0.473353") fused onto the final
    assert line has been removed.
    """
    arr_1 = numpy.array([-1, -0.5, 1, 0.5, 0.25])
    arr_2 = numpy.array([1, 0.75, 0.5, 1, 0.5])
    command_1 = create_command_with_result("Result", arr_1, fuzzy=True)
    command_2 = create_command_with_result("Result", arr_2, fuzzy=True)
    answer = numpy.ma.array([1.0, 0.75, 1.0, 1.0, 0.5])
    # Build the command from raw Argument objects instead of execute() kwargs.
    command = FuzzySelectedUnion(
        "UnionResult",
        arguments=[
            Argument("InFieldNames", [command_1, command_2], 1),
            Argument("TruestOrFalsest", "Truest", 1),
            Argument("NumberToConsider", 1, 1),
        ],
    )
    result = command.result
    assert (result == answer).all()
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QWidget, QFileDialog, QPushButton
from PyQt5.QtCore import pyqtSignal
from modules.primerainterfaz import Ui_MainWindow, Ui_Form_papeleta, Ui_Form_variosExam, Ui_Form_DatosVarios
from modules import generarpdf_papeleta, procesador_examenes
import sys
class window_tablaVarios(QtWidgets.QMainWindow):
trigger = pyqtSignal()
closing = pyqtSignal()
def __init__(self, file_respuesta, files_examenes, parent=None):
super(window_tablaVarios, self).__init__(parent)
self.ui = Ui_Form_DatosVarios()
self.ui.setupUi(self)
self.trigger.connect(self.parent().completar_barra)
self.closing.connect(self.parent().setup_defecto)
self.ui.tableWidget.setColumnCount(5)
self.ui.tableWidget.setRowCount(2)
self.ui.tableWidget.setHorizontalHeaderLabels(('ID', 'Info', 'Grade', 'Grading', 'Type'))
header = self.ui.tableWidget.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
print(file_respuesta)
respuestas, nid, examen, datos, contornos_r = procesador_examenes.procesar_examen(file_respuesta)
nota_r, examen = procesador_examenes.obtener_nota(respuestas, respuestas, contornos_r, examen)
self.mostrar_respuestas = QPushButton('Show', self)
self.mostrar_datos = QPushButton('Show', self)
self.ui.tableWidget.setItem(0, 0, QtWidgets.QTableWidgetItem(nid))
self.ui.tableWidget.setCellWidget(0, 1, self.mostrar_datos)
self.ui.tableWidget.setItem(0, 2, QtWidgets.QTableWidgetItem("N/A"))
self.ui.tableWidget.setCellWidget(0, 3, self.mostrar_respuestas)
self.ui.tableWidget.setItem(0, 4, QtWidgets.QTableWidgetItem("Answer sheet"))
self.btmostrar = []
self.btdatos = []
self.imagenes = []
self.datos = []
for i in range(len(files_examenes)):
fila = i+1
self.btn = QPushButton('Show', self)
self.btnb = QPushButton('Show', self)
self.ui.tableWidget.insertRow(fila)
self.ui.tableWidget.setCellWidget(fila, 1, self.btn)
self.ui.tableWidget.setCellWidget(fila, 3, self.btnb)
self.btn.clicked.connect(lambda state, x=i: self.mostrar_respectivo_datos(x))
self.btnb.clicked.connect(lambda state, x=i: self.mostrar_respectivo(x))
self.btmostrar.append(self.btn)
self.btmostrar.append(self.btnb)
#self.btnsmostrar[i].clicked.connect(lambda: self.mostrar_respectivo(i))
contestadas, nidb, examenb, datosb, contornos = procesador_examenes.procesar_examen(files_examenes[i])
self.datos.append(datosb)
#print(respuestas, contestadas)
nota, examenb = procesador_examenes.obtener_nota(respuestas, contestadas, contornos, examenb)
self.imagenes.append(examenb)
self.ui.tableWidget.setItem(fila, 0, QtWidgets.QTableWidgetItem(nidb))
self.ui.tableWidget.setItem(fila, 2, QtWidgets.QTableWidgetItem("{}".format(nota)))
self.ui.tableWidget.setItem(fila, 4, QtWidgets.QTableWidgetItem("Answered"))
self.mostrar_respuestas.clicked.connect(lambda: self.ver_respuestas(examen))
self.mostrar_datos.clicked.connect(lambda: self.ver_datos_respuestas(datos))
self.trigger.emit()
def ver_respuestas(self, examen):
procesador_examenes.mostrar_imagen("Answers found", examen)
def ver_datos_respuestas(self, datos):
procesador_examenes.mostrar_imagen("Test's info", datos)
def mostrar_respectivo(self, num):
procesador_examenes.mostrar_imagen("Student's answers", self.imagenes[num])
def mostrar_respectivo_datos(self, num):
procesador_examenes.mostrar_imagen("Student's info", self.datos[num])
def closeEvent(self, event):
self.closing.emit()
event.accept()
class window_variosExam(QtWidgets.QMainWindow):
fileName = []
files = []
def __init__(self, parent=None):
super(window_variosExam, self).__init__()
self.ui = Ui_Form_variosExam()
self.ui.setupUi(self)
self.ui.pushButton_2.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
self.ui.pushButton.clicked.connect(self.subir_respuesta)
self.ui.pushButton_2.clicked.connect(self.subir_examenes)
self.ui.pushButton_3.clicked.connect(self.calificar_examenes)
def subir_respuesta(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,
"Open answer-sheet",
"Answer file",
"Images (*.png *.xpm *.jpg)", options=options)
if fileName:
self.fileName = fileName
self.ui.label_4.setText("Loaded")
self.ui.pushButton_2.setEnabled(True)
self.ui.pushButton_3.setEnabled(True)
def subir_examenes(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
files, _ = QFileDialog.getOpenFileNames(self,
"QFileDialog.getOpenFileNames()",
"","Images (*.png *.xpm *.jpg)", options=options)
if files:
self.files = files
self.ui.label_5.setText("Loaded")
self.ui.progressBar.setEnabled(True)
def calificar_examenes(self):
self.ui.progressBar.setProperty("value", 86)
self.sub = window_tablaVarios(self.fileName, self.files, self)
self.sub.show()
def completar_barra(self):
self.ui.progressBar.setProperty("value", 100)
def setup_defecto(self):
self.ui.progressBar.setEnabled(False)
self.ui.progressBar.setProperty("value", 0)
self.ui.pushButton_2.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
self.ui.label_4.setText("")
self.ui.label_5.setText("")
class window_papeleta(QWidget):
def __init__(self):
super(window_papeleta, self).__init__()
self.ui = Ui_Form_papeleta()
self.ui.setupUi(self)
self.ui.spinBox.setMinimum(1)
self.ui.spinBox_2.setMinimum(1)
self.ui.spinBox.valueChanged[int].connect(self.limits)
self.ui.pushButton.clicked.connect(self.generarPDF)
def limits(self):
self.ui.spinBox_2.setMinimum(self.ui.spinBox.value())
def generarPDF(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()","print.pdf","PDF (*.pdf)", options=options)
self.ui.progressBar.setProperty("value", 50)
limite_inicial = self.ui.spinBox.value()
limite_final = self.ui.spinBox_2.value()
inombre = False
ifecha = False
imateria = False
icarnet = False
if self.ui.checkBox.isChecked() == True:
inombre = True
else:
inombre = False
if self.ui.checkBox_2.isChecked() == True:
ifecha = True
else:
ifecha = False
if self.ui.checkBox_3.isChecked() == True:
imateria = True
else:
imateria = False
if self.ui.checkBox_4.isChecked() == True:
icarnet = True
else:
icarnet = False
formato = "resources/images/base_format.png"
fuente_titulo = "resources/fonts/monoglyceride.bold.ttf"
fuente_numero = "resources/fonts/Shahd_Serif.ttf"
completado = generarpdf_papeleta.generar_pdf(limite_inicial, limite_final, inombre,
ifecha, imateria, icarnet, formato, fuente_titulo, fuente_numero, fileName)
if completado == 1:
self.ui.progressBar.setProperty("value", 100)
class mywindow(QtWidgets.QMainWindow):
def __init__(self):
super(mywindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton.clicked.connect(self.openSub1)
self.ui.pushButton_3.clicked.connect(self.openSub)
def openSub1(self):
self.sub = window_variosExam()
self.sub.show()
def openSub(self):
self.sub = window_papeleta()
self.sub.show()
if __name__ == "__main__":
def run_app():
app = QtWidgets.QApplication(sys.argv)
application = mywindow()
application.show()
app.exec_()
run_app()
from PyQt5.QtWidgets import QWidget, QFileDialog, QPushButton
from PyQt5.QtCore import pyqtSignal
from modules.primerainterfaz import Ui_MainWindow, Ui_Form_papeleta, Ui_Form_variosExam, Ui_Form_DatosVarios
from modules import generarpdf_papeleta, procesador_examenes
import sys
class window_tablaVarios(QtWidgets.QMainWindow):
    """Results window: a table with one row per graded exam.

    Processes the answer sheet in ``file_respuesta``, grades every image in
    ``files_examenes`` against it, and shows ID / info / grade per row.
    The parent must provide ``completar_barra`` and ``setup_defecto`` slots
    (``window_variosExam`` passes itself as parent).
    """

    # Emitted once grading finishes so the parent can complete its progress bar.
    trigger = pyqtSignal()
    # Emitted when this window closes so the parent can reset its UI.
    closing = pyqtSignal()

    def __init__(self, file_respuesta, files_examenes, parent=None):
        super(window_tablaVarios, self).__init__(parent)
        self.ui = Ui_Form_DatosVarios()
        self.ui.setupUi(self)
        self.trigger.connect(self.parent().completar_barra)
        self.closing.connect(self.parent().setup_defecto)
        self.ui.tableWidget.setColumnCount(5)
        self.ui.tableWidget.setRowCount(2)
        self.ui.tableWidget.setHorizontalHeaderLabels(('ID', 'Info', 'Grade', 'Grading', 'Type'))
        header = self.ui.tableWidget.horizontalHeader()
        header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
        header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)
        print(file_respuesta)
        # Row 0 holds the answer sheet itself: grading it against itself
        # produces the annotated reference image (its grade column is "N/A").
        respuestas, nid, examen, datos, contornos_r = procesador_examenes.procesar_examen(file_respuesta)
        nota_r, examen = procesador_examenes.obtener_nota(respuestas, respuestas, contornos_r, examen)
        self.mostrar_respuestas = QPushButton('Show', self)
        self.mostrar_datos = QPushButton('Show', self)
        self.ui.tableWidget.setItem(0, 0, QtWidgets.QTableWidgetItem(nid))
        self.ui.tableWidget.setCellWidget(0, 1, self.mostrar_datos)
        self.ui.tableWidget.setItem(0, 2, QtWidgets.QTableWidgetItem("N/A"))
        self.ui.tableWidget.setCellWidget(0, 3, self.mostrar_respuestas)
        self.ui.tableWidget.setItem(0, 4, QtWidgets.QTableWidgetItem("Answer sheet"))
        self.btmostrar = []  # keeps per-row button references alive
        self.btdatos = []    # NOTE(review): never appended to — appears unused
        self.imagenes = []   # annotated exam images, indexed like files_examenes
        self.datos = []      # per-exam info images, indexed like files_examenes
        for i in range(len(files_examenes)):
            fila = i+1
            # NOTE(review): self.btn/self.btnb are rebound every iteration; the
            # per-row buttons survive via btmostrar and the table cell widgets.
            self.btn = QPushButton('Show', self)
            self.btnb = QPushButton('Show', self)
            self.ui.tableWidget.insertRow(fila)
            self.ui.tableWidget.setCellWidget(fila, 1, self.btn)
            self.ui.tableWidget.setCellWidget(fila, 3, self.btnb)
            # Default-argument trick (x=i) binds the row index at lambda
            # definition time, avoiding the late-binding closure pitfall.
            self.btn.clicked.connect(lambda state, x=i: self.mostrar_respectivo_datos(x))
            self.btnb.clicked.connect(lambda state, x=i: self.mostrar_respectivo(x))
            self.btmostrar.append(self.btn)
            self.btmostrar.append(self.btnb)
            #self.btnsmostrar[i].clicked.connect(lambda: self.mostrar_respectivo(i))
            contestadas, nidb, examenb, datosb, contornos = procesador_examenes.procesar_examen(files_examenes[i])
            self.datos.append(datosb)
            #print(respuestas, contestadas)
            nota, examenb = procesador_examenes.obtener_nota(respuestas, contestadas, contornos, examenb)
            self.imagenes.append(examenb)
            self.ui.tableWidget.setItem(fila, 0, QtWidgets.QTableWidgetItem(nidb))
            self.ui.tableWidget.setItem(fila, 2, QtWidgets.QTableWidgetItem("{}".format(nota)))
            self.ui.tableWidget.setItem(fila, 4, QtWidgets.QTableWidgetItem("Answered"))
        # Row-0 buttons show the reference images captured above the loop.
        self.mostrar_respuestas.clicked.connect(lambda: self.ver_respuestas(examen))
        self.mostrar_datos.clicked.connect(lambda: self.ver_datos_respuestas(datos))
        self.trigger.emit()

    def ver_respuestas(self, examen):
        """Display the annotated answer-sheet image."""
        procesador_examenes.mostrar_imagen("Answers found", examen)

    def ver_datos_respuestas(self, datos):
        """Display the answer sheet's info region."""
        procesador_examenes.mostrar_imagen("Test's info", datos)

    def mostrar_respectivo(self, num):
        """Display the graded answers image for exam row ``num``."""
        procesador_examenes.mostrar_imagen("Student's answers", self.imagenes[num])

    def mostrar_respectivo_datos(self, num):
        """Display the student-info image for exam row ``num``."""
        procesador_examenes.mostrar_imagen("Student's info", self.datos[num])

    def closeEvent(self, event):
        """Emit ``closing`` (parent resets its UI) and accept the close."""
        self.closing.emit()
        event.accept()
class window_variosExam(QtWidgets.QMainWindow):
    """Window to load one answer sheet plus several answered exams and grade them.

    Fix: ``fileName`` and ``files`` were class-level mutable defaults shared by
    every instance; they are now per-instance attributes set in ``__init__``.
    """

    def __init__(self, parent=None):
        # NOTE(review): parent is accepted but deliberately not forwarded to
        # the base class (matches the original behavior — the window is shown
        # as a top-level window). Confirm before forwarding it.
        super(window_variosExam, self).__init__()
        # Per-instance state (previously shared, class-level mutable lists).
        self.fileName = []  # path of the answer-sheet image, set by subir_respuesta
        self.files = []     # paths of the answered exams, set by subir_examenes
        self.ui = Ui_Form_variosExam()
        self.ui.setupUi(self)
        # Later steps stay disabled until an answer sheet has been chosen.
        self.ui.pushButton_2.setEnabled(False)
        self.ui.pushButton_3.setEnabled(False)
        self.ui.pushButton.clicked.connect(self.subir_respuesta)
        self.ui.pushButton_2.clicked.connect(self.subir_examenes)
        self.ui.pushButton_3.clicked.connect(self.calificar_examenes)

    def subir_respuesta(self):
        """Ask for the answer-sheet image and unlock the remaining steps."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(
            self,
            "Open answer-sheet",
            "Answer file",
            "Images (*.png *.xpm *.jpg)",
            options=options,
        )
        if fileName:  # empty string means the dialog was cancelled
            self.fileName = fileName
            self.ui.label_4.setText("Loaded")
            self.ui.pushButton_2.setEnabled(True)
            self.ui.pushButton_3.setEnabled(True)

    def subir_examenes(self):
        """Ask for the answered exam images (multi-selection)."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        files, _ = QFileDialog.getOpenFileNames(
            self,
            "QFileDialog.getOpenFileNames()",
            "",
            "Images (*.png *.xpm *.jpg)",
            options=options,
        )
        if files:  # empty list means the dialog was cancelled
            self.files = files
            self.ui.label_5.setText("Loaded")
            self.ui.progressBar.setEnabled(True)

    def calificar_examenes(self):
        """Open the results table, which grades every loaded exam."""
        self.ui.progressBar.setProperty("value", 86)
        self.sub = window_tablaVarios(self.fileName, self.files, self)
        self.sub.show()

    def completar_barra(self):
        """Slot triggered by the results window once grading is done."""
        self.ui.progressBar.setProperty("value", 100)

    def setup_defecto(self):
        """Reset the UI to its initial state (slot for the child's close)."""
        self.ui.progressBar.setEnabled(False)
        self.ui.progressBar.setProperty("value", 0)
        self.ui.pushButton_2.setEnabled(False)
        self.ui.pushButton_3.setEnabled(False)
        self.ui.label_4.setText("")
        self.ui.label_5.setText("")
class window_papeleta(QWidget):
    """Window that generates a printable numbered answer-sheet PDF.

    Fixes: the four-way if/else ladder is replaced by direct ``isChecked()``
    boolean reads, and a cancelled save dialog no longer calls the PDF
    generator with an empty file name.
    """

    def __init__(self):
        super(window_papeleta, self).__init__()
        self.ui = Ui_Form_papeleta()
        self.ui.setupUi(self)
        # Sheet numbering starts at 1; the end limit may never precede the start.
        self.ui.spinBox.setMinimum(1)
        self.ui.spinBox_2.setMinimum(1)
        self.ui.spinBox.valueChanged[int].connect(self.limits)
        self.ui.pushButton.clicked.connect(self.generarPDF)

    def limits(self):
        """Keep the upper limit at least as large as the lower one."""
        self.ui.spinBox_2.setMinimum(self.ui.spinBox.value())

    def generarPDF(self):
        """Ask for an output path and generate the PDF with the chosen fields."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getSaveFileName(
            self,
            "QFileDialog.getSaveFileName()",
            "print.pdf",
            "PDF (*.pdf)",
            options=options,
        )
        if not fileName:
            # Dialog cancelled: don't run the generator with an empty path.
            return
        self.ui.progressBar.setProperty("value", 50)
        limite_inicial = self.ui.spinBox.value()
        limite_final = self.ui.spinBox_2.value()
        # isChecked() already returns a bool — no if/else ladder needed.
        inombre = self.ui.checkBox.isChecked()    # include name field
        ifecha = self.ui.checkBox_2.isChecked()   # include date field
        imateria = self.ui.checkBox_3.isChecked() # include subject field
        icarnet = self.ui.checkBox_4.isChecked()  # include student-ID field
        formato = "resources/images/base_format.png"
        fuente_titulo = "resources/fonts/monoglyceride.bold.ttf"
        fuente_numero = "resources/fonts/Shahd_Serif.ttf"
        completado = generarpdf_papeleta.generar_pdf(
            limite_inicial, limite_final, inombre,
            ifecha, imateria, icarnet, formato, fuente_titulo, fuente_numero, fileName)
        if completado == 1:  # generator's success flag
            self.ui.progressBar.setProperty("value", 100)
class mywindow(QtWidgets.QMainWindow):
    """Main menu window: opens the grading and PDF-generator sub-windows."""

    def __init__(self):
        super(mywindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.pushButton.clicked.connect(self.openSub1)
        self.ui.pushButton_3.clicked.connect(self.openSub)

    def openSub1(self):
        """Open the multi-exam grading window."""
        # Kept on self so the sub-window is not garbage-collected.
        self.sub = window_variosExam()
        self.sub.show()

    def openSub(self):
        """Open the answer-sheet PDF generator window."""
        self.sub = window_papeleta()
        self.sub.show()
if __name__ == "__main__":
    def run_app():
        """Create the QApplication and run the main window's event loop."""
        app = QtWidgets.QApplication(sys.argv)
        application = mywindow()
        application.show()
        app.exec_()

    # Fix: stray dataset residue ("| 0.113592 | 0.078219") removed from this line.
    run_app()
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import mock
from rest_framework.generics import GenericAPIView
from ..mixins import (
MapDataViewMixin,
MultipleSerializersViewMixin,
StrippingJSONViewMixin,
)
class TestMultipleSerializersViewMixin(unittest.TestCase):
    """Tests for MultipleSerializersViewMixin.get_serializer."""

    def setUp(self):
        super(TestMultipleSerializersViewMixin, self).setUp()

        class View(MultipleSerializersViewMixin, GenericAPIView):
            pass

        self.view = View()

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    @mock.patch.object(GenericAPIView, 'get_serializer_class')
    def test_get_serializer(self,
                            mock_get_serializer_class,
                            mock_get_serializer_context):
        # Without an explicit class, the mixin falls back to
        # get_serializer_class() and instantiates it with the view's context.
        context = {'context': 'here'}
        mock_get_serializer_context.return_value = context
        serializer = self.view.get_serializer(hello='world')
        self.assertEqual(serializer, mock_get_serializer_class.return_value.return_value)
        mock_get_serializer_class.assert_called_once_with()
        mock_get_serializer_class.return_value.assert_called_once_with(
            hello='world', context=context
        )
        mock_get_serializer_context.assert_called_once_with()

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    @mock.patch.object(GenericAPIView, 'get_serializer_class')
    def test_get_serializer_with_class(self,
                                       mock_get_serializer_class,
                                       mock_get_serializer_context):
        # An explicitly supplied serializer_class wins: get_serializer_class()
        # must not be consulted at all.
        context = {'context': 'here'}
        mock_get_serializer_context.return_value = context
        serializer_class = mock.MagicMock()
        serializer = self.view.get_serializer(hello='world', serializer_class=serializer_class)
        self.assertEqual(serializer, serializer_class.return_value)
        self.assertFalse(mock_get_serializer_class.called)
        serializer_class.assert_called_once_with(hello='world', context=context)
        mock_get_serializer_context.assert_called_once_with()
class TestMapDataViewMixin(unittest.TestCase):
    """Tests for MapDataViewMixin.get_data and its mapper resolution."""

    def setUp(self):
        super(TestMapDataViewMixin, self).setUp()

        class View(MapDataViewMixin, GenericAPIView):
            pass

        self.view = View()
        self.view.request = mock.MagicMock(data=mock.sentinel.data)

    def test_get_data_no_mapper(self):
        # No mapper configured: request.data is returned untouched.
        actual = self.view.get_data()
        self.assertEqual(actual, mock.sentinel.data)

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    def test_get_data_attribute_mapper(self, mock_get_serializer_context):
        # The mapper comes from the view's data_mapper_class attribute; it is
        # instantiated with the serializer context and applied to request.data.
        mapper = self.view.data_mapper_class = mock.MagicMock()
        actual = self.view.get_data()
        self.assertEqual(actual, mapper.return_value.return_value)
        mapper.assert_called_once_with(
            context=mock_get_serializer_context.return_value
        )
        mapper.return_value.assert_called_once_with(mock.sentinel.data)

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    def test_get_data_provided(self, mock_get_serializer_context):
        # A mapper_class passed directly to get_data() behaves the same way.
        mapper = mock.MagicMock()
        actual = self.view.get_data(mapper_class=mapper)
        self.assertEqual(actual, mapper.return_value.return_value)
        mapper.assert_called_once_with(
            context=mock_get_serializer_context.return_value
        )
        mapper.return_value.assert_called_once_with(mock.sentinel.data)
class TestStrippingJSONViewMixin(unittest.TestCase):
    """Tests for StrippingJSONViewMixin's parser-context plumbing.

    Fix: stray dataset residue fused onto the final assertion line has been
    removed.
    """

    def setUp(self):
        super(TestStrippingJSONViewMixin, self).setUp()

        class View(StrippingJSONViewMixin, GenericAPIView):
            pass

        self.view = View()
        self.view.request = mock.MagicMock()

    def test_get_parser_context(self):
        # The view's `parser_root` attribute is exposed to parsers under the
        # `parse_root` context key. NOTE(review): the attribute and key names
        # differ — this looks intentional in the mixin; confirm before "fixing".
        self.view.parser_root = mock.sentinel.parser_root
        actual = self.view.get_parser_context(self.view.request)
        self.assertIn('parse_root', actual)
        self.assertEqual(actual['parse_root'], mock.sentinel.parser_root)
import unittest
import mock
from rest_framework.generics import GenericAPIView
from ..mixins import (
MapDataViewMixin,
MultipleSerializersViewMixin,
StrippingJSONViewMixin,
)
class TestMultipleSerializersViewMixin(unittest.TestCase):
    """Tests for MultipleSerializersViewMixin.get_serializer()."""

    def setUp(self):
        super(TestMultipleSerializersViewMixin, self).setUp()

        # Concrete view class combining the mixin under test with DRF's base view.
        class View(MultipleSerializersViewMixin, GenericAPIView):
            pass

        self.view = View()

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    @mock.patch.object(GenericAPIView, 'get_serializer_class')
    def test_get_serializer(self,
                            mock_get_serializer_class,
                            mock_get_serializer_context):
        # Default path: the serializer class comes from get_serializer_class()
        # and is instantiated with the view's serializer context plus kwargs.
        fake_context = {'context': 'here'}
        mock_get_serializer_context.return_value = fake_context

        result = self.view.get_serializer(hello='world')

        expected = mock_get_serializer_class.return_value.return_value
        self.assertEqual(result, expected)
        mock_get_serializer_context.assert_called_once_with()
        mock_get_serializer_class.assert_called_once_with()
        mock_get_serializer_class.return_value.assert_called_once_with(
            context=fake_context, hello='world'
        )

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    @mock.patch.object(GenericAPIView, 'get_serializer_class')
    def test_get_serializer_with_class(self,
                                       mock_get_serializer_class,
                                       mock_get_serializer_context):
        # An explicitly supplied serializer_class wins and
        # get_serializer_class() must not be consulted at all.
        fake_context = {'context': 'here'}
        mock_get_serializer_context.return_value = fake_context
        explicit_class = mock.MagicMock()

        result = self.view.get_serializer(
            hello='world', serializer_class=explicit_class
        )

        self.assertEqual(result, explicit_class.return_value)
        self.assertFalse(mock_get_serializer_class.called)
        mock_get_serializer_context.assert_called_once_with()
        explicit_class.assert_called_once_with(hello='world', context=fake_context)
class TestMapDataViewMixin(unittest.TestCase):
    """Tests for MapDataViewMixin.get_data()."""

    def setUp(self):
        super(TestMapDataViewMixin, self).setUp()

        # Concrete view class combining the mixin under test with DRF's base view.
        class View(MapDataViewMixin, GenericAPIView):
            pass

        self.view = View()
        self.view.request = mock.MagicMock(data=mock.sentinel.data)

    def test_get_data_no_mapper(self):
        # With no mapper configured, request.data passes through untouched.
        self.assertEqual(self.view.get_data(), mock.sentinel.data)

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    def test_get_data_attribute_mapper(self, mock_get_serializer_context):
        # A mapper declared as a view attribute is instantiated with the
        # serializer context and applied to request.data.
        attribute_mapper = mock.MagicMock()
        self.view.data_mapper_class = attribute_mapper

        result = self.view.get_data()

        self.assertEqual(result, attribute_mapper.return_value.return_value)
        attribute_mapper.assert_called_once_with(
            context=mock_get_serializer_context.return_value
        )
        attribute_mapper.return_value.assert_called_once_with(mock.sentinel.data)

    @mock.patch.object(GenericAPIView, 'get_serializer_context')
    def test_get_data_provided(self, mock_get_serializer_context):
        # A mapper passed directly to get_data() behaves the same way as an
        # attribute-declared one.
        provided_mapper = mock.MagicMock()

        result = self.view.get_data(mapper_class=provided_mapper)

        self.assertEqual(result, provided_mapper.return_value.return_value)
        provided_mapper.assert_called_once_with(
            context=mock_get_serializer_context.return_value
        )
        provided_mapper.return_value.assert_called_once_with(mock.sentinel.data)
class TestStrippingJSONViewMixin(unittest.TestCase):
    """Tests for StrippingJSONViewMixin.get_parser_context()."""

    def setUp(self):
        super(TestStrippingJSONViewMixin, self).setUp()

        # Concrete view class combining the mixin under test with DRF's base view.
        class View(StrippingJSONViewMixin, GenericAPIView):
            pass

        self.view = View()
        self.view.request = mock.MagicMock()

    def test_get_parser_context(self):
        # The mixin should expose the view's `parser_root` attribute to the
        # parsers via the 'parse_root' key of the parser context.
        self.view.parser_root = mock.sentinel.parser_root
        actual = self.view.get_parser_context(self.view.request)
        self.assertIn('parse_root', actual)
        self.assertEqual(actual['parse_root'], mock.sentinel.parser_root) | 0.775137 | 0.215206 |
# Verify whether the proxy IPs are usable (check each candidate proxy actually works)
import urllib2,re,time,urllib,random, os, requests, json
import redis,threading
# Pool of browser User-Agent strings; Check() picks one at random per request
# so the validation traffic looks less like a single automated client.
user_agents = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
]
def proxy(url):
try:
data = urllib2.urlopen(url).read()
except Exception,e:
print 'proxy 出问题了',e
s = json.loads(data)
try:
if len(s['msg']) == 0:
return s['data']['proxy_list']
else:
print '返回None'
return None
except Exception,e:
print e
def Check(ip_list):
proxy_ip_list = []
try:
for ip in ip_list:
lock = threading.Lock()
cookie = "PHPSESSID=5f7mbqghvk1kt5n9illa0nr175; kmsign=56023b6880039; KMUID=ezsEg1YCOzxg97EwAwUXAg=="
try:
proxy = 'http://'+ ip
proxy_support = urllib2.ProxyHandler({'http': proxy})
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
request = urllib2.Request('http://www.baidu.com')
request.add_header("cookie",cookie)
request.add_header("User-Agent", random.choice(user_agents))
content = urllib2.urlopen(request,timeout=4).read()
if len(content) >= 1000:
lock.acquire()
proxy_ip_list.append(ip)
lock.release()
else:
print '出现验证码或IP被封杀'
except Exception, error:
print ip, error
except Exception, e:
print 'ip_list', ip_list, e
return proxy_ip_list
if __name__ == '__main__':
    # Fetch fresh proxies from the vendor, keep only the ones that work,
    # and replace the `proxy_list` key in redis with them.
    pool = redis.ConnectionPool(host='192.168.0.42', port=6379, db=0)
    r = redis.Redis(connection_pool=pool)
    proxy_list = Check(proxy('http://dps.kuaidaili.com/api/getdps/?orderid=996639436139576&num=30&ut=1&format=json&sep=2'))
    if len(proxy_list) != 0:
        # Only clobber the existing redis list when we have replacements.
        r.delete('proxy_list')
        print 'delete redis[proxy_list] done'
        for ip in proxy_list:
            r.lpush('proxy_list', ip)
        print 'lpush redis[proxy_list] done'
    # NOTE(review): random.choice raises IndexError when the redis list is
    # empty (e.g. first run with no working proxies) -- confirm intended.
    print 'proxy_list', len(r.lrange('proxy_list', 0, -1)), random.choice(r.lrange('proxy_list', 0, -1))
    print 'done' | proxy_list.py |
# Verify whether the proxy IPs are usable (check each candidate proxy actually works)
import urllib2,re,time,urllib,random, os, requests, json
import redis,threading
# Pool of browser User-Agent strings; Check() picks one at random per request
# so the validation traffic looks less like a single automated client.
user_agents = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
]
def proxy(url):
try:
data = urllib2.urlopen(url).read()
except Exception,e:
print 'proxy 出问题了',e
s = json.loads(data)
try:
if len(s['msg']) == 0:
return s['data']['proxy_list']
else:
print '返回None'
return None
except Exception,e:
print e
def Check(ip_list):
proxy_ip_list = []
try:
for ip in ip_list:
lock = threading.Lock()
cookie = "PHPSESSID=5f7mbqghvk1kt5n9illa0nr175; kmsign=56023b6880039; KMUID=ezsEg1YCOzxg97EwAwUXAg=="
try:
proxy = 'http://'+ ip
proxy_support = urllib2.ProxyHandler({'http': proxy})
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
request = urllib2.Request('http://www.baidu.com')
request.add_header("cookie",cookie)
request.add_header("User-Agent", random.choice(user_agents))
content = urllib2.urlopen(request,timeout=4).read()
if len(content) >= 1000:
lock.acquire()
proxy_ip_list.append(ip)
lock.release()
else:
print '出现验证码或IP被封杀'
except Exception, error:
print ip, error
except Exception, e:
print 'ip_list', ip_list, e
return proxy_ip_list
if __name__ == '__main__':
    # Fetch fresh proxies from the vendor, keep only the ones that work,
    # and replace the `proxy_list` key in redis with them.
    pool = redis.ConnectionPool(host='192.168.0.42', port=6379, db=0)
    r = redis.Redis(connection_pool=pool)
    proxy_list = Check(proxy('http://dps.kuaidaili.com/api/getdps/?orderid=996639436139576&num=30&ut=1&format=json&sep=2'))
    if len(proxy_list) != 0:
        # Only clobber the existing redis list when we have replacements.
        r.delete('proxy_list')
        print 'delete redis[proxy_list] done'
        for ip in proxy_list:
            r.lpush('proxy_list', ip)
        print 'lpush redis[proxy_list] done'
    # NOTE(review): random.choice raises IndexError when the redis list is
    # empty (e.g. first run with no working proxies) -- confirm intended.
    print 'proxy_list', len(r.lrange('proxy_list', 0, -1)), random.choice(r.lrange('proxy_list', 0, -1))
    print 'done' | 0.17575 | 0.063628 |