content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
import os
import numpy as np
import tensorflow as tf
from utils.data_reader import H5DataLoader, H53DDataLoader
from utils.img_utils import imsave
from utils import ops
"""
This module builds a standard U-NET for semantic segmentation.
If want VAE using pixelDCL, please visit this code:
https://github.com/HongyangGao/UVAE
"""
class PixelDCN(object):
    """Standard U-Net for semantic segmentation (TensorFlow 1.x graph mode).

    The topology (2D or 3D), tensor shapes and all hyper-parameters come
    from the ``conf`` namespace.  For a VAE variant using pixelDCL see
    https://github.com/HongyangGao/UVAE.
    """

    def __init__(self, sess, conf):
        """Build the graph in *sess* and prepare model/log/sample dirs."""
        self.sess = sess
        self.conf = conf
        self.def_params()
        # Create output directories up front so saver/writer never fail.
        if not os.path.exists(conf.modeldir):
            os.makedirs(conf.modeldir)
        if not os.path.exists(conf.logdir):
            os.makedirs(conf.logdir)
        if not os.path.exists(conf.sampledir):
            os.makedirs(conf.sampledir)
        self.configure_networks()
        self.train_summary = self.config_summary('train')
        self.valid_summary = self.config_summary('valid')

    def def_params(self):
        """Derive kernel/pool sizes and tensor shapes from conf.data_type."""
        self.data_format = 'NHWC'
        if self.conf.data_type == '3D':
            self.conv_size = (3, 3, 3)
            self.pool_size = (2, 2, 2)
            # Spatial axes / channel axis for NDHWC layout.
            self.axis, self.channel_axis = (1, 2, 3), 4
            self.input_shape = [
                self.conf.batch, self.conf.depth, self.conf.height,
                self.conf.width, self.conf.channel]
            self.output_shape = [
                self.conf.batch, self.conf.depth, self.conf.height,
                self.conf.width]
        else:
            self.conv_size = (3, 3)
            self.pool_size = (2, 2)
            self.axis, self.channel_axis = (1, 2), 3
            self.input_shape = [
                self.conf.batch, self.conf.height, self.conf.width,
                self.conf.channel]
            self.output_shape = [
                self.conf.batch, self.conf.height, self.conf.width]

    def configure_networks(self):
        """Build the graph, optimizer, saver and summary writer."""
        self.build_network()
        optimizer = tf.train.AdamOptimizer(self.conf.learning_rate)
        self.train_op = optimizer.minimize(self.loss_op, name='train_op')
        tf.set_random_seed(self.conf.random_seed)
        self.sess.run(tf.global_variables_initializer())
        trainable_vars = tf.trainable_variables()
        # max_to_keep=0 keeps every checkpoint ever written.
        self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=0)
        self.writer = tf.summary.FileWriter(self.conf.logdir, self.sess.graph)

    def build_network(self):
        """Create input/label placeholders, predictions and loss ops."""
        self.inputs = tf.placeholder(
            tf.float32, self.input_shape, name='inputs')
        self.labels = tf.placeholder(
            tf.int64, self.output_shape, name='labels')
        self.predictions = self.inference(self.inputs)
        self.cal_loss()

    def cal_loss(self):
        """Define the loss, pixel-accuracy and mean-IoU metric ops."""
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # Only pixels whose label is < conf.channel contribute to mean IoU;
        # the weights tensor zeroes out everything else.  (A dead
        # `labels = tf.multiply(self.labels, weights)` was removed here: its
        # result was never used — mean_iou always received self.labels.)
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
            tf.int64, name='m_iou/weights')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            self.labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')

    def config_summary(self, name):
        """Return a merged summary op for the 'train' or 'valid' phase."""
        summarys = []
        summarys.append(tf.summary.scalar(name+'/loss', self.loss_op))
        summarys.append(tf.summary.scalar(name+'/accuracy', self.accuracy_op))
        # Image summaries only make sense for 2D data, and only for the
        # validation phase.
        if name == 'valid' and self.conf.data_type == '2D':
            summarys.append(
                tf.summary.image(name+'/input', self.inputs, max_outputs=100))
            summarys.append(
                tf.summary.image(
                    name+'/annotation',
                    tf.cast(tf.expand_dims(self.labels, -1),
                            tf.float32), max_outputs=100))
            summarys.append(
                tf.summary.image(
                    name+'/prediction',
                    tf.cast(tf.expand_dims(self.decoded_preds, -1),
                            tf.float32), max_outputs=100))
        summary = tf.summary.merge(summarys)
        return summary

    def inference(self, inputs):
        """U-Net forward pass: encoder, bottom block, decoder with skips."""
        outputs = inputs
        down_outputs = []
        for layer_index in range(self.conf.network_depth-1):
            is_first = True if not layer_index else False
            name = 'down%s' % layer_index
            outputs = self.build_down_block(
                outputs, name, down_outputs, is_first)
        outputs = self.build_bottom_block(outputs, 'bottom')
        # Decoder mirrors the encoder, consuming skip connections in
        # reverse order.
        for layer_index in range(self.conf.network_depth-2, -1, -1):
            is_final = True if layer_index == 0 else False
            name = 'up%s' % layer_index
            down_inputs = down_outputs[layer_index]
            outputs = self.build_up_block(
                outputs, down_inputs, name, is_final)
        return outputs

    def build_down_block(self, inputs, name, down_outputs, first=False):
        """One encoder stage: conv, conv (saved as skip), then pool."""
        # First stage starts from conf.start_channel_num; afterwards the
        # channel count doubles at each stage.
        out_num = self.conf.start_channel_num if first else 2 * \
            inputs.shape[self.channel_axis].value
        conv1 = ops.conv(inputs, out_num, self.conv_size,
                         name+'/conv1', self.conf.data_type)
        conv2 = ops.conv(conv1, out_num, self.conv_size,
                         name+'/conv2', self.conf.data_type)
        down_outputs.append(conv2)
        pool = ops.pool(conv2, self.pool_size, name +
                        '/pool', self.conf.data_type)
        return pool

    def build_bottom_block(self, inputs, name):
        """Bottleneck: expand channels 2x, then contract back."""
        out_num = inputs.shape[self.channel_axis].value
        conv1 = ops.conv(
            inputs, 2*out_num, self.conv_size, name+'/conv1',
            self.conf.data_type)
        conv2 = ops.conv(
            conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
        return conv2

    def build_up_block(self, inputs, down_inputs, name, final=False):
        """One decoder stage: deconv, concat skip, conv, conv."""
        out_num = inputs.shape[self.channel_axis].value
        conv1 = self.deconv_func()(
            inputs, out_num, self.conv_size, name+'/conv1',
            self.conf.data_type, action=self.conf.action)
        conv1 = tf.concat(
            [conv1, down_inputs], self.channel_axis, name=name+'/concat')
        conv2 = self.conv_func()(
            conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
        # Floor division keeps the channel count an int on Python 3 as
        # well ('/' there would yield a float and break the conv builder).
        out_num = self.conf.class_num if final else out_num // 2
        conv3 = ops.conv(
            conv2, out_num, self.conv_size, name+'/conv3', self.conf.data_type,
            not final)
        return conv3

    def deconv_func(self):
        """Return the deconvolution builder named by conf.deconv_name."""
        return getattr(ops, self.conf.deconv_name)

    def conv_func(self):
        """Return the convolution builder named by conf.conv_name."""
        return getattr(ops, self.conf.conv_name)

    def save_summary(self, summary, step):
        """Write *summary* to the event file at *step*."""
        print('---->summarizing', step)
        self.writer.add_summary(summary, step)

    def train(self):
        """Run the training loop with periodic validation and checkpoints."""
        if self.conf.reload_step > 0:
            self.reload(self.conf.reload_step)
        if self.conf.data_type == '2D':
            train_reader = H5DataLoader(
                self.conf.data_dir+self.conf.train_data)
            valid_reader = H5DataLoader(
                self.conf.data_dir+self.conf.valid_data)
        else:
            train_reader = H53DDataLoader(
                self.conf.data_dir+self.conf.train_data, self.input_shape)
            valid_reader = H53DDataLoader(
                self.conf.data_dir+self.conf.valid_data, self.input_shape)
        for epoch_num in range(self.conf.max_step+1):
            # Validation pass (no train step in this branch on its own; the
            # if/else below still runs a training step this iteration).
            if epoch_num and epoch_num % self.conf.test_interval == 0:
                inputs, labels = valid_reader.next_batch(self.conf.batch)
                feed_dict = {self.inputs: inputs,
                             self.labels: labels}
                loss, summary = self.sess.run(
                    [self.loss_op, self.valid_summary], feed_dict=feed_dict)
                self.save_summary(summary, epoch_num+self.conf.reload_step)
                print('----testing loss', loss)
            if epoch_num and epoch_num % self.conf.summary_interval == 0:
                # Training step that also records summaries.
                inputs, labels = train_reader.next_batch(self.conf.batch)
                feed_dict = {self.inputs: inputs,
                             self.labels: labels}
                loss, _, summary = self.sess.run(
                    [self.loss_op, self.train_op, self.train_summary],
                    feed_dict=feed_dict)
                self.save_summary(summary, epoch_num+self.conf.reload_step)
            else:
                # Plain training step.
                inputs, labels = train_reader.next_batch(self.conf.batch)
                feed_dict = {self.inputs: inputs,
                             self.labels: labels}
                loss, _ = self.sess.run(
                    [self.loss_op, self.train_op], feed_dict=feed_dict)
                print('----training loss', loss)
            if epoch_num and epoch_num % self.conf.save_interval == 0:
                self.save(epoch_num+self.conf.reload_step)

    def test(self):
        """Evaluate loss/accuracy/mean-IoU over the whole test set."""
        print('---->testing ', self.conf.test_step)
        if self.conf.test_step > 0:
            self.reload(self.conf.test_step)
        else:
            print("please set a reasonable test_step")
            return
        if self.conf.data_type == '2D':
            test_reader = H5DataLoader(
                self.conf.data_dir+self.conf.test_data, False)
        else:
            test_reader = H53DDataLoader(
                self.conf.data_dir+self.conf.test_data, self.input_shape)
        # mean_iou keeps its confusion matrix in local variables.
        self.sess.run(tf.local_variables_initializer())
        count = 0
        losses = []
        accuracies = []
        m_ious = []
        while True:
            inputs, labels = test_reader.next_batch(self.conf.batch)
            # A short batch signals the end of the data set.
            if inputs.shape[0] < self.conf.batch:
                break
            feed_dict = {self.inputs: inputs, self.labels: labels}
            loss, accuracy, m_iou, _ = self.sess.run(
                [self.loss_op, self.accuracy_op, self.m_iou, self.miou_op],
                feed_dict=feed_dict)
            print('values----->', loss, accuracy, m_iou)
            count += 1
            losses.append(loss)
            accuracies.append(accuracy)
            m_ious.append(m_iou)
        print('Loss: ', np.mean(losses))
        print('Accuracy: ', np.mean(accuracies))
        # mean_iou is a running metric, so the last value is the aggregate.
        print('M_iou: ', m_ious[-1])

    def predict(self):
        """Run inference over the test set and save predictions as PNGs."""
        print('---->predicting ', self.conf.test_step)
        if self.conf.test_step > 0:
            self.reload(self.conf.test_step)
        else:
            print("please set a reasonable test_step")
            return
        if self.conf.data_type == '2D':
            test_reader = H5DataLoader(
                self.conf.data_dir+self.conf.test_data, False)
        else:
            test_reader = H53DDataLoader(
                self.conf.data_dir+self.conf.test_data, self.input_shape)
        predictions = []
        while True:
            inputs, labels = test_reader.next_batch(self.conf.batch)
            if inputs.shape[0] < self.conf.batch:
                break
            feed_dict = {self.inputs: inputs, self.labels: labels}
            predictions.append(self.sess.run(
                self.decoded_preds, feed_dict=feed_dict))
        print('----->saving predictions')
        for index, prediction in enumerate(predictions):
            for i in range(prediction.shape[0]):
                imsave(prediction[i], self.conf.sampledir +
                       str(index*prediction.shape[0]+i)+'.png')

    def save(self, step):
        """Write a checkpoint for *step* into conf.modeldir."""
        print('---->saving', step)
        checkpoint_path = os.path.join(
            self.conf.modeldir, self.conf.model_name)
        self.saver.save(self.sess, checkpoint_path, global_step=step)

    def reload(self, step):
        """Restore the checkpoint written for *step*, if it exists."""
        checkpoint_path = os.path.join(
            self.conf.modeldir, self.conf.model_name)
        model_path = checkpoint_path+'-'+str(step)
        if not os.path.exists(model_path+'.meta'):
            print('------- no such checkpoint', model_path)
            return
        self.saver.restore(self.sess, model_path)
| network.py | 12,654 | weights = tf.cast( tf.greater(self.decoded_preds, 0, name='m_iou/greater'), tf.int32, name='m_iou/weights') | 115 | en | 0.194852 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from .. import tag
def schedule_conv2d_hwcn(outs):
    """Schedule for conv2d_hwcn and any element-wise operations.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d_hwcn in the format
        of an array of tensors.
    Returns
    -------
    s: Schedule
        The computation schedule for conv2d_hwcn.
    """
    outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    sch = tvm.create_schedule([x.op for x in outs])

    def schedule(Apad, W, B):
        """Schedule conv2d_hwcn"""
        sch[Apad].compute_inline()
        # Stage the padded input and the weights through shared memory,
        # then through per-thread "local" scope, for the block computing B.
        AA = sch.cache_read(Apad, "shared", [B])
        WW = sch.cache_read(W, "shared", [B])
        AL = sch.cache_read(AA, "local", [B])
        WL = sch.cache_read(WW, "local", [B])
        if B.op in sch.outputs:
            Out = B
            BL = sch.cache_write(Out, "local")
        else:
            # B is fused with a following element-wise op: accumulate B in
            # local scope and write out through the real output stage.
            Out = sch.outputs[0].output(0)
            sch[B].set_scope("local")
            BL = B
        # Hand-tuned tiling parameters: each thread block covers
        # tile * num_thread elements of both the filter and batch axes;
        # the reduction over input channels is split by `step`.
        tile = 8
        num_thread = 8
        block_factor = tile * num_thread
        step = 8
        vthread = 2
        # CUDA thread/block axis tags.
        block_x = tvm.thread_axis("blockIdx.x")
        block_y = tvm.thread_axis("blockIdx.y")
        block_z = tvm.thread_axis("blockIdx.z")
        thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
        thread_y = tvm.thread_axis((0, num_thread), "threadIdx.y")
        thread_xz = tvm.thread_axis((0, vthread), "vthread", name="vx")
        thread_yz = tvm.thread_axis((0, vthread), "vthread", name="vy")
        # Split the output: spatial axes fused onto blockIdx.z; filter and
        # batch axes tiled across blocks, virtual threads and real threads.
        hi, wi, fi, ni = sch[Out].op.axis
        bz = sch[Out].fuse(hi, wi)
        by, fi = sch[Out].split(fi, factor=block_factor)
        bx, ni = sch[Out].split(ni, factor=block_factor)
        tyz, fi = sch[Out].split(fi, nparts=vthread)
        txz, ni = sch[Out].split(ni, nparts=vthread)
        ty, fi = sch[Out].split(fi, nparts=num_thread)
        tx, ni = sch[Out].split(ni, nparts=num_thread)
        sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
        sch[Out].bind(bz, block_z)
        sch[Out].bind(by, block_y)
        sch[Out].bind(bx, block_x)
        sch[Out].bind(tyz, thread_yz)
        sch[Out].bind(txz, thread_xz)
        sch[Out].bind(ty, thread_y)
        sch[Out].bind(tx, thread_x)
        # Schedule BL local write
        sch[BL].compute_at(sch[Out], tx)
        yi, xi, fi, ni = sch[BL].op.axis
        ry, rx, rc = sch[BL].op.reduce_axis
        rco, rci = sch[BL].split(rc, factor=step)
        sch[BL].reorder(rco, ry, rx, rci, fi, ni)
        # Fuse the outer reduction axes into one loop the shared-memory
        # loads can attach to.
        fuse_index = sch[BL].fuse(ry, rx)
        fuse_index = sch[BL].fuse(fuse_index, rco)
        rx = fuse_index
        sch[AA].compute_at(sch[BL], rx)
        sch[WW].compute_at(sch[BL], rx)
        sch[AL].compute_at(sch[BL], rci)
        sch[WL].compute_at(sch[BL], rci)
        # Schedule for A's shared memory load
        yi, xi, ci, ni = sch[AA].op.axis
        ty, ci = sch[AA].split(ci, nparts=num_thread)
        tx, ni = sch[AA].split(ni, nparts=num_thread)
        _, ni = sch[AA].split(ni, factor=4)
        sch[AA].reorder(ty, tx, yi, xi, ci, ni)
        sch[AA].bind(ty, thread_y)
        sch[AA].bind(tx, thread_x)
        # Vectorized (4-wide) loads from global memory.
        sch[AA].vectorize(ni)
        # Schedule for W's shared memory load
        yi, xi, ci, fi = sch[WW].op.axis
        ty, ci = sch[WW].split(ci, nparts=num_thread)
        tx, fi = sch[WW].split(fi, nparts=num_thread)
        _, fi = sch[WW].split(fi, factor=4)
        sch[WW].reorder(ty, tx, yi, xi, ci, fi)
        sch[WW].bind(ty, thread_y)
        sch[WW].bind(tx, thread_x)
        sch[WW].vectorize(fi)

    scheduled_ops = []

    def traverse(operator):
        """Traverse operators from computation graph"""
        # Inline injective (broadcast/element-wise) ops and recurse into
        # their producers; schedule the conv2d_hwcn op when found.
        if tag.is_broadcast(operator.tag):
            if operator not in sch.outputs:
                sch[operator].compute_inline()
            for tensor in operator.input_tensors:
                if tensor.op.input_tensors and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        elif operator.tag == 'conv2d_hwcn':
            Apad = operator.input_tensors[0]
            W = operator.input_tensors[1]
            # Inline a dilation stage feeding the weights, if present.
            if isinstance(W.op, tvm.tensor.ComputeOp) and 'dilate' in W.op.tag:
                sch[W].compute_inline()
            B = operator.output(0)
            schedule(Apad, W, B)
        else:
            raise RuntimeError("Unsupported operator: %s" % operator.tag)
        scheduled_ops.append(operator)

    traverse(outs[0].op)
    return sch
| topi/python/topi/cuda/conv2d_hwcn.py | 5,390 | Schedule conv2d_hwcn
Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn.
Traverse operators from computation graph
Schedule for conv2d_hwcn with auto fusion
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=invalid-name, too-many-locals, too-many-statements Schedule BL local write Schedule for A's shared memory load Schedule for W's shared memory load | 1,295 | en | 0.830004 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import copy
import dict_utils
import file_extract
from file_extract import AutoParser
import numbers
import operator
import optparse
import os
import re
import six
import string
import sys
import StringIO
def get_uleb128_byte_size(value):
    """Return how many bytes the ULEB128 encoding of *value* occupies."""
    # Each encoded byte carries 7 payload bits; count how many 7-bit
    # groups remain after the first one.
    num_bytes = 1
    remaining = value >> 7
    while remaining:
        num_bytes += 1
        remaining >>= 7
    return num_bytes
def get_uleb128p1_byte_size(value):
    """Return the byte size of *value* encoded as ULEB128p1 (value + 1)."""
    return get_uleb128_byte_size(value + 1)
# ----------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------
MAGIC = "dex\n"  # first four bytes of every dex file
ENDIAN_CONSTANT = 0x12345678  # header value for native-endian files
REVERSE_ENDIAN_CONSTANT = 0x78563412  # header value for byte-swapped files
NO_INDEX = 0xffffffff  # sentinel meaning "no index"
# Signed/unsigned integer ranges for the various encoded value widths.
INT4_MIN = -8
INT4_MAX = 7
INT8_MIN = -128
INT8_MAX = 127
INT16_MIN = -32768
INT16_MAX = 32767
INT24_MIN = -8388608
INT24_MAX = 8388607
INT32_MIN = -2147483648
INT32_MAX = 2147483647
UINT4_MAX = 15
UINT8_MAX = 255
UINT16_MAX = 65535
UINT32_MAX = 4294967295
# ----------------------------------------------------------------------
# access_flags definitions
# ----------------------------------------------------------------------
# Bit flags used in class_def_item, encoded_field and encoded_method.
ACC_PUBLIC = 0x1
ACC_PRIVATE = 0x2
ACC_PROTECTED = 0x4
ACC_STATIC = 0x8
ACC_FINAL = 0x10
ACC_SYNCHRONIZED = 0x20
ACC_VOLATILE = 0x40
ACC_BRIDGE = 0x40
ACC_TRANSIENT = 0x80
ACC_VARARGS = 0x80
ACC_NATIVE = 0x100
ACC_INTERFACE = 0x200
ACC_ABSTRACT = 0x400
ACC_STRICT = 0x800
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
ACC_CONSTRUCTOR = 0x10000
ACC_DECLARED_SYNCHRONIZED = 0x20000
# ----------------------------------------------------------------------
# Value formats
# ----------------------------------------------------------------------
# Format codes for encoded_value entries.
VALUE_BYTE = 0x00
VALUE_SHORT = 0x02
VALUE_CHAR = 0x03
VALUE_INT = 0x04
VALUE_LONG = 0x06
VALUE_FLOAT = 0x10
VALUE_DOUBLE = 0x11
VALUE_METHOD_TYPE = 0x15
VALUE_METHOD_HANDLE = 0x16
VALUE_STRING = 0x17
VALUE_TYPE = 0x18
VALUE_FIELD = 0x19
VALUE_METHOD = 0x1a
VALUE_ENUM = 0x1b
VALUE_ARRAY = 0x1c
VALUE_ANNOTATION = 0x1d
VALUE_NULL = 0x1e
VALUE_BOOLEAN = 0x1f
class ValueFormat(dict_utils.Enum):
    """Enum wrapper naming the encoded_value VALUE_* format codes."""
    enum = {
        'VALUE_BYTE': VALUE_BYTE,
        'VALUE_SHORT': VALUE_SHORT,
        'VALUE_CHAR': VALUE_CHAR,
        'VALUE_INT': VALUE_INT,
        'VALUE_LONG': VALUE_LONG,
        'VALUE_FLOAT': VALUE_FLOAT,
        'VALUE_DOUBLE': VALUE_DOUBLE,
        'VALUE_METHOD_TYPE': VALUE_METHOD_TYPE,
        'VALUE_METHOD_HANDLE': VALUE_METHOD_HANDLE,
        'VALUE_STRING': VALUE_STRING,
        'VALUE_TYPE': VALUE_TYPE,
        'VALUE_FIELD': VALUE_FIELD,
        'VALUE_METHOD': VALUE_METHOD,
        'VALUE_ENUM': VALUE_ENUM,
        'VALUE_ARRAY': VALUE_ARRAY,
        'VALUE_ANNOTATION': VALUE_ANNOTATION,
        'VALUE_NULL': VALUE_NULL,
        'VALUE_BOOLEAN': VALUE_BOOLEAN,
    }

    def __init__(self, data):
        # The format code is read as a uint16 from the extractor.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# ----------------------------------------------------------------------
# Type Codes
# ----------------------------------------------------------------------
# Item type codes used by map_list entries; where an item has a fixed
# per-entry byte size it is given in the trailing comment.
TYPE_HEADER_ITEM = 0x0000  # size = 0x70
TYPE_STRING_ID_ITEM = 0x0001  # size = 0x04
TYPE_TYPE_ID_ITEM = 0x0002  # size = 0x04
TYPE_PROTO_ID_ITEM = 0x0003  # size = 0x0c
TYPE_FIELD_ID_ITEM = 0x0004  # size = 0x08
TYPE_METHOD_ID_ITEM = 0x0005  # size = 0x08
TYPE_CLASS_DEF_ITEM = 0x0006  # size = 0x20
TYPE_CALL_SITE_ID_ITEM = 0x0007  # size = 0x04
TYPE_METHOD_HANDLE_ITEM = 0x0008  # size = 0x08
TYPE_MAP_LIST = 0x1000  # size = 4 + (item.size * 12)
TYPE_TYPE_LIST = 0x1001  # size = 4 + (item.size * 2)
TYPE_ANNOTATION_SET_REF_LIST = 0x1002  # size = 4 + (item.size * 4)
TYPE_ANNOTATION_SET_ITEM = 0x1003  # size = 4 + (item.size * 4)
# The remaining item kinds are variable-length.
TYPE_CLASS_DATA_ITEM = 0x2000
TYPE_CODE_ITEM = 0x2001
TYPE_STRING_DATA_ITEM = 0x2002
TYPE_DEBUG_INFO_ITEM = 0x2003
TYPE_ANNOTATION_ITEM = 0x2004
TYPE_ENCODED_ARRAY_ITEM = 0x2005
TYPE_ANNOTATIONS_DIRECTORY_ITEM = 0x2006
class TypeCode(dict_utils.Enum):
    """Enum wrapper naming the map_list TYPE_* item codes."""
    enum = {
        'TYPE_HEADER_ITEM': TYPE_HEADER_ITEM,
        'TYPE_STRING_ID_ITEM': TYPE_STRING_ID_ITEM,
        'TYPE_TYPE_ID_ITEM': TYPE_TYPE_ID_ITEM,
        'TYPE_PROTO_ID_ITEM': TYPE_PROTO_ID_ITEM,
        'TYPE_FIELD_ID_ITEM': TYPE_FIELD_ID_ITEM,
        'TYPE_METHOD_ID_ITEM': TYPE_METHOD_ID_ITEM,
        'TYPE_CLASS_DEF_ITEM': TYPE_CLASS_DEF_ITEM,
        'TYPE_CALL_SITE_ID_ITEM': TYPE_CALL_SITE_ID_ITEM,
        'TYPE_METHOD_HANDLE_ITEM': TYPE_METHOD_HANDLE_ITEM,
        'TYPE_MAP_LIST': TYPE_MAP_LIST,
        'TYPE_TYPE_LIST': TYPE_TYPE_LIST,
        'TYPE_ANNOTATION_SET_REF_LIST': TYPE_ANNOTATION_SET_REF_LIST,
        'TYPE_ANNOTATION_SET_ITEM': TYPE_ANNOTATION_SET_ITEM,
        'TYPE_CLASS_DATA_ITEM': TYPE_CLASS_DATA_ITEM,
        'TYPE_CODE_ITEM': TYPE_CODE_ITEM,
        'TYPE_STRING_DATA_ITEM': TYPE_STRING_DATA_ITEM,
        'TYPE_DEBUG_INFO_ITEM': TYPE_DEBUG_INFO_ITEM,
        'TYPE_ANNOTATION_ITEM': TYPE_ANNOTATION_ITEM,
        'TYPE_ENCODED_ARRAY_ITEM': TYPE_ENCODED_ARRAY_ITEM,
        'TYPE_ANNOTATIONS_DIRECTORY_ITEM': TYPE_ANNOTATIONS_DIRECTORY_ITEM,
    }

    def __init__(self, data):
        # The type code is read as a uint16 from the extractor.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)

    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # Only the enum name is printed; the extra keyword arguments match
        # the dump() signature AutoParser expects.
        f.write(str(self))
# ----------------------------------------------------------------------
# Method Handle Type Codes
# ----------------------------------------------------------------------
# Kind codes stored in method_handle_item entries.
METHOD_HANDLE_TYPE_STATIC_PUT = 0x00
METHOD_HANDLE_TYPE_STATIC_GET = 0x01
METHOD_HANDLE_TYPE_INSTANCE_PUT = 0x02
METHOD_HANDLE_TYPE_INSTANCE_GET = 0x03
METHOD_HANDLE_TYPE_INVOKE_STATIC = 0x04
METHOD_HANDLE_TYPE_INVOKE_INSTANCE = 0x05
class MethodHandleTypeCode(dict_utils.Enum):
    """Enum wrapper naming the METHOD_HANDLE_TYPE_* kind codes."""
    enum = {
        'METHOD_HANDLE_TYPE_STATIC_PUT': METHOD_HANDLE_TYPE_STATIC_PUT,
        'METHOD_HANDLE_TYPE_STATIC_GET': METHOD_HANDLE_TYPE_STATIC_GET,
        'METHOD_HANDLE_TYPE_INSTANCE_PUT': METHOD_HANDLE_TYPE_INSTANCE_PUT,
        'METHOD_HANDLE_TYPE_INSTANCE_GET': METHOD_HANDLE_TYPE_INSTANCE_GET,
        'METHOD_HANDLE_TYPE_INVOKE_STATIC': METHOD_HANDLE_TYPE_INVOKE_STATIC,
        'METHOD_HANDLE_TYPE_INVOKE_INSTANCE':
            METHOD_HANDLE_TYPE_INVOKE_INSTANCE,
    }

    def __init__(self, data):
        # The kind code is read as a uint16 from the extractor.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# Characters that are emitted verbatim by escape().
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + ' '


def escape(c):
    """Return *c* unchanged if printable ASCII, otherwise a backslash
    escape: \\xNN for code points <= 0xff, \\uNNNN up to 0xffff and
    \\UNNNNNNNN beyond that."""
    global PRINTABLE
    if c in PRINTABLE:
        return c
    c = ord(c)
    if c <= 0xff:
        return '\\x' + '%02.2x' % (c)
    elif c <= 0xffff:
        # BUG FIX: the original compared the int code point against the
        # *string* '\uffff', a TypeError on Python 3 (and an always-true
        # cross-type comparison on Python 2 that made the \U branch
        # unreachable).
        return '\\u' + '%04.4x' % (c)
    else:
        return '\\U' + '%08.8x' % (c)
def print_string(s, f):
    """Write *s* to *f* double-quoted, with non-printables escaped."""
    f.write('"')
    f.write(''.join(escape(c) for c in s))
    f.write('"')
def print_version(version, f):
    """Write a three-part version sequence to *f* as "major.minor.patch".

    Anything that is not exactly three elements long is silently ignored.
    """
    if len(version) == 3:
        f.write('.'.join('%u' % (part) for part in version))
def print_hex_bytes(data, f):
    """Write *data* to *f* as a run of lowercase two-digit hex bytes."""
    f.write(''.join('%2.2x' % (byte) for byte in data))
def print_endian(value, f):
    """Write *value* as hex, tagged when it matches a dex endian constant."""
    f.write("%#8.8x" % (value))
    if value == ENDIAN_CONSTANT:
        f.write(" (ENDIAN_CONSTANT)")
    elif value == REVERSE_ENDIAN_CONSTANT:
        f.write(" (REVERSE_ENDIAN_CONSTANT)")
def is_zero(value):
    """Validator: return None when *value* is zero, else an error string."""
    if value == 0:
        return None
    # "but is" (typo "bit is" fixed) matches the wording of is_dex_magic.
    return 'value should be zero, but is %s' % (str(value))
def is_dex_magic(magic):
    """Validator: return None when *magic* matches MAGIC, else an error."""
    if magic == MAGIC:
        return None
    return 'value should be %s but is %s' % (MAGIC, magic)
def hex_escape(s):
    """Return *s* with every non-printable character backslash-escaped."""
    return ''.join(escape(c) for c in s)
# ----------------------------------------------------------------------
# encoded_field
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# encoded_field
# ----------------------------------------------------------------------
class encoded_field(AutoParser):
    """One field entry of class_data_item: delta-encoded index + flags."""
    items = [
        {'type': 'uleb', 'name': 'field_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    @classmethod
    def fixup_indexes(cls, items):
        # field_idx values are stored as deltas from the previous entry;
        # convert them to absolute indexes in place.
        for i in range(1, len(items)):
            items[i].field_idx += items[i - 1].field_idx

    @classmethod
    def get_table_header(self):
        return 'FIELD FLAGS\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_method
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# encoded_method
# ----------------------------------------------------------------------
class encoded_method(AutoParser):
    """One method entry of class_data_item: delta index, flags, code off."""
    items = [
        {'type': 'uleb', 'name': 'method_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
        {'type': 'uleb', 'name': 'code_off', 'format': '0x%8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    @classmethod
    def fixup_indexes(cls, items):
        # method_idx values are stored as deltas from the previous entry;
        # convert them to absolute indexes in place.
        for i in range(1, len(items)):
            items[i].method_idx += items[i - 1].method_idx

    @classmethod
    def get_table_header(self):
        return 'METHOD FLAGS\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# class_data_item
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# class_data_item
# ----------------------------------------------------------------------
class class_data_item(AutoParser):
    """The class_data_item section: the fields and methods of one class."""
    items = [
        {'type': 'uleb', 'name': 'static_fields_size'},
        {'type': 'uleb', 'name': 'instance_fields_size'},
        {'type': 'uleb', 'name': 'direct_methods_size'},
        {'type': 'uleb', 'name': 'virtual_methods_size'},
        {'class': encoded_field, 'name': 'static_fields',
         'attr_count': 'static_fields_size', 'flat': True},
        {'class': encoded_field, 'name': 'instance_fields',
         'attr_count': 'instance_fields_size', 'flat': True},
        {'class': encoded_method, 'name': 'direct_methods',
         'attr_count': 'direct_methods_size', 'flat': True},
        {'class': encoded_method, 'name': 'virtual_methods',
         'attr_count': 'virtual_methods_size', 'flat': True},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        # Convert the delta-encoded field/method indexes to absolute ones.
        encoded_field.fixup_indexes(self.static_fields)
        encoded_field.fixup_indexes(self.instance_fields)
        encoded_method.fixup_indexes(self.direct_methods)
        encoded_method.fixup_indexes(self.virtual_methods)

    @classmethod
    def create_empty(cls):
        # Four zero ULEB128 bytes = a class with no fields and no methods.
        # NOTE(review): StringIO.StringIO is Python 2 only.
        data = file_extract.FileExtract(StringIO.StringIO('\0\0\0\0'), '=')
        return class_data_item(data)
# ----------------------------------------------------------------------
# class_def_item
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# class_def_item
# ----------------------------------------------------------------------
class class_def_item(AutoParser):
    """One entry of the class_defs section of a dex file."""
    items = [
        {'type': 'u32', 'name': 'class_idx', 'align': 4},
        {'type': 'u32', 'name': 'access_flags'},
        {'type': 'u32', 'name': 'superclass_idx'},
        {'type': 'u32', 'name': 'interfaces_off'},
        {'type': 'u32', 'name': 'source_file_idx'},
        {'type': 'u32', 'name': 'annotations_off'},
        {'type': 'u32', 'name': 'class_data_off'},
        {'type': 'u32', 'name': 'static_values_off'},
        # class_data is parsed lazily from class_data_off; an offset of
        # zero means "no class data" and yields the empty default.
        {'class': class_data_item, 'name': 'class_data',
         'attr_offset': 'class_data_off',
         'condition': lambda item, data: item.class_data_off != 0,
         'dump': False,
         'default': class_data_item.create_empty()},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(self):
        return ('CLASS ACCESS SUPERCLASS INTERFACES SOURCE'
                ' ANNOTATION CLASS_DATA STATIC_VALUES\n')

    def get_dump_flat(self):
        return True

    def find_encoded_method_by_code_off(self, code_off):
        """Return the direct or virtual method whose code item lives at
        *code_off*, or None when no method matches."""
        for encoded_method in self.class_data.direct_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        for encoded_method in self.class_data.virtual_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        return None
# ----------------------------------------------------------------------
# try_item
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# try_item
# ----------------------------------------------------------------------
class try_item(AutoParser):
    """One try block: covered code range plus its handler offset."""
    items = [
        {'type': 'u32', 'name': 'start_addr'},
        {'type': 'u16', 'name': 'insn_count'},
        {'type': 'u16', 'name': 'handler_off'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_type_addr_pair
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# encoded_type_addr_pair
# ----------------------------------------------------------------------
class encoded_type_addr_pair(AutoParser):
    """One catch clause: exception type index + handler code address."""
    items = [
        {'type': 'uleb', 'name': 'type_idx', 'format': '%#8.8x'},
        {'type': 'uleb', 'name': 'addr', 'format': '%#8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_catch_handler
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# encoded_catch_handler
# ----------------------------------------------------------------------
class encoded_catch_handler(AutoParser):
    """A list of catch clauses; a non-positive size signals a trailing
    catch-all handler address."""
    items = [
        {'type': 'sleb', 'name': 'size'},
        # abs(size) typed handlers; the sign of size is only a flag.
        {'class': encoded_type_addr_pair, 'name': 'handlers',
         'attr_count': 'size', 'attr_count_fixup': abs},
        # catch_all_addr only exists when size <= 0.
        {'type': 'uleb', 'name': 'catch_all_addr', 'default': 0,
         'condition': lambda item, data: item.size <= 0},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_catch_handler_list
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# encoded_catch_handler_list
# ----------------------------------------------------------------------
class encoded_catch_handler_list(AutoParser):
    """The handlers table of a code item: size + encoded_catch_handlers."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_catch_handler, 'name': 'list', 'attr_count': 'size'}
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
def print_instructions(insns, prefix, flat, f):
    """Decode the code units in *insns* and print one instruction per line.

    NOTE(review): `flat` is accepted but never used here; CodeUnits and
    DexInstruction are defined elsewhere in this module — confirm their
    contract there.
    """
    f.write('\n')
    code_units = CodeUnits(insns)
    dex_inst = DexInstruction()
    while code_units.index_is_valid():
        dex_inst.decode(code_units)
        if prefix:
            f.write(prefix)
        f.write(' ')
        dex_inst.dump()
# Opcodes of the debug_info_item line-number state machine.
DBG_END_SEQUENCE = 0x00
DBG_ADVANCE_PC = 0x01
DBG_ADVANCE_LINE = 0x02
DBG_START_LOCAL = 0x03
DBG_START_LOCAL_EXTENDED = 0x04
DBG_END_LOCAL = 0x05
DBG_RESTART_LOCAL = 0x06
DBG_SET_PROLOGUE_END = 0x07
DBG_SET_EPILOGUE_BEGIN = 0x08
DBG_SET_FILE = 0x09
DBG_FIRST_SPECIAL = 0x0a  # first "special" opcode (line + addr advance)
DBG_LINE_BASE = -4        # smallest line delta a special opcode encodes
DBG_LINE_RANGE = 15       # number of distinct line deltas per special opcode
class DBG(dict_utils.Enum):
    """Enum wrapper naming the DBG_* debug-info opcodes."""
    enum = {
        'DBG_END_SEQUENCE': DBG_END_SEQUENCE,
        'DBG_ADVANCE_PC': DBG_ADVANCE_PC,
        'DBG_ADVANCE_LINE': DBG_ADVANCE_LINE,
        'DBG_START_LOCAL': DBG_START_LOCAL,
        'DBG_START_LOCAL_EXTENDED': DBG_START_LOCAL_EXTENDED,
        'DBG_END_LOCAL': DBG_END_LOCAL,
        'DBG_RESTART_LOCAL': DBG_RESTART_LOCAL,
        'DBG_SET_PROLOGUE_END': DBG_SET_PROLOGUE_END,
        'DBG_SET_EPILOGUE_BEGIN': DBG_SET_EPILOGUE_BEGIN,
        'DBG_SET_FILE': DBG_SET_FILE
    }

    def __init__(self, data):
        # The opcode is a single byte; special opcodes (>= DBG_FIRST_SPECIAL)
        # have no name in the enum and keep their numeric value.
        dict_utils.Enum.__init__(self, data.get_uint8(), self.enum)

    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # The extra keyword arguments match the dump() signature that
        # AutoParser expects.
        f.write(str(self))
class debug_info_op(AutoParser):
    """One opcode of the dex debug-info line-number state machine,
    together with its decoded operands."""
    items = [
        {'class': DBG, 'name': 'op'},
        # Operand layout depends on the opcode value.
        {'switch': 'op', 'cases': {
            DBG_ADVANCE_PC: [
                {'type': 'uleb', 'name': 'addr_offset'}
            ],
            DBG_ADVANCE_LINE: [
                {'type': 'sleb', 'name': 'line_offset'},
            ],
            DBG_START_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
            ],
            DBG_START_LOCAL_EXTENDED: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
                {'type': 'ulebp1', 'name': 'sig_idx'},
            ],
            DBG_END_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_RESTART_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_SET_FILE: [
                {'type': 'ulebp1', 'name': 'name_idx'}
            ],
            'default': []
        }
        }
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        if self.op >= DBG_FIRST_SPECIAL:
            # Special opcodes pack both a line delta and an address delta
            # into the opcode byte itself.
            adjusted_opcode = int(self.op) - DBG_FIRST_SPECIAL
            line_offset = DBG_LINE_BASE + (adjusted_opcode % DBG_LINE_RANGE)
            # Floor division keeps the result an int on Python 3 as well
            # (the original '/' would produce a float there).
            addr_offset = (adjusted_opcode // DBG_LINE_RANGE)
            setattr(self, 'line_offset', line_offset)
            setattr(self, 'addr_offset', addr_offset)
        setattr(self, 'byte_size', data.tell() - self.get_offset())

    def get_dump_flat(self):
        return True

    def get_byte_size(self):
        """Return the encoded size of this opcode plus its operands."""
        return self.byte_size

    def dump_opcode(self, f=sys.stdout):
        """Print this opcode and its decoded operands to *f*."""
        f.write(str(self.op))
        if self.op == DBG_ADVANCE_PC:
            f.write('(%u)' % self.addr_offset)
        elif self.op == DBG_ADVANCE_LINE:
            # NOTE(review): line_offset is signed (sleb); '%u' renders
            # negatives oddly — confirm the intended formatting.
            f.write('(%u)' % self.line_offset)
        elif self.op == DBG_START_LOCAL:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.type_idx))
        elif self.op == DBG_START_LOCAL_EXTENDED:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.type_idx))
            f.write(', sig_idx=')
            # BUG FIX: the original tested and printed type_idx here even
            # though the label says sig_idx (copy/paste error).
            if self.sig_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.sig_idx))
        elif self.op == DBG_END_LOCAL or self.op == DBG_RESTART_LOCAL:
            f.write('(register_num=%u)' % self.register_num)
        elif self.op == DBG_SET_FILE:
            f.write('(name_idx=%u)' % self.name_idx)
        elif self.op >= DBG_FIRST_SPECIAL:
            f.write(' (addr_offset=%u, line_offset=%i)' %
                    (self.addr_offset, self.line_offset))
class debug_info_item(AutoParser):
    """A method's debug_info_item: the header fields plus lazy access to
    the debug state-machine opcodes and the decoded line table."""
    items = [
        {'type': 'uleb', 'name': 'line_start'},
        {'type': 'uleb', 'name': 'parameters_size'},
        {'type': 'ulebp1', 'name': 'parameter_names',
         'attr_count': 'parameters_size'},
    ]

    class row(object):
        """One decoded line-table row (address -> source line mapping)."""

        def __init__(self):
            self.address = 0
            self.line = 1
            self.source_file = -1
            self.prologue_end = False
            self.epilogue_begin = False

        def dump(self, f=sys.stdout):
            f.write('0x%4.4x %5u %5u ' %
                    (self.address, self.line, self.source_file))
            if self.prologue_end or self.epilogue_begin:
                if self.prologue_end:
                    f.write('P ')
                else:
                    f.write('  ')
                if self.epilogue_begin:
                    f.write('E')
            f.write('\n')

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.data = data
        self.ops = None         # lazily decoded list of debug_info_op
        self.line_table = None  # lazily decoded list of row objects
        self.debug_info_offset = data.tell()

    def check_encoding(self, dex_method, f=sys.stdout):
        """Report debug info a release build could drop; return the number
        of bytes that would be saved."""
        bytes_saved = 0
        ops = self.get_ops()
        if len(ops) == 1:
            op = ops[0]
            if op.op == DBG_END_SEQUENCE:
                # NOTE(review): parameters_size is encoded as uleb128, yet
                # its size is computed with the uleb128p1 helper — confirm
                # against file_extract before changing.
                bytes_saved += (get_uleb128_byte_size(self.line_start) +
                                get_uleb128p1_byte_size(self.parameters_size))
                for parameter_name in self.parameter_names:
                    bytes_saved += get_uleb128p1_byte_size(parameter_name)
                bytes_saved += 1
                f.write('warning: %s debug info contains only a single ' % (
                    dex_method.get_qualified_name()))
                f.write('%s, all debug info can be removed ' % (op.op))
                f.write('(%u bytes)\n' % (bytes_saved))
                return bytes_saved
        # Dex files built for release don't need any the following
        # debug info ops
        for op in ops:
            size = op.get_byte_size()
            if op.op in (DBG_SET_PROLOGUE_END, DBG_SET_EPILOGUE_BEGIN):
                f.write('warning: %s %s can be removed (%u byte)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op in (DBG_START_LOCAL, DBG_START_LOCAL_EXTENDED,
                           DBG_END_LOCAL, DBG_RESTART_LOCAL):
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
        return bytes_saved

    def get_line_table(self):
        """Run the debug state machine and return the decoded line table
        as a list of row objects."""
        if self.line_table is None:
            # BUG FIX: the previous code appended to self.line_table while
            # it was still None (AttributeError), indexed each op like a
            # tuple (op_args[0]) even though get_ops() returns
            # debug_info_op objects, and compared the op object itself —
            # rather than op.op — against the DBG_* constants.
            line_table = []
            ops = self.get_ops()
            row = debug_info_item.row()
            for op in ops:
                if op.op == DBG_END_SEQUENCE:
                    break
                if op.op == DBG_ADVANCE_PC:
                    row.address += op.addr_offset
                elif op.op == DBG_ADVANCE_LINE:
                    row.line += op.line_offset
                elif op.op in (DBG_START_LOCAL, DBG_START_LOCAL_EXTENDED,
                               DBG_END_LOCAL, DBG_RESTART_LOCAL):
                    pass  # local-variable ops don't affect the line table
                elif op.op == DBG_SET_PROLOGUE_END:
                    row.prologue_end = True
                elif op.op == DBG_SET_EPILOGUE_BEGIN:
                    row.epilogue_begin = True
                elif op.op == DBG_SET_FILE:
                    row.source_file = op.name_idx
                else:
                    # Special opcode: advance both registers and emit a row.
                    row.line += op.line_offset
                    row.address += op.addr_offset
                    line_table.append(copy.copy(row))
                    row.prologue_end = False
                    row.epilogue_begin = False
            self.line_table = line_table
        return self.line_table

    def get_ops(self):
        """Lazily decode the opcode stream, which is terminated by a
        DBG_END_SEQUENCE opcode."""
        if self.ops is None:
            data = self.data
            data.push_offset_and_seek(self.debug_info_offset)
            self.ops = list()
            while True:
                op = debug_info_op(data)
                self.ops.append(op)
                if op.op == DBG_END_SEQUENCE:
                    break
            data.pop_offset_and_seek()
        return self.ops

    def dump_debug_info(self, f=sys.stdout, prefix=None):
        """Print each opcode, one per line, optionally prefixed."""
        ops = self.get_ops()
        for op in ops:
            if prefix:
                f.write(prefix)
            f.write('    ')
            op.dump_opcode(f=f)
            f.write('\n')
# ----------------------------------------------------------------------
# code_item
# ----------------------------------------------------------------------
class code_item(AutoParser):
    """A code_item: register/arg counts, the instruction array, and the
    optional try/handler tables for one method body."""
    items = [
        {'type': 'u16', 'name': 'registers_size', 'align': 4},
        {'type': 'u16', 'name': 'ins_size'},
        {'type': 'u16', 'name': 'outs_size'},
        {'type': 'u16', 'name': 'tries_size'},
        {'type': 'u32', 'name': 'debug_info_off'},
        {'type': 'u32', 'name': 'insns_size', 'format': '%u'},
        {'type': 'u16', 'name': 'insns',
         'attr_count': 'insns_size', 'dump_list': print_instructions},
        # Padding u16 to 4-byte align the tries table when insns_size is odd.
        {'type': 'u16', 'condition': lambda item,
         data: item.tries_size != 0 and item.insns_size & 1},
        {'class': try_item, 'name': 'tries', 'attr_count': 'tries_size',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None},
        {'class': encoded_catch_handler_list, 'name': 'handlers',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None}
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.debug_info = None  # lazily parsed debug_info_item
        self.data = data
        # Convert insns from a list to a tuple to avoid mutation and also to
        # allow self.insns to be hashed.
        self.insns = tuple(self.insns)

    def get_debug_info(self):
        """Lazily parse and return this method's debug_info_item, or None
        when debug_info_off is zero."""
        if self.debug_info is None and self.debug_info_off > 0:
            data = self.data
            data.push_offset_and_seek(self.debug_info_off)
            self.debug_info = debug_info_item(data)
            data.pop_offset_and_seek()
        return self.debug_info
class encoded_value:
    """Decoder for a single encoded_value from a DEX file.

    The leading byte packs the value format (low 5 bits) and a
    size-or-value argument (high 3 bits).  self.value stays None for
    formats that are not decoded yet (arrays and annotations).
    """

    def __init__(self, data):
        arg_type = data.get_uint8()
        value_arg = arg_type >> 5
        value_type = arg_type & 0x1f
        self.value_type = ValueFormat(value_type)
        self.value = None
        size = value_arg + 1  # number of payload bytes for sized formats
        if value_type == VALUE_BYTE:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_BYTE value_arg != 0 (%u)' % (value_arg))
            self.value = data.get_sint8()
        elif value_type in (VALUE_SHORT, VALUE_INT, VALUE_LONG):
            # Sign-extended little-endian integer of 'size' bytes.
            self.value = data.get_sint_size(size)
        elif value_type in (VALUE_CHAR, VALUE_METHOD_TYPE,
                            VALUE_METHOD_HANDLE, VALUE_STRING, VALUE_TYPE,
                            VALUE_FIELD, VALUE_METHOD, VALUE_ENUM):
            # Zero-extended unsigned value or constant-pool index.
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_FLOAT:
            raise ValueError('VALUE_FLOAT not supported yet')
        elif value_type == VALUE_DOUBLE:
            raise ValueError('VALUE_DOUBLE not supported yet')
        elif value_type == VALUE_ARRAY:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
            raise ValueError('VALUE_ARRAY not supported yet')
            # encoded_array: an array of values, in the format specified by
            # "encoded_array format". The size of the value is implicit in
            # the encoding.
        elif value_type == VALUE_ANNOTATION:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ANNOTATION value_arg != 0 (%u)' % (value_arg))
            # encoded_annotation: a sub-annotation, in the format specified
            # by "encoded_annotation format" below. The size of the value is
            # implicit in the encoding.
        elif value_type == VALUE_NULL:
            if value_arg != 0:
                # BUG FIX: this error message previously said VALUE_ARRAY.
                raise ValueError(
                    'VALUE_NULL value_arg != 0 (%u)' % (value_arg))
            self.value = 0
        elif value_type == VALUE_BOOLEAN:
            # NOTE(review): size is always >= 1 here, so the False branch
            # is unreachable; per the DEX spec the boolean is carried in
            # value_arg itself — verify before changing parse behavior.
            if size == 0:
                self.value = False
            else:
                self.value = data.get_uint8() != 0
# ----------------------------------------------------------------------
# encoded_array
# ----------------------------------------------------------------------
class encoded_array(AutoParser):
    """An encoded_array: a uleb128 count followed by that many
    encoded_value entries."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_value, 'name': 'values', 'attr_count': 'size'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class encoded_array_item(AutoParser):
    """Wrapper item holding a single encoded_array value."""
    items = [
        {'class': encoded_array, 'name': 'value'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# field_id_item
# ----------------------------------------------------------------------
class field_id_item(AutoParser):
    """A field_id_item: the class, type and name indexes identifying one
    field."""
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'type_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(cls):
        # Conventional 'cls' for a classmethod (was misleadingly 'self').
        return 'CLASS  TYPE   NAME\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# header_item
# ----------------------------------------------------------------------
class header_item(AutoParser):
    """The DEX file header: magic/version, checksums, and the size and
    file offset of every top-level table."""
    items = [
        {'type': 'cstr[4]', 'name': 'magic', 'validate': is_dex_magic},
        {'type': 'u8[3]', 'name': 'version', 'dump': print_version},
        {'type': 'u8', 'validate': is_zero},  # NULL byte
        {'type': 'u32', 'name': 'checksum'},
        {'type': 'u8[20]', 'name': 'signature', 'dump': print_hex_bytes},
        {'type': 'u32', 'name': 'file_size'},
        {'type': 'u32', 'name': 'header_size'},
        # Cleanup: this dict previously repeated the 'type' key twice with
        # the same value.
        {'type': 'u32', 'name': 'endian_tag', 'dump': print_endian},
        {'type': 'u32', 'name': 'link_size'},
        {'type': 'u32', 'name': 'link_off'},
        {'type': 'u32', 'name': 'map_off'},
        {'type': 'u32', 'name': 'string_ids_size'},
        {'type': 'u32', 'name': 'string_ids_off'},
        {'type': 'u32', 'name': 'type_ids_size'},
        {'type': 'u32', 'name': 'type_ids_off'},
        {'type': 'u32', 'name': 'proto_ids_size'},
        {'type': 'u32', 'name': 'proto_ids_off'},
        {'type': 'u32', 'name': 'field_ids_size'},
        {'type': 'u32', 'name': 'field_ids_off'},
        {'type': 'u32', 'name': 'method_ids_size'},
        {'type': 'u32', 'name': 'method_ids_off'},
        {'type': 'u32', 'name': 'class_defs_size'},
        {'type': 'u32', 'name': 'class_defs_off'},
        {'type': 'u32', 'name': 'data_size'},
        {'type': 'u32', 'name': 'data_off'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_header(self):
        return 'DEX header:'
# ----------------------------------------------------------------------
# map_item
# ----------------------------------------------------------------------
class map_item(AutoParser):
    """One map_list entry: an item type code, how many of that item there
    are, and where they start in the file."""
    items = [
        {'class': TypeCode, 'name': 'type',
         'dump_width': TypeCode.max_width()},
        {'type': 'u16'},  # unused padding
        {'type': 'u32', 'name': 'size'},
        {'type': 'u32', 'name': 'offset'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_list_header_lines(self):
        # Column header used when dumping map items as a table.
        return ['      TYPE                   SIZE       OFFSET\n']

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# map_list
# ----------------------------------------------------------------------
class map_list(AutoParser):
    """The map_list: a count followed by that many map_item entries
    describing every section of the DEX file."""
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4, 'dump': False},
        {'class': map_item, 'name': 'list', 'attr_count': 'size',
         'flat': True},
    ]

    def get_dump_header(self):
        return 'map_list:'

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_handle_item
# ----------------------------------------------------------------------
class method_handle_item(AutoParser):
    """A method_handle_item: the handle type plus the field or method id
    it refers to."""
    items = [
        {'class': MethodHandleTypeCode, 'name': 'method_handle_type',
         'align': 4},
        {'type': 'u16'},  # unused padding
        {'type': 'u16', 'name': 'field_or_method_id'},
        {'type': 'u16'},  # unused padding
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_id_item
# ----------------------------------------------------------------------
class method_id_item(AutoParser):
    """A method_id_item: the class, prototype and name indexes identifying
    one method."""
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'proto_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(cls):
        # Conventional 'cls' for a classmethod (was misleadingly 'self').
        return 'CLASS  PROTO  NAME\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# proto_id_item
# ----------------------------------------------------------------------
class proto_id_item(AutoParser):
    """A proto_id_item: shorty descriptor, return type, and an optional
    offset to the parameter type_list."""
    items = [
        {'type': 'u32', 'name': 'shorty_idx', 'align': 4},
        {'type': 'u32', 'name': 'return_type_idx'},
        {'type': 'u32', 'name': 'parameters_off'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
        self.parameters = None  # lazily parsed type_list

    def get_dump_flat(self):
        return True

    @classmethod
    def get_table_header(cls):
        # Conventional 'cls' for a classmethod (was misleadingly 'self').
        return 'SHORTY_IDX RETURN PARAMETERS\n'

    def get_parameters(self):
        """Lazily parse and return the parameter type_list, or None when
        this prototype has no parameters."""
        if self.parameters_off != 0 and self.parameters is None:
            # Get the data from our dex.File object
            data = self.context.data
            data.push_offset_and_seek(self.parameters_off)
            self.parameters = type_list(data)
            data.pop_offset_and_seek()
        return self.parameters
# ----------------------------------------------------------------------
# string_data_item
# ----------------------------------------------------------------------
class string_data_item(AutoParser):
    """A string_data_item: the UTF-16 code-unit count followed by the
    MUTF-8 encoded string bytes."""
    items = [
        {'type': 'uleb', 'name': 'utf16_size', 'format': '%3u'},
        {'type': 'cstr', 'name': 'data', 'dump': print_string},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# type_list
# ----------------------------------------------------------------------
class type_list(AutoParser):
    """A type_list: a count followed by that many u16 type indexes."""
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4},
        {'type': 'u16', 'name': 'list', 'attr_count': 'size'},
    ]

    def get_dump_header(self):
        return 'type_list:'

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class Progard:
    '''Parses a proguard map file and does name lookups.

    NOTE(review): the class name is a long-standing misspelling of
    "Proguard"; it is kept as-is for backward compatibility with callers.
    '''

    def __init__(self, path):
        self.path = path
        # Maps new (obfuscated) class name -> (old class name, member dict)
        # where member dict maps new member name -> old member signature.
        self.classes_dict = {}
        class_dict = None
        # Member lines look like "    [line:line:]old sig -> newname".
        # BUG FIX: raw string — '\s' in a plain string literal is an
        # invalid escape sequence (DeprecationWarning on Python 3).
        regex = re.compile(r'\s+([0-9]+:[0-9]+:)?(.*) -> (.*)$')
        with open(path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')
                if not line:
                    continue
                if line[0].isspace():
                    # Member (field/method) line of the current class.
                    match = regex.match(line)
                    if match:
                        old = match.group(2)
                        new = match.group(3)
                        class_dict[new] = old
                else:
                    # Class line: "old.class.Name -> new.Name:".
                    (old, new) = line.split(' -> ')
                    class_dict = {}
                    self.classes_dict[new] = (old, class_dict)

    def lookup_class(self, new_class):
        '''Translate a new class name to the old class name.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if old_class is not None:
                return old_class
        return None

    def lookup_method(self, new_class, new_method):
        '''Translate a new class name and a new method into the old class
        name and the old method name.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if new_method in class_dict:
                return class_dict[new_method]
        return None
class DexMethod:
    '''Encapsulates a method within a DEX file.'''
    def __init__(self, dex_class, encoded_method, is_virtual):
        self.dex_class = dex_class  # owning DexClass
        self.encoded_method = encoded_method
        self.method_id = None  # lazily resolved method_id_item
        self.is_virtual = is_virtual  # True for virtual, False for direct
        self.code_item = None  # lazily resolved code_item
        self.insns = None  # lazily decoded instruction list
        self.name_in_file = None  # raw (mangled) name stored in the DEX
        self.name = None  # demangled name when a proguard map is present
    def get_qualified_name(self):
        # Class name + method name, with a ':' separator unless the class
        # name already ends with ';'.
        class_name = self.get_class().get_name()
        method_name = self.get_name()
        if class_name[-1] != ';':
            return class_name + ':' + method_name
        else:
            return class_name + method_name
    def get_method_id(self):
        '''Get the method_id_item for this method.'''
        if self.method_id is None:
            self.method_id = self.get_dex().get_method_id(self.encoded_method)
        return self.method_id
    def get_method_index(self):
        '''Get the method index into the method_ids array in the DEX file.'''
        return self.encoded_method.method_idx
    def get_code_offset(self):
        '''Get the code offset for this method.'''
        return self.encoded_method.code_off
    def get_code_item_index(self):
        '''Get the index into the code_items array in the dex file for the
        code for this method, or -1 if there is no code for this method.'''
        code_item = self.get_code_item()
        if code_item:
            return self.get_dex().get_code_item_index_from_code_off(
                code_item.get_offset())
        return -1
    def get_dex(self):
        # The owning dex.File is reached through the owning class.
        return self.dex_class.get_dex()
    def get_name_in_file(self):
        '''Returns the name of the method as it is known in the current DEX
        file (no proguard remapping)'''
        if self.name_in_file is None:
            self.name_in_file = self.get_dex().get_string(
                self.get_method_id().name_idx)
        return self.name_in_file
    def get_name(self):
        # Prefer the proguard-demangled name; fall back to the raw name.
        if self.name is None:
            cls_mangled = self.get_class().get_mangled_name()
            name_in_file = self.get_name_in_file()
            if cls_mangled and name_in_file:
                self.name = self.get_dex().demangle_class_method_name(
                    cls_mangled, name_in_file)
                if self.name is None:
                    self.name = name_in_file
        return self.name
    def get_class(self):
        # Owning DexClass.
        return self.dex_class
    def get_code_item(self):
        # Lazily resolve the code_item; methods without code (abstract,
        # native) have code_off == 0 and keep code_item as None.
        if self.code_item is None:
            if self.encoded_method.code_off != 0:
                self.code_item = self.get_dex().find_code_item(
                    self.encoded_method.code_off)
        return self.code_item
    def get_code_byte_size(self):
        # insns is an array of u16 code units, hence * 2 for bytes.
        code_item = self.get_code_item()
        if code_item:
            return len(code_item.insns) * 2
        return 0
    def get_instructions(self):
        # Lazily decode the code units into DexInstruction objects.
        if self.insns is None:
            self.insns = []
            code_item = self.get_code_item()
            if code_item:
                code_units = CodeUnits(code_item.insns)
                while code_units.index_is_valid():
                    insn = DexInstruction()
                    insn.decode(code_units)
                    self.insns.append(insn)
        return self.insns
    def dump(self, dump_code=True, dump_debug_info=True, f=sys.stdout):
        '''Dump this method's ids and, optionally, its code and debug
        info to *f*.'''
        if self.is_virtual:
            method_type = 'virtual'
        else:
            method_type = 'direct'
        dex = self.get_dex()
        f.write('method: (%s) %s%s\n' %
                (method_type, self.get_class().get_name(), self.get_name()))
        code_item_idx = dex.get_code_item_index_from_code_off(
            self.encoded_method.code_off)
        self.encoded_method.dump(f=f, prefix='    encoded_method.', flat=False)
        method_id = dex.get_method_id(self.encoded_method.method_idx)
        if method_id:
            method_id.dump(f=f, prefix='    method_id.', flat=False)
            proto_id = dex.get_proto_id(method_id.proto_idx)
            if proto_id:
                proto_id.dump(f=f, prefix='    proto_id.', flat=False)
        f.write('\n')
        if dump_code:
            if code_item_idx >= 0:
                code_item = dex.get_code_items()[code_item_idx]
                f.write('    code_item[%u] @ %#8.8x:\n' % (code_item_idx,
                                                           code_item.get_offset()))
                code_item.dump(f=f, prefix='        ')
        if dump_debug_info:
            self.dump_debug_info(f=f, prefix='    ')
    def dump_code(self, f=sys.stdout):
        # Print every decoded instruction.
        insns = self.get_instructions()
        for insn in insns:
            insn.dump(f=f)
    def get_debug_info(self):
        # Debug info lives on the code item; None when there is no code.
        code_item = self.get_code_item()
        if code_item:
            return code_item.get_debug_info()
        return None
    def dump_debug_info(self, f=sys.stdout, prefix=None):
        '''Dump the debug-info opcode stream, or a "no debug info" note.'''
        debug_info = self.get_debug_info()
        if prefix:
            f.write(prefix)
        if debug_info:
            f.write('debug info @ %#8.8x:\n' % (debug_info.get_offset()))
            debug_info.dump_debug_info(f=f, prefix=prefix)
            f.write('\n')
        else:
            f.write('no debug info\n')
    def check_debug_info_encoding(self):
        # Returns the byte count reported by debug_info_item.check_encoding,
        # or None when the method has no debug info.
        debug_info = self.get_debug_info()
        if debug_info:
            return debug_info.check_encoding(self)
class DexClass:
    '''Encapsulates a class within a DEX file.'''
    def __init__(self, dex, class_def):
        self.dex = dex  # owning dex.File
        self.class_def = class_def  # class_def_item from the DEX
        self.methods = None  # lazily built DexMethod list
        self.num_direct_methods = 0  # set when methods are built
        self.mangled = None  # cached mangled type name
        self.demangled = None  # cached demangled (proguard) name
    def dump(self, f=sys.stdout):
        '''Dump this class's class_def and class_data to *f*.'''
        f.write('\nclass: %s\n' % (self.get_name()))
        dex = self.get_dex()
        class_def_offset = self.class_def.get_offset()
        class_def_idx = dex.get_class_def_index_from_offset(class_def_offset)
        f.write('    class_def[%u] @ %#8.8x:\n' % (class_def_idx,
                                                   class_def_offset))
        self.class_def.dump(f=f, flat=False, prefix='        ')
        f.write('    class_data_item @ %#8.8x:\n' % (
            self.class_def.class_data.get_offset()))
        self.class_def.class_data.dump(f=f, flat=False, prefix='        ')
        f.write('\n')
    def get_type_index(self):
        '''Get type ID index (class_idx) for this class.'''
        return self.class_def.class_idx
    def is_abstract(self):
        # True when the ACC_ABSTRACT access flag is set.
        return (self.class_def.access_flags & ACC_ABSTRACT) != 0
    def get_mangled_name(self):
        # Lazily cached mangled type name, e.g. 'Lcom/example/Foo;'.
        if self.mangled is None:
            dex = self.get_dex()
            self.mangled = dex.get_typename(self.class_def.class_idx)
        return self.mangled
    def get_name(self):
        '''Get the demangled name for a class if we have a proguard file or
        return the mangled name if we don't have a proguard file.'''
        if self.demangled is None:
            mangled = self.get_mangled_name()
            if mangled:
                self.demangled = self.get_dex().demangle_class_name(mangled)
                if self.demangled is None:
                    self.demangled = mangled
        return self.demangled
    def get_dex(self):
        # Owning dex.File.
        return self.dex
    def get_methods(self):
        # Lazily build DexMethod wrappers: direct methods first, then
        # virtual methods (num_direct_methods records the split point).
        if self.methods is None:
            self.methods = []
            self.num_direct_methods = len(
                self.class_def.class_data.direct_methods)
            for encoded_method in self.class_def.class_data.direct_methods:
                self.methods.append(DexMethod(self, encoded_method, False))
            for encoded_method in self.class_def.class_data.virtual_methods:
                self.methods.append(DexMethod(self, encoded_method, True))
        return self.methods
def demangle_classname(mangled):
    """Convert a mangled type name like 'Lfoo/bar/Baz;' into
    'foo.bar.Baz:'; anything else is returned unchanged."""
    if mangled and len(mangled) > 2:
        if mangled.startswith('L') and mangled.endswith(';'):
            return mangled[1:-1].replace('/', '.') + ':'
    # Already demangled (or too short / falsy) — pass through untouched.
    return mangled
def mangle_classname(demangled):
    """Convert a dotted class name into the mangled 'L<slashes>;' form;
    names already in that form (or too short / falsy) pass through."""
    if demangled and len(demangled) > 2:
        if demangled[0] != 'L' or demangled[-1] != ';':
            return 'L' + demangled.replace('.', '/') + ';'
    # Already mangled — return unchanged.
    return demangled
class File:
'''Represents and DEX (Dalvik Executable) file'''
def __init__(self, path, proguard_path):
self.path = path
self.proguard = None
if proguard_path and os.path.exists(proguard_path):
self.proguard = Progard(proguard_path)
self.data = file_extract.FileExtract(open(self.path), '=', 4)
self.header = header_item(self.data)
self.map_list = None
self.string_ids = None
self.type_ids = None
self.proto_ids = None
self.field_ids = None
self.method_ids = None
self.class_defs = None
self.classes = None
self.call_site_ids = None
self.method_handle_items = None
self.code_items = None
self.code_off_to_code_item_idx = {}
self.strings = None
self.call_sites = None
self.dex_classes = {}
def demangle_class_name(self, cls_mangled):
'''Given a mangled type name as it would appear in a DEX file like
"LX/JxK;", return the demangled version if we have a proguard file,
otherwise return the original class typename'''
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_class(cls_demangled)
return None
def demangle_class_method_name(self, cls_mangled, method_name):
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_method(cls_demangled, method_name)
return None
def get_map_list(self):
if self.map_list is None:
self.data.push_offset_and_seek(self.header.map_off)
self.map_list = map_list(self.data)
self.data.pop_offset_and_seek()
return self.map_list
def get_map_tuple(self, type_code):
map_list = self.get_map_list()
for item in map_list.list:
if item.type.get_enum_value() == type_code:
return (item.size, item.offset)
return (0, 0)
def find_class(self, class_ref):
class_idx = class_ref
if isinstance(class_ref, six.string_types):
# Make sure the string is in 'L' <classname-with-slashes> ';'
class_mangled = mangle_classname(class_ref)
class_str_idx = self.find_string_idx(class_mangled)
if class_str_idx >= 0:
class_idx = self.find_type_idx(class_str_idx)
if isinstance(class_idx, numbers.Integral):
classes = self.get_classes()
for cls in classes:
if cls.class_def.class_idx == class_idx:
return cls
return None
def find_string_idx(self, match_s):
strings = self.get_strings()
for (i, s) in enumerate(strings):
if match_s == s.data:
return i
return -1
def get_string(self, index):
strings = self.get_strings()
if index < len(strings):
return strings[index].data
return None
def get_typename(self, type_id):
types = self.get_type_ids()
if type_id < len(types):
return self.get_string(types[type_id])
return None
def get_string_ids(self):
if self.string_ids is None:
self.string_ids = list()
self.data.push_offset_and_seek(self.header.string_ids_off)
for i in range(self.header.string_ids_size):
self.string_ids.append(self.data.get_uint32())
self.data.pop_offset_and_seek()
return self.string_ids
def get_type_ids(self):
if self.type_ids is None:
self.type_ids = list()
self.data.push_offset_and_seek(self.header.type_ids_off)
for i in range(self.header.type_ids_size):
self.type_ids.append(self.data.get_uint32())
self.data.pop_offset_and_seek()
return self.type_ids
def get_proto_ids(self):
if self.proto_ids is None:
self.proto_ids = list()
self.data.push_offset_and_seek(self.header.proto_ids_off)
for i in range(self.header.proto_ids_size):
self.proto_ids.append(proto_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.proto_ids
def get_proto_id(self, proto_idx):
proto_ids = self.get_proto_ids()
if proto_idx >= 0 and proto_idx < len(proto_ids):
return proto_ids[proto_idx]
return None
def get_proto_shorty(self, proto_idx):
id = self.get_proto_id(proto_idx)
return self.get_string(id.shorty_idx)
def get_field_ids(self):
if self.field_ids is None:
self.field_ids = list()
self.data.push_offset_and_seek(self.header.field_ids_off)
for i in range(self.header.field_ids_size):
self.field_ids.append(field_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.field_ids
def get_method_ids(self):
if self.method_ids is None:
self.method_ids = list()
self.data.push_offset_and_seek(self.header.method_ids_off)
for i in range(self.header.method_ids_size):
self.method_ids.append(method_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.method_ids
def find_method_ids(self, method_name, class_ref=None):
dex_class = None
if class_ref is not None:
dex_class = self.find_class(class_ref)
matches = list() # Return a list of matching methods
method_ids = self.get_method_ids()
if not method_ids:
return matches
name_idx = self.find_string_idx(method_name)
if name_idx <= 0:
return matches
for method_id in method_ids:
if method_id.name_idx == name_idx:
if dex_class:
if method_id.class_idx != dex_class.class_def.class_idx:
continue
matches.append(method_id)
return matches
def find_method_id_by_code_offset(self, code_off):
class_defs = self.get_class_defs()
for class_def in class_defs:
method_id = class_def.find_encoded_method_by_code_off(code_off)
if method_id:
return method_id
return None
def get_method_id(self, method_ref):
'''method_ref can be one of:
- a encoded_method object
- integer method index'''
method_ids = self.get_method_ids()
if method_ids:
if isinstance(method_ref, encoded_method):
if method_ref.method_idx < len(method_ids):
return method_ids[method_ref.method_idx]
elif isinstance(method_ref, numbers.Integral):
if method_ref < len(method_ids):
return method_ids[method_ref]
else:
raise ValueError('invalid method_ref type %s' %
(type(method_ref)))
return None
# def get_call_site(self, idx):
# call_site_ids = self.get_call_site_ids()
# if idx >= len(call_site_ids):
# return None
# if self.call_sites[idx] is None:
# self.data.push_offset_and_seek(call_site_ids[idx])
# self.call_sites[idx] = call_site_item(self.data)
# self.data.pop_offset_and_seek()
# return self.call_sites[idx]
def get_call_site_ids(self):
if self.call_site_ids is None:
self.call_site_ids = list()
self.call_sites = list()
(size, offset) = self.get_map_tuple(TYPE_CALL_SITE_ID_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.call_site_ids.append(self.data.get_uint32())
self.call_sites.append(None)
self.data.pop_offset_and_seek()
return self.call_site_ids
def get_method_handle_items(self):
if self.method_handle_items is None:
self.method_handle_items = list()
(size, offset) = self.get_map_tuple(TYPE_METHOD_HANDLE_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.method_handle_items.append(method_handle_item(self.data))
self.data.pop_offset_and_seek()
return self.method_handle_items
def get_code_items(self):
if self.code_items is None:
self.code_items = list()
(size, offset) = self.get_map_tuple(TYPE_CODE_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.data.align_to(4)
item = code_item(self.data)
self.code_items.append(item)
self.code_off_to_code_item_idx[item.get_offset()] = i
self.data.pop_offset_and_seek()
return self.code_items
    def report_code_duplication(self):
        '''Group methods by identical instruction tuples and print each
        group that contains more than one method, dumping the shared code
        once per group.'''
        code_to_code_items = {}
        code_items = self.get_code_items()
        if code_items:
            for code_item in code_items:
                # insns is a (hashable) tuple of code units — see code_item.
                key = code_item.insns
                if key in code_to_code_items:
                    code_to_code_items[key].append(code_item)
                else:
                    code_to_code_items[key] = [code_item]
            for key in code_to_code_items:
                code_items = code_to_code_items[key]
                if len(code_items) > 1:
                    print('-' * 72)
                    print('The following methods have the same code:')
                    for code_item in code_items:
                        method = self.find_method_from_code_off(
                            code_item.get_offset())
                        if method.is_virtual:
                            print('virtual', end=' ')
                        else:
                            print('direct', end=' ')
                        print(method.get_qualified_name())
                    # Dump the code once for all methods
                    method.dump_code()
def get_class_def_index_from_offset(self, class_def_offset):
class_defs = self.get_class_defs()
for (i, class_def) in enumerate(class_defs):
if class_def.get_offset() == class_def_offset:
return i
return -1
def get_code_item_index_from_code_off(self, code_off):
# Make sure the code items are created
self.get_code_items()
if code_off in self.code_off_to_code_item_idx:
return self.code_off_to_code_item_idx[code_off]
return -1
def find_code_item(self, code_off):
code_item_idx = self.get_code_item_index_from_code_off(code_off)
if code_item_idx >= 0:
return self.get_code_items()[code_item_idx]
else:
raise ValueError('invalid code item offset %#8.8x' % code_off)
def find_method_from_code_off(self, code_off):
if code_off == 0:
return None
for cls in self.get_classes():
for method in cls.get_methods():
if method.get_code_offset() == code_off:
return method
return None
def get_class_defs(self):
if self.class_defs is None:
self.class_defs = list()
self.data.push_offset_and_seek(self.header.class_defs_off)
for i in range(self.header.class_defs_size):
class_def = class_def_item(self.data, self)
self.class_defs.append(class_def)
self.data.pop_offset_and_seek()
return self.class_defs
def get_classes(self):
if self.classes is None:
self.classes = list()
class_defs = self.get_class_defs()
for class_def in class_defs:
dex_class = DexClass(self, class_def)
self.classes.append(dex_class)
self.data.pop_offset_and_seek()
return self.classes
def get_strings(self):
if self.strings is None:
self.strings = list()
for string_id_item in self.get_string_ids():
self.data.push_offset_and_seek(string_id_item)
self.strings.append(string_data_item(self.data))
self.data.pop_offset_and_seek()
return self.strings
def dump_header(self, options, f=sys.stdout):
self.header.dump(f=f)
def dump_map_list(self, options, f=sys.stdout):
self.get_map_list().dump(f=f)
f.write('\n')
    def dump_string_ids(self, options, f=sys.stdout):
        '''Dump every string with its index and file offset.'''
        string_ids = self.get_string_ids()
        if string_ids:
            f.write('string_ids:\n')
            for (i, item) in enumerate(self.get_strings()):
                f.write('[%3u] %#8.8x ( ' % (i, string_ids[i]))
                item.dump(f=f)
                f.write(')\n')
    def dump_type_ids(self, options, f=sys.stdout):
        '''Dump the type_ids table: each type's descriptor string index
        and the resolved string.'''
        type_ids = self.get_type_ids()
        if type_ids:
            f.write('\ntype_ids:\n      DESCRIPTOR_IDX\n')
            for (i, item) in enumerate(type_ids):
                f.write('[%3u] %#8.8x ("%s")\n' %
                        (i, item, self.get_string(item)))
def find_type_idx(self, class_str_idx):
types = self.get_type_ids()
i = bisect.bisect_left(types, class_str_idx)
if i != len(types) and types[i] == class_str_idx:
return i
return -1
def find_class_def_by_type_index(self, class_idx):
class_defs = self.get_class_defs()
for class_def in class_defs:
if class_def.class_idx == class_idx:
return class_def
return None
    def dump_proto_ids(self, options, f=sys.stdout):
        '''Dump the proto_ids table with each prototype's shorty, return
        type and parameter list.'''
        proto_ids = self.get_proto_ids()
        if proto_ids:
            f.write('\nproto_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(proto_id_item.get_table_header())
            for (i, item) in enumerate(proto_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                shorty = self.get_string(item.shorty_idx)
                ret = self.get_string(item.return_type_idx)
                f.write(' ("%s", "%s"' % (shorty, ret))
                parameters = item.get_parameters()
                if parameters:
                    f.write(', (')
                    # NOTE(review): this inner 'i' shadows the outer
                    # enumerate index; harmless today because the outer
                    # value is re-bound each iteration, but rename if the
                    # outer index is ever used after this loop.
                    for (i, type_id) in enumerate(parameters.list):
                        if i > 0:
                            f.write(', ')
                        f.write(self.get_string(type_id))
                    f.write(')')
                else:
                    f.write(', ()')
                f.write(')\n')
    def dump_field_ids(self, options, f=sys.stdout):
        '''Dump the field_ids table with resolved class, type and name
        strings.'''
        field_ids = self.get_field_ids()
        if field_ids:
            f.write('\nfield_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(field_id_item.get_table_header())
            for (i, item) in enumerate(field_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s", "%s", "%s")\n' % (
                    self.get_typename(item.class_idx),
                    self.get_typename(item.type_idx),
                    self.get_string(item.name_idx)))
def dump_method_ids(self, options, f=sys.stdout):
method_ids = self.get_method_ids()
if method_ids:
f.write('\nmethod_ids:\n')
f.write(' ' * (5 + 1))
f.write(method_id_item.get_table_header())
for (i, item) in enumerate(method_ids):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
f.write(' ("%s", "%s", "%s")\n' % (
self.get_typename(item.class_idx),
self.get_proto_shorty(item.proto_idx),
self.get_string(item.name_idx)))
def dump_class_defs(self, options, f=sys.stdout):
class_defs = self.get_class_defs()
if class_defs:
f.write('\nclass_defs:\n')
f.write(' ' * (5 + 1))
f.write(class_def_item.get_table_header())
for (i, item) in enumerate(class_defs):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
f.write(' ("%s")' % (self.get_typename(item.class_idx)))
f.write('\n')
def dump_call_site_ids(self, options, f=sys.stdout):
call_site_ids = self.get_call_site_ids()
if call_site_ids:
f.write('\ncall_site_ids:\n')
f.write(' ' * (5 + 1))
for (i, item) in enumerate(call_site_ids):
f.write('[%3u] %#8.8x\n' % (i, item))
def dump_method_handle_items(self, options, f=sys.stdout):
method_handle_items = self.get_method_handle_items()
if method_handle_items:
f.write('\nmethod_handle_items:\n')
f.write(' ' * (5 + 1))
for (i, item) in enumerate(method_handle_items):
f.write('[%3u] ' % (i))
item.dump(f=f)
f.write('\n')
def dump_code(self, options, f=sys.stdout):
classes = self.get_classes()
if classes:
for cls in classes:
if cls.is_abstract():
continue
cls.dump(f=f)
methods = cls.get_methods()
dc = options.dump_code or options.dump_all
ddi = options.debug or options.dump_all
for method in methods:
if options.dump_code or options.dump_all:
method.dump(f=f, dump_code=dc, dump_debug_info=ddi)
f.write('\n')
def dump_code_items(self, options, f=sys.stdout):
code_items = self.get_code_items()
if code_items:
for (i, code_item) in enumerate(code_items):
f.write('code_item[%u]:\n' % (i))
code_item.dump(f=f)
def dump(self, options, f=sys.stdout):
self.dump_header(options, f)
f.write('\n')
self.dump_map_list(options, f)
self.dump_string_ids(options, f)
self.dump_type_ids(options, f)
self.dump_proto_ids(options, f)
self.dump_field_ids(options, f)
self.dump_method_ids(options, f)
self.dump_class_defs(options, f)
self.dump_call_site_ids(options, f)
self.dump_method_handle_items(options, f)
self.dump_code(options, f)
self.dump_code_items(options, f)
def sign_extending(value, bit_width):
    '''Sign-extend an unsigned bit_width-bit value to a Python int.'''
    sign_bit = 1 << (bit_width - 1)
    if value & sign_bit:
        # Negative: subtract 2**bit_width (two's complement).
        return value - (sign_bit << 1)
    return value
def get_signed_hex_offset_as_str(signed_offset, width):
    '''Format signed_offset as an explicitly signed, zero-padded hex
    string with "width" digits. Width must be 2, 4 or 8.'''
    hex_formats = {2: ('%2.2x', 0xff),
                   4: ('%4.4x', 0xffff),
                   8: ('%8.8x', 0xffffffff)}
    if width not in hex_formats:
        raise ValueError("only sizes of 2 4 or 8 are supported")
    fmt, mask = hex_formats[width]
    sign = '-' if signed_offset < 0 else '+'
    return sign + fmt % (abs(signed_offset) & mask)
class Opcode(object):
    '''Base class for decoded DEX opcodes.

    Subclasses supply the class attributes "ops" (op byte -> mnemonic),
    "num_code_units", "max_regs" and "extra_data", and typically set
    self.reg or self.regs in __init__.'''
    def __init__(self, inst):
        self.inst = inst
    def check_encoding(self, f=sys.stdout):
        '''Verify that this instruction can't be encoded more efficiently'''
        return 0  # Return zero to indicate we can't save any bytes
    def new_encoding(self, f=sys.stdout):
        '''Look for bytes we can save by making new opcodes that are encoded
        as unsigned, or other optimizations'''
        return 0  # Return zero to indicate we can't save any bytes
    def get_op(self):
        '''Return the raw op byte of the underlying instruction.'''
        return self.inst.get_op()
    def get_name(self):
        '''Return the mnemonic for this instruction's op byte.'''
        op = self.get_op()
        return self.ops[op]
    def get_num_code_units(self):
        return self.num_code_units
    def regs_are_sequential(self):
        '''Return True if self.regs holds consecutive register numbers.

        Bug fix: the previous implementation never advanced prev_reg, so
        any run longer than two registers (e.g. [1, 2, 3]) was wrongly
        reported as non-sequential.'''
        if len(self.regs) <= 1:
            return True
        for i in range(1, len(self.regs)):
            if self.regs[i - 1] + 1 != self.regs[i]:
                return False
        return True
class Opcode00(Opcode):
    '''The "nop" opcode, which also serves as the container for the three
    inline data payloads (packed-switch, sparse-switch, fill-array-data);
    the payload kind is selected by the instruction's AA byte.'''
    ops = {0x00: 'nop'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # AA byte: 0 = plain nop, 1 = packed-switch payload,
        # 2 = sparse-switch payload, 3 = fill-array-data payload.
        self.nature = inst.get_AA()
        if self.nature == 0:
            pass # NOP
        elif self.nature == 1:
            # packed-switch: first_key followed by "size" branch targets.
            self.size = code_units.get_code_unit()
            self.first_key = code_units.get_int()
            self.targets = list()
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 2:
            # sparse-switch: "size" keys followed by "size" targets.
            self.size = code_units.get_code_unit()
            self.keys = list()
            self.targets = list()
            for i in range(self.size):
                self.keys.append(code_units.get_int())
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 3:
            # fill-array-data: element bytes re-encoded into a
            # little-endian buffer kept in self.data for dumping.
            self.element_width = code_units.get_code_unit()
            self.size = code_units.get_uint()
            num_code_units = int((self.size * self.element_width + 1) / 2)
            encoder = file_extract.FileEncode(StringIO.StringIO(), 'little', 4)
            for i in range(num_code_units):
                encoder.put_uint16(code_units.get_code_unit())
            encoder.seek(0)
            self.data = encoder.file.getvalue()
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def get_name(self):
        '''Return the payload-specific mnemonic for this nop/payload.'''
        if self.nature == 0:
            return self.ops[0]
        elif self.nature == 1:
            return 'packed-switch-payload'
        elif self.nature == 2:
            return 'sparse-switch-payload'
        elif self.nature == 3:
            return 'fill-array-data-payload'
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def get_num_code_units(self):
        '''Return the number of code units this nop/payload occupies.'''
        if self.nature == 0:
            return 1
        elif self.nature == 1:
            op_count = 1
            size_count = 1
            first_key_count = 2
            keys_count = self.size * 2
            return op_count + size_count + first_key_count + keys_count
        elif self.nature == 2:
            op_count = 1
            size_count = 1
            keys_and_targets_count = self.size * 4
            return op_count + size_count + keys_and_targets_count
        elif self.nature == 3:
            op_count = 1
            element_width_count = 2
            # NOTE(review): self.data is a byte string, so len(self.data)
            # counts bytes while the other terms count 16-bit code
            # units -- verify whether this mixes units.
            return op_count + element_width_count + len(self.data)
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def dump(self, f=sys.stdout):
        '''Pretty-print the nop or its switch/array payload table.'''
        if self.nature == 0:
            f.write('%s' % (self.get_name()))
        elif self.nature == 1:
            f.write('packed-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, target) in enumerate(self.targets):
                f.write('[%3u] %+8.8x %+8.8x\n' %
                        (i, self.first_key + i, target))
        elif self.nature == 2:
            f.write('sparse-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, key) in enumerate(self.keys):
                f.write('[%3u] %+8.8x %+8.8x\n' % (i, key, self.targets[i]))
        elif self.nature == 3:
            f.write('fill-array-data-payload (elem_width = %u, size = %u)\n' %
                    (self.element_width, self.size))
            file_extract.dump_memory(0, self.data, self.element_width, f)
    def emulate(self, emulator):
        # Payload pseudo-instructions execute as a no-op.
        pass
class Opcode01(Opcode):
    '''Decoder for the "move" instruction.'''
    ops = {0x01: 'move'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode02(Opcode):
    '''Decoder for the "move/from16" instruction.'''
    ops = {0x02: 'move/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]
    def check_encoding(self, f=sys.stdout):
        # The shorter "move" form needs both registers to fit in 4 bits.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move/from16" can be encoded as a "move"'
                    ' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode03(Opcode):
    '''Decoder for the "move/16" instruction.'''
    ops = {0x03: 'move/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]
    def check_encoding(self, f=sys.stdout):
        # Prefer the shortest form that still holds both registers.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move/16" can be encoded as a "move"'
                    ' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if self.regs[0] <= UINT8_MAX:
            f.write('warning: "move/16" can be encoded as a "move/from16"'
                    ' more efficiently as its first register is <= %u\n' %
                    (UINT8_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode04(Opcode):
    '''Decoder for the "move-wide" instruction.'''
    ops = {0x04: 'move-wide'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode05(Opcode):
    '''Decoder for the "move-wide/from16" instruction.'''
    ops = {0x05: 'move-wide/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]
    def check_encoding(self, f=sys.stdout):
        # The 1-unit "move-wide" form fits when both registers are 4-bit.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move-wide/from16" can be encoded as a '
                    '"move-wide" more efficiently as its registers are '
                    'both <= %u\n' % (UINT4_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode06(Opcode):
    '''Decoder for the "move-wide/16" instruction.'''
    ops = {0x06: 'move-wide/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]
    def check_encoding(self, f=sys.stdout):
        # Prefer the shortest form that still holds both registers.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move-wide/16" can be encoded as a "move-wide" '
                    'more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if self.regs[0] <= UINT8_MAX:
            f.write('warning: "move-wide/16" can be encoded as a '
                    '"move-wide/from16" more efficiently as its first '
                    'register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode07(Opcode):
    '''Decoder for the "move-object" instruction.'''
    ops = {0x07: 'move-object'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode08(Opcode):
    '''Decoder for the "move-object/from16" instruction.'''
    ops = {0x08: 'move-object/from16 '}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]
    def check_encoding(self, f=sys.stdout):
        # The 1-unit "move-object" form fits when both registers are 4-bit.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move-object/from16" can be encoded as a '
                    '"move-object" more efficiently as its registers are '
                    'both <= %u\n' % (UINT4_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode09(Opcode):
    '''Decoder for the "move-object/16" instruction.'''
    ops = {0x09: 'move-object/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]
    def check_encoding(self, f=sys.stdout):
        # Prefer the shortest form that still holds both registers.
        if max(self.regs) <= UINT4_MAX:
            f.write('warning: "move-object/16" can be encoded as a '
                    '"move-object" more efficiently as its registers '
                    'are both <= %u\n' % (UINT4_MAX))
            return 4
        if self.regs[0] <= UINT8_MAX:
            f.write('warning: "move-object/16" can be encoded as a '
                    '"move-object/from16" more efficiently as its first '
                    'register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0A_0D(Opcode):
    '''Decoder for the move-result / move-exception family.'''
    ops = {
        0x0a: 'move-result',
        0x0b: 'move-result-wide',
        0x0c: 'move-result-object',
        0x0d: 'move-exception'
    }
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0E(Opcode):
    '''Decoder for the "return-void" instruction (no operands).'''
    ops = {0x0e: 'return-void'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
    def dump(self, f=sys.stdout):
        f.write(self.get_name())
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0F(Opcode):
    '''Decoder for the "return" instruction.'''
    ops = {0x0f: 'return'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode10(Opcode):
    '''Decoder for the "return-wide" instruction.'''
    ops = {0x10: 'return-wide'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode11(Opcode):
    '''Decoder for the "return-object" instruction.'''
    ops = {0x11: 'return-object'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode12(Opcode):
    '''Decoder for the "const/4" instruction.'''
    ops = {0x12: 'const/4'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'n'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_A()
        # The immediate lives in the top 4 bits of the first code unit.
        self.imm = sign_extending(inst[0] >> 12, 4)
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode13(Opcode):
    '''Decoder for the "const/16" instruction.'''
    ops = {0x13: 'const/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)
    def check_encoding(self, f=sys.stdout):
        reg_fits = self.reg <= UINT4_MAX
        imm_fits = INT4_MIN <= self.imm <= INT4_MAX
        if reg_fits and imm_fits:
            f.write('warning: "const/16" can be encoded as a "const/4" more ')
            f.write('efficiently as its register is <= %u and ' % (UINT4_MAX))
            f.write('(%i <= %i <= %i)\n' % (INT4_MIN, self.imm, INT4_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        in_u4_window = INT4_MAX < self.imm <= (INT4_MAX + UINT4_MAX)
        if self.reg <= UINT4_MAX and in_u4_window:
            f.write('"const/16" could be encoded as a new "const/u4" stores '
                    'a 4 bit unsigned offset from +8 for a constant range '
                    'of [8-24):\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode14(Opcode):
    '''Decoder for the "const" instruction (32-bit literal).'''
    ops = {0x14: 'const'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint32(1)
    def check_encoding(self, f=sys.stdout):
        reg_fits = self.reg <= UINT8_MAX
        imm_fits = INT16_MIN <= self.imm <= INT16_MAX
        if reg_fits and imm_fits:
            f.write('warning: "const" can be encoded as a "const/16" more ')
            f.write('efficiently as its register is < %u ' % (UINT8_MAX))
            f.write('and (%i <= %i <= %i)\n' % (INT16_MIN, self.imm,
                                                INT16_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        if INT16_MAX < self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const" could be encoded as a new "const/u16" stores a '
                    '16 bit unsigned offset from 32768 instead of a 16 bit '
                    'signed value\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode15(Opcode):
    '''Decoder for the "const/high16" instruction.'''
    ops = {0x15: 'const/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # The code unit supplies the high 16 bits of the constant.
        self.imm = inst[1] << 16
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode16(Opcode):
    '''Decoder for the "const-wide/16" instruction.'''
    ops = {0x16: 'const-wide/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode17(Opcode):
    '''Decoder for the "const-wide/32" instruction.'''
    ops = {0x17: 'const-wide/32'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_sint32(1)
    def check_encoding(self, f=sys.stdout):
        '''Report if the shorter "const-wide/16" form would fit.

        Bug fix: the warning's format string has three conversions but
        was handed four values (a stray leading UINT8_MAX), raising
        TypeError whenever the warning fired. Matches the equivalent
        check in Opcode18.'''
        if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
            f.write('warning: "const-wide/32" can be encoded as a ')
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        '''Suggest a hypothetical unsigned 16-bit encoding when it fits.'''
        if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const-wide/32" could be encoded as a new ')
            f.write('"const-wide/u16" stores a 16 bit unsigned offset from ')
            f.write('32768 instead of a 16 bit signed value\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode18(Opcode):
    '''Decoder for the "const-wide/64" instruction.'''
    ops = {0x18: 'const-wide/64'}
    num_code_units = 5
    max_regs = 1
    extra_data = 'l'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint64(1)
    def check_encoding(self, f=sys.stdout):
        # Report the shortest const-wide form the literal would fit in.
        if INT16_MIN <= self.imm <= INT16_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 6
        if INT32_MIN <= self.imm <= INT32_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/32" more efficiently as (%i <= %i <= %i)\n' %
                    (INT32_MIN, self.imm, INT32_MAX))
            return 4
        return 0
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode19(Opcode):
    '''Decoder for the "const-wide/high16" instruction.'''
    ops = {0x19: 'const-wide/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # The code unit supplies the high 16 bits of the 64-bit constant.
        self.imm = sign_extending(inst[1], 16) << 48
    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode1A(Opcode):
    '''Decoder for the "const-string" instruction.'''
    ops = {0x1a: 'const-string'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst[1]
    def dump(self, f=sys.stdout):
        text = '%s v%u, string@%4.4x' % (self.get_name(), self.reg,
                                         self.string_idx)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1B(Opcode):
    '''Decoder for the "const-string/jumbo" instruction (32-bit string
    index).'''
    ops = {0x1b: 'const-string/jumbo'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, string@%8.8x' %
                (self.get_name(), self.reg, self.string_idx))
    def check_encoding(self, f=sys.stdout):
        '''Report if the 16-bit "const-string" form would fit.

        Bug fix: this previously tested self.signed_offset, an attribute
        this opcode never sets (copy/paste from a branch opcode), which
        raised AttributeError; the value to compare is the string index.'''
        if self.string_idx <= UINT16_MAX:
            f.write('warning: "const-string/jumbo" can be encoded as a ')
            f.write('"const-string" more efficiently as its offset is ')
            f.write('<= UINT16_MAX\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1C(Opcode):
    '''Decoder for the "const-class" instruction.'''
    ops = {0x1c: 'const-class'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        text = '%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1D(Opcode):
    '''Decoder for the "monitor-enter" instruction.'''
    ops = {0x1d: 'monitor-enter'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1E(Opcode):
    '''Decoder for the "monitor-exit" instruction.'''
    ops = {0x1e: 'monitor-exit'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1F(Opcode):
    '''Decoder for the "check-cast" instruction.'''
    ops = {0x1f: 'check-cast'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        text = '%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode20(Opcode):
    '''Decoder for the "instance-of" instruction.'''
    ops = {0x20: 'instance-of'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), dst, src, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode21(Opcode):
    '''Decoder for the "array-length" instruction.'''
    ops = {0x21: 'array-length'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode22(Opcode):
    '''Decoder for the "new-instance" instruction.'''
    ops = {0x22: 'new-instance'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        text = '%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode23(Opcode):
    '''Decoder for the "new-array" instruction.'''
    ops = {0x23: 'new-array'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        dst, size_reg = self.regs
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), dst, size_reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode24(Opcode):
    '''Decoder for the "filled-new-array" instruction.'''
    ops = {0x24: 'filled-new-array'}
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        arg_count = inst[0] >> 12
        self.type = inst[1]
        self.regs = list()
        # Up to five 4-bit register numbers are packed into inst[2] plus
        # the high nibble folded in from inst[0].
        packed_regs = inst[2] | ((inst[0] << 8) & 0xf0000)
        for _ in range(arg_count):
            self.regs.append(packed_regs & 0xf)
            packed_regs >>= 4
    def dump(self, f=sys.stdout):
        reg_list = ', '.join('v%u' % (reg) for reg in self.regs)
        f.write("%s {%s} type@%4.4x" % (self.get_name(), reg_list, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode25(Opcode):
    '''Decoder for the "filled-new-array/range" instruction.'''
    ops = {0x25: 'filled-new-array/range '}
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        arg_count = inst.get_AA()
        self.type = inst[1]
        first_reg = inst[2]
        # Range form: arg_count consecutive registers starting at first_reg.
        self.regs = [first_reg + i for i in range(arg_count)]
    def dump(self, f=sys.stdout):
        reg_list = ', '.join('v%u' % (reg) for reg in self.regs)
        f.write("%s {%s} type@%4.4x" % (self.get_name(), reg_list, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode26(Opcode):
    '''Decoder for the "fill-array-data" instruction.'''
    ops = {0x26: 'fill-array-data'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        target = self.inst.code_unit_idx + self.signed_offset
        offset_str = get_signed_hex_offset_as_str(self.signed_offset, 8)
        f.write('%s v%u, %8.8x // %s' % (self.get_name(), self.reg,
                                         target, offset_str))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode27(Opcode):
    '''Decoder for the "throw" instruction.'''
    ops = {0x27: 'throw'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        text = '%s v%u' % (self.get_name(), self.reg)
        f.write(text)
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode28(Opcode):
    '''Decoder for the "goto" instruction (8-bit branch offset).'''
    ops = {0x28: 'goto'}
    num_code_units = 1
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_signed_AA()
    def check_encoding(self, f=sys.stdout):
        '''Validate the branch offset; a zero offset is not encodable.

        Bug fix: the non-zero path previously fell off the end and
        returned None; every other check_encoding returns the number of
        bytes saved, so return 0 unconditionally.'''
        if self.signed_offset == 0:
            f.write('error: "goto" has a zero offset (invalid encoding)\n')
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode29(Opcode):
    '''Decoder for the "goto/16" instruction (16-bit branch offset).'''
    ops = {0x29: 'goto/16'}
    num_code_units = 2
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        target = self.inst.code_unit_idx + self.signed_offset
        f.write('%s %4.4x // %+i' % (self.get_name(), target,
                                     self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        if self.signed_offset == 0:
            f.write(
                'error: "goto/16" has a zero offset (invalid encoding)\n')
        elif INT8_MIN <= self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/16" can be encoded as a "goto" more '
                    'efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2A(Opcode):
    '''Decoder for the "goto/32" instruction (32-bit branch offset).'''
    ops = {0x2A: 'goto/32'}
    num_code_units = 3
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        target = self.inst.code_unit_idx + self.signed_offset
        f.write('%s %4.4x // %+i' % (self.get_name(), target,
                                     self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        # Only goto/32 may legally encode a zero offset.
        if self.signed_offset == 0:
            return 0
        if INT8_MIN <= self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto" more '
                    'efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        if INT16_MIN <= self.signed_offset <= INT16_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto/16" more '
                    'efficiently since (INT16_MIN <= offset <= INT16_MAX)\n')
            return 4
        return 0
    def new_encoding(self, f=sys.stdout):
        if INT16_MIN <= self.signed_offset <= INT16_MAX:
            return 0
        if INT24_MIN <= self.signed_offset <= INT24_MAX:
            f.write('"goto/32" could be encoded as a new "goto/16" where '
                    'that opcode uses the extra 8 bits in the first code '
                    'unit to provide a 24 bit branch range\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2B(Opcode):
    '''Decoder for the "packed-switch" instruction.'''
    ops = {0x2b: 'packed-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        target = self.inst.get_code_unit_index() + self.branch
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                                             target, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2C(Opcode):
    '''Decoder for the "sparse-switch" instruction.'''
    ops = {0x2c: 'sparse-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        target = self.inst.get_code_unit_index() + self.branch
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                                             target, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2D_31(Opcode):
    '''Decoder for the three-register compare instruction family.'''
    ops = {
        0x2d: 'cmpl-float (lt bias)',
        0x2e: 'cmpg-float (gt bias)',
        0x2f: 'cmpl-double (lt bias)',
        0x30: 'cmpg-double (gt bias)',
        0x31: 'cmp-long',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(),
                     inst.get_uint8_lo(1),
                     inst.get_uint8_hi(1)]
    def dump(self, f=sys.stdout):
        dst, lhs, rhs = self.regs
        f.write("%s v%u, v%u, v%u" % (self.get_name(), dst, lhs, rhs))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode32_37(Opcode):
    '''Decoder for the two-register conditional branch family.'''
    ops = {
        0x32: 'if-eq',
        0x33: 'if-ne',
        0x34: 'if-lt',
        0x35: 'if-ge',
        0x36: 'if-gt',
        0x37: 'if-le',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        target = self.inst.code_unit_idx + self.signed_offset
        f.write('%s v%u, v%u, %4.4x // %i' % (self.get_name(), self.regs[0],
                                              self.regs[1], target,
                                              self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode38_3D(Opcode):
    '''Decoder for the compare-with-zero conditional branch family.'''
    ops = {
        0x38: 'if-eqz',
        0x39: 'if-nez',
        0x3a: 'if-ltz',
        0x3b: 'if-gez',
        0x3c: 'if-gtz',
        0x3d: 'if-lez',
    }
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        target = self.signed_offset + self.inst.code_unit_idx
        offset_str = get_signed_hex_offset_as_str(self.signed_offset, 4)
        f.write('%s v%u, %4.4x // %s' % (self.get_name(), self.reg,
                                         target, offset_str))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode44_51(Opcode):
    '''Decoder for the array get/put (aget/aput) instruction family.'''
    ops = {
        0x44: 'aget',
        0x45: 'aget-wide',
        0x46: 'aget-object',
        0x47: 'aget-boolean',
        0x48: 'aget-byte',
        0x49: 'aget-char',
        0x4a: 'aget-short',
        0x4b: 'aput',
        0x4c: 'aput-wide',
        0x4d: 'aput-object',
        0x4e: 'aput-boolean',
        0x4f: 'aput-byte',
        0x50: 'aput-char',
        0x51: 'aput-short',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(),
                     inst.get_uint8_lo(1),
                     inst.get_uint8_hi(1)]
    def dump(self, f=sys.stdout):
        value_reg, array_reg, index_reg = self.regs
        f.write("%s v%u, v%u, v%u" % (self.get_name(), value_reg,
                                      array_reg, index_reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode52_5f(Opcode):
    """Instance field accessors (iget*/iput*), DEX format 22c."""
    ops = {
        0x52: 'iget',
        0x53: 'iget-wide',
        0x54: 'iget-object',
        0x55: 'iget-boolean',
        0x56: 'iget-byte',
        0x57: 'iget-char',
        0x58: 'iget-short',
        0x59: 'iput',
        0x5a: 'iput-wide',
        0x5b: 'iput-object',
        0x5c: 'iput-boolean',
        0x5d: 'iput-byte',
        0x5e: 'iput-char',
        0x5f: 'iput-short',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Value register (A) and object register (B) share the first unit;
        # the field index is the entire second code unit.
        self.regs = [inst.get_A(), inst.get_B()]
        self.field = inst[1]

    def dump(self, f=sys.stdout):
        value_reg, object_reg = self.regs
        f.write("%s v%u, v%u, field@%4.4x" %
                (self.get_name(), value_reg, object_reg, self.field))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode60_6d(Opcode):
    """Static field accessors (sget*/sput*), DEX format 21c."""
    ops = {
        0x60: 'sget',
        0x61: 'sget-wide',
        0x62: 'sget-object',
        0x63: 'sget-boolean',
        0x64: 'sget-byte',
        0x65: 'sget-char',
        0x66: 'sget-short',
        0x67: 'sput',
        0x68: 'sput-wide',
        0x69: 'sput-object',
        0x6a: 'sput-boolean',
        0x6b: 'sput-byte',
        0x6c: 'sput-char',
        0x6d: 'sput-short',
    }
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # vAA holds the value; the 16-bit field index follows.
        self.reg = inst.get_AA()
        self.field = inst.get_uint16(1)

    def dump(self, f=sys.stdout):
        f.write("%s v%u, field@%4.4x" % (self.get_name(), self.reg,
                                         self.field))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
# Module-level tallies updated by Opcode6E_72.new_encoding() and reported in
# main()'s multi-file summary: how many invoke-kind instructions could (or
# could not) use the proposed "/min-range" encoding.
can_use_new_encoding = 0
cant_use_new_encoding = 0
class Opcode6E_72(Opcode):
    # invoke-kind instructions (invoke-virtual .. invoke-interface),
    # DEX format 35c: up to five argument registers packed into nibbles,
    # plus a 16-bit method index.
    ops = {
        0x6e: 'invoke-virtual',
        0x6f: 'invoke-super',
        0x70: 'invoke-direct',
        0x71: 'invoke-static',
        0x72: 'invoke-interface',
    }
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    format = '35c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # High nibble (A) of the first code unit is the argument count (0-5).
        arg_count = inst[0] >> 12
        self.method_idx = inst[1]
        self.regs = list()
        # Registers C..F are the four nibbles of the third code unit; the
        # fifth register (G) lives in bits 8..11 of the first code unit and
        # is shifted up here to become nibble 4 of the combined value.
        regs = inst[2] | ((inst[0] << 8) & 0xf0000)
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4

    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} method@%4.4x" % (self.method_idx))

    def new_encoding(self, f=sys.stdout):
        # Report whether this invoke could use a hypothetical "/min-range"
        # encoding (sequential registers; first register and register count
        # each fit in 4 bits), and update the module-level tallies either way.
        if (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX):
            global can_use_new_encoding
            can_use_new_encoding += 1
            name = self.get_name()
            f.write('"%s" can be encoded as "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        global cant_use_new_encoding
        cant_use_new_encoding += 1
        return 0

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode74_78(Opcode):
    """invoke-kind/range instructions, DEX format 3rc: a contiguous run of
    argument registers starting at the register given in the third unit."""
    ops = {
        0x74: 'invoke-virtual/range',
        0x75: 'invoke-super/range',
        0x76: 'invoke-direct/range',
        0x77: 'invoke-static/range',
        0x78: 'invoke-interface/range',
    }
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        arg_count = inst.get_AA()
        self.method_idx = inst[1]
        first_reg = inst[2]
        # Expand the register range into an explicit list.
        self.regs = [first_reg + n for n in range(arg_count)]

    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        f.write(', '.join('v%u' % (reg) for reg in self.regs))
        f.write("} method@%4.4x" % (self.method_idx))

    def new_encoding(self, f=sys.stdout):
        # Report whether this range invoke would fit a "/min-range" encoding:
        # sequential registers with first register and count each in 4 bits.
        fits = (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX)
        if fits:
            name = self.get_name()
            f.write('"%s" can be encoded as a "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        return 0

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode7B_8F(Opcode):
    """Unary ops: negate/not and primitive conversions, DEX format 12x."""
    ops = {
        0x7b: 'neg-int',
        0x7c: 'not-int',
        0x7d: 'neg-long',
        0x7e: 'not-long',
        0x7f: 'neg-float',
        0x80: 'neg-double',
        0x81: 'int-to-long',
        0x82: 'int-to-float',
        0x83: 'int-to-double',
        0x84: 'long-to-int',
        0x85: 'long-to-float',
        0x86: 'long-to-double',
        0x87: 'float-to-int',
        0x88: 'float-to-long',
        0x89: 'float-to-double',
        0x8a: 'double-to-int',
        0x8b: 'double-to-long',
        0x8c: 'double-to-float',
        0x8d: 'int-to-byte',
        0x8e: 'int-to-char',
        0x8f: 'int-to-short',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Destination (A) and source (B) nibbles of the single code unit.
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode90_AF(Opcode):
    """Three-address binary ops (binop vAA, vBB, vCC), DEX format 23x."""
    ops = {
        0x90: 'add-int',
        0x91: 'sub-int',
        0x92: 'mul-int',
        0x93: 'div-int',
        0x94: 'rem-int',
        0x95: 'and-int',
        0x96: 'or-int',
        0x97: 'xor-int',
        0x98: 'shl-int',
        0x99: 'shr-int',
        0x9a: 'ushr-int',
        0x9b: 'add-long',
        0x9c: 'sub-long',
        0x9d: 'mul-long',
        0x9e: 'div-long',
        0x9f: 'rem-long',
        0xa0: 'and-long',
        0xa1: 'or-long',
        0xa2: 'xor-long',
        0xa3: 'shl-long',
        0xa4: 'shr-long',
        0xa5: 'ushr-long',
        0xa6: 'add-float',
        0xa7: 'sub-float',
        0xa8: 'mul-float',
        0xa9: 'div-float',
        0xaa: 'rem-float',
        0xab: 'add-double',
        0xac: 'sub-double',
        0xad: 'mul-double',
        0xae: 'div-double',
        0xaf: 'rem-double',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Destination vAA, then source registers vBB (low byte) and vCC
        # (high byte) of the second code unit.
        self.regs = [inst.get_AA(), inst.get_uint8_lo(1), inst.get_uint8_hi(1)]

    def dump(self, f=sys.stdout):
        dst, src1, src2 = self.regs
        f.write("%s v%u, v%u, v%u" % (self.get_name(), dst, src1, src2))

    def opIsCommutative(self):
        '''Return True if the operation is commutative'''
        # add/mul/and/or/xor for int and long; add/mul for float and double.
        return self.get_op() in (
            0x90, 0x92, 0x95, 0x96, 0x97,   # int
            0x9b, 0x9d, 0xa0, 0xa1, 0xa2,   # long
            0xa6, 0xa8,                     # float
            0xab, 0xad)                     # double

    def check_encoding(self, f=sys.stdout):
        # Report when the instruction would fit the shorter "/2addr" form:
        # destination equal to one source, with both encodable in 4 bits.
        dst, src1, src2 = self.regs
        if dst == src1 and dst <= UINT4_MAX and src2 <= UINT4_MAX:
            name = self.get_name()
            f.write('warning: "%s" can be encoded more efficiently ' % (name))
            f.write('as "%s/2addr v%u, v%u"\n' % (name, dst, src2))
            return 2
        if (dst == src2 and dst <= UINT4_MAX and src1 <= UINT4_MAX and
                self.opIsCommutative()):
            name = self.get_name()
            f.write('warning: "%s" is commutative and can be ' % (name))
            f.write('encoded more efficiently as "%s/2addr v%u, v%u"\n' %
                    (name, dst, src1))
            return 2
        return 0  # Return zero to indicate we can't save any bytes

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeB0_CF(Opcode):
    """Two-address binary ops (binop/2addr vA, vB), DEX format 12x.

    vA is both the first source and the destination.
    """
    ops = {
        0xb0: 'add-int/2addr',
        0xb1: 'sub-int/2addr',
        0xb2: 'mul-int/2addr',
        0xb3: 'div-int/2addr',
        0xb4: 'rem-int/2addr',
        0xb5: 'and-int/2addr',
        0xb6: 'or-int/2addr',
        0xb7: 'xor-int/2addr',
        0xb8: 'shl-int/2addr',
        0xb9: 'shr-int/2addr',
        0xba: 'ushr-int/2addr',
        0xbb: 'add-long/2addr',
        0xbc: 'sub-long/2addr',
        0xbd: 'mul-long/2addr',
        0xbe: 'div-long/2addr',
        0xbf: 'rem-long/2addr',
        0xc0: 'and-long/2addr',
        0xc1: 'or-long/2addr',
        0xc2: 'xor-long/2addr',
        0xc3: 'shl-long/2addr',
        0xc4: 'shr-long/2addr',
        0xc5: 'ushr-long/2addr',
        0xc6: 'add-float/2addr',
        0xc7: 'sub-float/2addr',
        0xc8: 'mul-float/2addr',
        0xc9: 'div-float/2addr',
        0xca: 'rem-float/2addr',
        0xcb: 'add-double/2addr',
        0xcc: 'sub-double/2addr',
        0xcd: 'mul-double/2addr',
        0xce: 'div-double/2addr',
        # Bug fix: the 0xcf mnemonic had a trailing space
        # ('rem-double/2addr '), which produced a double space in dump()
        # output and broke exact-match comparisons against the name.
        0xcf: 'rem-double/2addr',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Destination/first-source (A) and second source (B) nibbles.
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())

    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeD0_D7(Opcode):
    """Binary ops with a signed 16-bit literal (binop/lit16 vA, vB, #+CCCC),
    DEX format 22s."""
    ops = {
        0xd0: 'add-int/lit16',
        0xd1: 'rsub-int/lit16',
        0xd2: 'mul-int/lit16',
        0xd3: 'div-int/lit16',
        0xd4: 'rem-int/lit16',
        0xd5: 'and-int/lit16',
        0xd6: 'or-int/lit16',
        0xd7: 'xor-int/lit16',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 's'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Destination (A) and source (B) nibbles of the first code unit.
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        # The literal is the sign-extended second code unit.
        self.imm = sign_extending(inst[1], 16)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))

    def emulate(self, emulator):
        # Bug fix: this previously did emulator.write_register(self.reg,
        # self.imm), apparently copy-pasted from a const opcode. self.reg is
        # never assigned (this class uses self.regs), so emulate() always
        # crashed with AttributeError -- and simply storing the literal would
        # be wrong for a binary op anyway. Report "not supported" like the
        # sibling opcode classes until real emulation is implemented.
        raise ValueError('emulate not supported')
class OpcodeD8_E2(Opcode):
    """Binary ops with a signed 8-bit literal (binop/lit8 vAA, vBB, #+CC),
    DEX format 22b."""
    ops = {
        0xd8: 'add-int/lit8',
        0xd9: 'rsub-int/lit8',
        0xda: 'mul-int/lit8',
        0xdb: 'div-int/lit8',
        0xdc: 'rem-int/lit8',
        0xdd: 'and-int/lit8',
        0xde: 'or-int/lit8',
        0xdf: 'xor-int/lit8',
        0xe0: 'shl-int/lit8',
        0xe1: 'shr-int/lit8',
        0xe2: 'ushr-int/lit8',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'b'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # vAA from the first unit; vBB is the low byte of the second unit and
        # the sign-extended 8-bit literal is its high byte.
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.imm = sign_extending(inst.get_uint8_hi(1), 8)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))

    def emulate(self, emulator):
        # Bug fix: this previously did emulator.write_register(self.reg,
        # self.imm), copy-pasted from a const opcode. self.reg is never
        # assigned (this class uses self.regs), so emulate() always crashed
        # with AttributeError -- and storing the raw literal would be wrong
        # for a binary op anyway. Report "not supported" like the sibling
        # opcode classes until real emulation is implemented.
        raise ValueError('emulate not supported')
class OpcodeFA(Opcode):
    # invoke-polymorphic, DEX format 45cc.  Decoding below has never been
    # exercised against a real instruction (see the deliberate raise).
    ops = {0xfa: 'invoke-polymorphic'}
    num_code_units = 4
    max_regs = 5
    extra_data = 'cc'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Deliberate bail-out: the author wanted a real sample of this opcode
        # before trusting the decode logic.  Everything after this raise is
        # currently unreachable.
        raise ValueError('debug this when we find one of these')
        arg_count = inst[0] >> 12
        self.method_ref_idx = inst[1]
        self.method_hdl_ref = inst[2]
        self.regs = list()
        # Registers C..F in the fourth unit, with the fifth register (G)
        # taken from bits 8..11 of the first unit -- same packing as 35c.
        regs = inst[3] | ((inst[0] << 8) & 0xf0000)
        self.proto = inst[4]
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4

    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        # NOTE(review): self.type is never assigned anywhere in this class,
        # so this line would raise AttributeError if ever reached --
        # presumably self.method_ref_idx or self.proto was intended; confirm
        # before enabling this opcode.
        f.write("} type@%4.4x" % (self.type))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class CodeUnits(Opcode):
    """Forward cursor over an array of 16-bit code units.

    NOTE: inherits Opcode for historical reasons but never calls
    Opcode.__init__ and uses none of its behavior.
    """

    def __init__(self, code_units):
        self.code_units = code_units
        self.idx = 0

    def index_is_valid(self):
        """True while the cursor has not run off the end."""
        return self.idx < len(self.code_units)

    def get_index(self):
        """Current cursor position (in code units)."""
        return self.idx

    def peek_code_unit(self, idx):
        """Read the unit at an absolute index without moving the cursor."""
        return self.code_units[idx]

    def get_int(self):
        """Consume two units and return them as a signed 32-bit value."""
        return sign_extending(self.get_uint(), 32)

    def get_uint(self):
        """Consume two units: low half first, then high half."""
        low = self.get_code_unit()
        high = self.get_code_unit()
        return low | (high << 16)

    def get_code_unit(self):
        """Consume and return the unit under the cursor."""
        unit = self.code_units[self.idx]
        self.idx += 1
        return unit
def swap16(u):
    """Byte-swap the low 16 bits of u (e.g. 0x1234 -> 0x3412)."""
    low_byte = (u >> 8) & 0x00ff
    high_byte = (u << 8) & 0xff00
    return low_byte | high_byte
class DexInstruction(object):
    """One decoded DEX instruction: its raw 16-bit code units plus the
    Opcode subclass instance that knows how to dump/emulate it."""

    # Maps opcode byte (0..255) to the Opcode subclass that handles it.
    # Populated exactly once by initialize().
    opcode_defs = list()

    @classmethod
    def initialize(cls):
        """Register every Opcode subclass in cls.opcode_defs.

        Must be called exactly once at import time; a second call would find
        the slots already filled and raise ValueError.
        """
        opcode_classes = [
            Opcode00,
            Opcode01,
            Opcode02,
            Opcode03,
            Opcode04,
            Opcode05,
            Opcode06,
            Opcode07,
            Opcode08,
            Opcode09,
            Opcode0A_0D,
            Opcode0E,
            Opcode0F,
            Opcode10,
            Opcode11,
            Opcode12,
            Opcode13,
            Opcode14,
            Opcode15,
            Opcode16,
            Opcode17,
            Opcode18,
            Opcode19,
            Opcode1A,
            Opcode1B,
            Opcode1C,
            Opcode1D,
            Opcode1E,
            Opcode1F,
            Opcode20,
            Opcode21,
            Opcode22,
            Opcode23,
            Opcode24,
            Opcode25,
            Opcode26,
            Opcode27,
            Opcode28,
            Opcode29,
            Opcode2A,
            Opcode2B,
            Opcode2C,
            Opcode2D_31,
            Opcode32_37,
            Opcode38_3D,
            Opcode44_51,
            Opcode52_5f,
            Opcode60_6d,
            Opcode6E_72,
            Opcode74_78,
            Opcode7B_8F,
            Opcode90_AF,
            OpcodeB0_CF,
            OpcodeD0_D7,
            OpcodeD8_E2,
            OpcodeFA,
        ]
        for i in range(256):
            cls.opcode_defs.append(None)
        for opcode_class in opcode_classes:
            for op in opcode_class.ops:
                if cls.opcode_defs[op] is None:
                    cls.opcode_defs[op] = opcode_class
                else:
                    raise ValueError("registering the same opcode twice: "
                                     "%#2.2x in %s" % (op, str(opcode_class)))

    def __init__(self):
        self.code_unit_idx = -1   # offset of this instruction's first unit
        self.code_units = None    # list of 16-bit units; set by decode()

    def dump(self, f=sys.stdout, suffix='\n'):
        """Write 'offset: raw-code-units disassembly' to f."""
        f.write('%4.4x:' % (self.code_unit_idx))
        for code_unit in self.code_units:
            f.write(' %4.4x' % (swap16(code_unit)))
        num_code_units = len(self.code_units)
        # Pad so the disassembly column lines up for instructions shorter
        # than five code units.
        # NOTE(review): a single-space pad looks too narrow to align with a
        # 5-character ' %4.4x' column -- the pad string may have lost
        # characters somewhere; verify against intended output.
        if num_code_units < 5:
            pad = 5 - num_code_units
            for i in range(pad):
                f.write(' ')
        f.write(' ')
        self.instruction.dump(f=f)
        if suffix:
            f.write(suffix)

    def check_encoding(self, f=sys.stdout):
        """Ask the opcode whether it could be encoded smaller; returns the
        number of bytes that could be saved (0 if none) and dumps the
        instruction when savings exist."""
        bytes_saved = self.instruction.check_encoding(f)
        if bytes_saved:
            self.dump(f)
        return bytes_saved

    def new_encoding(self, f=sys.stdout):
        """Like check_encoding(), but for hypothetical new encodings."""
        bytes_saved = self.instruction.new_encoding(f)
        if bytes_saved:
            self.dump(f)
        return bytes_saved

    def get_code_unit_index(self):
        return self.code_unit_idx

    def decode(self, code_units):
        """Consume one full instruction from a CodeUnits cursor and build the
        matching Opcode subclass instance."""
        self.code_unit_idx = code_units.get_index()
        self.code_units = list()
        self.code_units.append(code_units.get_code_unit())
        op = self.get_op()
        opcode_class = self.opcode_defs[op]
        if opcode_class is None:
            raise ValueError("unsupported opcode %#4.4x" % (swap16(self[0])))
        for i in range(1, opcode_class.num_code_units):
            self.code_units.append(code_units.get_code_unit())
        self.instruction = opcode_class(self, code_units)

    def get_name(self):
        return self.instruction.get_name()

    def get_num_code_units(self):
        return self.instruction.get_num_code_units()

    def get_op(self):
        '''Return the 1 byte op field that tells us what instruction this is'''
        return self.code_units[0] & 0xff

    def get_A(self):
        '''Get the 4 bit value of A'''
        return (self.code_units[0] >> 8) & 0xf

    def get_B(self):
        '''Get the 4 bit value of B'''
        return (self.code_units[0] >> 12) & 0xf

    def get_AA(self):
        '''Get the 8 bit value of AA from the byte next to the Op'''
        return self.get_uint8_hi(0)

    def get_signed_AA(self):
        return sign_extending(self.get_AA(), 8)

    def get_uint8_lo(self, idx):
        return self.code_units[idx] & 0xff

    def get_sint8_lo(self, idx):
        # Bug fix: previously called self.get_uint8_lo() without the required
        # idx argument, which raised TypeError whenever this was used.
        return sign_extending(self.get_uint8_lo(idx), 8)

    def get_uint8_hi(self, idx):
        return (self.code_units[idx] >> 8) & 0xff

    def get_sint8_hi(self, idx):
        # Bug fix: was self.get_uint8_hi() with the idx argument missing.
        return sign_extending(self.get_uint8_hi(idx), 8)

    def get_uint16(self, idx):
        return self.code_units[idx]

    def get_sint16(self, idx):
        # Bug fix: was self.get_uint16() with the idx argument missing.
        return sign_extending(self.get_uint16(idx), 16)

    def get_uint32(self, idx):
        return self.code_units[idx + 1] << 16 | self.code_units[idx]

    def get_sint32(self, idx):
        return sign_extending(self.get_uint32(idx), 32)

    def get_uint64(self, idx):
        return (self.code_units[idx + 3] << 48 |
                self.code_units[idx + 2] << 32 |
                self.code_units[idx + 1] << 16 |
                self.code_units[idx])

    def get_sint64(self, idx):
        return sign_extending(self.get_uint64(idx), 64)

    def __len__(self):
        '''Overload the length operator to give out the number of code units'''
        return len(self.code_units)

    def __getitem__(self, key):
        '''Overload the [] operator to give out code units'''
        return self.code_units[key]

    def emulate(self, emulator):
        self.instruction.emulate(emulator)
DexInstruction.initialize()
def get_percentage(part, total):
    """Return part as a percentage of total; 0.0 when total is zero.

    Robustness fix: the invoke-kind summary in main() divides by
    can_use_new_encoding + cant_use_new_encoding, which are both zero when no
    invoke instructions were seen -- previously a ZeroDivisionError.
    """
    if total == 0:
        return 0.0
    return (float(part) / float(total)) * 100.0
def print_code_stats(size, total_size, file_size):
    """Report code bytes that could be saved by better opcode encoding."""
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    message = ('error: %u of %u code bytes (%u file bytes) '
               % (size, total_size, file_size))
    message += 'could be saved by encoding opcodes more efficiently '
    message += ('(%2.2f%% code savings, %2.2f%% file savings).\n'
                % (code_savings, file_savings))
    print(message)
def print_debug_stats(size, file_size):
    """Report debug-info bytes that could be saved by better encoding."""
    file_savings = get_percentage(size, file_size)
    message = 'error: %u debug info bytes of %u file ' % (size, file_size)
    message += 'bytes could be saved by encoding debug info more '
    message += 'efficiently (%2.2f%% file savings).\n' % (file_savings)
    print(message)
def print_encoding_stats(size, total_size, file_size):
    """Report code bytes that could be saved by the proposed new encodings."""
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    # Bug fix: the first segment previously ended with 'could be saved ' and
    # the second segment started with 'could be saved by ...', producing the
    # duplicated phrase "could be saved could be saved by" in the output.
    print('%u of %u code bytes ' % (size, total_size), end='')
    print('could be saved by encoding opcodes more efficiently ', end='')
    print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
          file_savings))
class DexEmulator(object):
    """Minimal register-file model for emulating DEX instructions."""

    def __init__(self):
        self.registers = dict()   # register number -> current value
        self.pc = 0               # program counter, in code units

    def read_register(self, reg):
        """Return the value of reg; raise if it was never written."""
        if reg not in self.registers:
            raise ValueError("reading register with no value")
        return self.registers[reg]

    def write_register(self, reg, value):
        """Set reg to value, creating the register on first write."""
        self.registers[reg] = value

    def emulate(self, uint16_array):
        # Placeholder: instruction stream execution is not implemented yet.
        pass
def main():
    """Command-line entry point: parse options, then process each DEX file."""
    usage = 'Usage: dex.py [options] [dex file(s)]'
    parser = optparse.OptionParser(
        usage=usage,
        description='A script that parses DEX files.')
    parser.add_option('-v', '--verbose',
                      action='store_true',
                      dest='verbose',
                      help='display verbose debug info',
                      default=False)
    parser.add_option('-C', '--color',
                      action='store_true',
                      dest='color',
                      help='Enable colorized output',
                      default=False)
    parser.add_option('-a', '--all',
                      action='store_true',
                      dest='dump_all',
                      help='Dump all DEX sections.',
                      default=False)
    parser.add_option('-H', '--header',
                      action='store_true',
                      dest='dump_header',
                      help='Dump the DEX file header.',
                      default=False)
    parser.add_option('--map-list',
                      action='store_true',
                      dest='dump_map_list',
                      help='Dump the DEX map list info.',
                      default=False)
    parser.add_option('-s', '--strings',
                      action='store_true',
                      dest='dump_strings',
                      help='Dump the DEX strings.',
                      default=False)
    parser.add_option('-t', '--types',
                      action='store_true',
                      dest='dump_types',
                      help='Dump the DEX types.',
                      default=False)
    parser.add_option('-p', '--protos',
                      action='store_true',
                      dest='dump_protos',
                      help='Dump the DEX protos.',
                      default=False)
    parser.add_option('-f', '--fields',
                      action='store_true',
                      dest='dump_fields',
                      help='Dump the DEX fields.',
                      default=False)
    parser.add_option('-m', '--methods',
                      action='store_true',
                      dest='dump_methods',
                      help='Dump the DEX methods.',
                      default=False)
    parser.add_option('--method-handles',
                      action='store_true',
                      dest='dump_method_handles',
                      help='Dump the DEX method handles.',
                      default=False)
    parser.add_option('--classes',
                      action='store_true',
                      dest='dump_classes',
                      help='Dump the DEX classes.',
                      default=False)
    parser.add_option('--class',
                      dest='class_filter',
                      help='Find a class by name. ' +
                      'Accepts `Lpath/to/Class;` or `path.to.Class`',
                      default=None)
    parser.add_option('--method',
                      dest='method_filter',
                      help='Find a method by name. Must be used with --class',
                      default=None)
    parser.add_option('--call-sites',
                      action='store_true',
                      dest='dump_call_sites',
                      help='Dump the DEX call sites.',
                      default=False)
    parser.add_option('--code',
                      action='store_true',
                      dest='dump_code',
                      help='Dump the DEX code in all class methods.',
                      default=False)
    parser.add_option('--code-items',
                      action='store_true',
                      dest='dump_code_items',
                      help='Dump the DEX code items.',
                      default=False)
    parser.add_option('--code-duplication',
                      action='store_true',
                      dest='code_duplication',
                      help=('Dump any methods in the DEX file that have the '
                            'same instructions.'),
                      default=False)
    parser.add_option('--debug',
                      action='store_true',
                      dest='debug',
                      help='Dump the DEX debug info.',
                      default=False)
    parser.add_option('-d', '--disassemble',
                      action='store_true',
                      dest='dump_disassembly',
                      help='Dump the DEX code items instructions.',
                      default=False)
    parser.add_option('--stats',
                      action='store_true',
                      dest='dump_stats',
                      help='Dump the DEX opcode statistics.',
                      default=False)
    parser.add_option('--check-encoding',
                      action='store_true',
                      dest='check_encoding',
                      help='Verify opcodes are efficiently encoded.',
                      default=False)
    parser.add_option('--new-encoding',
                      action='store_true',
                      dest='new_encoding',
                      help='Report byte savings from potential new encodings.',
                      default=False)
    parser.add_option('--proguard',
                      dest='proguard',
                      help='Specify a progard file to use for demangling.',
                      default=None)
    (options, files) = parser.parse_args()
    # Totals accumulated across every input DEX file.
    total_code_bytes_inefficiently_encoded = 0
    total_debug_info_bytes_inefficiently_encoded = 0
    total_new_code_bytes_inefficiently_encoded = 0
    total_opcode_byte_size = 0
    total_file_size = 0
    op_name_to_size = {}
    string_counts = {}
    i = 0
    if len(files) == 0:
        print('No input files. {}'.format(usage))
        return
    for (i, path) in enumerate(files):
        if os.path.splitext(path)[1] == '.apk':
            print('error: dex.py operates on dex files, please unpack your apk')
            return
        print('Dex file: %s' % (path))
        file_size = os.path.getsize(path)
        total_file_size += file_size
        dex = File(path, options.proguard)
        # --class / --method filtering: dump just the requested class or
        # method and fall through to any other requested dumps.
        if options.class_filter:
            dex_class = dex.find_class(options.class_filter)
            if dex_class:
                if options.method_filter is None:
                    dex_class.dump()
                for method in dex_class.get_methods():
                    method_name = method.get_name()
                    if options.method_filter:
                        if options.method_filter != method_name:
                            continue
                    method.dump()
            else:
                print('error: class definition not found for "%s"' % (
                    options.class_filter))
        # Section dumps, each gated by its own flag or --all.
        if options.dump_header or options.dump_all:
            dex.dump_header(options)
            print('')
        if options.dump_map_list or options.dump_all:
            dex.dump_map_list(options)
        if options.dump_strings or options.dump_all:
            dex.dump_string_ids(options)
        if options.dump_types or options.dump_all:
            dex.dump_type_ids(options)
        if options.dump_protos or options.dump_all:
            dex.dump_proto_ids(options)
        if options.dump_fields or options.dump_all:
            dex.dump_field_ids(options)
        if options.dump_methods or options.dump_all:
            dex.dump_method_ids(options)
        if options.dump_classes or options.dump_all:
            dex.dump_class_defs(options)
        if options.dump_call_sites or options.dump_all:
            dex.dump_call_site_ids(options)
        if options.dump_method_handles or options.dump_all:
            dex.dump_method_handle_items(options)
        if options.dump_code or options.debug or options.dump_all:
            dex.dump_code(options)
        if options.dump_code_items:
            dex.dump_code_items(options)
        # The per-instruction pass: disassembly, opcode stats, and encoding
        # efficiency checks all share one walk over every method.
        if (options.dump_disassembly or options.dump_stats or
                options.check_encoding or options.new_encoding):
            if options.dump_stats:
                for string_item in dex.get_strings():
                    if string_item.data not in string_counts:
                        string_counts[string_item.data] = 0
                    string_counts[string_item.data] += 1
            code_bytes_inefficiently_encoded = 0
            debug_info_bytes_inefficiently_encoded = 0
            new_code_bytes_inefficiently_encoded = 0
            file_opcodes_byte_size = 0
            classes = dex.get_classes()
            used_code_item_indexes = list()
            for cls in classes:
                methods = cls.get_methods()
                for method in methods:
                    if options.dump_disassembly or options.debug:
                        method.dump(
                            f=sys.stdout, dump_code=options.dump_disassembly,
                            dump_debug_info=options.debug)
                    opcodes_bytes_size = method.get_code_byte_size()
                    file_opcodes_byte_size += opcodes_bytes_size
                    total_opcode_byte_size += opcodes_bytes_size
                    if (options.dump_stats or options.check_encoding or
                            options.new_encoding):
                        for dex_inst in method.get_instructions():
                            if options.dump_stats:
                                op_name = dex_inst.get_name()
                                size = dex_inst.get_num_code_units() * 2
                                if op_name not in op_name_to_size:
                                    op_name_to_size[op_name] = 0
                                op_name_to_size[op_name] += size
                            if options.check_encoding:
                                code_bytes_inefficiently_encoded += (
                                    dex_inst.check_encoding())
                            if options.new_encoding:
                                new_code_bytes_inefficiently_encoded += (
                                    dex_inst.new_encoding())
                    if options.check_encoding:
                        code_item_idx = method.get_code_item_index()
                        if code_item_idx >= 0:
                            used_code_item_indexes.append(code_item_idx)
                        debug_info = method.get_debug_info()
                        if debug_info:
                            debug_info_bytes_inefficiently_encoded += (
                                method.check_debug_info_encoding())
            # Per-file encoding report.
            if options.check_encoding:
                efficiently_encoded = True
                if code_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_code_bytes_inefficiently_encoded += (
                        code_bytes_inefficiently_encoded)
                    print_code_stats(code_bytes_inefficiently_encoded,
                                     file_opcodes_byte_size, file_size)
                if debug_info_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_debug_info_bytes_inefficiently_encoded += (
                        debug_info_bytes_inefficiently_encoded)
                    print_debug_stats(debug_info_bytes_inefficiently_encoded,
                                      file_size)
                # Verify that all code items are used.
                used_code_item_indexes.sort()
                prev_ci_idx = 0
                for ci_idx in used_code_item_indexes:
                    if ci_idx != prev_ci_idx:
                        efficiently_encoded = False
                        for idx in range(prev_ci_idx + 1, ci_idx):
                            print('code_item[%u] is not used and its '
                                  'code_item can be removed' % (idx))
                    prev_ci_idx = ci_idx
                if efficiently_encoded:
                    print('file is efficiently encoded.')
            if options.new_encoding:
                if new_code_bytes_inefficiently_encoded > 0:
                    total_new_code_bytes_inefficiently_encoded += (
                        new_code_bytes_inefficiently_encoded)
                    print_encoding_stats(new_code_bytes_inefficiently_encoded,
                                         file_opcodes_byte_size, file_size)
                else:
                    print('file is efficiently encoded.')
        if options.code_duplication:
            dex.report_code_duplication()
    # Cross-file statistics, printed after all inputs are processed.
    if options.dump_stats:
        duped_strings_byte_size = 0
        for s in string_counts:
            count = string_counts[s]
            if count > 1:
                s_len = len(s)
                duped_strings_byte_size += (count - 1) * \
                    s_len + get_uleb128_byte_size(s_len)
        if duped_strings_byte_size > 0:
            print('%u bytes in duplicated strings across dex files.' % (
                duped_strings_byte_size))
        print('BYTESIZE %AGE OPCODE')
        print('======== ===== =================================')
        sorted_x = sorted(op_name_to_size.items(),
                          key=operator.itemgetter(1))
        for (op_name, byte_size) in sorted_x:
            percentage = get_percentage(byte_size, total_opcode_byte_size)
            print('%-8u %5.2f %s' % (byte_size, percentage, op_name))
        print('-------- ----- ---------------------------------')
        print('%-8u 100.0' % (total_opcode_byte_size))
    # Multi-file summary; note i > 0 only holds when more than one file was
    # processed (enumerate leaves i == 0 for a single input).
    if i > 0:
        if options.check_encoding:
            if total_code_bytes_inefficiently_encoded > 0:
                print_code_stats(total_code_bytes_inefficiently_encoded,
                                 total_opcode_byte_size, total_file_size)
            if total_debug_info_bytes_inefficiently_encoded > 0:
                # NOTE(review): this assignment appears to be a leftover --
                # efficiently_encoded is never read after this point.
                efficiently_encoded = False
                print_debug_stats(total_debug_info_bytes_inefficiently_encoded,
                                  total_file_size)
        if options.new_encoding:
            invoke_kind_percentage = get_percentage(
                can_use_new_encoding,
                can_use_new_encoding + cant_use_new_encoding)
            print('%u invoke-kind opcodes could use new encoding' % (
                can_use_new_encoding), end='')
            print('%u could not (%2.2f%%)' % (cant_use_new_encoding,
                                              invoke_kind_percentage))
            if total_new_code_bytes_inefficiently_encoded > 0:
                print_encoding_stats(
                    total_new_code_bytes_inefficiently_encoded,
                    total_opcode_byte_size, total_file_size)
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| tools/python/dex.py | 137,949 | Encapsulates a class within a DEX file.
Encapsulates a method within a DEX file.
Represents and DEX (Dalvik Executable) file
Parses a proguard map file and does name lookups.
Overload the [] operator to give out code units
Overload the length operator to give out the number of code units
Verify that this instruction can't be encoded more efficiently
Given a mangled type name as it would appear in a DEX file like
"LX/JxK;", return the demangled version if we have a proguard file,
otherwise return the original class typename
Get the 4 bit value of A
Get the 8 bit value of AA from the byte next to the Op
Get the 4 bit value of B
Get the index into the code_items array in the dex file for the
code for this method, or -1 if there is no code for this method.
Get the code offset for this method.
Get the method_id_item for this method.
method_ref can be one of:
- a encoded_method object
- integer method index
Get the method index into the method_ids array in the DEX file.
Get the demangled name for a class if we have a proguard file or
return the mangled name if we don't have a proguard file.
Returns the name of the method as it is known in the current DEX
file (no proguard remapping)
Return the 1 byte op field that tells us what instruction this is
Get type ID index (class_idx) for this class.
Translate a new class name to the old class name.
Translate a new class name and a new method into the old class
name and the old method name.
Look for bytes we can save by making new opcodes that are encoded
as unsigned, or other optimizations
Return True if the operation is commutative
!/usr/bin/env python Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. ---------------------------------------------------------------------- Constants ---------------------------------------------------------------------- ---------------------------------------------------------------------- access_flags definitions ---------------------------------------------------------------------- ---------------------------------------------------------------------- Value formats ---------------------------------------------------------------------- ---------------------------------------------------------------------- Type Codes ---------------------------------------------------------------------- size = 0x70 size = 0x04 size = 0x04 size = 0x0c size = 0x08 size = 0x08 size = 0x20 size = 0x04 size = 0x08 size = 4 + (item.size * 12) size = 4 + (item.size * 2) size = 4 + (item.size * 4) size = 4 + (item.size * 4) ---------------------------------------------------------------------- Method Handle Type Codes ---------------------------------------------------------------------- ---------------------------------------------------------------------- encoded_field ---------------------------------------------------------------------- ---------------------------------------------------------------------- encoded_method ---------------------------------------------------------------------- ---------------------------------------------------------------------- class_data_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- class_def_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- try_item ---------------------------------------------------------------------- 
---------------------------------------------------------------------- encoded_type_addr_pair ---------------------------------------------------------------------- ---------------------------------------------------------------------- encoded_catch_handler ---------------------------------------------------------------------- ---------------------------------------------------------------------- encoded_catch_handler_list ---------------------------------------------------------------------- Dex files built for release don't need any the following debug info ops ---------------------------------------------------------------------- code_item ---------------------------------------------------------------------- Convert insns from a list to a tuple to avoid mutattion and also to allow self.insns to be hashed. encoded_array: an array of values, in the format specified by "encoded_array format". The size of the value is implicit in the encoding. encoded_annotation: a sub-annotation, in the format specified by "encoded_annotation format" below. The size of the value is implicit in the encoding. 
---------------------------------------------------------------------- encoded_array ---------------------------------------------------------------------- ---------------------------------------------------------------------- field_id_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- header_item ---------------------------------------------------------------------- NULL byte ---------------------------------------------------------------------- map_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- map_list ---------------------------------------------------------------------- ---------------------------------------------------------------------- method_handle_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- method_id_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- proto_id_item ---------------------------------------------------------------------- Get the data from our dex.File object ---------------------------------------------------------------------- string_data_item ---------------------------------------------------------------------- ---------------------------------------------------------------------- type_list ---------------------------------------------------------------------- print('other old = "%s"' % (old)) print('other new = "%s"' % (new)) print('class old = "%s"' % (old)) print('class new = "%s"' % (new)) Already demangled Already demangled Make sure the string is in 'L' <classname-with-slashes> ';' Return a list of matching methods def get_call_site(self, idx): call_site_ids = self.get_call_site_ids() if idx >= len(call_site_ids): return None if 
self.call_sites[idx] is None: self.data.push_offset_and_seek(call_site_ids[idx]) self.call_sites[idx] = call_site_item(self.data) self.data.pop_offset_and_seek() return self.call_sites[idx] Dump the code once for all methods Make sure the code items are created is the highest bit (sign) set? (x>>(b-1)) would be faster 2s complement Return zero to indicate we can't save any bytes Return zero to indicate we can't save any bytes NOP add-int mul-int and-int or-int xor-int add-long mul-long and-long or-long xor-long add-float mul-float add-double mul-double Return zero to indicate we can't save any bytes Verify that all code items are used. | 7,362 | en | 0.397159 |
#!/usr/bin/env python
"""Tests for `calvestbr` package."""
import unittest
from calvestbr import calvestbr
class TestCalvestbr(unittest.TestCase):
    """Tests for `calvestbr` package."""

    def setUp(self):
        """Set up test fixtures, if any."""
        # No fixtures yet; kept as a hook for future tests.

    def tearDown(self):
        """Tear down test fixtures, if any."""
        # Nothing to clean up yet.

    def test_000_something(self):
        """Test something."""
        # TODO: add real assertions exercising the calvestbr API.
| tests/test_calvestbr.py | 397 | Tests for `calvestbr` package.
Set up test fixtures, if any.
Tear down test fixtures, if any.
Test something.
Tests for `calvestbr` package.
!/usr/bin/env python | 162 | en | 0.573116 |
# Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
# Maps each PDC dataset name to a dict of {data type: PDC study ID}.
# The study IDs are used to query the PDC GraphQL API (see _pdc_download).
STUDY_IDS_MAP = {
    "pdcbrca": {
        "acetylome": "PDC000239", # Prospective Breast BI Acetylome
        "phosphoproteome": "PDC000121", # Prospective BRCA Phosphoproteome S039-2
        "proteome": "PDC000120", # Prospective BRCA Proteome S039-1
    },
    "pdcccrcc": {
        "phosphoproteome": "PDC000128", # CPTAC CCRCC Discovery Study - Phosphoproteme S044-2
        "proteome": "PDC000127", # CPTAC CCRCC Discovery Study - Proteome S044-1
    },
    "pdccoad": {
        "phosphoproteome": "PDC000117", # Prospective COAD Phosphoproteome S037-3
        "proteome": "PDC000116", # Prospective COAD Proteome S037-2
    },
    "pdcgbm": {
        "acetylome": "PDC000245", # CPTAC GBM Discovery Study - Acetylome
        "phosphoproteome": "PDC000205", # CPTAC GBM Discovery Study - Phosphoproteome
        "proteome": "PDC000204", # CPTAC GBM Discovery Study - Proteome
    },
    "pdchnscc": {
        "phosphoproteome": "PDC000222", # CPTAC HNSCC Discovery Study - Phosphoproteome
        "proteome": "PDC000221", # CPTAC HNSCC Discovery Study - Proteome
    },
    "pdclscc": {
        "acetylome": "PDC000233", # CPTAC LSCC Discovery Study - Acetylome
        "phosphoproteome": "PDC000232", # CPTAC LSCC Discovery Study - Phosphoproteome
        "proteome": "PDC000234", # CPTAC LSCC Discovery Study - Proteome
        "ubiquitylome": "PDC000237", # CPTAC LSCC Discovery Study - Ubiquitylome
    },
    "pdcluad": {
        "acetylome": "PDC000224", # CPTAC LUAD Discovery Study - Acetylome
        "phosphoproteome": "PDC000149", # CPTAC LUAD Discovery Study - Phosphoproteome
        "proteome": "PDC000153", # CPTAC LUAD Discovery Study - Proteome
    },
    "pdcov": {
        "phosphoproteome": "PDC000119", # Prospective OV Phosphoproteome S038-3
        "proteome": "PDC000118", # Prospective OV Proteome S038-2
    },
    "pdcpdac": {
        "proteome": "PDC000270", # CPTAC PDAC Discovery Study - Proteome
        "phosphoproteome": "PDC000271", # CPTAC PDAC Discovery Study - Phosphoproteome
    },
    "pdcucec": {
        "acetylome": "PDC000226", # CPTAC UCEC Discovery Study - Acetylome
        "phosphoproteome": "PDC000126", # UCEC Discovery - Phosphoproteome S043-2
        "proteome": "PDC000125", # UCEC Discovery - Proteome S043-1
    },
}
def download(dataset, version="latest", redownload=False):
    """Download the requested dataset.

    Three families are handled: PDC datasets ("pdc..."), pancan datasets
    ("pancan..." or "all", which fan out to their component sources), and
    everything else, which is delegated straight to cptac.download.

    Returns:
        bool: Whether all required downloads succeeded.
    """
    dataset = dataset.lower()

    if dataset.startswith("pdc"):
        box_token = get_box_token()
        # pdcbrca is the only dataset that doesn't need a mapping file for PDC
        if dataset == "pdcbrca":
            omics = _pdc_download(dataset, version=version, redownload=redownload)
            return bool(omics)
        # download helper file for mapping aliquots to patient IDs
        mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
        omics = _pdc_download(dataset, version=version, redownload=redownload)
        return bool(omics and mapping)

    if dataset.startswith("pancan") or dataset == "all":
        box_token = get_box_token()
        source_lists = {
            "pancanbrca": BRCA_SOURCES,
            "pancanccrcc": CCRCC_SOURCES,
            "pancancoad": COAD_SOURCES,
            "pancangbm": GBM_SOURCES,
            "pancanhnscc": HNSCC_SOURCES,
            "pancanlscc": LSCC_SOURCES,
            "pancanluad": LUAD_SOURCES,
            "pancanov": OV_SOURCES,
            "pancanucec": UCEC_SOURCES,
            "pancanpdac": PDAC_SOURCES,
        }
        if dataset == "all":
            sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
        elif dataset in source_lists:
            sources = source_lists[dataset]
        else:
            raise InvalidParameterError(f"{dataset} is not a valid dataset.")

        overall_success = True
        for source in sources:
            # PDC component sources recurse through this function; the rest
            # go straight to cptac.download with Box authentication.
            if source.startswith("pdc"):
                single_success = download(source, version=version, redownload=redownload)
            else:
                single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
            if not single_success:
                overall_success = False
        return overall_success

    return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
def download_pdc_id(pdc_id, _download_msg=True):
    """Download a PDC dataset by its PDC study id.

    Returns:
        pandas.DataFrame: The clinical table for the study id.
        pandas.DataFrame: The quantitative table for the study id.
    """
    def _show(msg):
        # Print a carriage-return status line, honoring the quiet flag.
        if _download_msg:
            print(msg, end="\r")
        return msg

    def _erase(msg):
        # Overwrite the previous status line with spaces.
        if _download_msg:
            print(" " * len(msg), end="\r")

    # Download the clinical table
    msg = _show(f"Downloading clinical table for {pdc_id}...")
    clin = _download_study_clin(pdc_id).set_index("case_submitter_id").sort_index()
    _erase(msg)

    # The biospecimenPerStudy table has both patient IDs and aliquot IDs
    msg = _show(f"Downloading biospecimenPerStudy table for {pdc_id}...")
    bio = _download_study_biospecimen(pdc_id).set_index("aliquot_submitter_id").sort_index()
    _erase(msg)

    # Get the quantitative data table
    msg = _show(f"Downloading quantitative table for {pdc_id}...")
    quant = _download_study_quant(pdc_id)
    _erase(msg)

    msg = _show(f"Formatting tables for {pdc_id}...")

    # The first quant column is "<tag>:<aliquot_submitter_id>"; split out the
    # aliquot id and make it the index so we can join on the biospecimen table.
    first_col = quant.columns[0]
    aliquot_ids = quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]
    quant = (quant
             .assign(aliquot_submitter_id=aliquot_ids)
             .drop(columns=first_col)
             .set_index("aliquot_submitter_id")
             .sort_index())

    # Join the patient IDs from the biospecimenPerStudy table into the quant table
    quant = (bio
             .join(quant, how="inner")
             .reset_index()
             .set_index(["case_submitter_id", "aliquot_submitter_id"])
             .sort_index())

    # Clear message
    _erase(msg)
    return clin, quant
def list_pdc_datasets():
    """Print every PDC dataset with its data types and study IDs."""
    for dataset, data_types in STUDY_IDS_MAP.items():
        print(f"Pdc{dataset[3:].title()}:")
        for data_type, study_id in data_types.items():
            print(f"\t{data_type}: {study_id}")
# Helper functions
def _pdc_download(dataset, version, redownload):
    """Download data for the specified cancer type from the PDC.

    Parameters:
        dataset (str): The PDC dataset to download; "pdcall" downloads all of them.
        version (str): Accepted for API symmetry; the PDC has no real versioning.
        redownload (bool): Whether to re-download data already on disk.

    Returns:
        bool: Whether the download succeeded.

    Raises:
        InvalidParameterError: If dataset is not a recognized PDC dataset.
    """
    dataset = dataset.lower()

    if dataset == "pdcall":
        overall_result = True
        for dataset in STUDY_IDS_MAP.keys():
            # Bug fix: this previously called the undefined name 'pdc_download',
            # which raised NameError instead of recursing.
            if not _pdc_download(dataset, version, redownload):
                overall_result = False
        return overall_result

    if not dataset.startswith("pdc"):
        raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
    if dataset not in STUDY_IDS_MAP.keys():
        raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")

    dataset_ids = STUDY_IDS_MAP[dataset]

    # Get the directory to where to store the data, and see if it exists
    path_here = os.path.abspath(os.path.dirname(__file__))
    cancer_dir = os.path.join(path_here, f"data_{dataset}")

    if os.path.isdir(cancer_dir):
        index_path = os.path.join(cancer_dir, "index.txt")

        # Check that they also have the index
        if not os.path.isfile(index_path):
            redownload = True
        else:
            # The PDC doesn't have a versioning scheme for the tables they serve,
            # so originally we just called it version 0.0 but later decided it
            # would be better to call it 1.0. So, check if theirs is called 0.0;
            # if so, replace it with 1.0.
            with open(index_path, "r") as index_file:
                first_line = index_file.readline()
            if first_line.startswith("#0.0"):
                redownload = True

        if redownload:
            shutil.rmtree(cancer_dir)
        else:
            return True

    os.mkdir(cancer_dir)
    data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
    os.mkdir(data_dir)

    # We'll combine all the clinical tables in case there are differences.
    # (DataFrame.append was removed in pandas 2.0, so collect the tables
    # and concatenate once at the end instead.)
    clin_tables = []

    for data_type in dataset_ids.keys():
        # Print an update
        download_msg = f"Downloading {dataset} {data_type} files..."
        print(download_msg, end="\r")

        # Get the clinical and quantitative tables for the study ID
        clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)

        # Print a new update
        print(" " * len(download_msg), end="\r")
        save_msg = f"Saving {dataset} {data_type} files..."
        print(save_msg, end="\r")

        # Collect the clinical dataframe
        clin_tables.append(clin)

        # Save the quantitative table
        quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")

        # Erase update
        print(" " * len(save_msg), end="\r")

    # Print an update
    save_msg = f"Saving {dataset} clinical file..."
    print(save_msg, end="\r")

    # Drop any duplicated rows in combined clinical table, then save it too
    master_clin = pd.concat(clin_tables).drop_duplicates(keep="first")
    master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")

    # Write a dummy index with just version numbers
    index_path = os.path.join(cancer_dir, "index.txt")
    with open(index_path, "w") as index_file:
        index_file.write("#1.0\n")

    # Erase update
    print(" " * len(save_msg), end="\r")
    return True
def _download_study_clin(pdc_study_id):
    """Download PDC clinical data for a particular study.

    Sends a GraphQL clinicalPerStudy query and returns the result as a
    DataFrame with one row per clinical record.
    """
    # GraphQL query requesting every clinical field the PDC exposes.
    clinical_query = '''
    query {
        clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
            age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
            ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
            ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
            case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
            days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
            days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
            diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
            icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
            lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
            overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
            progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
            site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
            tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
        }
    }
    '''

    result_json = _query_pdc(clinical_query)

    # The payload is a list of per-case dicts under data.clinicalPerStudy.
    result_df = pd.\
    DataFrame(result_json["data"]["clinicalPerStudy"])

    return result_df
def _download_study_biospecimen(pdc_study_id):
    """Download PDC biospecimen data for a particular study.

    Returns a DataFrame mapping aliquot_submitter_id to case_submitter_id.
    """
    biospecimen_query = '''
    query {
        biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
            aliquot_submitter_id
            case_submitter_id
        }
    }
    '''
    response = _query_pdc(biospecimen_query)
    return pd.DataFrame(response["data"]["biospecimenPerStudy"])
def _download_study_quant(pdc_study_id):
    """Download PDC quantitative data for a particular study.

    Returns the quantDataMatrix as a DataFrame with samples as rows.

    Raises:
        PdcDownloadError: If the server returns an empty matrix.
    """
    proteome_query = '''
    query {
        quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
    }
    '''
    response = _query_pdc(proteome_query)
    matrix = pd.DataFrame(response["data"]["quantDataMatrix"])

    if matrix.shape[1] == 0:
        raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")

    # The first column holds the row identifiers; promote it to the index,
    # then transpose so each sample becomes a row.
    return matrix.set_index(matrix.columns[0]).transpose()
def _query_pdc(query):
    """Send a GraphQL query to the PDC and return the results.

    Raises:
        NoInternetError: If the request fails or returns an error status.
    """
    try:
        # requests.RequestException is the parent class for all exceptions in
        # the requests module; raise_for_status turns bad HTTP codes into one.
        response = requests.post('https://pdc.cancer.gov/graphql', json={'query': query})
        response.raise_for_status()
    except requests.RequestException:
        raise NoInternetError("Insufficient internet. Check your internet connection.") from None
    else:
        return response.json()
def _check_ids_match(ids_map):
    """Check that the ids in the download function's STUDY_IDS_MAP match up.

    NOTE(review): this dev helper expects each data-type entry in ids_map to
    be a dict with 'pdc_study_id' and 'study_submitter_id' keys, but the
    module's current STUDY_IDS_MAP stores plain study-id strings — confirm
    the intended map shape before calling this with STUDY_IDS_MAP.
    """
    for cancer in ids_map.values():
        for data in cancer.values():
            pdc_study_id = data["pdc_study_id"]
            study_submitter_id = data["study_submitter_id"]

            query = '''
            query {
              study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
                pdc_study_id,
                study_submitter_id
              }
            }
            '''

            idres = _query_pdc(query)

            # The server returns a list of matching studies; we expect exactly one.
            server_psi = idres["data"]["study"][0]["pdc_study_id"]
            server_ssi = idres["data"]["study"][0]["study_submitter_id"]

            assert server_psi == pdc_study_id
            assert server_ssi == study_submitter_id

            print(f"{server_psi} == {pdc_study_id}")
            print(f"{server_ssi} == {study_submitter_id}")
            print()
| cptac/pancan/file_download.py | 16,297 | Check that the ids in the download function's STUDY_IDS_MAP match up.
Download PDC biospecimen data for a particular study.
Download PDC clinical data for a particular study.
Download PDC quantitative data for a particular study.
Download data for the specified cancer type from the PDC.
Send a GraphQL query to the PDC and return the results.
Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
Copyright 2018 Samuel Payne sam_payne@byu.edu Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Prospective Breast BI Acetylome Prospective BRCA Phosphoproteome S039-2 Prospective BRCA Proteome S039-1 CPTAC CCRCC Discovery Study - Phosphoproteme S044-2 CPTAC CCRCC Discovery Study - Proteome S044-1 Prospective COAD Phosphoproteome S037-3 Prospective COAD Proteome S037-2 CPTAC GBM Discovery Study - Acetylome CPTAC GBM Discovery Study - Phosphoproteome CPTAC GBM Discovery Study - Proteome CPTAC HNSCC Discovery Study - Phosphoproteome CPTAC HNSCC Discovery Study - Proteome CPTAC LSCC Discovery Study - Acetylome CPTAC LSCC Discovery Study - Phosphoproteome CPTAC LSCC Discovery Study - Proteome CPTAC LSCC Discovery Study - Ubiquitylome CPTAC LUAD Discovery Study - Acetylome CPTAC LUAD Discovery Study - Phosphoproteome CPTAC LUAD Discovery Study - Proteome Prospective OV Phosphoproteome S038-3 Prospective OV Proteome S038-2 CPTAC PDAC Discovery Study - Proteome CPTAC PDAC Discovery Study - Phosphoproteome CPTAC UCEC Discovery Study - Acetylome UCEC Discovery - Phosphoproteome S043-2 UCEC Discovery - Proteome S043-1 pdcbrca is the only dataset that doesn't need a mapping file for PDC download helper file for mapping aliquots to patient IDs pdcbrca only needs omics Download the clinical table The the biospecimenPerStudy table, which has both patient IDs and aliquot IDs Get the quantitative data table Join the patient IDs from the biospecimenPerStudy table into the quant table Clear message Helper functions Get the 
directory to where to store the data, and see if it exists Check that they also have the index The PDC doesn't have a versioning scheme for the tables they serve, so originally we just called it version 0.0 but later decided it would be better to call it 1.0. So, check if theirs is called 0.0; if so, replace it with 1.0. We'll combine all the clinical tables in case there are differences Print an update Get the clinical and quantitative tables for the study ID Print a new update Append the clinical dataframe Save the quantitative table Erase update Print an update Drop any duplicated rows in combined clinical table, then save it too Write a dummy index with just version numbers Erase update Raises a requests.HTTPError if the response code was unsuccessful Parent class for all exceptions in the requests module | 3,365 | en | 0.75524 |
#!/usr/bin/env python
import os
import logging
import requests
import json
import configparser
import sys
import time
import re
from os.path import dirname
from config import (
instanceA_url, instanceA_key, instanceA_path, instanceA_profile,
instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id,
instanceA_language_id, instanceA_language, instanceA_quality_match,
instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist,
instanceB_url, instanceB_key, instanceB_path, instanceB_profile,
instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id,
instanceB_language_id, instanceB_language, instanceB_quality_match,
instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist,
content_id_key, logger, is_sonarr, is_radarr, is_lidarr,
get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path,
is_in_docker, instance_sync_interval_seconds,
sync_bidirectionally, auto_search, skip_missing, monitor_new_content,
api_version, is_test_run, sync_monitor
)
def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
    """Build the JSON payload used to add one content item to the target instance.

    The payload shape depends on which app is being synced (Sonarr, Radarr,
    or Lidarr, selected via the module-level config flags).
    """
    global monitor_new_content, auto_search

    # Rewrite relative image URLs so they point at the source instance.
    images = content.get('images')
    for image in images:
        image['url'] = '{0}{1}'.format(instance_url, image.get('url'))

    # Config may force the monitored flag for newly synced content.
    monitored = content.get('monitored')
    if monitor_new_content is not None:
        monitored = bool(monitor_new_content)

    payload = {
        content_id_key: content.get(content_id_key),
        'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')),
        'monitored': monitored,
        'rootFolderPath': instance_path,
        'images': images,
    }

    add_options = content.get('addOptions', {})
    search_missing = bool(auto_search)

    if is_sonarr:
        payload.update({
            'title': content.get('title'),
            'titleSlug': content.get('titleSlug'),
            'seasons': content.get('seasons'),
            'year': content.get('year'),
            'tvRageId': content.get('tvRageId'),
            'seasonFolder': content.get('seasonFolder'),
            'languageProfileId': instance_language_id or content.get('languageProfileId'),
            'tags': content.get('tags'),
            'seriesType': content.get('seriesType'),
            'useSceneNumbering': content.get('useSceneNumbering'),
            'addOptions': dict(add_options, searchForMissingEpisodes=search_missing),
        })
    elif is_radarr:
        payload.update({
            'title': content.get('title'),
            'year': content.get('year'),
            'tmdbId': content.get('tmdbId'),
            'titleSlug': content.get('titleSlug'),
            'addOptions': dict(add_options, searchForMovie=search_missing),
        })
    elif is_lidarr:
        payload.update({
            'artistName': content.get('artistName'),
            'albumFolder': content.get('albumFolder'),
            'metadataProfileId': content.get('metadataProfileId'),
            'addOptions': dict(add_options, monitored=monitored, searchForMissingAlbums=search_missing),
        })

    logger.debug(payload)
    return payload
def get_quality_profiles(instance_session, instance_url, instance_key):
    """Fetch the list of quality profiles from an instance.

    Exits the program (via exit_system) on HTTP or decode failure.

    Returns:
        list: The decoded profile objects from the instance.
    """
    instance_profile_url = get_profile_path(instance_url, instance_key)
    profiles_response = instance_session.get(instance_profile_url)
    if profiles_response.status_code != 200:
        logger.error(f'Could not get profile id from {instance_profile_url}')
        exit_system()
    try:
        return profiles_response.json()
    except ValueError:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; JSON decode failures raise ValueError.
        logger.error(f'Could not decode profile id from {instance_profile_url}')
        exit_system()
def get_profile_from_id(instance_session, instance_url, instance_key, instance_profile, instance_name=''):
    """Resolve a quality-profile name to its numeric id on the given instance."""
    profiles = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key)

    wanted = instance_profile.lower()
    match = next((p for p in profiles if p["name"].lower() == wanted), None)
    if match is None:
        logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile))
        exit_system()

    profile_id = match.get('id')
    logger.debug(f'found profile_id (instance{instance_name}) "{profile_id}" from profile "{instance_profile}"')
    return profile_id
def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''):
    """Resolve a list of tag labels to their numeric ids (Sonarr/Radarr v3).

    Parameters:
        instance_tag (list[str]): Tag labels to look up (case-insensitive).

    Returns:
        list: The ids of the matching tags. Exits on any failure.
    """
    instance_tag_url = get_tag_path(instance_url, instance_key)
    tag_response = instance_session.get(instance_tag_url)
    if tag_response.status_code != 200:
        logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr')
        exit_system()

    try:
        instance_tags = tag_response.json()
    except ValueError:
        # Narrowed from a bare `except:` which also swallowed SystemExit.
        logger.error(f'Could not decode tag id from {instance_tag_url}')
        exit_system()

    # Case-insensitive match of the requested labels against the server's tags.
    wanted = {label.lower() for label in instance_tag}
    tag_ids = [item for item in instance_tags if item.get('label').lower() in wanted]

    if not tag_ids:
        # Bug fix: previously logged the server's entire tag list (instance_tags)
        # instead of the tags the caller asked for.
        logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tag}')
        exit_system()

    instance_tag_ids = [tag.get('id') for tag in tag_ids]
    logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}')
    # (The old `instance_tag_ids is None` check was dead code: a list
    # comprehension can never be None.)
    return instance_tag_ids
def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''):
    """Resolve a language name to its numeric id (Sonarr v3 only).

    Returns:
        int: The language id. Exits on any failure.
    """
    instance_language_url = get_language_path(instance_url, instance_key)
    language_response = instance_session.get(instance_language_url)
    if language_response.status_code != 200:
        logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3')
        exit_system()

    try:
        instance_languages = language_response.json()
    except ValueError:
        # Narrowed from a bare `except:` which also swallowed SystemExit.
        logger.error(f'Could not decode language id from {instance_language_url}')
        exit_system()

    # The endpoint returns a list of language profiles; the available
    # languages live on the first one.
    instance_languages = instance_languages[0]['languages']

    wanted = instance_language.lower()
    language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == wanted), None)
    if language is None:
        logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}')
        exit_system()

    instance_language_id = language.get('language', {}).get('id')
    logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}')

    # This check is live: the matched entry may lack an 'id' key.
    if instance_language_id is None:
        logger.error(f'language_id is None for instance {instance_name} and language {instance_language}')
        exit_system()
    return instance_language_id
def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds,
                 instanceB_path, instanceB_profile_id, instanceA_profile_filter_id,
                 instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match,
                 instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents):
    """Sync content from instance A into instance B.

    Walks every item in instanceA_contents, applies the configured filters
    (profile, quality, tags, blacklist, missing files), and POSTs unsynced
    items to instance B. When sync_monitor is enabled, also PUTs monitored-flag
    changes for items that already exist on instance B.
    """
    global is_radarr, is_sonarr, is_test_run, sync_monitor
    search_ids = []
    # if given instance A profile id then we want to filter out content without that id
    if instanceA_profile_filter_id:
        logging.info(f'only filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}')
    # for each content id in instance A, check if it needs to be synced to instance B
    for content in instanceA_contents:
        content_not_synced = content[content_id_key] not in instanceB_contentIds
        # only skip already synced items if we aren't syncing monitoring as well
        if content_not_synced or sync_monitor:
            # Sonarr/Radarr use 'title'; Lidarr uses 'artistName'
            title = content.get('title') or content.get('artistName')
            instance_path = instanceB_path or dirname(content.get('path'))
            # if skipping missing files, we want to skip any that don't have files
            if is_radarr and skip_missing:
                content_has_file = content.get('hasFile')
                if not content_has_file:
                    logging.debug(f'Skipping content {title} - file missing')
                    continue
            # if given this, we want to filter from instance by profile id
            if instanceA_profile_filter_id:
                quality_profile_id = content.get('qualityProfileId')
                if instanceA_profile_filter_id != quality_profile_id:
                    logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}')
                    continue
            # if given quality filter we want to filter if quality from instanceA isn't high enough yet
            if is_radarr and instanceA_quality_match:
                content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '')
                if content_quality and not re.match(instanceA_quality_match, content_quality):
                    logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}')
                    continue
            # if given tag filter then filter by tag - (Sonarr/Radarr v3 only)
            if (is_sonarr or is_radarr) and instanceA_tag_filter_id:
                content_tag_ids = content.get('tags')
                if not (set(content_tag_ids) & set(instanceA_tag_filter_id)):
                    logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}')
                    continue
            # if black list given then don't sync matching slugs/ids
            if instanceA_blacklist:
                title_slug = content.get('titleSlug') or content.get('foreignArtistId')
                if title_slug in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}')
                    continue
                content_id = str(content.get('id'))
                if content_id in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist ID: {content_id}')
                    continue
            # generate content from instance A to sync into instance B
            formatted_content = get_content_details(
                content=dict(content),
                instance_path=instance_path,
                instance_profile_id=instanceB_profile_id,
                instance_url=instanceB_url,
                instance_language_id=instanceB_language_id,
            )
            instanceB_content_url = get_content_path(instanceB_url, instanceB_key)
            if is_test_run:
                logging.info('content title "{0}" synced successfully (test only)'.format(title))
            elif content_not_synced:
                # sync content if not synced
                logging.info(f'syncing content title "{title}"')
                sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content)
                # check response and save content id for searching later on if success
                if sync_response.status_code != 201 and sync_response.status_code != 200:
                    logger.error(f'server sync error for {title} - response: {sync_response.text}')
                else:
                    try:
                        search_ids.append(int(sync_response.json()['id']))
                    except:
                        logger.error(f'Could not decode sync response from {instanceB_content_url}')
                    logging.info('content title "{0}" synced successfully'.format(title))
            elif sync_monitor:
                # else if is already synced and we want to sync monitoring then sync that now
                # find matching content from instance B to check monitored status
                matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents))
                if(len(matching_content_instanceB) == 1):
                    matching_content_instanceB = matching_content_instanceB[0]
                    # if we found a content match from instance B, then check monitored status - if different then sync from A to B
                    if matching_content_instanceB['monitored'] != content['monitored']:
                        matching_content_instanceB['monitored'] = content['monitored']
                        instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id'))
                        sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB)
                        # check response and save content id for searching later on if success
                        if sync_response.status_code != 202:
                            logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}')
                        else:
                            try:
                                search_ids.append(int(sync_response.json()['id']))
                            except:
                                logger.error(f'Could not decode sync response from {instanceB_content_url}')
                            logging.info('content title "{0}" monitoring synced successfully'.format(title))
    logging.info(f'{len(search_ids)} contents synced successfully')
def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''):
    """Fetch all content items from an instance.

    Returns:
        tuple: (list of content dicts, list of their content ids).
        Exits the program (via exit_system) on HTTP or decode failure.
    """
    instance_content_url = get_content_path(instance_url, instance_key)
    response = instance_session.get(instance_content_url)
    if response.status_code != 200:
        logger.error('instance{} server error - response {}'.format(instance_name, response.status_code))
        exit_system()
    try:
        instance_contents = response.json()
    except ValueError:
        # Narrowed from a bare `except:` which also swallowed SystemExit.
        logger.error(f'Could not decode contents from {instance_content_url}')
        exit_system()

    instance_contentIds = [item[content_id_key] for item in instance_contents]
    logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name))
    return instance_contents, instance_contentIds
def check_status(instance_session, instance_url, instance_key, instance_name=''):
    """Verify that an instance is reachable and healthy.

    Exits the program (via exit_system) on connection failure, non-200
    status, or a server-reported error. Returns the decoded status JSON
    on success.
    """
    global api_version
    instance_status_url = get_status_path(instance_url, instance_key)
    error_message = f'Could not connect to instance{instance_name}: {instance_status_url}'
    status_response = None
    try:
        status_response = instance_session.get(instance_status_url)
        if status_response.status_code != 200:
            logger.error(error_message)
            exit_system()
    except:
        # NOTE(review): bare except also catches SystemExit raised by
        # exit_system above — confirm exit_system terminates regardless.
        logger.error(error_message)
        exit_system()
    if status_response is None:
        logger.error(error_message)
        exit_system()
    else:
        try:
            status_response = status_response.json()
        except Exception as error:
            # Some session implementations may already return a dict from
            # .get(); only treat a decode failure as fatal when the response
            # is not already dict-shaped.
            if not isinstance(status_response, dict):
                logger.error(
                    f"Could not retrieve status for {instance_status_url}: {status_response} - {error}")
                exit_system()
        if(status_response.get('error')):
            logger.error(f"{instance_status_url} error {status_response.get('error')}")
            exit_system()
        logger.debug(f"{instance_status_url} version {status_response.get('version')}")
        return status_response
def sync_content():
    """Sync content (and monitored state) from instance A to instance B,
    and optionally back again when running bidirectionally.

    Resolves profile/language/tag names to their ids first, then fetches
    both instances' content lists and delegates the work to sync_servers().
    """
    global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, instanceA_blacklist, instanceB_blacklist
    # get sessions; trust_env=False ignores proxy environment variables
    instanceA_session = requests.Session()
    instanceA_session.trust_env = False
    instanceB_session = requests.Session()
    instanceB_session.trust_env = False
    # if given a profile instead of a profile id then try to find the profile id
    if not instanceA_profile_id and instanceA_profile:
        instanceA_profile_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A')
    if not instanceB_profile_id and instanceB_profile:
        instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B')
    logger.debug({
        'instanceA_profile_id': instanceA_profile_id,
        'instanceA_profile': instanceA_profile,
        'instanceB_profile_id': instanceB_profile_id,
        'instanceB_profile': instanceB_profile,
    })
    # do the same for profile id filters if they exist
    if not instanceA_profile_filter_id and instanceA_profile_filter:
        instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A')
    if not instanceB_profile_filter_id and instanceB_profile_filter:
        instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B')
    logger.debug({
        'instanceAprofile_filter_id': instanceA_profile_filter_id,
        'instanceAprofile_filter': instanceA_profile_filter,
        'instanceBprofile_filter_id': instanceB_profile_filter_id,
        'instanceBprofile_filter': instanceB_profile_filter,
    })
    # do the same for tag id filters if they exist - (only Sonarr/Radarr)
    if is_sonarr or is_radarr:
        if not instanceA_tag_filter_id and instanceA_tag_filter:
            instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A')
        if not instanceB_tag_filter_id and instanceB_tag_filter:
            # BUG FIX: previously passed instanceA_tag_filter here, so instance
            # B's tag filter id was resolved from instance A's tag name.
            instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_tag_filter, 'B')
        # BUG FIX: debug dict previously logged instanceA_profile_filter and
        # omitted instanceA_tag_filter_id (copy-paste slip).
        logger.debug({
            'instanceA_tag_filter_id': instanceA_tag_filter_id,
            'instanceA_tag_filter': instanceA_tag_filter,
            'instanceB_tag_filter_id': instanceB_tag_filter_id,
            'instanceB_tag_filter': instanceB_tag_filter,
        })
    # if given language instead of language id then try to find the language id - (only Sonarr v3)
    if is_sonarr:
        if not instanceA_language_id and instanceA_language:
            instanceA_language_id = get_language_from_id(
                instance_session=instanceA_session,
                instance_url=instanceA_url,
                instance_key=instanceA_key,
                instance_language=instanceA_language,
                instance_name='A'
            )
        if not instanceB_language_id and instanceB_language:
            instanceB_language_id = get_language_from_id(
                instance_session=instanceB_session,
                instance_url=instanceB_url,
                instance_key=instanceB_key,
                instance_language=instanceB_language,
                instance_name='B'
            )
    logger.debug({
        'instanceA_language_id': instanceA_language_id,
        'instanceA_language': instanceA_language,
        'instanceB_language_id': instanceB_language_id,
        'instanceB_language': instanceB_language,
        'is_sonarr': is_sonarr,
        'api_version': api_version,
    })
    # get contents to compare
    instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A')
    instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B')
    logger.info('syncing content from instance A to instance B')
    sync_servers(
        instanceA_contents=instanceA_contents,
        instanceB_contents=instanceB_contents,
        instanceB_contentIds=instanceB_contentIds,
        instanceB_language_id=instanceB_language_id,
        instanceB_path=instanceB_path,
        instanceB_profile_id=instanceB_profile_id,
        instanceB_session=instanceB_session,
        instanceB_url=instanceB_url,
        instanceA_profile_filter_id=instanceA_profile_filter_id,
        instanceB_key=instanceB_key,
        instanceA_quality_match=instanceA_quality_match,
        instanceA_tag_filter_id=instanceA_tag_filter_id,
        instanceA_blacklist=instanceA_blacklist
    )
    # if given bidirectional flag then sync from instance B to instance A,
    # swapping every A/B argument pair
    if sync_bidirectionally:
        logger.info('syncing content from instance B to instance A')
        sync_servers(
            instanceA_contents=instanceB_contents,
            instanceB_contents=instanceA_contents,
            instanceB_contentIds=instanceA_contentIds,
            instanceB_language_id=instanceA_language_id,
            instanceB_path=instanceA_path,
            instanceB_profile_id=instanceA_profile_id,
            instanceB_session=instanceA_session,
            instanceB_url=instanceA_url,
            instanceA_profile_filter_id=instanceB_profile_filter_id,
            instanceB_key=instanceA_key,
            instanceA_quality_match=instanceB_quality_match,
            instanceA_tag_filter_id=instanceB_tag_filter_id,
            instanceA_blacklist=instanceB_blacklist
        )
########################################################################################################################
def exit_system():
    """Terminate the current sync run.

    When running in docker we raise instead of exiting so the outer sync
    loop can keep the container alive and retry on the next interval.
    """
    if is_in_docker:
        raise Exception('sync aborted - see error log above')
    else:
        sys.exit(0)
# Run one sync immediately; when in docker, keep syncing forever on an interval.
if is_in_docker:
    logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds))
sync_content()
if is_in_docker:
    while True:
        try:
            time.sleep(instance_sync_interval_seconds)
            sync_content()
        except Exception:
            # Keep the loop alive but surface the failure instead of silently
            # discarding it (previously assigned to an unused variable).
            logger.exception('sync failed; retrying on next interval')
| index.py | 23,135 | we dont want to exit if in docker
gets details of a content item
!/usr/bin/env python if given instance A profile id then we want to filter out content without that id for each content id in instance A, check if it needs to be synced to instance B only skip alrerady synced items if we arent syncing monitoring as well if skipping missing files, we want to skip any that don't have files if given this, we want to filter from instance by profile id if given quality filter we want to filter if quality from instanceA isnt high enough yet if given tag filter then filter by tag - (Sonarr/Radarr v3 only) if black list given then dont sync matching slugs/ids generate content from instance A to sync into instance B sync content if not synced check response and save content id for searching later on if success else if is already synced and we want to sync monitoring then sync that now find matching content from instance B to check monitored status if we found a content match from instance B, then check monitored status - if different then sync from A to B check response and save content id for searching later on if success get sessions if given a profile instead of a profile id then try to find the profile id do the same for profile id filters if they exist do the same for tag id filters if they exist - (only Sonarr) if given language instead of language id then try to find the lanaguage id - (only Sonarr v3) get contents to compare if given bidirectional flag then sync from instance B to instance A | 1,514 | en | 0.805985 |
import logging
import os
import queue
import requests
import time
from threading import Thread
# containerd CRI socket, and whether to talk CRI instead of the docker daemon
cri_sock = os.getenv("KIP_CRI_SOCK", "unix:///var/run/containerd/containerd.sock")
cri_client = os.getenv("KIP_CRI_CLI", False)
# Enterprise Gateway endpoint queried for kernelspecs
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))  # parallel puller threads
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))  # pull attempts per image
interval = int(os.getenv("KIP_INTERVAL", "300"))  # seconds between fetch cycles
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")
POLICY_IF_NOT_PRESENT = "IfNotPresent"
# NOTE(review): constant name is misspelled ("ALYWAYS") - kept for compatibility
POLICY_ALYWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALYWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)
# Select the container runtime client. KIP_CRI_CLI must be an affirmative
# string to enable the CRI client; anything else falls back to docker.
# BUG FIX: the original `cri_client or cri_client in (...)` enabled CRI for
# ANY non-empty value (e.g. "no" or "False"), making the in-check dead code.
if cri_client in ('Yes', 'yes', 'True', 'true'):
    from docker.errors import NotFound
    from cri_api.channel import Channel
    from cri_api.images import Images
    from cri_api.exceptions import ImageServiceException as APIError

    class DockerMocker:
        """Adapts the CRI Images API to the docker-py `client.images` interface."""

        def __init__(self, cli):
            self.cli = cli

        def get(self, img_name):
            # docker-py raises NotFound for missing images; mirror that contract.
            ret = self.cli.get_image(img_name)
            if ret is None:
                raise NotFound
            else:
                return ret

        def pull(self, img_name):
            # Translate "failed to resolve image" into NotFound so callers can
            # treat both runtimes uniformly; re-wrap anything else as APIError.
            try:
                self.cli.pull_image(img_name)
            except APIError as err:
                if "failed to resolve image" in str(err):
                    raise NotFound(err)
                else:
                    raise APIError(err)

    class CriClient:
        """Minimal stand-in for DockerClient backed by the CRI image service."""

        def __init__(self, cri_sock):
            self.channel = Channel(cri_sock)
            self.cli = Images(self.channel)
            self.images = DockerMocker(self.cli)

    docker_client = CriClient(cri_sock)
else:
    from docker.client import DockerClient
    from docker.errors import APIError
    from docker.errors import NotFound

    docker_client = DockerClient.from_env()
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')
def get_kernelspecs():
    """Fetch the configured kernelspecs from the gateway as a dict.

    Raises requests.exceptions.HTTPError when the gateway responds with a
    non-OK status.
    """
    end_point = "{}/api/kernelspecs".format(gateway_host)
    logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
    resp = requests.get(end_point)
    if resp.ok:
        return resp.json()
    raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
def fetch_image_names():
    """Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.

    For process-proxy kernelspecs, the image names are contained in the config stanza - which
    resides in the process-proxy stanza located in the metadata.

    Returns True once the discovered names have been queued, False when the
    kernelspecs could not be retrieved.
    """
    kspecs = None
    try:
        kspecs_response = get_kernelspecs()
        kspecs = kspecs_response.get('kernelspecs')
    except Exception as ex:
        logger.error("Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}".format(ex))
    # Moved out of the original `finally:` - a return inside finally would
    # also swallow SystemExit/KeyboardInterrupt raised during the fetch.
    if kspecs is None:
        return False
    # Locate the configured images within the kernelspecs; a set de-duplicates
    # images shared by multiple kernels. Guard clauses replace the original
    # four-deep nesting.
    images = set()
    for key in kspecs.keys():
        metadata = kspecs.get(key).get('spec').get('metadata')
        if metadata is None:
            continue
        process_proxy = metadata.get('process_proxy')
        if process_proxy is None:
            continue
        config = process_proxy.get('config')
        if config is None:
            continue
        for name_key in ('image_name', 'executor_image_name'):
            image_name = config.get(name_key)
            if image_name is not None:
                images.add(image_name)
    # Hand the image names to the puller threads.
    for image_name in images:
        name_queue.put_nowait(image_name)
    return True
def pull_image(image_name):
    """Pulls the image.

    If the policy is `IfNotPresent` the set of pulled image names is
    checked and, if present, the method returns. Otherwise, the pull attempt is made
    and the set of pulled images is updated, when successful.
    Since NotFound exceptions are tolerated, we trap for only that exception and let
    the caller handle others.
    """
    if policy == POLICY_IF_NOT_PRESENT and image_name in pulled_images:
        # Confirm the previously-pulled image still exists; if it vanished,
        # fall through to a fresh pull attempt.
        logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".
                    format(image_name, policy))
        try:
            start = time.time()
            docker_client.images.get(image_name)
            logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, time.time() - start))
            return
        except NotFound:
            pulled_images.remove(image_name)
            logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))

    logger.debug("Pulling image '{}'...".format(image_name))
    try:
        start = time.time()
        docker_client.images.pull(image_name)
        pulled_images.add(image_name)
        logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, time.time() - start))
    except NotFound:
        logger.warning("Image '{}' was not found!".format(image_name))
def puller():
    """Thread-based puller.

    Gets image name from the queue and attempts to pull the image. Any issues, except
    for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the
    retries have been exceeded, the queue task is marked as done.
    """
    while True:
        image_name = name_queue.get()
        if image_name is None:  # sentinel: shut this worker down
            break
        for attempt in range(1, num_retries + 1):
            try:
                pull_image(image_name)
                break
            except APIError as ex:
                if attempt < num_retries:
                    logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".
                                   format(attempt, image_name, ex))
                else:
                    logger.error("Attempt {} to pull image '{}' failed with exception: {}".
                                 format(attempt, image_name, ex))
        name_queue.task_done()
if __name__ == "__main__":
    logger = logging.getLogger('kernel_image_puller')
    logger.setLevel(log_level)
    # Determine pull policy.
    pulled_images = set()  # image names confirmed present on this node
    if policy not in policies:
        logger.warning("Invalid pull policy detected in KIP_PULL_POLICY: '{}'. Using policy '{}'.".
                       format(policy, POLICY_IF_NOT_PRESENT))
        policy = POLICY_IF_NOT_PRESENT
    # Echo the effective configuration for troubleshooting.
    logger.info("Starting Kernel Image Puller with the following parameters:")
    logger.info("KIP_GATEWAY_HOST: {}".format(gateway_host))
    logger.info("KIP_CRI_CLI: {}".format(cri_client))
    logger.info("KIP_CRI_SOCK: {}".format(cri_sock))
    logger.info("KIP_INTERVAL: {} secs".format(interval))
    logger.info("KIP_NUM_PULLERS: {}".format(num_pullers))
    logger.info("KIP_NUM_RETRIES: {}".format(num_retries))
    logger.info("KIP_PULL_POLICY: {}".format(policy))
    logger.info("KIP_LOG_LEVEL: {}\n".format(log_level))
    # Create an empty queue and start the puller threads. The number of puller threads is configurable.
    name_queue = queue.Queue()
    threads = []
    for i in range(num_pullers):
        t = Thread(target=puller, name="t{}".format(i + 1))
        t.start()
        threads.append(t)
    # Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues
    # fetching the image names, wait the interval number of seconds and perform the operation again.
    wait_interval = 5  # Start with 5 seconds to ensure EG service gets started...
    time.sleep(wait_interval)
    while True:
        fetched = fetch_image_names()
        if fetched:
            wait_interval = interval  # Once we have fetched kernelspecs, update wait_interval
            name_queue.join()  # block until every queued image has been processed
            logger.info("Images pulled. Sleeping {} seconds...\n".format(wait_interval))
        else:
            logger.info("Sleeping {} seconds to fetch image names...\n".format(wait_interval))
        time.sleep(wait_interval)
| kernel_image_puller.py | 8,449 | Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.
For process-proxy kernelspecs, the image names are contained in the config stanza - which
resides in the process-proxy stanza located in the metadata.
Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs
Pulls the image.
If the policy is `IfNotPresent` the set of pulled image names is
checked and, if present, the method returns. Otherwise, the pull attempt is made
and the set of pulled images is updated, when successful.
Since NotFound exceptions are tolerated, we trap for only that exception and let
the caller handle others.
Thread-based puller.
Gets image name from the queue and attempts to pull the image. Any issues, except
for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the
retries have been exceeded, the queue task is marked as done.
Locate the configured images within the kernelspecs and add to set for duplicate management Add the image names to the name queue Image has been pulled, but make sure it still exists. If it doesn't exist let this drop through to actual pull Determine pull policy. Create an empty queue and start the puller threads. The number of puller threads is configurable. Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues fetching the image names, wait the interval number of seconds and perform the operation again. Start with 5 seconds to ensure EG service gets started... Once we have fetched kernelspecs, update wait_interval | 1,598 | en | 0.918109 |
import socket
import timeit
import numpy as np
from PIL import Image
from datetime import datetime
import os
import sys
from collections import OrderedDict
sys.path.append('./')
# PyTorch includes
import torch
from torch.autograd import Variable
from torchvision import transforms
import cv2
# Custom includes
from networks import deeplab_xception_transfer, graph
from dataloaders import custom_transforms as tr
#
import argparse
import torch.nn.functional as F
# RGB palette for the 20 CIHP/LIP human-parsing classes; index 0 is background.
label_colours = [(0,0,0)
                , (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
                , (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
def flip(x, dim):
    """Return `x` reversed along dimension `dim` (equivalent to torch.flip(x, [dim]))."""
    reversed_idx = torch.arange(x.size(dim) - 1, -1, -1,
                                dtype=torch.long, device=x.device)
    selector = [slice(None)] * x.dim()
    selector[dim] = reversed_idx
    return x[tuple(selector)]
def flip_cihp(tail_list):
    '''
    Reorder a horizontally-flipped CIHP prediction so left/right paired
    classes swap places.

    :param tail_list: tail_list size is n_class x h x w (n_class == 20)
    :return: tensor of shape 20 x h x w with channel pairs 14/15, 16/17
             and 18/19 exchanged
    '''
    # Channels 0-13 are left/right symmetric classes; the remaining three
    # pairs are left/right counterparts that must be exchanged after a
    # horizontal flip. A permutation list replaces the original seven
    # hand-written swap assignments.
    perm = list(range(14)) + [15, 14, 17, 16, 19, 18]
    return torch.cat([tail_list[idx].unsqueeze(0) for idx in perm], dim=0)
def decode_labels(mask, num_images=1, num_classes=20):
    """Decode batch of segmentation masks.
    Args:
      mask: result of inference after taking argmax (integer class ids).
      num_images: number of images to decode from the batch.
      num_classes: number of classes to predict (including background).
    Returns:
      A batch with num_images RGB images of the same size as the input.
    """
    n, h, w = mask.shape
    assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (
        n, num_images)
    # Vectorized palette lookup replaces the original per-pixel PIL loop:
    # one fancy-indexing pass per image instead of O(h*w) Python pixel writes.
    # Pixels with id >= num_classes stay black, matching the old behavior.
    palette = np.asarray(label_colours, dtype=np.uint8)
    outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
    for i in range(num_images):
        img_mask = mask[i]
        valid = img_mask < num_classes
        outputs[i][valid] = palette[img_mask[valid]]
    return outputs
def read_img(img_path):
    """Load the image at `img_path` and return it as an RGB PIL image."""
    return Image.open(img_path).convert('RGB')
def img_transform(img, transform=None):
    """Wrap `img` in a {'image', 'label'} sample dict and apply `transform`.

    The label is a dummy 0 since inference has no ground truth. If
    `transform` is None the untransformed sample is returned (the original
    called it unconditionally and crashed on the default argument).
    """
    sample = {'image': img, 'label': 0}
    if transform is not None:
        sample = transform(sample)
    return sample
def get_img_paths(imgs_dir):
    """Recursively collect image file paths under `imgs_dir`, sorted.

    Only the exact suffixes .png/.PNG/.jpg/.JPG/.jpeg/.JPEG are accepted.
    """
    extensions = ('.png', '.PNG', '.jpg', '.JPG', '.jpeg', '.JPEG')
    img_paths = []
    for dirpath, _, filenames in os.walk(imgs_dir):
        img_paths.extend(os.path.join(dirpath, name)
                         for name in filenames if name.endswith(extensions))
    return sorted(img_paths)
def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
    '''
    Multi-scale, flip-averaged human-parsing inference for a single image;
    saves the color-coded result as <output_path>/<output_name>.png.

    :param net: graph-transfer deeplab network (must already be on the GPU)
    :param img_path: path of the input image
    :param output_path: directory the PNG result is written to
    :param output_name: output file basename (no extension)
    :param use_gpu: gates only the input .cuda() transfer; CUDA is required
                    regardless because the adjacency tensors are moved to GPU
    :return: None (writes the PNG as a side effect)
    '''
    # adj: adjacency matrices for the graph-reasoning heads (CIHP 20-class /
    # PASCAL 7-class label sets), expanded to the (1, 1, ...) layout the
    # network's forward() expects.
    adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
    adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
    cihp_adj = graph.preprocess_adj(graph.cihp_graph)
    adj3_ = Variable(torch.from_numpy(cihp_adj).float())
    adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
    # multi-scale: build one plain and one horizontally-flipped sample per scale
    scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
    img = read_img(img_path)
    testloader_list = []
    testloader_flip_list = []
    for pv in scale_list:
        composed_transforms_ts = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()])
        composed_transforms_ts_flip = transforms.Compose([
            tr.Scale_only_img(pv),
            tr.HorizontalFlip_only_img(),
            tr.Normalize_xception_tf_only_img(),
            tr.ToTensor_only_img()])
        testloader_list.append(img_transform(img, composed_transforms_ts))
        # print(img_transform(img, composed_transforms_ts))
        testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
    # print(testloader_list)
    start_time = timeit.default_timer()
    # One testing epoch
    net.eval()
    # 1 0.5 0.75 1.25 1.5 1.75 ; flip:
    for iii, sample_batched in enumerate(zip(testloader_list, testloader_flip_list)):
        inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
        inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
        inputs = inputs.unsqueeze(0)
        inputs_f = inputs_f.unsqueeze(0)
        # batch the plain and flipped versions together for one forward pass
        inputs = torch.cat((inputs, inputs_f), dim=0)
        if iii == 0:
            # remember the scale-1 spatial size; later scales are upsampled to it
            _, _, h, w = inputs.size()
        # assert inputs.size() == inputs_f.size()
        # Forward pass of the mini-batch
        inputs = Variable(inputs, requires_grad=False)
        with torch.no_grad():
            if use_gpu >= 0:
                inputs = inputs.cuda()
            # outputs = net.forward(inputs)
            outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
            # average plain logits with the un-flipped (channel-swapped) flipped logits
            outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
            outputs = outputs.unsqueeze(0)
            if iii > 0:
                outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
                outputs_final = outputs_final + outputs
            else:
                outputs_final = outputs.clone()
    ################ plot pic
    predictions = torch.max(outputs_final, 1)[1]
    results = predictions.cpu().numpy()
    vis_res = decode_labels(results)
    parsing_im = Image.fromarray(vis_res[0])
    parsing_im.save(output_path+'/{}.png'.format(output_name))
    #we don't need the gray image
    #cv2.imwrite(output_path+'/{}_gray.png'.format(output_name), results[0, :, :])
    end_time = timeit.default_timer()
    print('time used for the multi-scale image inference' + ' is :' + str(end_time - start_time))
if __name__ == '__main__':
    '''argparse begin'''
    parser = argparse.ArgumentParser()
    # parser.add_argument('--loadmodel',default=None,type=str)
    parser.add_argument('--loadmodel', default='', type=str)   # path to pretrained weights (required in practice)
    parser.add_argument('--imgs_dir', default='', type=str)    # directory scanned recursively for images
    parser.add_argument('--output_dir', default='', type=str)  # where the <name>_seg.png results are written
    parser.add_argument('--use_gpu', default=1, type=int)      # must stay > 0; CPU inference is rejected below
    opts = parser.parse_args()
    net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20,
                                                                                 hidden_layers=128,
                                                                                 source_classes=7, )
    if not opts.loadmodel == '':
        x = torch.load(opts.loadmodel)
        net.load_source_model(x)
        print('load model:', opts.loadmodel)
    else:
        print('no model load !!!!!!!!')
        raise RuntimeError('No model!!!!')
    if opts.use_gpu >0 :
        net.cuda()
        use_gpu = True
    else:
        use_gpu = False
        raise RuntimeError('must use the gpu!!!!')
    # Segment every image found, naming each result <original-name>_seg.png
    img_paths = get_img_paths(opts.imgs_dir)
    for idx, path in enumerate(img_paths):
        filename = os.path.splitext(os.path.basename(path))[0]
        output_name = filename +"_seg"
        inference(net=net, img_path=path, output_path=opts.output_dir , output_name=output_name, use_gpu=use_gpu)
| exp/inference/inference_dir.py | 7,939 | Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
:param tail_list: tail_list size is 1 x n_class x h x w
:return:
:param net:
:param img_path:
:param output_path:
:return:
PyTorch includes Custom includes tail_list = tail_list[0] return is RGB pic adj multi-scale print(img_transform(img, composed_transforms_ts)) print(testloader_list) One testing epoch 1 0.5 0.75 1.25 1.5 1.75 ; flip: assert inputs.size() == inputs_f.size() Forward pass of the mini-batch outputs = net.forward(inputs) plot picwe don't need the gray imagecv2.imwrite(output_path+'/{}_gray.png'.format(output_name), results[0, :, :]) parser.add_argument('--loadmodel',default=None,type=str) | 907 | en | 0.587187 |
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
    """Base class for Slack Web API streams.

    Provides cursor-based pagination, the shared `limit` request parameter,
    record extraction from the endpoint-specific `data_field`, and handling
    of Slack's Retry-After rate limiting.
    """

    url_base = "https://slack.com/api/"
    primary_key = "id"
    page_size = 100

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Slack paginates with an opaque cursor in response_metadata; a
        # missing or empty cursor marks the final page.
        cursor = response.json().get("response_metadata", {}).get("next_cursor")
        return {"cursor": cursor} if cursor else None

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        # Always request a full page; merge in the pagination cursor when present.
        return {"limit": self.page_size, **(next_page_token or {})}

    def parse_response(
        self,
        response: requests.Response,
        stream_state: Mapping[str, Any] = None,
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Iterable[MutableMapping]:
        yield from response.json().get(self.data_field, [])

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        # On HTTP 429 Slack supplies the wait time in the Retry-After header
        # (https://api.slack.com/docs/rate-limits#web); for any other status
        # return None to use the default retry behavior.
        if response.status_code == 429:
            return int(response.headers.get("Retry-After", 0))
        return None

    @property
    @abstractmethod
    def data_field(self) -> str:
        """The name of the field in the response which contains the data"""
class Channels(SlackStream):
    """Public channels, via the conversations.list endpoint."""

    data_field = "channels"

    def path(self, **kwargs) -> str:
        return "conversations.list"

    def request_params(self, **kwargs) -> MutableMapping[str, Any]:
        # Restrict the listing to public channels only.
        return {**super().request_params(**kwargs), "types": "public_channel"}
class ChannelMembers(SlackStream):
    """Channel membership, emitted as {member_id, channel_id} join records."""

    data_field = "members"

    def path(self, **kwargs) -> str:
        return "conversations.members"

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        base = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        return {**base, "channel": stream_slice["channel_id"]}

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        # The endpoint returns bare member-id strings; pair each with its
        # channel so downstream syncs get a proper join table.
        channel_id = stream_slice["channel_id"]
        for member_id in super().parse_response(response, **kwargs):
            yield {"member_id": member_id, "channel_id": channel_id}

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        # One slice per public channel.
        channels_stream = Channels(authenticator=self.authenticator)
        for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel_id": channel["id"]}
class Users(SlackStream):
    """All users in the Slack workspace, via the users.list endpoint."""
    data_field = "members"
    def path(self, **kwargs) -> str:
        return "users.list"
# Incremental Streams
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
    """
    Yields a list of the beginning and ending timestamps of each day between the start date and now.
    The return value is a pendulum.period
    """
    now = pendulum.now()
    cursor = start_date
    # Walk forward one interval at a time; the final period may extend past `now`.
    while cursor <= now:
        yield pendulum.period(cursor, cursor + interval)
        cursor = cursor + interval
class IncrementalMessageStream(SlackStream, ABC):
    """Base for message streams that sync incrementally on message timestamp."""
    data_field = "messages"
    cursor_field = "float_ts"  # message "ts" coerced to float for comparisons
    primary_key = ["channel_id", "ts"]
    def __init__(self, default_start_date: DateTime, **kwargs):
        # Earliest point to sync from when no saved state exists yet.
        self._start_ts = default_start_date.timestamp()
        super().__init__(**kwargs)
    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        # The slice carries the endpoint-specific params (channel, oldest/latest window).
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        params.update(**stream_slice)
        return params
    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        for record in super().parse_response(response, **kwargs):
            # Stamp each message with its channel and a float cursor value.
            record[self.primary_key[0]] = stream_slice.get("channel", "")
            record[self.cursor_field] = float(record[self.primary_key[1]])
            yield record
    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        # State only moves forward: keep the max of the previous cursor and
        # the latest record, defaulting to the configured start timestamp.
        current_stream_state = current_stream_state or {}
        current_stream_state[self.cursor_field] = max(
            latest_record[self.cursor_field], current_stream_state.get(self.cursor_field, self._start_ts)
        )
        return current_stream_state
class ChannelMessages(IncrementalMessageStream):
    """Messages per public channel (conversations.history), sliced by day."""
    def path(self, **kwargs) -> str:
        return "conversations.history"
    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        # One slice per day, from the saved cursor (or default start) to now.
        stream_state = stream_state or {}
        start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
        for period in chunk_date_range(start_date):
            yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}
    def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
        # Channel is provided when reading threads
        if "channel" in stream_slice:
            yield from super().read_records(stream_slice=stream_slice, **kwargs)
        else:
            # if channel is not provided, then get channels and read accordingly
            # NOTE(review): stream_slice is mutated in place here - each channel
            # id overwrites the previous one in the same dict across iterations.
            channels = Channels(authenticator=self.authenticator)
            for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
                stream_slice["channel"] = channel_record["id"]
                yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
    """Thread replies (Slack "conversations.replies"), read with a lookback
    window so that late replies to old threads are still picked up."""

    def __init__(self, lookback_window: pendulum.Duration, **kwargs):
        # Annotation improved from Mapping[str, int]: SourceSlack.streams
        # passes pendulum.Duration(days=...), and the datetime subtraction in
        # stream_slices below requires a Duration.
        self.messages_lookback_window = lookback_window
        super().__init__(**kwargs)

    def path(self, **kwargs) -> str:
        return "conversations.replies"

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """
        The logic for incrementally syncing threads is not very obvious, so buckle up.

        To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.

        One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
        single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
        way to guarantee that a thread deep in the past didn't receive a new message.

        A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
        and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
        past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
        the logic simple to reason about.

        Good luck.
        """
        stream_state = stream_state or {}
        channels_stream = Channels(authenticator=self.authenticator)
        if self.cursor_field in stream_state:
            # Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date
            # found in the state minus 7 days to pick up any new messages in threads.
            # If there is state always use lookback
            messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
        else:
            # If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date
            messages_start_date = pendulum.from_timestamp(self._start_ts)
        messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
        for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
            self.logger.info(f"Syncing replies {message_chunk}")
            # Each date window is replayed once per channel: the chunk dict is
            # reused, with the channel id swapped in for every iteration.
            for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
                message_chunk["channel"] = channel["id"]
                for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
                    # NOTE(review): parse_response indexes self.primary_key with
                    # [0]/[1], suggesting a composite key; indexing the message
                    # with the whole key here looks suspicious — confirm whether
                    # this should be message[self.primary_key[1]].
                    yield {"channel": channel["id"], self.cursor_field: message[self.primary_key]}
class JoinChannelsStream(HttpStream):
    """
    This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
    Its responses should only be logged for debugging reasons, not read as records.
    """

    url_base = "https://slack.com/api/"
    http_method = "POST"
    primary_key = "id"

    def path(self, **kwargs) -> str:
        return "conversations.join"

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Joining a channel is a single call; there is never a next page.
        return None

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        # One slice (and therefore one join request) per channel in the workspace.
        all_channels = Channels(authenticator=self.authenticator)
        for record in all_channels.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel": record["id"], "channel_name": record["name"]}

    def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
        return {"channel": stream_slice["channel"]}

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        # Emitted only so the caller can log the outcome, never as real records.
        return [{"message": f"Successfully joined channel: {stream_slice['channel_name']}"}]
class SourceSlack(AbstractSource):
    """Airbyte source implementation for Slack."""

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
        """Probe the provided API token by listing a single user.

        Returns (True, None) on success and (False, reason) otherwise. The API
        call is wrapped so an invalid or revoked token is reported as a failed
        check instead of letting the client exception crash the connector.
        """
        try:
            slack_client = WebClient(token=config["api_token"])
            users = slack_client.users_list(limit=1).get("members", [])
        except Exception as error:
            return False, f"Unable to connect to Slack with the provided token: {repr(error)}"
        if len(users) > 0:
            return True, None
        else:
            return False, "There are no users in the given Slack instance"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate every stream exposed by this connector."""
        authenticator = TokenAuthenticator(config["api_token"])
        default_start_date = pendulum.parse(config["start_date"])
        threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
        streams = [
            Channels(authenticator=authenticator),
            ChannelMembers(authenticator=authenticator),
            ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
            Threads(authenticator=authenticator, default_start_date=default_start_date, lookback_window=threads_lookback_window),
            Users(authenticator=authenticator),
        ]
        # To sync data from channels, the bot backed by this token needs to join all those channels. This operation is idempotent.
        if config["join_channels"]:
            logger = AirbyteLogger()
            logger.info("joining Slack channels")
            join_channels_stream = JoinChannelsStream(authenticator=authenticator)
            for stream_slice in join_channels_stream.stream_slices():
                for message in join_channels_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
                    logger.info(message["message"])
        return streams
| airbyte-integrations/connectors/source-slack/source_slack/source.py | 14,299 | This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
Its responses should only be logged for debugging reasons, not read as records.
Yields a list of the beginning and ending timestamps of each day between the start date and now.
The return value is a pendulum.period
The name of the field in the response which contains the data
The logic for incrementally syncing threads is not very obvious, so buckle up.
To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
way to guarantee that a thread deep in the past didn't receive a new message.
A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
the logic simple to reason about.
Good luck.
MIT License Copyright (c) 2020 Airbyte Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Slack uses a cursor-based pagination strategy. Extract the cursor from the response if it exists and return it in a format that can be used to update request parameters This method is called if we run into the rate limit. Slack puts the retry time in the `Retry-After` response header so we we return that value. If the response is anything other than a 429 (e.g: 5XX) fall back on default retry behavior. 
https://api.slack.com/docs/rate-limitsweb Slack just returns raw IDs as a string, so we want to put them in a "join table" format Incremental Streams Each stream_slice contains the beginning and ending timestamp for a 24 hour period Channel is provided when reading threads if channel is not provided, then get channels and read accordingly Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date found in the state minus 7 days to pick up any new messages in threads. If there is state always use lookback If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date No pagination To sync data from channels, the bot backed by this token needs to join all those channels. This operation is idempotent. | 3,764 | en | 0.909283 |
# -*- coding: utf-8 -*-
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
# Heredoc wrapper: the pod executes the interpolated {} text as a bash script.
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""

# Heredoc that writes the interpolated {} text (the JSON-serialized run
# config) to ./config.json inside the pod.
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""

# Command used to launch a plain dask worker when not bootstrapping kubernetes.
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
extra_packages.append('apt get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
if len(extra_packages) > 1:
return '\n '.join(extra_packages)
return extra_packages[0]
def _generate_cluster_spec(config, kubernetes=False):
    """Build the kubernetes pod spec for a dask worker (or bootstrap) pod.

    Args:
        config (dict):
            Full run configuration; its ``dask_cluster`` section is used.
        kubernetes (bool):
            When True, generate the spec of a bootstrap pod that writes the
            config to ``config.json`` and re-runs this module inside the pod.
            When False, generate a plain dask-worker pod spec.

    Returns:
        dict: pod specification consumable by ``KubeCluster.from_dict`` or
        the kubernetes API.
    """
    extra_setup = ''
    dask_cluster = config['dask_cluster']
    metadata = {}
    # Fixed: ``worker_config`` may be absent; defaulting to {} keeps the
    # ``.get`` lookups below working instead of raising AttributeError.
    worker_config = dask_cluster.get('worker_config', {})
    if worker_config.get('setup'):
        extra_setup = _get_extra_setup(worker_config['setup'])
    if kubernetes:
        # Derive a DNS-safe generateName from the image name.
        name = worker_config.get('image', 'daskdev/dask:latest')
        name = '{}-'.format(re.sub(r'[\W_]', '-', name))
        metadata['generateName'] = name
        config_command = CONFIG_TEMPLATE.format(json.dumps(config))
        run_command = 'python -u -m btb_benchmark.kubernetes config.json'
        extra_setup = '\n'.join([extra_setup, config_command, run_command])
    else:
        run_command = WORKER_COMM
        extra_setup = '\n'.join([extra_setup, run_command])
    run_commands = RUN_TEMPLATE.format(extra_setup)
    spec = {
        'metadata': metadata,
        'spec': {
            'restartPolicy': 'Never',
            'containers': [{
                'args': ['-c', run_commands],
                'command': ['tini', '-g', '--', '/bin/sh'],
                'image': worker_config.get('image', 'daskdev/dask:latest'),
                'name': 'dask-worker',
                'resources': worker_config.get('resources', {})
            }]
        }
    }
    return spec
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
    """Upload the results table to S3 as a CSV object.

    Args:
        bucket (str): target S3 bucket name.
        path (str): object key to write.
        results (pandas.DataFrame): table to serialize with ``to_csv``.
        aws_key (str): optional AWS access key id; when None, boto3 falls
            back to its default credential resolution.
        aws_secret (str): optional AWS secret access key.
    """
    client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
    client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
    """Start a Dask Cluster using dask-kubernetes and run a function.

    Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
    forming a `dask` cluster. Then, a function specified from `config` is being imported and
    run with the given arguments. The tasks created by this `function` are being run on the
    `dask` cluster for distributed computation.

    The config dict must contain the following sections:
        * run
        * dask_cluster
        * output

    Args:
        config (dict):
            Config dictionary.

    Returns:
        The function's results when no ``output`` section is configured;
        otherwise the results are persisted (S3 or local CSV) and nothing
        is returned.
    """
    # Validate the output path early, before spinning up any cluster resources.
    output_conf = config.get('output')
    if output_conf:
        path = output_conf.get('path')
        if not path:
            raise ValueError('An output path must be provided when providing `output`.')
    cluster_spec = _generate_cluster_spec(config, kubernetes=False)
    cluster = KubeCluster.from_dict(cluster_spec)
    workers = config['dask_cluster'].get('workers')
    # ``workers`` may be absent (autoscale), an int (fixed size) or a dict of
    # adapt() keyword arguments (bounded autoscale).
    if not workers:
        cluster.adapt()
    elif isinstance(workers, int):
        cluster.scale(workers)
    else:
        cluster.adapt(**workers)
    client = Client(cluster)
    # Fail fast if client/scheduler/worker package versions are inconsistent.
    client.get_versions(check=True)
    try:
        run = _import_function(config['run'])
        kwargs = config['run']['args']
        results = run(**kwargs)
    finally:
        # Always release cluster resources, even when the run fails.
        client.close()
        cluster.close()
    if output_conf:
        bucket = output_conf.get('bucket')
        try:
            if bucket:
                aws_key = output_conf.get('key')
                aws_secret = output_conf.get('secret_key')
                _upload_to_s3(bucket, path, results, aws_key, aws_secret)
            else:
                os.makedirs(os.path.dirname(path), exist_ok=True)
                results.to_csv(path)
        except Exception:
            # Best effort: never lose results just because storage failed.
            print('Error storing results. Falling back to console dump.')
            print(_df_to_csv_str(results))
    else:
        return results
def run_on_kubernetes(config, namespace='default'):
    """Run dask function inside a pod using the given config.

    Create a pod, using the local kubernetes configuration that starts a Dask Cluster
    using dask-kubernetes and runs a function specified within the `config` dictionary.

    Args:
        config (dict):
            Config dictionary.
        namespace (str):
            Kubernetes namespace were the pod will be created.
    """
    # read local config
    load_kube_config()
    c = Configuration()
    Configuration.set_default(c)

    # create client and create pod on default namespace
    core_v1 = core_v1_api.CoreV1Api()
    # kubernetes=True embeds the serialized config plus a bootstrap command in
    # the pod spec, so the pod re-runs this module with the same configuration.
    spec = _generate_cluster_spec(config, kubernetes=True)
    core_v1.create_namespaced_pod(body=spec, namespace=namespace)
    print('Pod created.')
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
help='Namespace were the pod will be created.')
return parser
def main():
    """CLI entry point: parse arguments, load the config file and either
    spawn a runner pod or execute the configured function directly."""
    # Parse args
    parser = _get_parser()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    args = parser.parse_args()

    # Logger setup: 0 flags -> WARNING, -v -> INFO, -vv -> DEBUG.
    log_level = (3 - args.verbose) * 10
    fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
    logging.basicConfig(level=log_level, format=fmt)

    # The config file format is decided by its extension: YAML or JSON.
    with open(args.config) as config_file:
        if args.config.endswith('yaml') or args.config.endswith('yml'):
            config = yaml.safe_load(config_file)
        else:
            config = json.load(config_file)

    if args.create_pod:
        run_on_kubernetes(config, args.namespace)
    else:
        results = run_dask_function(config)
        if results is not None:
            print(tabulate.tabulate(
                results,
                tablefmt='github',
                headers=results.columns
            ))


if __name__ == '__main__':
    main()
| benchmark/btb_benchmark/kubernetes.py | 7,915 | Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace were the pod will be created.
-*- coding: utf-8 -*- read local config create client and create pod on default namespace Parse args Logger setup | 995 | en | 0.657675 |
"""
Django settings for webapp2 project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): load this from an environment variable before deploying;
# a key committed to source control must be considered compromised.
SECRET_KEY = 'django-insecure-jtp=j6oy)@&t#9l$zv#1iavkq#l-#9f$*z97d@623=nzeo@pgm'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Must list the served hostnames before DEBUG is turned off in production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'webapp2.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'webapp2.wsgi.application'


# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/

STATIC_URL = 'static/'

# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| webapp2/settings.py | 3,222 | Django settings for webapp2 project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/4.0/ref/settings/databases Password validation https://docs.djangoproject.com/en/4.0/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/4.0/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/4.0/howto/static-files/ Default primary key field type https://docs.djangoproject.com/en/4.0/ref/settings/default-auto-field | 1,080 | en | 0.650195 |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Crown Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
import argparse
import configparser
import logging
import os
import subprocess
import sys
def main():
    """Parse CLI options, discover fuzz targets and dispatch to the selected
    mode: generate seeds, merge corpora, or run every target once."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='''Run the fuzz targets with all inputs from the seed_dir once.''',
    )
    parser.add_argument(
        "-l",
        "--loglevel",
        dest="loglevel",
        default="INFO",
        help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
    )
    parser.add_argument(
        '--valgrind',
        action='store_true',
        help='If true, run fuzzing binaries under the valgrind memory error detector',
    )
    parser.add_argument(
        '-x',
        '--exclude',
        help="A comma-separated list of targets to exclude",
    )
    parser.add_argument(
        '--par',
        '-j',
        type=int,
        default=4,
        help='How many targets to merge or execute in parallel.',
    )
    parser.add_argument(
        'seed_dir',
        help='The seed corpus to run on (must contain subfolders for each fuzz target).',
    )
    parser.add_argument(
        'target',
        nargs='*',
        help='The target(s) to run. Default is to run all targets.',
    )
    parser.add_argument(
        '--m_dir',
        help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
    )
    parser.add_argument(
        '-g',
        '--generate',
        action='store_true',
        help='Create new corpus seeds (or extend the existing ones) by running'
             ' the given targets for a finite number of times. Outputs them to'
             ' the passed seed_dir.'
    )

    args = parser.parse_args()

    # Set up logging (accepts either a numeric level or a level name).
    logging.basicConfig(
        format='%(message)s',
        level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
    )

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))

    if not config["components"].getboolean("ENABLE_FUZZ"):
        logging.error("Must have fuzz targets built")
        sys.exit(1)

    # Build list of tests from the generated makefile.
    test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))

    if not test_list_all:
        logging.error("No fuzz targets found")
        sys.exit(1)

    logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))

    args.target = args.target or test_list_all  # By default run all
    # Warn about requested targets that do not exist, then keep only those
    # that do, minus explicit exclusions.
    test_list_error = list(set(args.target).difference(set(test_list_all)))
    if test_list_error:
        logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
    test_list_selection = list(set(test_list_all).intersection(set(args.target)))
    if not test_list_selection:
        logging.error("No fuzz targets selected")
    if args.exclude:
        for excluded_target in args.exclude.split(","):
            if excluded_target not in test_list_selection:
                logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
                continue
            test_list_selection.remove(excluded_target)
    test_list_selection.sort()

    logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))

    if not args.generate:
        # Report harnesses with no (or an empty) seed corpus directory.
        test_list_seedless = []
        for t in test_list_selection:
            corpus_path = os.path.join(args.seed_dir, t)
            if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
                test_list_seedless.append(t)
        test_list_seedless.sort()
        if test_list_seedless:
            logging.info(
                "Fuzzing harnesses lacking a seed corpus: {}".format(
                    " ".join(test_list_seedless)
                )
            )
            logging.info("Please consider adding a fuzz seed corpus at https://github.com/crown-core/qa-assets")

    # Sanity check one binary's -help output to confirm a libFuzzer build.
    try:
        help_output = subprocess.run(
            args=[
                os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
                '-help=1',
            ],
            timeout=20,
            check=True,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        ).stderr
        if "libFuzzer" not in help_output:
            logging.error("Must be built with libFuzzer")
            sys.exit(1)
    except subprocess.TimeoutExpired:
        logging.error("subprocess timed out: Currently only libFuzzer is supported")
        sys.exit(1)

    with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
        if args.generate:
            return generate_corpus_seeds(
                fuzz_pool=fuzz_pool,
                build_dir=config["environment"]["BUILDDIR"],
                seed_dir=args.seed_dir,
                targets=test_list_selection,
            )

        if args.m_dir:
            merge_inputs(
                fuzz_pool=fuzz_pool,
                corpus=args.seed_dir,
                test_list=test_list_selection,
                build_dir=config["environment"]["BUILDDIR"],
                merge_dir=args.m_dir,
            )
            return

        run_once(
            fuzz_pool=fuzz_pool,
            corpus=args.seed_dir,
            test_list=test_list_selection,
            build_dir=config["environment"]["BUILDDIR"],
            use_valgrind=args.valgrind,
        )
def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
    """Generates new corpus seeds.

    Run {targets} without input, and outputs the generated corpus seeds to
    {seed_dir}.
    """
    logging.info("Generating corpus seeds to {}".format(seed_dir))

    def job(command):
        # Worker task: run one fuzz binary and log its stderr output.
        logging.debug("Running '{}'\n".format(" ".join(command)))
        logging.debug("Command '{}' output:\n'{}'\n".format(
            ' '.join(command),
            subprocess.run(command, check=True, stderr=subprocess.PIPE,
                           universal_newlines=True).stderr
        ))

    futures = []
    for target in targets:
        target_seed_dir = os.path.join(seed_dir, target)
        os.makedirs(target_seed_dir, exist_ok=True)
        # -runs bounds the session so seed generation terminates.
        command = [
            os.path.join(build_dir, "src", "test", "fuzz", target),
            "-runs=100000",
            target_seed_dir,
        ]
        futures.append(fuzz_pool.submit(job, command))

    # Propagate any exception raised inside the worker tasks.
    for future in as_completed(futures):
        future.result()
def merge_inputs(*, fuzz_pool, corpus, test_list, build_dir, merge_dir):
    """Merge the inputs from merge_dir/<target> into corpus/<target> for
    every selected target, running the merges in parallel on fuzz_pool."""
    logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
    jobs = []
    for t in test_list:
        args = [
            os.path.join(build_dir, 'src', 'test', 'fuzz', t),
            '-merge=1',
            '-use_value_profile=1',  # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
            os.path.join(corpus, t),
            os.path.join(merge_dir, t),
        ]
        # Both directories must exist before the binary is invoked.
        os.makedirs(os.path.join(corpus, t), exist_ok=True)
        os.makedirs(os.path.join(merge_dir, t), exist_ok=True)

        def job(t, args):
            # t/args are bound as parameters, avoiding the late-binding
            # closure pitfall of defining a function inside a loop.
            output = 'Run {} with args {}\n'.format(t, " ".join(args))
            output += subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
            logging.debug(output)

        jobs.append(fuzz_pool.submit(job, t, args))

    # Propagate any exception raised inside the worker tasks.
    for future in as_completed(jobs):
        future.result()
def run_once(*, fuzz_pool, corpus, test_list, build_dir, use_valgrind):
    """Run every selected fuzz target once over its seed corpus, in parallel.

    Exits the process with status 1 as soon as any target reports failure.
    """
    jobs = []
    for t in test_list:
        corpus_path = os.path.join(corpus, t)
        os.makedirs(corpus_path, exist_ok=True)
        args = [
            os.path.join(build_dir, 'src', 'test', 'fuzz', t),
            '-runs=1',
            corpus_path,
        ]
        if use_valgrind:
            # --error-exitcode turns valgrind findings into a non-zero return
            # code, so they are caught by check_returncode below.
            args = ['valgrind', '--quiet', '--error-exitcode=1'] + args

        def job(t, args):
            # Parameters bind t/args per task (no late-binding closure issue).
            output = 'Run {} with args {}'.format(t, args)
            result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
            output += result.stderr
            return output, result

        jobs.append(fuzz_pool.submit(job, t, args))

    for future in as_completed(jobs):
        output, result = future.result()
        logging.debug(output)
        try:
            result.check_returncode()
        except subprocess.CalledProcessError as e:
            if e.stdout:
                logging.info(e.stdout)
            if e.stderr:
                logging.info(e.stderr)
            logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode))
            sys.exit(1)
def parse_test_list(makefile):
    """Return the fuzz target names listed under ``FUZZ_TARGETS =`` in the
    given makefile, with the ``test/fuzz/`` prefix and trailing ` \\` line
    continuations stripped."""
    targets = []
    in_target_section = False
    with open(makefile, encoding='utf-8') as handle:
        for raw_line in handle:
            cleaned = raw_line.strip().replace('test/fuzz/', '').replace(' \\', '')
            if not in_target_section:
                # Skip everything until the FUZZ_TARGETS assignment appears.
                if cleaned == 'FUZZ_TARGETS =':
                    in_target_section = True
                continue
            if not cleaned:
                # A blank line terminates the target list.
                break
            targets.append(cleaned)
    return targets
# Script entry point.
if __name__ == '__main__':
    main()
| test/fuzz/test_runner.py | 9,885 | Generates new corpus seeds.
Run {targets} without input, and outputs the generated corpus seeds to
{seed_dir}.
Run fuzz test targets.
!/usr/bin/env python3 Copyright (c) 2019-2020 The Crown Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Set up logging Read config generated by configure. Build list of tests By default run all Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406issuecomment-387790487 | 521 | en | 0.719548 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
# NOTE(review): this file is generated by the Pulumi Terraform Bridge (tfgen);
# edits here are documentation-only so regeneration stays easy to diff.
@pulumi.output_type
class GetEventCategoriesResult:
    """
    A collection of values returned by getEventCategories.
    """
    def __init__(__self__, event_categories=None, id=None, source_type=None):
        if event_categories and not isinstance(event_categories, list):
            raise TypeError("Expected argument 'event_categories' to be a list")
        pulumi.set(__self__, "event_categories", event_categories)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if source_type and not isinstance(source_type, str):
            raise TypeError("Expected argument 'source_type' to be a str")
        pulumi.set(__self__, "source_type", source_type)

    @property
    @pulumi.getter(name="eventCategories")
    def event_categories(self) -> List[str]:
        """
        A list of the event categories.
        """
        return pulumi.get(self, "event_categories")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="sourceType")
    def source_type(self) -> Optional[str]:
        # The source type the categories were filtered by, when one was given.
        return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
    """Result wrapper that can also be used with ``await`` inside Pulumi's
    async runtime, while behaving as a plain result object otherwise."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this method a generator function,
        # which is what the awaitable protocol requires; awaiting simply
        # returns a copy of the already-resolved result.
        if False:
            yield self
        return GetEventCategoriesResult(
            event_categories=self.event_categories,
            id=self.id,
            source_type=self.source_type)
def get_event_categories(source_type: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
    """
    ## Example Usage

    List the event categories of all the RDS resources.

    ```python
    import pulumi
    import pulumi_aws as aws

    example_event_categories = aws.rds.get_event_categories()
    pulumi.export("example", example_event_categories.event_categories)
    ```

    List the event categories specific to the RDS resource `db-snapshot`.

    ```python
    import pulumi
    import pulumi_aws as aws

    example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
    pulumi.export("example", example_event_categories.event_categories)
    ```

    :param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
    """
    __args__ = dict()
    __args__['sourceType'] = source_type
    # Default the invoke options and pin the provider version they carry.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value

    return AwaitableGetEventCategoriesResult(
        event_categories=__ret__.event_categories,
        id=__ret__.id,
        source_type=__ret__.source_type)
| sdk/python/pulumi_aws/rds/get_event_categories.py | 3,590 | A collection of values returned by getEventCategories.
A list of the event categories.
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
The provider-assigned unique ID for this managed resource.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 1,088 | en | 0.662826 |
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def closureTest1():
    """A closure must observe the rebinding made after it was created."""
    value = 1

    def reader():
        # Reads the enclosing "value" at call time, not at definition time.
        return value

    # Rebind after the closure exists; the call below must see this value.
    value = 22222 * 2222
    return reader()
def closureTest2():
    """The closed-over name may be entirely unassigned when the closure is made."""
    def reader():
        return late_bound

    # The first and only assignment happens after "reader" already exists.
    late_bound = 2222 * 2222
    return reader()
def closureTest3():
    """Reading a name that is never assigned anywhere raises NameError at call time."""
    def reader():
        return undefined_global  # @UndefinedVariable

    try:
        return reader()
    except NameError:
        return 88
# Module-level "d"; it must NOT be visible inside scopeTest4, because the
# (unreachable) local assignment below turns "d" into a function local there.
d = 1
def scopeTest4():
    # The assignment after "return" is dead code, but it still makes "d" a
    # local variable, so reading it must raise UnboundLocalError.
    try:
        return d
        d = 1
    except UnboundLocalError as e:
        return repr(e)
# Exercise the closure/scoping helpers and print the observed behaviour.
print("Test closure where value is overwritten:", closureTest1())
print("Test closure where value is assigned only late:", closureTest2())
print("Test function where closured value is never assigned:", closureTest3())
print("Scope test where UnboundLocalError is expected:", scopeTest4())
def function():
    # Dummy global used below to check name resolution inside a class body.
    pass
class ClosureLocalizerClass:
    # In a class body the global "function" stays visible until the class-local
    # assignment shadows it; afterwards the class-local value is seen.
    print("Function before assigned in a class:", function)
    function = 1
    print("Function after it was assigned in class:", function)
ClosureLocalizerClass()
def ClosureLocalizerFunction():
    # Unlike a class body, a function body localizes "function" for the whole
    # scope, so reading it before the assignment raises UnboundLocalError.
    try:
        function = function
        print("Function didn't give unbound local error")
    except UnboundLocalError as e:
        print("Function gave unbound local error when accessing function before assignment:", repr(e))
ClosureLocalizerFunction()
class X:
    # Minimal value holder used to snapshot a closure variable below.
    def __init__(self, x):
        self.x = x
def changingClosure():
    print("Changing a closure taken value after it was taken.")
    # "a" is captured by reference (cell), not by value, so each call of
    # closureTaker reads the value "a" has at that moment.
    a = 1
    def closureTaker():
        return X(a)
    x = closureTaker()
    a=2
    print("Closure value first time:", x.x)
    x = closureTaker()
    print("Closure value second time:", x.x)
changingClosure()
| tests/basics/LateClosureAssignment.py | 2,701 | Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com Python tests originally created or extracted from other peoples work. The parts were too small to be protected. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Assign, but the value is not supposed to be used by the function, instead the later update is effective. Using a closure variable that is not initialized at the time it is closured should work as well. @UndefinedVariable | 951 | en | 0.922646 |
# SPDX-FileCopyrightText: Copyright (c) 2021 Martin Stephens
#
# SPDX-License-Identifier: MIT
"""These tests are run with a sensor connected to confirm that the correct
responses are received from the sensor.
The try - except clauses and an if __name__ == "__main__" allow the code to be
run with pytest on a Raspberry Pi or as a stand alone file copied into main.py
on a CircuitPython board. To run on a board also copy 'biffobear_as3935.py' to
the lib folder.
"""
# Many Pylnt conventions are broken for the sake of test readability
# Others fail because Pylint doesn't understand Pytest.
# Therefore skip this file.
# pylint: skip-file
import time
try:
    # If pytest imports successfully, we are running on a Raspberry Pi.
    import pytest
    import os

    from CircuitPython_AS3935 import biffobear_as3935 as as3935

    # No sensor is attached to the CI machine, so skip the whole module when
    # it is collected by pytest. (An earlier revision gated this on the
    # SENSOR_ATTACHED environment variable.)
    pytestmark = pytest.mark.skip(reason="No as3935 board connected.")
except ImportError:
    # Deduce that pytest didn't import, so we are running on a board.
    import biffobear_as3935 as as3935
    import board

# Driver instance shared by every test; created in setup_module().
device = None
def setup_module():
    # Creates the module-wide AS3935 driver instance, preferring I2C and
    # falling back to SPI when no I2C device responds.
    global device
    # Look for I2C connected sensor
    try:
        print("Setting up I2C connection...")
        i2c = board.I2C()
        # Fall back to D7 when D25 is not defined on this board.
        try:
            interrupt = board.D25
        except AttributeError:
            interrupt = board.D7
        device = as3935.AS3935_I2C(i2c, interrupt_pin=interrupt)
    except ValueError:
        print("No I2C connection found.")
        print("Setting up SPI connection...")
        spi = board.SPI()
        # Fall back to D5/D7 when D24/D25 are not defined on this board.
        try:
            cs = board.D24
            interrupt = board.D25
        except AttributeError:
            cs = board.D5
            interrupt = board.D7
        device = as3935.AS3935(spi, cs, interrupt_pin=interrupt)
def teardown_module():
    # Runs once after all tests in the module have finished.
    # Reset the chip between runs for consistent test results
    device.reset()
def test_indoor_outdoor():
    # Round-trip the indoor/outdoor setting.
    assert device.indoor is True  # Chip default
    device.indoor = False
    assert device.indoor is False
def test_power_down():
    # Toggle power-down on and off, confirming it reads back both ways.
    assert device.power_down is False  # Chip default
    device.power_down = True
    assert device.power_down is True
    device.power_down = False
    assert device.power_down is False
def test_noise_floor_level():
    # Sweep every accepted noise floor setting (0-7).
    assert device.noise_floor_limit == 0x02  # Chip default
    # Test possible values
    for level in range(8):
        device.noise_floor_limit = level
        assert device.noise_floor_limit == level
def test_watchdog():
    # Sweep every accepted watchdog setting (0-10).
    assert device.watchdog == 0x02  # Chip default
    # Test possible values
    for level in range(11):
        device.watchdog = level
        assert device.watchdog == level
def test_spike_rejection():
    # Sweep every accepted spike rejection threshold (0-11).
    assert device.spike_threshold == 0x02  # Chip default
    # Test possible values
    for level in range(12):
        device.spike_threshold = level
        assert device.spike_threshold == level
def test_disturber_mask():
    # Enable the disturber mask and confirm it reads back.
    assert device.disturber_mask is False  # Chip default
    device.disturber_mask = True
    assert device.disturber_mask is True
def test_strike_count_threshold():
    # Sweep the accepted strike count thresholds.
    assert device.strike_count_threshold == 1
    # Test possible values
    for level in (1, 5, 9, 16):
        device.strike_count_threshold = level
        assert device.strike_count_threshold == level
def test_freq_divisor():
    # Sweep the accepted antenna frequency divisors.
    assert device.freq_divisor == 16  # Chip default
    # Test possible values
    for divisor in (16, 32, 64, 128):
        device.freq_divisor = divisor
        assert device.freq_divisor == divisor
def test_output_antenna_freq():
    # Enable antenna frequency output and confirm it reads back.
    assert device.output_antenna_freq is False
    device.output_antenna_freq = True
    assert device.output_antenna_freq is True
def test_output_srco():
    # Enable SRCO output and confirm it reads back.
    assert device.output_srco is False  # Chip default
    device.output_srco = True
    assert device.output_srco is True
def test_output_trco():
    # Enable TRCO output and confirm it reads back.
    assert device.output_trco is False  # Chip default
    device.output_trco = True
    assert device.output_trco is True
def test_tuning_capacitance():
    # Sweep the accepted tuning capacitances (0-120 in steps of 8).
    assert device.tuning_capacitance == 0  # Chip default
    # Test possible values
    for capacitance in range(0, 128, 8):
        device.tuning_capacitance = capacitance
        assert device.tuning_capacitance == capacitance
def test_reset():
    # reset() must restore the chip defaults.
    # Set a none default value
    device.freq_divisor = 32
    assert device.freq_divisor == 32
    device.reset()
    # Confirm that is reset to default
    assert device.freq_divisor == 16  # Chip default
def test_commands_which_do_not_change_readable_values():
    # Call to see if an exception is raised; there is no readable state to assert on.
    device.clear_stats()
    device.calibrate_clocks()
def test_registers_with_unpredictable_states():
    # Just read them to see if an error occurs since value depends on presence of lightning.
    device.energy
    device.distance
    device.interrupt_status
def test_read_interrupt_pin():
    # The state of the pin is unknown, so just read it error free.
    device.interrupt_set
if __name__ == "__main__":
    # Stand-alone mode for CircuitPython boards (where pytest is unavailable):
    # run every test in sequence with progress prints.
    print("setup...")
    setup_module()
    device.reset()
    print("test_indoor_outdoor...")
    test_indoor_outdoor()
    print("power_down...")
    test_power_down()
    print("noise_floor_level...")
    test_noise_floor_level()
    print("watchdog...")
    test_watchdog()
    print("spike_rejection...")
    test_spike_rejection()
    print("strike_count_threshold...")
    test_strike_count_threshold()
    print("disturber_mask...")
    test_disturber_mask()
    print("freq_divisor...")
    test_freq_divisor()
    print("output_antenna_freq...")
    test_output_antenna_freq()
    print("output_srco...")
    test_output_srco()
    print("output_trco...")
    test_output_trco()
    print("tuning_capacitance...")
    test_tuning_capacitance()
    print("reset...")
    test_reset()
    print("commands_which_do_not_change_readable_values...")
    test_commands_which_do_not_change_readable_values()
    print("registers_with_unpredictable_states...")
    test_registers_with_unpredictable_states()
    print("Interrupt pin...")
    test_read_interrupt_pin()
    print("teardown...")
    teardown_module()
    print("Tests complete.")
| tests/test_board_responses.py | 6,297 | These tests are run with a sensor connected to confirm that the correct
responses are received from the sensor.
The try - except clauses and an if __name__ == "__main__" allow the code to be
run with pytest on a Raspberry Pi or as a stand alone file copied into main.py
on a CircuitPython board. To run on a board also copy 'biffobear_as3935.py' to
the lib folder.
SPDX-FileCopyrightText: Copyright (c) 2021 Martin Stephens SPDX-License-Identifier: MIT Many Pylnt conventions are broken for the sake of test readability Others fail because Pylint doesn't understand Pytest. Therefore skip this file. pylint: skip-file If this works, we're on a Raspberry Pi try: sensor_attached = os.environ["SENSOR_ATTACHED"] except (KeyError, AttributeError): Deduce that pytest didn't import, so we are running on a board Returns an instance of the AS3935 driver Look for I2C connected sensor Reset the chip between runs for consistent test results Chip default Chip default Chip default Test possible values Chip default Test possible values Chip default Test possible values Chip default Test possible values Chip default Test possible values Chip default Chip default Chip default Test possible values Set a none default value Confirm that is reset to default Chip default Call to see if an exception is raised Just read them to see if an error occurs since value depends on presence of lightning. The state of the pin is unknown, so just read it error free. | 1,466 | en | 0.777551 |
import json
from banal import ensure_list
from functools import lru_cache
from pantomime.types import JSON
from requests.exceptions import TooManyRedirects
from opensanctions.core import Dataset
from opensanctions import helpers as h
# Date formats seen in the CSL feed, passed to the date-parsing helpers.
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
# Handle on the SDN dataset, used to mint slugs for entries that carry an
# OFAC entity number so they line up with us_ofac_sdn entities.
SDN = Dataset.require("us_ofac_sdn")
@lru_cache(maxsize=None)
def deref_url(context, url):
    # Follow redirects to the final URL; on a redirect loop, keep the original.
    # NOTE(review): the unbounded cache keys on `context` as well as `url`,
    # which keeps every context object alive for the process lifetime and
    # grows without limit -- confirm this is intended.
    try:
        res = context.http.get(url, stream=True)
        return res.url
    except TooManyRedirects:
        return url
def parse_result(context, result):
    """Convert one CSL API result into FtM entities and emit them.

    Pops every field it consumes from ``result`` so that any leftover keys
    can be pretty-printed at the end as a signal of unmapped data.
    """
    type_ = result.pop("type", None)
    schema = context.lookup_value("type", type_)
    if schema is None:
        context.log.error("Unknown result type", type=type_)
        return
    entity = context.make(schema)
    entity.id = context.make_slug(result.pop("id"))
    # Entries carrying an OFAC entity number are slugged against the SDN
    # dataset so they deduplicate with us_ofac_sdn entities.
    entity_number = result.pop("entity_number", None)
    if entity_number is not None:
        assert int(entity_number)
        entity.id = SDN.make_slug(entity_number)
    name = result.pop("name", None)
    if name is not None:
        # Fix: guard the replace -- a result without a "name" previously
        # crashed here with AttributeError on None.
        name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
    entity.add("name", name)
    for alias in ensure_list(result.pop("alt_names", "")):
        entity.add("alias", alias.split("; "))
    entity.add("notes", result.pop("remarks", None))
    entity.add("country", result.pop("country", None))
    if entity.schema.is_a("Person"):
        entity.add("position", result.pop("title", None))
        entity.add("nationality", result.pop("nationalities", None))
        entity.add("nationality", result.pop("citizenships", None))
        for dob in result.pop("dates_of_birth", []):
            entity.add("birthDate", h.parse_date(dob, FORMATS))
        entity.add("birthPlace", result.pop("places_of_birth", None))
    elif entity.schema.is_a("Vessel"):
        entity.add("flag", result.pop("vessel_flag", None))
        entity.add("callSign", result.pop("call_sign", None))
        entity.add("type", result.pop("vessel_type", None))
        grt = result.pop("gross_registered_tonnage", None)
        entity.add("grossRegisteredTonnage", grt)
        gt = result.pop("gross_tonnage", None)
        entity.add("tonnage", gt)
        # TODO: make adjacent owner entity
        result.pop("vessel_owner", None)
        # Person-only fields are expected to be absent on a Vessel.
        assert result.pop("title", None) is None
        assert not len(result.pop("nationalities", []))
        assert not len(result.pop("citizenships", []))
        assert not len(result.pop("dates_of_birth", []))
        assert not len(result.pop("places_of_birth", []))
    for address in result.pop("addresses", []):
        obj = h.make_address(
            context,
            street=address.get("address"),
            city=address.get("city"),
            postal_code=address.get("postal_code"),
            region=address.get("state"),
            country=address.get("country"),
        )
        h.apply_address(context, entity, obj)
    for ident in result.pop("ids", []):
        country = ident.pop("country")
        entity.add("country", country)
        h.apply_feature(
            context,
            entity,
            ident.pop("type"),
            ident.pop("number"),
            country=country,
            date_formats=FORMATS,
            start_date=ident.pop("issue_date", None),
            end_date=ident.pop("expiration_date", None),
        )
    # One Sanction entity per listing, linked back to the listed entity.
    sanction = context.make("Sanction")
    sanction.id = context.make_id(entity.id, "Sanction")
    sanction.add("entity", entity)
    sanction.add("program", result.pop("programs", []))
    sanction.add("status", result.pop("license_policy", []))
    sanction.add("reason", result.pop("license_requirement", []))
    sanction.add("reason", result.pop("federal_register_notice", None))
    sanction.add("startDate", result.pop("start_date", None))
    sanction.add("endDate", result.pop("end_date", None))
    sanction.add("country", "us")
    sanction.add("authority", result.pop("source", None))
    source_url = deref_url(context, result.pop("source_information_url"))
    sanction.add("sourceUrl", source_url)
    result.pop("source_list_url")
    # TODO: what is this?
    result.pop("standard_order", None)
    context.emit(sanction)
    context.emit(entity, target=True, unique=True)
    if len(result):
        # Surface any fields this parser did not map.
        context.pprint(result)
def crawl(context):
    """Fetch the CSL export, register it as a resource and parse every entry."""
    source_path = context.fetch_resource("source.json", context.dataset.data.url)
    context.export_resource(source_path, JSON, title=context.SOURCE_TITLE)
    with open(source_path, "r") as fh:
        payload = json.load(fh)
    for item in payload.get("results"):
        parse_result(context, item)
| opensanctions/crawlers/us_trade_csl.py | 4,618 | TODO: make adjacent owner entity TODO: deref TODO: what is this? | 64 | en | 0.671551 |
import argparse
import json
import os
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
# imports the model in model.py by name
from model import BinaryClassifier
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory.

    Rebuilds the BinaryClassifier from the hyperparameters saved in
    model_info.pth, restores its weights from model.pth and returns the
    model on the available device in eval mode.
    """
    print("Loading model.")
    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
    # Load the stored model parameters.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # set to eval mode, could use no_grad
    model.to(device).eval()
    print("Done loading model.")
    return model
# Gets training data in batches from the train.csv file
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_x = torch.from_numpy(train_data.drop([0], axis=1).values).float()
train_ds = torch.utils.data.TensorDataset(train_x, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
# Provided training function
def train(model, train_loader, epochs, criterion, optimizer, device):
    """Run the optimization loop called by the SageMaker training script.

    model        -- network to optimize (left in train mode)
    train_loader -- DataLoader yielding (features, labels) batches
    epochs       -- number of full passes over the training data
    criterion    -- loss function
    optimizer    -- optimizer stepping the model parameters
    device       -- torch.device the batches are moved to
    """
    for epoch in range(1, epochs + 1):
        model.train()
        epoch_loss = 0
        for features, labels in train_loader:
            features = features.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            predictions = model(features)
            # Backpropagate and take one optimizer step per batch.
            loss = criterion(predictions, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.data.item()
        print("Epoch: {}, Loss: {}".format(epoch, epoch_loss / len(train_loader)))
# Training entry point: SageMaker runs this script with hyperparameters and
# data locations passed as CLI arguments / environment variables.
if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments
    # when this script is executed, during a training job
    # Here we set up an argument parser to easily access the parameters
    parser = argparse.ArgumentParser()
    # SageMaker parameters, like the directories for training data and saving models; set automatically
    # Do not need to change
    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    # Training Parameters, given
    parser.add_argument('--batch-size', type=int, default=10, metavar='N',
                        help='input batch size for training (default: 10)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # Model Parameters (architecture hyperparameters for BinaryClassifier)
    parser.add_argument('--input_features', type=int, default=2, metavar='IN',
                        help='number of input features to model (default: 2)')
    parser.add_argument('--hidden_dim', type=int, default=10, metavar='H',
                        help='hidden dim of model (default: 10)')
    parser.add_argument('--output_dim', type=int, default=1, metavar='OUT',
                        help='output dim of model (default: 1)')
    # args holds all passed-in arguments
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))
    torch.manual_seed(args.seed)
    # Load the training data.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
    # Build the model from the parsed hyperparameters and move it to the device.
    model = BinaryClassifier(args.input_features, args.hidden_dim, args.output_dim).to(device)
    # Adam optimizer with binary cross-entropy loss (assumes the model outputs
    # probabilities in [0, 1] -- see model.py).
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.BCELoss()
    # Trains the model (given line of code, which calls the above training function)
    train(model, train_loader, args.epochs, criterion, optimizer, device)
    # Save the architecture hyperparameters needed by model_fn() to rebuild the
    # model at inference time.
    # Keep the keys of this dictionary as they are
    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'input_features': args.input_features,
            'hidden_dim': args.hidden_dim,
            'output_dim': args.output_dim,
        }
        torch.save(model_info, f)
    # Save the model parameters
    model_path = os.path.join(args.model_dir, 'model.pth')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
| Project_Plagiarism_Detection/source_pytorch/train.py | 6,641 | Load the PyTorch model from the `model_dir` directory.
This is the training method that is called by the PyTorch training script. The parameters
passed are as follows:
model - The PyTorch model that we wish to train.
train_loader - The PyTorch DataLoader that should be used during training.
epochs - The total number of epochs to train for.
criterion - The loss function used for training.
optimizer - The optimizer to use during training.
device - Where the model and data should be loaded (gpu or cpu).
imports the model in model.py by name First, load the parameters used to create the model. Determine the device and construct the model. Load the stored model parameters. set to eval mode, could use no_grad Gets training data in batches from the train.csv file Provided training function training loop is provided Make sure that the model is in training mode. get data get predictions from model perform backprop TODO: Complete the main code All of the model parameters and training parameters are sent as arguments when this script is executed, during a training job Here we set up an argument parser to easily access the parameters SageMaker parameters, like the directories for training data and saving models; set automatically Do not need to change Training Parameters, given TODO: Add args for the three model parameters: input_features, hidden_dim, output_dim Model Parameters args holds all passed-in arguments Load the training data. --- Your code here --- TODO: Build the model by passing in the input params To get params from the parser, call args.argument_name, ex. args.epochs or ards.hidden_dim Don't forget to move your model .to(device) to move to GPU , if appropriate TODO: Define an optimizer and loss function for training Trains the model (given line of code, which calls the above training function) TODO: complete in the model_info by adding three argument names, the first is given Keep the keys of this dictionary as they are --- End of your code --- Save the model parameters | 2,040 | en | 0.832044 |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Optional, Union, cast
import tensorflow as tf
from merlin_standard_lib import Schema, Tag
from ..features.continuous import ContinuousFeatures
from ..features.embedding import EmbeddingFeatures
from ..tabular.base import TabularBlock
from .base import Block, BlockType
class ExpandDimsAndToTabular(tf.keras.layers.Lambda):
    # Wraps a dense tensor into the tabular dict form ({"continuous": x}) so it
    # can be merged with other tabular feature blocks (see DLRMBlock below).
    def __init__(self, **kwargs):
        super().__init__(lambda x: dict(continuous=x), **kwargs)
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class DLRMBlock(Block):
    """DLRM-style block: a bottom MLP over continuous features, embeddings for
    categorical features, pairwise feature interactions, and an optional
    top MLP over the interaction output.
    """

    def __init__(
        self,
        continuous_features: Union[List[str], Schema, Optional[TabularBlock]],
        embedding_layer: EmbeddingFeatures,
        bottom_mlp: BlockType,
        top_mlp: Optional[BlockType] = None,
        interaction_layer: Optional[tf.keras.layers.Layer] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Normalize the three accepted forms of `continuous_features`:
        # a Schema, a list of feature names, or an already-built TabularBlock.
        # BUG FIX: these used to be two independent `if` statements, so a
        # Schema input fell through to the final `else` and the block built by
        # from_schema() was overwritten with the raw Schema object.
        _continuous_features: Optional[TabularBlock]
        if isinstance(continuous_features, Schema):
            _continuous_features = cast(
                Optional[TabularBlock],
                ContinuousFeatures.from_schema(
                    cast(Schema, continuous_features), aggregation="concat"
                ),
            )
        elif isinstance(continuous_features, list):
            _continuous_features = ContinuousFeatures.from_features(
                continuous_features, aggregation="concat"
            )
        else:
            _continuous_features = cast(Optional[TabularBlock], continuous_features)
        if _continuous_features:
            # Project continuous inputs through the bottom MLP, then re-wrap
            # them as a tabular value so they can be stacked with embeddings.
            continuous_embedding = _continuous_features >> bottom_mlp >> ExpandDimsAndToTabular()
            continuous_embedding.block_name = "ContinuousEmbedding"
            self.stack_features = embedding_layer.merge(continuous_embedding, aggregation="stack")
        else:
            embedding_layer.set_aggregation("stack")
            self.stack_features = embedding_layer
        # Imported here to avoid a circular import with ..layers.
        from ..layers import DotProductInteraction

        self.interaction_layer = interaction_layer or DotProductInteraction()
        self.top_mlp = top_mlp

    @classmethod
    def from_schema(
        cls, schema: Schema, bottom_mlp: BlockType, top_mlp: Optional[BlockType] = None, **kwargs
    ):
        """Build a DLRMBlock from a schema: embeddings for CATEGORICAL columns
        (dimension taken from the bottom MLP's last layer) plus concatenated
        CONTINUOUS columns.
        """
        embedding_layer = EmbeddingFeatures.from_schema(
            schema.select_by_tag(Tag.CATEGORICAL),
            infer_embedding_sizes=False,
            embedding_dim_default=bottom_mlp.layers[-1].units,
        )
        if not embedding_layer:
            raise ValueError("embedding_layer must be set.")
        continuous_features = cast(
            Optional[TabularBlock],
            ContinuousFeatures.from_schema(
                schema.select_by_tag(Tag.CONTINUOUS), aggregation="concat"
            ),
        )
        return cls(continuous_features, embedding_layer, bottom_mlp, top_mlp=top_mlp, **kwargs)

    def call(self, inputs, **kwargs):
        # Stack features, compute pairwise interactions, then optionally
        # refine with the top MLP.
        stacked = self.stack_features(inputs)
        interactions = self.interaction_layer(stacked)
        return interactions if not self.top_mlp else self.top_mlp(interactions)
| transformers4rec/tf/block/dlrm.py | 4,016 | Copyright (c) 2021, NVIDIA CORPORATION. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. self.stack_features = tabular.MergeTabular(embedding_layer, continuous_embedding, aggregation_registry="stack") self.stack_features = embedding_layer + continuous_embedding self.stack_features.aggregation_registry = "stack" | 828 | en | 0.809156 |
import xml.etree.ElementTree as ET
from .. import NAMESPACE
class ServerResponseError(Exception):
    """Error payload returned by the server, parsed from its XML body."""

    def __init__(self, code, summary, detail):
        self.code = code
        self.summary = summary
        self.detail = detail
        super(ServerResponseError, self).__init__(str(self))

    def __str__(self):
        return "\n\n\t{0}: {1}\n\t\t{2}".format(self.code, self.summary, self.detail)

    @classmethod
    def from_response(cls, resp):
        """Build an error from a raw XML response body.

        Missing elements degrade to '' instead of raising AttributeError on
        None (resolves the old "check elements exist before .text" TODO).
        """
        parsed_response = ET.fromstring(resp)
        error_elem = parsed_response.find('t:error', namespaces=NAMESPACE)
        summary_elem = parsed_response.find('.//t:summary', namespaces=NAMESPACE)
        detail_elem = parsed_response.find('.//t:detail', namespaces=NAMESPACE)
        code = error_elem.get('code', '') if error_elem is not None else ''
        summary = summary_elem.text if summary_elem is not None else ''
        detail = detail_elem.text if detail_elem is not None else ''
        return cls(code, summary, detail)
class MissingRequiredFieldError(Exception):
    """Signals that a required field was not supplied on a request item."""
    pass
| tableauserverclient/server/endpoint/exceptions.py | 917 | Check elements exist before .text | 33 | en | 0.382627 |
import csv
from convertextract.parsers.csv_parser import Parser as BaseParser
class Parser(BaseParser):
    """Extract text from tab separated values files (.tsv).

    Reuses the CSV parser wholesale; only the delimiter differs.
    """
    # Fix: stray extraction residue appended to this line made the module
    # unparsable; the attribute itself is unchanged.
    delimiter = '\t'
#!/usr/bin/env python3
from Crypto.PublicKey import RSA, ECC
import json
from hashlib import sha256
from Crypto.Cipher import AES, PKCS1_OAEP
from base64 import b64decode
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import socket
from base64 import *
from server import *
# key = RSA.importKey(open("rsapubkey.pem", "r").read() )
# key = ECC.generate(curve='P-256')
# f = open("fakekey.pem", 'w')
# f.write(key.export_key(format='PEM'))
# Intercepted message from the challenge server (JSON with base64-encoded
# fields); every later step of the attack operates on this capture.
message = json.loads('{"aeskey": "nwmHkXTN/EjnoO5IzhpNwE3nXEUMHsNWFI7dcHnpxIIiXCO+dLCjR6TfqYfbL9Z6a7SNCKbeTFBLnipXcRoN6o56urZMWwCioVTsV7PHrlCU42cKX+c/ShcVFrA5aOTTjaO9rxTMxB1PxJqYyxlpNaUpRFslzj9LKH+g8hVEuP9lVMm7q4aniyOUgPrAxyn044mbuxPu6Kh+JHSt5dkmnPZGNfUDKCwvMKeilb5ZkLaW/EaoXXsJLh/wUinMROIqmD2dkiWnk10633sJIu1lEOUsiykYXtJcd3o/B2dfTx2/85C2J6IsIp3+jJne76AYryAONPSxuh+M0h1xCzNeQg==", "message": "6VCnnSOU1DBImyhlqt7SoEjRtmBxjmABFVmXYhlKDyc+NBlnZ3Hpj4EkLwydPGpHiAvr4R0zTXSyUnMk5N6fi0/BFZE=", "nonce": "Cems9uHF6mk=", "signature": "uhLCnBvGfdC1fVkGUKQ8zNp/fOXNnFxNuDEc7CDGEYSxnuZMoGqbEqMLguJqDdvHFSHoUrq2R9/+mfk8LHndhw==", "eccpubkey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGww+NA3xHj4kCyztekLhmJVB62Hhq/oGDWwo4fxgZCgbODqD3vrMFFTGCWfO8ZyHtstuW+Yztpq94CnSNpJoug=="}')
def fake_signature(msg) :
    # Re-sign the (tampered) message with the key carried in msg["eccpubkey"]
    # (our own PEM key -- see fakekey.pem below) so the server's signature
    # check passes. The hash covers aeskey || nonce || message, all as bytes.
    eccpubkey = ECC.import_key(msg["eccpubkey"])
    h = SHA256.new(msg["aeskey"] + msg["nonce"] + msg["message"])
    sign = DSS.new(eccpubkey, 'fips-186-3')
    msg['signature'] = sign.sign(h)
    return msg
# Challenge service endpoint.
HOST = 'crypto1.ctf.nullcon.net'  # The server's hostname or IP address
PORT = 5001  # The port used by the server
def sendMsg(msg) :
    # Sign the message, base64-encode every field back into strings, submit it
    # to the server and return the second reply line (the "read receipt").
    # Note: mutates `msg` in place; callers re-decode the fields each round.
    msg = fake_signature(msg)
    msg["nonce"] = b64encode(msg["nonce"]).decode()
    msg["message"] = b64encode(msg["message"]).decode()
    msg["aeskey"] = b64encode(msg["aeskey"]).decode()
    msg["signature"] = b64encode(msg["signature"]).decode()
    msg["eccpubkey"] = b64encode(msg["eccpubkey"]).decode()
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        s.recv(1024)
        s.sendall(json.dumps(msg).encode() + b"\n")
        recpt = s.recv(1024).split(b'\n')
        assert recpt[0] == b'Here is your read receipt:'
        return recpt[1]
# --- Recover the XOR keystream ---
def xor(a, b):
    """XOR two byte strings pairwise, truncating to the shorter one."""
    return bytes(x ^ y for x, y in zip(a, b))
# Known-plaintext attack: the known flag prefix XORed with the ciphertext
# yields the keystream prefix; each further keystream byte is brute-forced by
# asking the server for a read receipt of a forged message.
ciphertext = b64decode(message['message'])
print(ciphertext)
# Flag prefix recovered so far (sic: the server-side flag spells "singatures").
flag = b"hackim20{digital_singatures_does_not_always_imp"
fake_message = xor(flag, ciphertext[:len(flag)])
import progressbar
from string import ascii_lowercase , digits
printable = ascii_lowercase + "{}_" + digits
for _ in range(len(flag), len(ciphertext)) :
    print(_)
    # Expected receipt when a guess is right: presumably SHA256 of an
    # all-zero plaintext of the current length -- TODO confirm against server.py.
    H = SHA256.new(bytes(len(fake_message) + 1)).hexdigest().encode()
    # Candidate keystream bytes, assuming the next flag char is in `printable`.
    brute = list(map(lambda x : ord(x) ^ ciphertext[_], printable))
    for i in progressbar.ProgressBar(widgets=[progressbar.Counter(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()])(brute) :
        # sendMsg() re-encoded these fields to base64 strings on the previous
        # round, so decode them back to bytes before signing again.
        message["nonce"] = b64decode(message["nonce"])
        message["aeskey"] = b64decode(message["aeskey"])
        message["signature"] = b64decode(message["signature"])
        message['eccpubkey'] = open("fakekey.pem","r").read().encode()
        new_fake_message = fake_message + bytes([i])
        message['message'] = new_fake_message
        recpt = sendMsg(message)
        if recpt == H :
            # Guess confirmed: extend the keystream and print the flag so far.
            fake_message += bytes([i])
            flag = xor(fake_message, ciphertext[:_+1])
            print(flag)
            break
| ctf/2020/nullcon/msg/solve.py | 3,576 | !/usr/bin/env python3 key = RSA.importKey(open("rsapubkey.pem", "r").read() ) key = ECC.generate(curve='P-256') f = open("fakekey.pem", 'w') f.write(key.export_key(format='PEM')) The server's hostname or IP address The port used by the server | 246 | en | 0.496163 |
"""
strings and logic related to composing notifications
"""
HELLO_STATUS = "Hello! I'm Vaccination Notifier"
HELLO_MESSAGE = (
"Hello there!\n"
"\n"
"I'm Vaccination Notifier. This is just a message to let you know I'm running and "
"to test our notification configuration. I'll check for changes to your "
"vaccination status once every {delay} minutes---unless I crash! Every now and then, "
"you should probably check on me to make sure nothing has gone wrong.\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def hello_message(delay):
    """Return a (subject, body) pair for the start-up notification.

    `delay` is the polling interval in minutes, interpolated into the body.
    """
    body = HELLO_MESSAGE.format(delay=delay)
    return (HELLO_STATUS, body)
UPDATE_STATUS = "Vaccination update detected"
UPDATE_MESSAGE = (
"Hello there!\n"
"\n"
"I noticed that your vaccination results page was updated recently. Here's "
"a summary of the update:\n"
"Health Facility:{facility}\n"
"Vaccination Location:{location}\n"
"Date:{date}\n"
"Time:{time}\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def update_message(dict):
    """Return a (subject, body) pair describing a vaccination update.

    NOTE(review): the parameter shadows the builtin ``dict`` (kept for
    caller compatibility). It must map the literal keys 'Health Facility:',
    'Vaccination Location:', 'Date:' and 'Time:' to display strings.
    """
    fields = {
        'facility': dict['Health Facility:'],
        'location': dict['Vaccination Location:'],
        'date': dict['Date:'],
        'time': dict['Time:'],
    }
    return (UPDATE_STATUS, UPDATE_MESSAGE.format(**fields))
from __future__ import print_function
"""
A script to batch render and update interactive viewer.
"""
import os
import sys
import argparse
import pyexr
import numpy as np
import json
import subprocess as sp
from analyze import update_stats, compute_stats, write_data
if __name__ == '__main__':
    # Parse arguments
    parser = argparse.ArgumentParser(
        description='Batch analysis of rendered images.')
    parser.add_argument('-mts', '--mitsuba',
                        help='mitsuba executable', type=str, default='./mitsuba')
    parser.add_argument('-r', '--ref',
                        help='reference image', type=str, required=True)
    parser.add_argument('-s', '--scene',
                        help='scene xml file', type=str, required=True)
    parser.add_argument('-o', '--options',
                        help='mitsuba options', type=str)
    parser.add_argument('-d', '--dir',
                        help='corresponding viewer scene directory', type=str, required=True)
    parser.add_argument('-n', '--name',
                        help='algorithm name', type=str, required=True)
    parser.add_argument('-a', '--alg',
                        help='mitsuba algorithm keyword', type=str, required=True)
    parser.add_argument('-t', '--timeout',
                        help='render time (s)', type=int)
    parser.add_argument('-f', '--frequency',
                        help='intermediate image output frequency (s)', type=int)
    parser.add_argument('-m', '--metrics',
                        help='difference metrics', nargs='+', choices=['l1', 'l2', 'mrse', 'mape', 'smape', 'dssim'], type=str)
    parser.add_argument('-eps', '--epsilon',
                        help='epsilon value', type=float, default=1e-2)
    parser.add_argument('-c', '--clip',
                        help='clipping values for min/max', nargs=2, type=float, default=[0, 1])
    args = parser.parse_args()

    # Create Mitsuba command; the output .exr is named after the algorithm
    # and written next to the scene file.
    fname = '{}.exr'.format(args.name.replace(' ', '-'))
    out_path = os.path.join(os.path.dirname(args.scene), fname)
    render = '{} {} -D integrator={}'.format(
        args.mitsuba, args.scene, args.alg)
    if args.frequency:
        render = '{} -r {}'.format(render, args.frequency)
    if args.options:
        render = '{} {}'.format(render, args.options)
    render = '{} -o {}'.format(render, out_path)
    cmd = render.split()

    # Run and time out after fixed amount of time; hitting the timeout is
    # the expected way to stop a progressive render.
    sys.stdout.write('Rendering... ')
    sys.stdout.flush()
    try:
        out = sp.check_output(cmd, shell=False, timeout=args.timeout)
    except sp.TimeoutExpired as e:
        print('done.')

    # Update interactive viewer with freshly computed difference metrics.
    sys.stdout.write('Recomputing metrics... ')
    sys.stdout.flush()
    ref_fp = pyexr.open(args.ref)
    ref = np.array(ref_fp.get())
    img_fp = pyexr.open(out_path)
    img = np.array(img_fp.get())
    test = [{'name': args.name, 'data': img}]
    # NOTE(review): stats.json is loaded but only data.json is rewritten --
    # confirm whether `stats` is actually needed here.
    with open(os.path.join(args.dir, 'data.json'), 'r') as fp:
        data = json.load(fp)
    with open(os.path.join(args.dir, 'stats.json'), 'r') as fp:
        stats = json.load(fp)
    data = update_stats(args.dir, data, ref, test,
                        args.metrics, args.clip, args.epsilon)
    write_data(args.dir, data)
    print('done.')
    web_url = os.path.abspath(os.path.join(args.dir, 'index.html'))
    print('Interactive viewer updated: {}'.format(web_url))
| tools/render.py | 3,426 | Parse arguments Create Mistuba command Run and time out after fixed amount of time Update interactive viewer | 108 | en | 0.51569 |
# PuLP : Python LP Modeler
# Version 1.4.2
# Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org)
# Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz)
# $Id:solvers.py 1791 2008-04-23 22:54:34Z smit023 $
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
from .core import LpSolver_CMD, LpSolver, subprocess, PulpSolverError, clock, log
from .core import gurobi_path
import os
from uuid import uuid4
import sys
from .. import constants
import warnings
# to import the gurobipy name into the module scope
gurobipy = None
class GUROBI(LpSolver):
    """
    The Gurobi LP/MIP solver (via its python interface)

    The Gurobi variables are available (after a solve) in var.solverVar
    Constraints in constraint.solverConstraint
    and the Model is in prob.solverModel
    """
    # The gurobipy import is attempted at class-definition time. If it
    # fails, the class is defined with stub methods that report the solver
    # as unavailable; otherwise the real implementation (else branch) is
    # used.
    try:
        sys.path.append(gurobi_path)
        # to import the name into the module scope
        global gurobipy
        import gurobipy
    except:  # FIXME: Bug because gurobi returns
             # a gurobi exception on failed imports
        def available(self):
            """True if the solver is available"""
            return False

        def actualSolve(self, lp, callback = None):
            """Solve a well formulated lp problem"""
            raise PulpSolverError("GUROBI: Not Available")
    else:
        def __init__(self,
                     mip = True,
                     msg = True,
                     timeLimit = None,
                     epgap = None,
                     **solverParams):
            """
            Initializes the Gurobi solver.

            @param mip: if False the solver will solve a MIP as an LP
            @param msg: displays information from the solver to stdout
            @param timeLimit: sets the maximum time for solution
            @param epgap: sets the integer bound gap
            """
            LpSolver.__init__(self, mip, msg)
            self.timeLimit = timeLimit
            self.epgap = epgap
            # set the output of gurobi
            if not self.msg:
                gurobipy.setParam("OutputFlag", 0)
            # set the gurobi parameter values (any extra keyword argument is
            # forwarded verbatim to gurobipy)
            for key,value in solverParams.items():
                gurobipy.setParam(key, value)

        def findSolutionValues(self, lp):
            # Map Gurobi's status code back to a pulp status constant and
            # copy primal/dual values onto the pulp variables/constraints.
            model = lp.solverModel
            solutionStatus = model.Status
            GRB = gurobipy.GRB
            # TODO: check status for Integer Feasible
            gurobiLpStatus = {GRB.OPTIMAL: constants.LpStatusOptimal,
                              GRB.INFEASIBLE: constants.LpStatusInfeasible,
                              GRB.INF_OR_UNBD: constants.LpStatusInfeasible,
                              GRB.UNBOUNDED: constants.LpStatusUnbounded,
                              GRB.ITERATION_LIMIT: constants.LpStatusNotSolved,
                              GRB.NODE_LIMIT: constants.LpStatusNotSolved,
                              GRB.TIME_LIMIT: constants.LpStatusNotSolved,
                              GRB.SOLUTION_LIMIT: constants.LpStatusNotSolved,
                              GRB.INTERRUPTED: constants.LpStatusNotSolved,
                              GRB.NUMERIC: constants.LpStatusNotSolved,
                              }
            # populate pulp solution values; attribute queries fail (and are
            # deliberately ignored) when no solution is loaded in the model
            try:
                for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.X, model.getVars())):
                    var.varValue = value
            except (gurobipy.GurobiError, AttributeError):
                pass
            try:
                for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.RC, model.getVars())):
                    var.dj = value
            except (gurobipy.GurobiError, AttributeError):
                pass
            # put pi and slack variables against the constraints
            try:
                for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Pi, model.getConstrs())):
                    constr.pi = value
            except (gurobipy.GurobiError, AttributeError):
                pass
            try:
                for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Slack, model.getConstrs())):
                    constr.slack = value
            except (gurobipy.GurobiError, AttributeError):
                pass
            if self.msg:
                print("Gurobi status=", solutionStatus)
            lp.resolveOK = True
            for var in lp.variables():
                var.isModified = False
            status = gurobiLpStatus.get(solutionStatus, constants.LpStatusUndefined)
            lp.assignStatus(status)
            return status

        def available(self):
            """True if the solver is available"""
            return True

        def callSolver(self, lp, callback = None):
            """Solves the problem with gurobi
            """
            # solve the problem, timing the optimize() call
            self.solveTime = -clock()
            lp.solverModel.optimize(callback = callback)
            self.solveTime += clock()

        def buildSolverModel(self, lp):
            """
            Takes the pulp lp model and translates it into a gurobi model
            """
            log.debug("create the gurobi model")
            lp.solverModel = gurobipy.Model(lp.name)
            log.debug("set the sense of the problem")
            # Gurobi's default is minimization; -1 flips to maximization.
            if lp.sense == constants.LpMaximize:
                lp.solverModel.setAttr("ModelSense", -1)
            if self.timeLimit:
                lp.solverModel.setParam("TimeLimit", self.timeLimit)
            if self.epgap:
                lp.solverModel.setParam("MIPGap", self.epgap)
            log.debug("add the variables to the problem")
            for var in lp.variables():
                lowBound = var.lowBound
                if lowBound is None:
                    lowBound = -gurobipy.GRB.INFINITY
                upBound = var.upBound
                if upBound is None:
                    upBound = gurobipy.GRB.INFINITY
                obj = lp.objective.get(var, 0.0)
                varType = gurobipy.GRB.CONTINUOUS
                # integer vars become continuous when mip is disabled
                if var.cat == constants.LpInteger and self.mip:
                    varType = gurobipy.GRB.INTEGER
                var.solverVar = lp.solverModel.addVar(lowBound, upBound,
                                                      vtype = varType,
                                                      obj = obj, name = var.name)
            lp.solverModel.update()
            log.debug("add the Constraints to the problem")
            for name,constraint in lp.constraints.items():
                # build the expression
                expr = gurobipy.LinExpr(list(constraint.values()),
                                        [v.solverVar for v in constraint.keys()])
                if constraint.sense == constants.LpConstraintLE:
                    relation = gurobipy.GRB.LESS_EQUAL
                elif constraint.sense == constants.LpConstraintGE:
                    relation = gurobipy.GRB.GREATER_EQUAL
                elif constraint.sense == constants.LpConstraintEQ:
                    relation = gurobipy.GRB.EQUAL
                else:
                    raise PulpSolverError('Detected an invalid constraint type')
                # pulp keeps the constant on the LHS, gurobi wants it as RHS
                constraint.solverConstraint = lp.solverModel.addConstr(expr,
                    relation, -constraint.constant, name)
            lp.solverModel.update()

        def actualSolve(self, lp, callback = None):
            """
            Solve a well formulated lp problem

            creates a gurobi model, variables and constraints and attaches
            them to the lp model which it then solves
            """
            self.buildSolverModel(lp)
            # set the initial solution
            log.debug("Solve the Model using gurobi")
            self.callSolver(lp, callback = callback)
            # get the solution information
            solutionStatus = self.findSolutionValues(lp)
            for var in lp.variables():
                var.modified = False
            for constraint in lp.constraints.values():
                constraint.modified = False
            return solutionStatus

        def actualResolve(self, lp, callback = None):
            """
            Solve a well formulated lp problem

            uses the old solver and modifies the rhs of the modified constraints
            """
            log.debug("Resolve the Model using gurobi")
            for constraint in lp.constraints.values():
                if constraint.modified:
                    constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS,
                                                        -constraint.constant)
            lp.solverModel.update()
            self.callSolver(lp, callback = callback)
            # get the solution information
            solutionStatus = self.findSolutionValues(lp)
            for var in lp.variables():
                var.modified = False
            for constraint in lp.constraints.values():
                constraint.modified = False
            return solutionStatus
class GUROBI_CMD(LpSolver_CMD):
    """The GUROBI_CMD solver: drives the ``gurobi_cl`` command-line tool.

    The problem is written to a temporary ``.lp`` file, solved by the
    external executable, and the solution read back from the ``.sol``
    result file. Only variable values are recovered (no duals/slacks).
    """
    def defaultPath(self):
        """Return the platform-appropriate name of the Gurobi CLI binary."""
        return self.executableExtension("gurobi_cl")

    def available(self):
        """True if the solver is available"""
        return self.executable(self.path)

    def actualSolve(self, lp):
        """Solve a well formulated lp problem via the gurobi_cl executable."""
        # TODO: workaround for python not reading LD_LIBRARY_PATH
        # in my version of ubuntu
        if 'GUROBI_HOME' in os.environ:
            if 'LD_LIBRARY_PATH' not in os.environ:
                os.environ['LD_LIBRARY_PATH'] = ""
            os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['GUROBI_HOME'] + "/lib"
        if not self.executable(self.path):
            raise PulpSolverError("PuLP: cannot execute "+self.path)
        # Choose unique temp file names unless the caller asked to keep files.
        if not self.keepFiles:
            uuid = uuid4().hex
            tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid)
            tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid)
            tmpMst = os.path.join(self.tmpDir, "%s-pulp.mst" % uuid)
        else:
            tmpLp = lp.name+"-pulp.lp"
            tmpSol = lp.name+"-pulp.sol"
            tmpMst = lp.name + "-pulp.mst"
        vs = lp.writeLP(tmpLp, writeSOS = 1)
        # Remove any stale solution file so a previous run is never re-read.
        try:
            os.remove(tmpSol)
        except OSError:
            pass
        cmd = self.path
        cmd += ' ' + ' '.join(['%s=%s' % (key, value)
                               for key, value in self.options])
        cmd += ' ResultFile=%s' % tmpSol
        if self.mip_start:
            # Warm start: dump the current variable values as an MST file.
            self.writesol(filename=tmpMst, vs=vs)
            cmd += ' InputFile=%s' % tmpMst
        if lp.isMIP():
            if not self.mip:
                warnings.warn('GUROBI_CMD does not allow a problem to be relaxed')
        cmd += ' %s' % tmpLp
        if self.msg:
            pipe = None
        else:
            pipe = open(os.devnull, 'w')
        return_code = subprocess.call(cmd.split(), stdout = pipe, stderr = pipe)
        # Close the pipe now if we used it.
        if pipe is not None:
            pipe.close()
        if return_code != 0:
            raise PulpSolverError("PuLP: Error while trying to execute "+self.path)
        if not os.path.exists(tmpSol):
            # No result file: Gurobi stopped without writing a solution.
            warnings.warn('GUROBI_CMD does not provide good solution status of non optimal solutions')
            status = constants.LpStatusNotSolved
            values = reducedCosts = shadowPrices = slacks = None
        else:
            status, values, reducedCosts, shadowPrices, slacks = self.readsol(tmpSol)
        if not self.keepFiles:
            for f in [tmpSol, tmpMst, tmpLp, "gurobi.log"]:
                try:
                    os.remove(f)
                except OSError:
                    pass
        if status != constants.LpStatusInfeasible:
            lp.assignVarsVals(values)
            lp.assignVarsDj(reducedCosts)
            lp.assignConsPi(shadowPrices)
            lp.assignConsSlack(slacks)
        lp.assignStatus(status)
        return status

    def readsol(self, filename):
        """Read a Gurobi solution file.

        Returns (status, values, reducedCosts, shadowPrices, slacks); only
        `values` is populated -- the CLI writes no dual information.
        """
        with open(filename) as my_file:
            try:
                next(my_file)  # skip the comment/objective line
            except StopIteration:
                # Empty file: the problem was not solved.
                warnings.warn('GUROBI_CMD does not provide good solution status of non optimal solutions')
                status = constants.LpStatusNotSolved
                return status, {}, {}, {}, {}
            # We have no idea what the status is, assume optimal.
            # TODO: check status for Integer Feasible
            status = constants.LpStatusOptimal
            shadowPrices = {}
            slacks = {}
            values = {}
            reducedCosts = {}
            for line in my_file:
                if line[0] != '#':  # skip comments
                    name, value = line.split()
                    values[name] = float(value)
        return status, values, reducedCosts, shadowPrices, slacks

    def writesol(self, filename, vs):
        """Write a Gurobi MST (start) file with current variable values."""
        values = [(v.name, v.value()) for v in vs if v.value() is not None]
        rows = []
        for name, value in values:
            rows.append('{} {}'.format(name, value))
        with open(filename, 'w') as f:
            f.write('\n'.join(rows))
        return True
| pulp/apis/gurobi_api.py | 14,405 | The Gurobi LP/MIP solver (via its python interface)
The Gurobi variables are available (after a solve) in var.solverVar
Constriaints in constraint.solverConstraint
and the Model is in prob.solverModel
The GUROBI_CMD solver
Initializes the Gurobi solver.
@param mip: if False the solver will solve a MIP as an LP
@param msg: displays information from the solver to stdout
@param timeLimit: sets the maximum time for solution
@param epgap: sets the integer bound gap
Solve a well formulated lp problem
uses the old solver and modifies the rhs of the modified constraints
Solve a well formulated lp problem
Solve a well formulated lp problem
creates a gurobi model, variables and constraints and attaches
them to the lp model which it then solves
Solve a well formulated lp problem
True if the solver is available
True if the solver is available
True if the solver is available
Takes the pulp lp model and translates it into a gurobi model
Solves the problem with gurobi
Read a Gurobi solution file
Writes a GUROBI solution file
PuLP : Python LP Modeler Version 1.4.2 Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz) $Id:solvers.py 1791 2008-04-23 22:54:34Z smit023 $ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" to import the gurobipy name into the module scope to import the name into the module scope FIXME: Bug because gurobi returnsa gurobi exception on failed importsset the output of gurobiset the gurobi parameter values TODO: check status for Integer Feasiblepopulate pulp solution valuesput pi and slack variables against the constraintssolve the problembuild the expressionset the initial solutionget the solution informationget the solution information TODO: workaround for python not reading LD_LIBRARY_PATH in my version of ubuntu Close the pipe now if we used it. 
skip the objective value Empty file not solvedWe have no idea what the status is assume optimal TODO: check status for Integer Feasibleskip comments | 3,020 | en | 0.787928 |
import os, sys
from base64 import decodebytes
from wptserve.utils import isomorphic_decode
import importlib
subresource = importlib.import_module("common.security-features.subresource.subresource")
def generate_payload(request, server_data):
    """Return the body of the subresource response: a tiny TrueType font.

    When an ``id`` query parameter is present, a JSON summary of the
    request headers (built from ``server_data``) is stashed under that id
    so that generate_report_headers_payload can return it later.
    """
    data = (u'{"headers": %(headers)s}') % server_data
    if b"id" in request.GET:
        request.server.stash.put(request.GET[b"id"], data)
    # Simple base64 encoded .ttf font
    return decodebytes(b"AAEAAAANAIAAAwBQRkZUTU6u6MkAAAXcAAAAHE9TLzJWYW"
                       b"QKAAABWAAAAFZjbWFwAA8D7wAAAcAAAAFCY3Z0IAAhAnkA"
                       b"AAMEAAAABGdhc3D//wADAAAF1AAAAAhnbHlmCC6aTwAAAx"
                       b"QAAACMaGVhZO8ooBcAAADcAAAANmhoZWEIkAV9AAABFAAA"
                       b"ACRobXR4EZQAhQAAAbAAAAAQbG9jYQBwAFQAAAMIAAAACm"
                       b"1heHAASQA9AAABOAAAACBuYW1lehAVOgAAA6AAAAIHcG9z"
                       b"dP+uADUAAAWoAAAAKgABAAAAAQAAMhPyuV8PPPUACwPoAA"
                       b"AAAMU4Lm0AAAAAxTgubQAh/5wFeAK8AAAACAACAAAAAAAA"
                       b"AAEAAAK8/5wAWgXcAAAAAAV4AAEAAAAAAAAAAAAAAAAAAA"
                       b"AEAAEAAAAEAAwAAwAAAAAAAgAAAAEAAQAAAEAALgAAAAAA"
                       b"AQXcAfQABQAAAooCvAAAAIwCigK8AAAB4AAxAQIAAAIABg"
                       b"kAAAAAAAAAAAABAAAAAAAAAAAAAAAAUGZFZABAAEEAQQMg"
                       b"/zgAWgK8AGQAAAABAAAAAAAABdwAIQAAAAAF3AAABdwAZA"
                       b"AAAAMAAAADAAAAHAABAAAAAAA8AAMAAQAAABwABAAgAAAA"
                       b"BAAEAAEAAABB//8AAABB////wgABAAAAAAAAAQYAAAEAAA"
                       b"AAAAAAAQIAAAACAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                       b"AAAAAAAAAAAAAAAAAAAhAnkAAAAqACoAKgBGAAAAAgAhAA"
                       b"ABKgKaAAMABwAusQEALzyyBwQA7TKxBgXcPLIDAgDtMgCx"
                       b"AwAvPLIFBADtMrIHBgH8PLIBAgDtMjMRIREnMxEjIQEJ6M"
                       b"fHApr9ZiECWAAAAwBk/5wFeAK8AAMABwALAAABNSEVATUh"
                       b"FQE1IRUB9AH0/UQDhPu0BRQB9MjI/tTIyP7UyMgAAAAAAA"
                       b"4ArgABAAAAAAAAACYATgABAAAAAAABAAUAgQABAAAAAAAC"
                       b"AAYAlQABAAAAAAADACEA4AABAAAAAAAEAAUBDgABAAAAAA"
                       b"AFABABNgABAAAAAAAGAAUBUwADAAEECQAAAEwAAAADAAEE"
                       b"CQABAAoAdQADAAEECQACAAwAhwADAAEECQADAEIAnAADAA"
                       b"EECQAEAAoBAgADAAEECQAFACABFAADAAEECQAGAAoBRwBD"
                       b"AG8AcAB5AHIAaQBnAGgAdAAgACgAYwApACAAMgAwADAAOA"
                       b"AgAE0AbwB6AGkAbABsAGEAIABDAG8AcgBwAG8AcgBhAHQA"
                       b"aQBvAG4AAENvcHlyaWdodCAoYykgMjAwOCBNb3ppbGxhIE"
                       b"NvcnBvcmF0aW9uAABNAGEAcgBrAEEAAE1hcmtBAABNAGUA"
                       b"ZABpAHUAbQAATWVkaXVtAABGAG8AbgB0AEYAbwByAGcAZQ"
                       b"AgADIALgAwACAAOgAgAE0AYQByAGsAQQAgADoAIAA1AC0A"
                       b"MQAxAC0AMgAwADAAOAAARm9udEZvcmdlIDIuMCA6IE1hcm"
                       b"tBIDogNS0xMS0yMDA4AABNAGEAcgBrAEEAAE1hcmtBAABW"
                       b"AGUAcgBzAGkAbwBuACAAMAAwADEALgAwADAAMAAgAABWZX"
                       b"JzaW9uIDAwMS4wMDAgAABNAGEAcgBrAEEAAE1hcmtBAAAA"
                       b"AgAAAAAAAP+DADIAAAABAAAAAAAAAAAAAAAAAAAAAAAEAA"
                       b"AAAQACACQAAAAAAAH//wACAAAAAQAAAADEPovuAAAAAMU4"
                       b"Lm0AAAAAxTgubQ==")
def generate_report_headers_payload(request, server_data):
    """Return the payload previously stashed under this request's ``id``."""
    stash_key = request.GET[b"id"]
    return request.server.stash.take(stash_key)
def main(request, response):
    """wptserve entry point: serve the test font, or stashed headers as JSON."""
    if b"report-headers" in request.GET:
        payload_handler = lambda data: generate_report_headers_payload(request, data)
        mime_type = b'application/json'
    else:
        payload_handler = lambda data: generate_payload(request, data)
        mime_type = b'application/x-font-truetype'
    subresource.respond(request,
                        response,
                        payload_generator = payload_handler,
                        content_type = mime_type,
                        access_control_allow_origin = b"*")
| common/security-features/subresource/font.py | 4,367 | Simple base64 encoded .tff font | 31 | en | 0.280993 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import os
import warnings
from builtins import str
from typing import Any
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies import Policy
from rasa_core.training.data import DialogueTrainingData
logger = logging.getLogger(__name__)
class KerasPolicy(Policy):
    """LSTM-based dialogue policy backed by a keras model."""

    # Consumed by rasa_core's training loop: this policy implements
    # continue_training and can be trained interactively.
    SUPPORTS_ONLINE_TRAINING = True

    def __init__(self, model=None, graph=None, current_epoch=0,
                 featurizer=None, max_history=None):
        # keras is imported lazily so this module can be imported without it
        import keras

        super(KerasPolicy, self).__init__(featurizer, max_history)
        # With the tensorflow backend the graph must be captured so later
        # predictions can run on it (possibly from another thread).
        if KerasPolicy.is_using_tensorflow() and not graph:
            self.graph = keras.backend.tf.get_default_graph()
        else:
            self.graph = graph
        self.model = model
        self.current_epoch = current_epoch

    @property
    def max_len(self):
        # Maximum dialogue history length, recovered from the input layer
        # shape (None once no model has been trained/loaded).
        if self.model:
            return self.model.layers[0].batch_input_shape[1]
        else:
            return None

    @staticmethod
    def is_using_tensorflow():
        import keras
        return keras.backend._BACKEND == "tensorflow"

    def predict_action_probabilities(self, tracker, domain):
        """Return a probability for each action given the tracker state."""
        x = self.featurize(tracker, domain)
        # we need to add a batch dimension with length 1
        x = x.reshape((1, self.max_len, x.shape[1]))
        if KerasPolicy.is_using_tensorflow() and self.graph is not None:
            with self.graph.as_default():
                y_pred = self.model.predict(x, batch_size=1)
        else:
            y_pred = self.model.predict(x, batch_size=1)
        return y_pred[-1].tolist()

    def _build_model(self, num_features, num_actions, max_history_len):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn("Deprecated, use `model_architecture` instead.",
                      DeprecationWarning, stacklevel=2)
        return

    def model_architecture(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # Neural Net and training params
        batch_shape = (None, max_history_len, num_features)

        # Build Model: mask padding (-1) timesteps, encode with an LSTM,
        # then predict an action distribution with a softmax layer.
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape, dropout=0.2))
        model.add(Dense(input_dim=n_hidden, units=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model

    def train(self, training_data, domain, **kwargs):
        # type: (DialogueTrainingData, Domain, **Any) -> None
        """Train a fresh model; extra kwargs are forwarded to model.fit()."""
        self.model = self.model_architecture(domain.num_features,
                                             domain.num_actions,
                                             training_data.max_history())
        shuffled_X, shuffled_y = training_data.shuffled(domain)

        validation_split = kwargs.get("validation_split", 0.0)
        logger.info("Fitting model with {} total samples and a validation "
                    "split of {}".format(training_data.num_examples(),
                                         validation_split))
        self.model.fit(shuffled_X, shuffled_y, **kwargs)
        self.current_epoch = kwargs.get("epochs", 10)
        logger.info("Done fitting keras policy model")

    def continue_training(self, training_data, domain, **kwargs):
        # fit to one extra example (online/interactive learning)
        self.current_epoch += 1
        self.model.fit(training_data.X, training_data.y_as_one_hot(domain),
                       epochs=self.current_epoch + 1,
                       batch_size=1,
                       verbose=0,
                       initial_epoch=self.current_epoch)

    def _persist_configuration(self, config_file):
        # File names are relative to the directory passed to persist().
        model_config = {
            "arch": "keras_arch.json",
            "weights": "keras_weights.h5",
            "epochs": self.current_epoch}
        utils.dump_obj_as_json_to_file(config_file, model_config)

    def persist(self, path):
        """Write model architecture, weights and config under `path`."""
        if self.model:
            arch_file = os.path.join(path, 'keras_arch.json')
            weights_file = os.path.join(path, 'keras_weights.h5')
            config_file = os.path.join(path, 'keras_policy.json')

            # makes sure the model directory exists
            utils.create_dir_for_file(weights_file)
            utils.dump_obj_as_str_to_file(arch_file, self.model.to_json())
            self._persist_configuration(config_file)
            self.model.save_weights(weights_file, overwrite=True)
        else:
            warnings.warn("Persist called without a trained model present. "
                          "Nothing to persist then!")

    @classmethod
    def _load_model_arch(cls, path, meta):
        """Recreate the (untrained) keras model from its JSON description."""
        from keras.models import model_from_json

        arch_file = os.path.join(path, meta["arch"])
        if os.path.isfile(arch_file):
            with io.open(arch_file) as f:
                model = model_from_json(f.read())
            return model
        else:
            return None

    @classmethod
    def _load_weights_for_model(cls, path, model, meta):
        # Loading weights is skipped when either the model or the weights
        # file is missing; the model is returned unchanged in that case.
        weights_file = os.path.join(path, meta["weights"])
        if model is not None and os.path.exists(weights_file):
            model.load_weights(weights_file)
        return model

    @classmethod
    def load(cls, path, featurizer, max_history):
        """Restore a persisted policy from `path`.

        Returns an untrained policy if no model files are found there;
        raises if the path itself does not exist.
        """
        if os.path.exists(path):
            meta_path = os.path.join(path, "keras_policy.json")
            if os.path.isfile(meta_path):
                with io.open(meta_path) as f:
                    meta = json.loads(f.read())
                model_arch = cls._load_model_arch(path, meta)
                return cls(
                        cls._load_weights_for_model(path, model_arch, meta),
                        current_epoch=meta["epochs"],
                        max_history=max_history,
                        featurizer=featurizer
                )
            else:
                return cls(max_history=max_history,
                           featurizer=featurizer)
        else:
            raise Exception("Failed to load dialogue model. Path {} "
                            "doesn't exist".format(os.path.abspath(path)))
| rasa_core/policies/keras_policy.py | 6,688 | Build a keras model and return a compiled model.
:param max_history_len: The maximum number of historical
turns used to decide on next action
we need to add a batch dimension with length 1 Neural Net and training params Build Model type: (DialogueTrainingData, Domain, **Any) -> None fit to one extra example makes sure the model directory exists | 373 | en | 0.782146 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
import pytest
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.rules.core import list_targets_old
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class ListTargetsTest(GoalRuleTestBase):
    """Tests for the `list` goal against a small synthetic BUILD tree."""

    goal_cls = list_targets_old.List

    @classmethod
    def alias_groups(cls):
        # BUILD-file symbols available to the test BUILD files created in
        # setUp below.
        return BuildFileAliases(
            targets={
                "target": Target,
                "java_library": JavaLibrary,
                "python_library": PythonLibrary,
            },
            objects={
                "pants": lambda x: x,
                "artifact": Artifact,
                "scala_artifact": ScalaArtifact,
                "public": Repository(
                    name="public", url="http://maven.example.com", push_db_basedir="/tmp"
                ),
            },
        )

    @classmethod
    def rules(cls):
        return super().rules() + list_targets_old.rules()

    def setUp(self) -> None:
        super().setUp()

        # Setup a BUILD tree for various list tests
        class Lib:
            # Describes one java_library and (optionally) an artifact it
            # provides; `provides` is rendered verbatim into the BUILD file.
            def __init__(self, name: str, provides: bool = False) -> None:
                self.name = name
                self.provides = (
                    dedent(
                        f"""
                        artifact(
                            org='com.example',
                            name='{name}',
                            repo=public
                        )
                        """
                    ).strip()
                    if provides
                    else "None"
                )

        def create_library(path: str, *libs: Lib) -> None:
            # Default to a single library named after the directory.
            libs = libs or (Lib(os.path.basename(os.path.dirname(self.build_path(path)))),)
            for lib in libs:
                target = f"java_library(name='{lib.name}', provides={lib.provides}, sources=[])\n"
                self.add_to_build_file(path, target)

        create_library("a")
        create_library("a/b", Lib("b", provides=True))
        create_library("a/b/c", Lib("c"), Lib("c2", provides=True), Lib("c3"))
        create_library("a/b/d")
        create_library("a/b/e", Lib("e1"))
        self.add_to_build_file(
            "f",
            dedent(
                '''
                target(
                    name='alias',
                    dependencies=[
                        'a/b/c:c3',
                        'a/b/d:d',
                    ],
                    description = """
                Exercises alias resolution.
                Further description.
                    """,
                )
                '''
            ),
        )

    def test_list_all_empty(self):
        # NB: Also renders a warning to stderr, which is challenging to detect here but confirmed in:
        # tests/python/pants_test/engine/legacy/test_list_integration.py
        self.assert_console_output(args=[])

    def test_list_path(self):
        self.assert_console_output("a/b:b", args=["a/b"])

    def test_list_siblings(self):
        # `dir:` lists only targets declared directly in that directory.
        self.assert_console_output("a/b:b", args=["a/b:"])
        self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/:"])

    def test_list_descendants(self):
        # `dir::` lists the directory and everything beneath it.
        self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/::"])
        self.assert_console_output(
            "a/b:b", "a/b/c:c", "a/b/c:c2", "a/b/c:c3", "a/b/d:d", "a/b/e:e1", args=["a/b::"]
        )

    @pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8678")
    def test_list_all(self):
        # Exercises the default newline separator, a custom separator, and
        # the line-by-line assertion helper over the whole repo (`::`).
        self.assert_entries(
            "\n",
            "a:a",
            "a/b:b",
            "a/b/c:c",
            "a/b/c:c2",
            "a/b/c:c3",
            "a/b/d:d",
            "a/b/e:e1",
            "f:alias",
            args=["::"],
        )
        self.assert_entries(
            ", ",
            "a:a",
            "a/b:b",
            "a/b/c:c",
            "a/b/c:c2",
            "a/b/c:c3",
            "a/b/d:d",
            "a/b/e:e1",
            "f:alias",
            args=["--sep=, ", "::"],
        )
        self.assert_console_output(
            "a:a",
            "a/b:b",
            "a/b/c:c",
            "a/b/c:c2",
            "a/b/c:c3",
            "a/b/d:d",
            "a/b/e:e1",
            "f:alias",
            args=["::"],
        )

    def test_list_provides(self):
        # Only targets with a `provides=` artifact are listed.
        self.assert_console_output(
            "a/b:b com.example#b", "a/b/c:c2 com.example#c2", args=["--provides", "::"]
        )

    def test_list_provides_customcols(self):
        self.assert_console_output(
            "/tmp a/b:b http://maven.example.com public com.example#b",
            "/tmp a/b/c:c2 http://maven.example.com public com.example#c2",
            args=[
                "--provides",
                "--provides-columns=push_db_basedir,address,repo_url,repo_name,artifact_id",
                "::",
            ],
        )

    def test_list_dedups(self):
        # Overlapping specs must not produce duplicate entries.
        self.assert_console_output("a/b/c:c3", "a/b/d:d", args=["a/b/d/::", "a/b/c:c3", "a/b/d:d"])

    def test_list_documented(self):
        self.assert_console_output(
            # Confirm empty listing
            args=["--documented", "a/b"],
        )
        self.assert_console_output_ordered(
            "f:alias",
            "  Exercises alias resolution.",
            "  Further description.",
            args=["--documented", "::"],
        )
| tests/python/pants_test/backend/graph_info/tasks/test_list_targets.py | 5,910 | Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). Licensed under the Apache License, Version 2.0 (see LICENSE). Setup a BUILD tree for various list tests NB: Also renders a warning to stderr, which is challenging to detect here but confirmed in: tests/python/pants_test/engine/legacy/test_list_integration.py Confirm empty listing | 347 | en | 0.774199 |
from typing import Any, Dict
from .base import Presenter
from .presenter import register_presenter
@register_presenter("initial-data")
class InitialData(Presenter):
    """
    Initial data for setup
    """

    @property
    def data(self) -> Dict[Any, Any]:
        # Static bootstrap payload; no request state is consulted.
        defaults = dict(
            privacy_policy="The PP",
            legal_notice="The LN",
            theme="openslides-default",
            logo_web_header_path=None,
            login_info_text=None,
            saml_settings=None,
        )
        return defaults
| openslides_backend/presenter/initial_data.py | 526 | Initial data for setup | 22 | en | 0.665853 |
import sqlite3
import logging
# Event-name strings recorded in the events table (see DataStore.add_event).
DOOR_OPENED = 'door opened'
DOOR_CLOSED = 'door closed'
class DataStore:
def __init__(self, setup=False):
self.connection = sqlite3.connect('db/app.sqlite3.db')
self.connection.row_factory = sqlite3.Row
if setup:
self.setup()
    def record_door_opened(self):
        """Record a door-opened event."""
        self.add_event(DOOR_OPENED)
    def record_door_closed(self):
        """Record a door-closed event."""
        self.add_event(DOOR_CLOSED)
def add_event(self, event):
params = [event]
cursor = self.connection.cursor()
cursor.execute("""insert into events (EVENT)
values(?);""", params)
self.connection.commit()
cursor.close()
def get_events(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 15""")
rows = cursor.fetchall()
events = []
if rows is not None:
for row in rows:
event = {}
for key in row.keys():
event[key.lower()] = row[key]
events.append(event)
cursor.close()
return events
def get_last_event(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 1""")
rows = cursor.fetchone()
event = {}
if rows is not None:
for row in rows:
for key in row.keys():
event[key.lower()] = row[key]
cursor.close()
return event
def get_status(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event,
(strftime('%s','now') - strftime('%s',dt))/60 as
elapsed_minutes from events order by dt desc limit 1""")
row = cursor.fetchone()
status = {}
if row is not None:
for key in row.keys():
status[key.lower()] = row[key]
cursor.close()
return status
def get_settings(self):
cursor = self.connection.cursor()
cursor.execute("""select * from settings limit 1""")
row = cursor.fetchone()
settings = {}
if row is not None:
for key in row.keys():
settings[key.lower()] = row[key]
cursor.close()
return settings
def shutdown(self):
self.connection.commit()
self.connection.close()
def setup(self):
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from events')
# print cursor.fetchone()
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating events table...')
cursor.execute("""create table events(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
DT DATETIME DEFAULT CURRENT_TIMESTAMP,
EVENT TEXT);""")
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from settings')
# print cursor.fetchone()
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating settings table...')
cursor.execute("""CREATE TABLE "settings" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"check_interval_mins" INTEGER DEFAULT (2),
"notify_interval_mins" INTEGER DEFAULT (30),
"warning_threshold_mins" INTEGER DEFAULT (15),
"sentry_mode" INTEGER DEFAULT (0))""")
self.connection.commit()
cursor.execute('insert into settings (id) values (1)')
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
| garage/datastore.py | 4,064 | print cursor.fetchone() print cursor.fetchone() | 47 | en | 0.17944 |
import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
from .utils import zerocenter
# Public API of this module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']
# Torchvision checkpoint URLs; currently unused because the pretrained-weight
# loading in _resnet() is commented out.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation, so spatial size is kept at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    opts = {"kernel_size": 1, "stride": stride, "bias": False}
    return nn.Conv2d(in_planes, out_planes, **opts)
class BasicBlock(nn.Module):
    """Two-conv residual block with zerocenter() applied after every ReLU."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Spatial downsampling (stride != 1) happens in conv1 and, in
        # parallel, in the optional downsample branch.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.conv1(x)
        y = self.bn1(y)
        y = zerocenter(self.relu(y))

        y = self.conv2(y)
        y = self.bn2(y)

        y = self.relu(y + shortcut)
        return zerocenter(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck with zerocenter() after each ReLU."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv scales with base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # Spatial downsampling (stride != 1) happens in conv2 and, in
        # parallel, in the optional downsample branch.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        y = zerocenter(self.relu(self.bn1(self.conv1(x))))
        y = zerocenter(self.relu(self.bn2(self.conv2(y))))
        y = self.bn3(self.conv3(y))

        y = self.relu(y + shortcut)
        return zerocenter(y)
class ResNet(nn.Module):
    """ResNet backbone whose ReLU activations are followed by zerocenter().

    Structure mirrors torchvision's ResNet: 7x7 stem, four residual stages
    built from `block`, global average pooling, and a linear classifier.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        """
        Args:
            block: residual block class (BasicBlock or Bottleneck).
            layers: number of blocks in each of the four stages.
            num_classes: output size of the final linear layer.
            zero_init_residual: zero-init the last BN of each residual block.
            groups / width_per_group: grouped-convolution configuration.
            replace_stride_with_dilation: per-stage flags trading stride for
                dilation (keeps spatial resolution).
            norm_layer: normalization layer factory (default: BatchNorm2d).
        """
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Standard He init for convs; BN/GN weights start at 1, biases at 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of `blocks` blocks; only the first block
        may downsample (stride != 1) or change the channel count."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation: resolution is kept, receptive field grows.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # zerocenter is applied once after the stem, then inside each block.
        x = zerocenter(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Instantiate a ResNet of the given architecture.

    NOTE: pretrained-weight loading is intentionally disabled in this copy;
    `arch`, `pretrained` and `progress` are accepted only for API
    compatibility with the torchvision constructors.
    """
    return ResNet(block, layers, **kwargs)
def resnet18(pretrained=False, progress=True, **kwargs):
    """Construct a ResNet-18 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    """Construct a ResNet-34 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    """Construct a ResNet-50 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """Construct a ResNet-101 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """Construct a ResNet-152 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """Construct a ResNeXt-50 32x4d ("Aggregated Residual Transformation for
    Deep Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """Construct a ResNeXt-101 32x8d ("Aggregated Residual Transformation for
    Deep Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Construct a Wide ResNet-50-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-50 except that every bottleneck's 3x3 conv is twice
    as wide; the outer 1x1 channel counts are unchanged (e.g. the last block
    is 2048-1024-2048 instead of 2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Construct a Wide ResNet-101-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-101 except that every bottleneck's 3x3 conv is twice
    as wide; the outer 1x1 channel counts are unchanged (e.g. the last block
    is 2048-1024-2048 instead of 2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
class ZeroCenterEncoder(ResNet):
    """ResNet variant used as a feature encoder: the classifier head is
    removed and forward() returns multi-scale feature maps."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pretrained = False
        # The classification head is not part of the encoder.
        del self.fc

    def forward(self, x):
        """Return feature maps from deepest to shallowest: [x4, x3, x2, x1, x0]."""
        x0 = self.conv1(x)
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        x1 = self.maxpool(x0)
        x1 = zerocenter(x1)
        x1 = self.layer1(x1)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        return [x4, x3, x2, x1, x0]

    def load_state_dict(self, state_dict, **kwargs):
        """Load weights while discarding the (deleted) classifier head.

        Robustness fix: use pop(key, None) so state dicts that were already
        stripped of 'fc.*' no longer raise KeyError.
        """
        state_dict.pop('fc.bias', None)
        state_dict.pop('fc.weight', None)
        super().load_state_dict(state_dict, **kwargs)
3x3 convolution with padding
ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
from .utils import load_state_dict_from_url Both self.conv1 and self.downsample layers downsample the input when stride != 1 Both self.conv2 and self.downsample layers downsample the input when stride != 1 each element in the tuple indicates if we should replace the 2x2 stride with a dilated convolution instead Zero-initialize the last BN in each residual branch, so that the residual branch starts with zeros, and each residual block behaves like an identity. This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 if pretrained: state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) | 3,761 | en | 0.731439 |
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
    """Runs the fio benchmark on every cluster host and posts per-host IOPS
    (random mixed reads and writes) to the tracker service."""

    TRACKER = 'fio.sc.couchbase.com'

    TEMPLATE = {
        'group': '{}, random mixed reads and writes, IOPS',
        'metric': None,
        'value': None,
    }

    def __init__(self, cluster_spec, test_config, verbose):
        self.cluster_spec = cluster_spec
        self.test_config = test_config
        self.remote = RemoteHelper(cluster_spec, verbose)

    def __exit__(self, *args, **kwargs):
        pass

    @staticmethod
    def _parse(results):
        """Sum read and write IOPS per host from fio terse output.

        See https://github.com/axboe/fio/blob/master/HOWTO for the
        semicolon-separated field layout (field 7: reads, field 48: writes).
        """
        totals = defaultdict(int)
        for host, output in results.items():
            for job in output.split():
                fields = job.split(';')
                totals[host] += int(fields[7]) + int(fields[48])
        return totals

    def _post(self, data):
        """Serialize one benchmark record and POST it to the tracker."""
        payload = pretty_dict(data)
        logger.info('Posting: {}'.format(payload))
        url = 'http://{}/api/v1/benchmarks'.format(self.TRACKER)
        requests.post(url, data=payload)

    def _report_kpi(self, stats):
        """Emit one tracker record per host, with the host name as metric."""
        for host, iops in stats.items():
            record = self.TEMPLATE.copy()
            record['group'] = record['group'].format(self.cluster_spec.name.title())
            record['metric'] = host
            record['value'] = iops
            self._post(record)

    def run(self):
        raw = self.remote.fio(self.test_config.fio['config'])
        self._report_kpi(self._parse(raw))
See also https://github.com/axboe/fio/blob/master/HOWTO
reads writes | 94 | en | 0.769331 |
#!/usr/bin/env python3
"""
Possible string formats:
<author(s)> <title> <source> <year>
"""
import re
import pdf
# ANSI terminal colour escape sequences used for console output.
CRED = '\033[91m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
CEND = '\033[0m'


def extract_references_list_by_keyword(text, keyword):
    """Print (in yellow) and return the reference entries found after *keyword*.

    The references section starts at the first regex match of *keyword*
    (e.g. 'REFERENCES'); entries are delimited by bracketed indices such as
    [1], [42] or [123].

    Fixes: removed the stray debug print of the whole input, made the index
    pattern a raw string (avoids invalid-escape warnings), and the entries
    are now returned so callers no longer need to re-parse stdout.

    :raises AttributeError: if *keyword* does not occur in *text*.
    """
    match_res = re.search(keyword, text)
    ref_text = text[match_res.span()[0]:]
    # WARNING: not more than 999 references!
    index_re = re.compile(r'\[[0-9]{1,3}\]')
    ref_pos = [m.span()[0] for m in index_re.finditer(ref_text)]
    ref_pos.append(len(ref_text))
    refs = []
    for start, end in zip(ref_pos, ref_pos[1:]):
        refs.append(ref_text[start:end])
        print(CYELLOW + refs[-1] + CEND)
    return refs
def extract_references_list(text):
    """Scan *text* right-to-left and return reference entries in document order.

    Entries are delimited by bracketed indices ([1], [12], ...); the backward
    scan stops once the entry starting with [1] has been collected, so only
    the trailing references section is parsed.

    Fixes over the original:
    - the last character of the text is no longer skipped (off-by-one in the
      reversed range);
    - multi-digit indices such as [12] no longer hit a bare ``raise`` (which
      itself raised "RuntimeError: No active exception to re-raise") -- extra
      digits keep the scanner in the digits state, and any other character
      simply resets it;
    - a concrete list is returned (still iterable like the previous
      ``reversed`` object).
    """
    res = []
    buffer = ""
    state = 0  # 0: seeking ']', 1: first digit after ']', 2: more digits / '['
    for i in reversed(range(len(text))):
        c = text[i]
        buffer = c + buffer
        if state == 0:
            if c == ']':
                state = 1
        elif state == 1:
            state = 2 if c.isdigit() else 0
        else:  # state == 2: accumulating digits, waiting for '['
            if c.isdigit():
                continue
            if c == '[':
                res.append(buffer)
                if buffer.startswith('[1]'):
                    # Reached the first reference; everything before is body text.
                    break
                buffer = ""
            state = 0
    return list(reversed(res))
def extract_article_from_reference(string):
    """Parse a single reference string into its parts.

    TODO: not implemented yet; per the original sketch it is intended to
    return (authors, title, date).
    """
    pass
    # return (autors, title, date)
if __name__ == '__main__':
    import sys
    # CLI usage: parse_reference.py <pdf-file> -- dumps the extracted text,
    # then prints each detected reference in yellow.
    text = pdf.extract_text(sys.argv[1])
    print(text)
    # zextract_references_list_by_keyword('REFERENCES')
    ref_list = extract_references_list(text)
    for ref in ref_list:
        print(CYELLOW + ref + CEND)
| parse_reference.py | 1,867 | Possible string formats:
<author(s)> <title> <source> <year>
!/usr/bin/env python3 print(ref_text) WARNING: not more than 999 references! return (autors, title, date) zextract_references_list_by_keyword('REFERENCES') | 217 | en | 0.259337 |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
import tests.integ
from sagemaker import AutoML, CandidateEstimator, AutoMLInput
from botocore.exceptions import ClientError
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, AUTO_ML_DEFAULT_TIMEMOUT_MINUTES, auto_ml_utils
from tests.integ.timeout import timeout
ROLE = "SageMakerRole"
PREFIX = "sagemaker/beta-automl-xgboost"
AUTO_ML_INSTANCE_TYPE = "ml.m5.2xlarge"
INSTANCE_COUNT = 1
RESOURCE_POOLS = [{"InstanceType": AUTO_ML_INSTANCE_TYPE, "PoolSize": INSTANCE_COUNT}]
TARGET_ATTRIBUTE_NAME = "virginica"
DATA_DIR = os.path.join(DATA_DIR, "automl", "data")
TRAINING_DATA = os.path.join(DATA_DIR, "iris_training.csv")
TEST_DATA = os.path.join(DATA_DIR, "iris_test.csv")
TRANSFORM_DATA = os.path.join(DATA_DIR, "iris_transform.csv")
PROBLEM_TYPE = "MultiClassClassification"
BASE_JOB_NAME = "auto-ml"
# use a succeeded AutoML job to test describe and list candidates method, otherwise tests will run too long
AUTO_ML_JOB_NAME = "python-sdk-integ-test-base-job"
DEFAULT_MODEL_NAME = "python-sdk-automl"
EXPECTED_DEFAULT_JOB_CONFIG = {
"CompletionCriteria": {"MaxCandidates": 3},
"SecurityConfig": {"EnableInterContainerTrafficEncryption": False},
}
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_auto_ml_fit(sagemaker_session):
    """Smoke test: AutoML.fit completes on S3 input with 3 candidates."""
    auto_ml = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
        max_candidates=3,
    )
    job_name = unique_name_from_base("auto-ml", max_length=32)
    inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_local_input(sagemaker_session):
    """AutoML.fit accepts a local CSV path as the training input."""
    auto_ml = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
        max_candidates=1,
    )
    inputs = TRAINING_DATA
    job_name = unique_name_from_base("auto-ml", max_length=32)
    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_input_object_fit(sagemaker_session):
    """AutoML.fit accepts an AutoMLInput wrapper around an S3 URI."""
    auto_ml = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
        max_candidates=1,
    )
    job_name = unique_name_from_base("auto-ml", max_length=32)
    s3_input = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
    inputs = AutoMLInput(inputs=s3_input, target_attribute_name=TARGET_ATTRIBUTE_NAME)
    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_fit_optional_args(sagemaker_session):
    """Optional ctor args (output_path, problem_type, job_objective) must
    propagate into the job description."""
    # NOTE(review): "ouput" typo in the S3 prefix below -- it is a runtime
    # string (only a bucket prefix), so it is deliberately left unchanged.
    output_path = "s3://{}/{}".format(sagemaker_session.default_bucket(), "specified_ouput_path")
    problem_type = "MulticlassClassification"
    job_objective = {"MetricName": "Accuracy"}
    auto_ml = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
        max_candidates=1,
        output_path=output_path,
        problem_type=problem_type,
        job_objective=job_objective,
    )
    inputs = TRAINING_DATA
    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        auto_ml.fit(inputs, job_name=unique_name_from_base(BASE_JOB_NAME))
    auto_ml_desc = auto_ml.describe_auto_ml_job(job_name=auto_ml.latest_auto_ml_job.job_name)
    assert auto_ml_desc["AutoMLJobStatus"] == "Completed"
    assert auto_ml_desc["AutoMLJobName"] == auto_ml.latest_auto_ml_job.job_name
    assert auto_ml_desc["AutoMLJobObjective"] == job_objective
    assert auto_ml_desc["ProblemType"] == problem_type
    assert auto_ml_desc["OutputDataConfig"]["S3OutputPath"] == output_path
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_invalid_target_attribute(sagemaker_session):
    """A target column absent from the CSV header must fail CreateAutoMLJob
    with a ValidationException."""
    auto_ml = AutoML(
        role=ROLE, target_attribute_name="y", sagemaker_session=sagemaker_session, max_candidates=1
    )
    job_name = unique_name_from_base("auto-ml", max_length=32)
    inputs = sagemaker_session.upload_data(path=TRAINING_DATA, key_prefix=PREFIX + "/input")
    with pytest.raises(
        ClientError,
        match=r"An error occurred \(ValidationException\) when calling the CreateAutoMLJob "
        "operation: Target attribute name y does not exist in header.",
    ):
        auto_ml.fit(inputs, job_name=job_name)
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_describe_auto_ml_job(sagemaker_session):
    """describe_auto_ml_job returns the expected input/output/job config for
    the shared, already-completed AutoML job."""
    expected_default_input_config = [
        {
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://{}/{}/input/iris_training.csv".format(
                        sagemaker_session.default_bucket(), PREFIX
                    ),
                }
            },
            "TargetAttributeName": TARGET_ATTRIBUTE_NAME,
        }
    ]
    expected_default_output_config = {
        "S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
    }
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
    auto_ml = AutoML(
        role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
    )
    desc = auto_ml.describe_auto_ml_job(job_name=AUTO_ML_JOB_NAME)
    assert desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
    assert desc["AutoMLJobStatus"] == "Completed"
    assert isinstance(desc["BestCandidate"], dict)
    assert desc["InputDataConfig"] == expected_default_input_config
    assert desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
    assert desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_auto_ml_attach(sagemaker_session):
    """AutoML.attach reconstructs a handle to the shared job whose describe
    output matches the original job's configuration."""
    expected_default_input_config = [
        {
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": "s3://{}/{}/input/iris_training.csv".format(
                        sagemaker_session.default_bucket(), PREFIX
                    ),
                }
            },
            "TargetAttributeName": TARGET_ATTRIBUTE_NAME,
        }
    ]
    expected_default_output_config = {
        "S3OutputPath": "s3://{}/".format(sagemaker_session.default_bucket())
    }
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
    attached_automl_job = AutoML.attach(
        auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session
    )
    attached_desc = attached_automl_job.describe_auto_ml_job()
    assert attached_desc["AutoMLJobName"] == AUTO_ML_JOB_NAME
    assert attached_desc["AutoMLJobStatus"] == "Completed"
    assert isinstance(attached_desc["BestCandidate"], dict)
    assert attached_desc["InputDataConfig"] == expected_default_input_config
    assert attached_desc["AutoMLJobConfig"] == EXPECTED_DEFAULT_JOB_CONFIG
    assert attached_desc["OutputDataConfig"] == expected_default_output_config
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_list_candidates(sagemaker_session):
    """The shared completed AutoML job exposes exactly 3 candidates."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)

    automl_job = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
    )
    found = automl_job.list_candidates(job_name=AUTO_ML_JOB_NAME)
    assert len(found) == 3
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_best_candidate(sagemaker_session):
    """best_candidate of the shared job is completed, with 3 inference
    containers and 4 candidate steps."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)

    automl_job = AutoML(
        role=ROLE,
        target_attribute_name=TARGET_ATTRIBUTE_NAME,
        sagemaker_session=sagemaker_session,
    )
    winner = automl_job.best_candidate(job_name=AUTO_ML_JOB_NAME)
    assert len(winner["InferenceContainers"]) == 3
    assert len(winner["CandidateSteps"]) == 4
    assert winner["CandidateStatus"] == "Completed"
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
@pytest.mark.canary_quick
def test_deploy_best_candidate(sagemaker_session, cpu_instance_type):
    """Deploying the best candidate yields an InService endpoint; the
    endpoint is deleted afterwards."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
    auto_ml = AutoML(
        role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
    )
    best_candidate = auto_ml.best_candidate(job_name=AUTO_ML_JOB_NAME)
    endpoint_name = unique_name_from_base("sagemaker-auto-ml-best-candidate-test")
    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        auto_ml.deploy(
            candidate=best_candidate,
            initial_instance_count=INSTANCE_COUNT,
            instance_type=cpu_instance_type,
            endpoint_name=endpoint_name,
        )
    endpoint_status = sagemaker_session.sagemaker_client.describe_endpoint(
        EndpointName=endpoint_name
    )["EndpointStatus"]
    assert endpoint_status == "InService"
    sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
    tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
    reason="AutoML is not supported in the region yet.",
)
def test_create_model_best_candidate(sagemaker_session, cpu_instance_type):
    """create_model on the best candidate produces a pipeline model whose
    transformer can run a CSV batch transform."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)
    auto_ml = AutoML.attach(auto_ml_job_name=AUTO_ML_JOB_NAME, sagemaker_session=sagemaker_session)
    best_candidate = auto_ml.best_candidate()
    with timeout(minutes=5):
        pipeline_model = auto_ml.create_model(
            name=DEFAULT_MODEL_NAME,
            candidate=best_candidate,
            sagemaker_session=sagemaker_session,
            vpc_config=None,
            enable_network_isolation=False,
            model_kms_key=None,
            predictor_cls=None,
        )
        inputs = sagemaker_session.upload_data(
            path=TRANSFORM_DATA, key_prefix=PREFIX + "/transform_input"
        )
        pipeline_model.transformer(
            instance_count=1,
            instance_type=cpu_instance_type,
            assemble_with="Line",
            output_path="s3://{}/{}".format(sagemaker_session.default_bucket(), "transform_test"),
            accept="text/csv",
        ).transform(data=inputs, content_type="text/csv", split_type="Line", join_source="Input")
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_default_rerun_and_deploy(sagemaker_session, cpu_instance_type):
    """Re-run a candidate's training steps with default options, then deploy and verify it."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)

    automl_job = AutoML(
        role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
    )
    candidate = automl_job.list_candidates(job_name=AUTO_ML_JOB_NAME)[1]
    estimator = CandidateEstimator(candidate, sagemaker_session)
    training_input = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
    endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")

    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        estimator.fit(training_input)
        automl_job.deploy(
            initial_instance_count=INSTANCE_COUNT,
            instance_type=cpu_instance_type,
            candidate=candidate,
            endpoint_name=endpoint_name,
        )

    status = sagemaker_session.sagemaker_client.describe_endpoint(
        EndpointName=endpoint_name
    )["EndpointStatus"]
    assert status == "InService"
    sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_rerun_with_optional_args(sagemaker_session, cpu_instance_type):
    """Re-run a candidate's training with inter-container traffic encryption, then deploy."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)

    automl_job = AutoML(
        role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
    )
    candidate = automl_job.list_candidates(job_name=AUTO_ML_JOB_NAME)[1]
    estimator = CandidateEstimator(candidate, sagemaker_session)
    training_input = sagemaker_session.upload_data(path=TEST_DATA, key_prefix=PREFIX + "/input")
    endpoint_name = unique_name_from_base("sagemaker-auto-ml-rerun-candidate-test")

    with timeout(minutes=AUTO_ML_DEFAULT_TIMEMOUT_MINUTES):
        estimator.fit(training_input, encrypt_inter_container_traffic=True)
        automl_job.deploy(
            initial_instance_count=INSTANCE_COUNT,
            instance_type=cpu_instance_type,
            candidate=candidate,
            endpoint_name=endpoint_name,
        )

    status = sagemaker_session.sagemaker_client.describe_endpoint(
        EndpointName=endpoint_name
    )["EndpointStatus"]
    assert status == "InService"
    sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
@pytest.mark.skipif(
tests.integ.test_region() in tests.integ.NO_AUTO_ML_REGIONS,
reason="AutoML is not supported in the region yet.",
)
def test_candidate_estimator_get_steps(sagemaker_session):
    """A candidate from the shared AutoML job should expose exactly three pipeline steps."""
    auto_ml_utils.create_auto_ml_job_if_not_exist(sagemaker_session)

    automl_job = AutoML(
        role=ROLE, target_attribute_name=TARGET_ATTRIBUTE_NAME, sagemaker_session=sagemaker_session
    )
    candidate = automl_job.list_candidates(job_name=AUTO_ML_JOB_NAME)[1]
    estimator = CandidateEstimator(candidate, sagemaker_session)
    assert len(estimator.get_steps()) == 3
| tests/integ/test_auto_ml.py | 15,469 | Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. use a succeeded AutoML job to test describe and list candidates method, otherwise tests will run too long | 651 | en | 0.883253 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.decode import mot_decode
from lib.models.losses import FocalLoss
from lib.models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from lib.models.utils import _sigmoid, _tranpose_and_gather_feat
from lib.utils.post_process import ctdet_post_process
from .base_trainer import BaseTrainer
# Loss definition for joint detection + re-identification (MOT training).
class MotLoss(torch.nn.Module):
    """
    Multi-task loss combining CenterNet-style detection terms (heatmap, box
    size, center offset) with a cross-entropy re-ID classification term, fused
    through two learned scalar weights (s_det, s_id).
    """

    def __init__(self, opt):
        super(MotLoss, self).__init__()
        # Heatmap loss: plain MSE or focal loss, depending on opt.mse_loss.
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None  # L1 loss or smooth L1 loss
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
            RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg  # box size loss
        self.opt = opt
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID
        # The only layer with learnable weights defined here: the fully
        # connected classifier mapping re-ID embeddings to per-track-id scores.
        self.classifier = nn.Linear(self.emb_dim, self.nID)
        # Cross-entropy over track ids; targets of -1 (no id) are ignored.
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        # self.TriLoss = TripletLoss()
        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))  # learned scaling weight for the detection loss
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))  # learned scaling weight for the id loss

    def forward(self, outputs, batch):
        """
        Compute the fused loss over all network stacks.

        :param outputs: per-stack head dicts with keys 'hm', 'wh', 'reg', 'id'.
        :param batch: ground-truth dict ('hm', 'wh', 'reg', 'ind', 'reg_mask',
            'ids', and optionally 'dense_wh'/'dense_wh_mask').
        :return: (total loss, dict of the individual loss terms)
        """
        opt = self.opt
        hm_loss, wh_loss, off_loss, id_loss = 0.0, 0.0, 0.0, 0.0  # initialise the 4 loss terms
        for s in range(opt.num_stacks):
            output = outputs[s]
            if not opt.mse_loss:
                output['hm'] = _sigmoid(output['hm'])

            # Heatmap (object center) loss.
            hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
            if opt.wh_weight > 0:
                if opt.dense_wh:
                    mask_weight = batch['dense_wh_mask'].sum() + 1e-4
                    wh_loss += (self.crit_wh(output['wh'] * batch['dense_wh_mask'],
                                             batch['dense_wh'] * batch['dense_wh_mask']) /
                                mask_weight) / opt.num_stacks
                else:  # L1 / smooth-L1 loss on box sizes
                    wh_loss += self.crit_reg(
                        output['wh'], batch['reg_mask'],
                        batch['ind'], batch['wh']) / opt.num_stacks

            if opt.reg_offset and opt.off_weight > 0:  # L1 loss on box center offsets
                off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                          batch['ind'], batch['reg']) / opt.num_stacks

            # Cross-entropy id-classification loss over detected objects.
            if opt.id_weight > 0:
                id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
                id_head = id_head[batch['reg_mask'] > 0].contiguous()  # only pixels with an object contribute
                id_head = self.emb_scale * F.normalize(id_head)
                id_target = batch['ids'][batch['reg_mask'] > 0]  # track ids of those objects
                id_output = self.classifier.forward(id_head).contiguous()  # FC classifier over track ids
                id_loss += self.IDLoss(id_output, id_target)
                # id_loss += self.IDLoss(id_output, id_target) + self.TriLoss(id_head, id_target)

        # loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + opt.off_weight * off_loss + opt.id_weight * id_loss
        det_loss = opt.hm_weight * hm_loss \
                   + opt.wh_weight * wh_loss \
                   + opt.off_weight * off_loss

        # Fuse detection and id losses with the learned weights s_det / s_id
        # (exp(-s) * L + s form, so the weights themselves are regularised).
        loss = torch.exp(-self.s_det) * det_loss \
               + torch.exp(-self.s_id) * id_loss \
               + (self.s_det + self.s_id)
        loss *= 0.5
        # print(loss, hm_loss, wh_loss, off_loss, id_loss)
        loss_stats = {'loss': loss,
                      'hm_loss': hm_loss,
                      'wh_loss': wh_loss,
                      'off_loss': off_loss,
                      'id_loss': id_loss}
        return loss, loss_stats
# Core trainer class for the MOT task.
class MotTrainer(BaseTrainer):
    """Trainer wiring the MOT loss into the generic BaseTrainer loop."""

    def __init__(self, opt, model, optimizer=None):
        super(MotTrainer, self).__init__(opt, model, optimizer=optimizer)

    def _get_losses(self, opt):
        """Return the names of the tracked loss statistics and the loss module."""
        stat_names = ['loss', 'hm_loss', 'wh_loss', 'off_loss', 'id_loss']
        return stat_names, MotLoss(opt)

    def save_result(self, output, batch, results):
        """Decode this batch's detections and store the post-processed boxes by image id."""
        offset = output['reg'] if self.opt.reg_offset else None
        decoded = mot_decode(
            output['hm'], output['wh'], reg=offset,
            cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
        decoded = decoded.detach().cpu().numpy().reshape(1, -1, decoded.shape[2])
        meta = batch['meta']
        processed = ctdet_post_process(
            decoded.copy(),
            meta['c'].cpu().numpy(),
            meta['s'].cpu().numpy(),
            output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
        results[meta['img_id'].cpu().numpy()[0]] = processed[0]
| src/lib/trains/mot.py | 5,548 | :param outputs:
:param batch:
:return:
损失函数的定义 L1 loss or smooth l1 loss box size loss 唯一包含可学习参数的层: 用于Re-ID的全连接层 不同的track id分类最后一层FC:将特征转换到概率得分 不同的track id分类用交叉熵损失 self.TriLoss = TripletLoss() 检测的损失缩放系数 track id分类的损失缩放系数 初始化4个loss为0 计算heatmap loss 计算box尺寸的L1/Smooth L1 loss 计算box中心坐标偏移的L1 loss 检测目标id分类的交叉熵损失 只有有目标的像素才计算id loss 有目标的track id 用于检测目标分类的最后一层是FC? id_loss += self.IDLoss(id_output, id_target) + self.TriLoss(id_head, id_target) loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + opt.off_weight * off_loss + opt.id_weight * id_loss print(loss, hm_loss, wh_loss, off_loss, id_loss) 核心训练类 | 606 | zh | 0.335986 |
from typing import Any, Dict, List
import pandas
from dagster import AssetKey, AssetMaterialization, EventMetadataEntry
from dagster_dbt import DbtOutput
from .snowflake_io_manager import connect_snowflake
class DbtAssetResource:
    """
    Resource that converts a DbtOutput into AssetMaterialization events, one
    for each dbt model that was generated successfully.

    Keeping this logic in a resource makes it easy to swap between modes:
    local/test pipelines usually write to different tables than production
    (different dbt profiles), so they should report different assets.
    """

    def __init__(self, asset_key_prefix: List[str]):
        self._asset_key_prefix = asset_key_prefix

    def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
        """Base metadata for one dbt result: its execution time."""
        execution_time = result["execution_time"]
        return [
            EventMetadataEntry.float(
                value=execution_time, label="Execution Time (seconds)"
            )
        ]

    def get_asset_materializations(self, dbt_output: DbtOutput) -> List[AssetMaterialization]:
        """Produce one AssetMaterialization per successful model in the dbt run."""

        def _to_materialization(result: Dict[str, Any]) -> AssetMaterialization:
            unique_id = result["unique_id"]
            # Key scheme: <prefix> / model / <dbt project> / <model name>.
            # The scheme itself is an arbitrary choice.
            return AssetMaterialization(
                description=f"dbt node: {unique_id}",
                metadata_entries=self._get_metadata(result),
                asset_key=AssetKey(self._asset_key_prefix + unique_id.split(".")),
            )

        # dbt_output.result holds the parsed results.json; its schema can vary
        # between dbt versions. Written for run-results v2 (works with v1 too).
        return [
            _to_materialization(result)
            for result in dbt_output.result["results"]
            if result["status"] == "success"
        ]
class SnowflakeQueryDbtAssetResource(DbtAssetResource):
    """
    Variant of DbtAssetResource that enriches each materialization by querying
    the output tables in Snowflake (row count plus a few sample rows), since
    the relevant dbt project targets a Snowflake cluster. Entirely optional —
    it only adds observability metadata for Dagster to track over time.
    """

    def __init__(self, snowflake_config: Dict[str, str], dbt_schema: str):
        self._snowflake_config = snowflake_config
        self._dbt_schema = dbt_schema
        super().__init__(asset_key_prefix=["snowflake", dbt_schema])

    def _get_metadata(self, result: Dict[str, Any]) -> List[EventMetadataEntry]:
        """Extend the base metadata with a row count and sample rows from Snowflake."""
        table_name = result["unique_id"].split(".")[-1]
        with connect_snowflake(config=self._snowflake_config, schema=self._dbt_schema) as con:
            n_rows = pandas.read_sql_query(f"SELECT COUNT(*) FROM {table_name}", con)
            sample_rows = pandas.read_sql_query(
                f"SELECT * FROM {table_name} SAMPLE ROW (10 rows)", con
            )
        extra_entries = [
            EventMetadataEntry.int(int(n_rows.iloc[0][0]), "dbt Model Number of Rows"),
            EventMetadataEntry.md(sample_rows.astype("str").to_markdown(), "dbt Model Sample Rows"),
        ]
        return super()._get_metadata(result) + extra_entries
| examples/hacker_news/hacker_news/resources/dbt_asset_resource.py | 3,854 | This class defines a resource that is capable of producing a list of AssetMaterializations from
a DbtOutput. It has one public function, get_asset_materializations(), which finds all the
generated models in the dbt output and produces corresponding asset materializations.
Putting this logic in a resource makes it easier to swap out between modes. You probably want
your local testing / development pipelines to produce different assets than your production
pipelines, as they will ideally be writing to different tables (with different dbt profiles).
This resource allows us to add in some extra information to these AssetMaterialization events.
Because the relevant dbt project is configured for a Snowflake cluster, we can query the output
models to get some additional information that we might want Dagster to track over time.
Of course, this is completely optional.
Here, we run queries against our output Snowflake database tables to add additional context
to our asset materializations.
dbt_output.result contains the parsed contents of the results.json file Note that the json schema can change from version to version. This is written for https://schemas.getdbt.com/dbt/run-results/v2.json (also will work with v1.json) Here, we choose a naming scheme for our asset keys that will look something like <asset prefix> / model / <dbt project> / <model name>, but this is pretty arbitrary create an AssetMaterialization with our key and metadata | 1,456 | en | 0.876133 |
# ***************************************************************************************
# ***************************************************************************************
#
# Name : importcode.py
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 12th March 2019.
# Purpose : Import code into buffer area
#
# ***************************************************************************************
# ***************************************************************************************
import sys
from imagelib import *
#
#	Initialise and get info about where the source buffers live in the image.
#
image = BinaryImage()
bufferInfo = image.sourcePages()
firstSourcePage = bufferInfo[0]		# first page used for source buffers
sourcePageCount = bufferInfo[1]		# number of source pages
pageSize = image.getBufferSize()	# size of one editing buffer in bytes
#
#	Clear all buffers. Source pages advance in steps of 2; each buffer gets an
#	end marker ($80) at its start and its last byte is forced to $00.
#
for p in range(firstSourcePage,firstSourcePage+sourcePageCount*2,2):
	for a in range(0xC000,0x10000,pageSize):
		image.write(p,a,0x80)
		image.write(p,a+pageSize-1,0x00)
print("Found and erased {0} buffers for import ${1:02x}-${2:02x}.". \
		format(int(sourcePageCount*16384/pageSize),firstSourcePage,firstSourcePage+sourcePageCount*2-2))
#
#	State of the buffer currently being filled.
#
currentPageNumber = firstSourcePage		# page being written to
currentPageAddress = 0xC000			# next write address within that page
currentBasePageAddress = 0xC000			# start address of the current buffer
bytesRemaining = pageSize			# free bytes left in the current buffer
count = 1					# number of buffers used so far
#
#	Work through all the source files given on the command line.
#
for f in sys.argv[1:]:
	# Strip // comments, flatten tabs/newlines to spaces, split into words.
	src = [x if x.find("//") < 0 else x[:x.find("//")] for x in open(f).readlines()]
	src = " ".join([x.replace("\t"," ").replace("\n"," ") for x in src])
	src = [x for x in src.split(" ") if x != ""]
	for word in src:
		#
		#	For each word, look at it to see if it has a tag. Default is compilation.
		#
		tag = 0x40					# Green (compile) $40
		if word[0] == ":":				# Red (define) $00
			tag = 0x00
			word = word[1:]
		elif word[0] == "[" and word[-1] == "]":	# Yellow (execute) $80
			tag = 0x80
			word = word[1:-1]
		#
		#	Make the final word and check it fits in the current buffer.
		#
		assert len(word) < 32,"Word too long "+word
		if len(word) + 4 >= bytesRemaining:		# it doesn't fit: close this buffer
			image.write(currentPageNumber,currentPageAddress,0x80)
			currentPageAddress = (currentBasePageAddress + pageSize) & 0xFFFF
			if currentPageAddress == 0:		# wrapped past $FFFF: move to next page
				# NOTE(review): the erase loop above advances pages in steps
				# of 2 but this advances by 1 -- confirm banking granularity.
				currentPageNumber += 1
				currentPageAddress = 0xC000
			currentBasePageAddress = currentPageAddress
			count += 1
			bytesRemaining = pageSize
		#
		#print("\t\t{0:02x} {1:16} ${2:02x}:${3:04x} {4}".format(tag,word,currentPageNumber,currentPageAddress,bytesRemaining))
		#
		#	Store the word: one length byte carrying the colour tag, then the characters.
		#
		image.write(currentPageNumber,currentPageAddress,tag+len(word))
		currentPageAddress += 1
		for c in word:
			image.write(currentPageNumber,currentPageAddress,ord(c))
			currentPageAddress += 1
		bytesRemaining = bytesRemaining - 1 - len(word)
		#
		#	Add a trailing $80 in case it is the last word in the buffer.
		#
		image.write(currentPageNumber,currentPageAddress,0x80)
	print("\tImported file '{0}'.".format(f))
#
#	And write the modified image back out.
#
image.save()
print("Filled {0} buffers.".format(count))
| scripts/importcode.py | 2,940 | *************************************************************************************** *************************************************************************************** Name : importcode.py Author : Paul Robson (paul@robsons.org.uk) Date : 12th March 2019. Purpose : Import code into buffer area *************************************************************************************** *************************************************************************************** Initialise and get info Clear all buffers Info on first buffer Work through all the source For each word, look at it to see if it has a tag. Default is compilation. Green (compile) $40 Red (define) $00 Yellow (execute) $80 Make the final word and check it fits. it doesn't fit.print("\t\t{0:02x} {1:16} ${2:02x}:${3:04x} {4}".format(tag,word,currentPageNumber,currentPageAddress,bytesRemaining)) Store the word Add a trailing $80 in case it is the last. and write out | 967 | en | 0.537077 |
# -*- coding: utf-8 -*-
import os
from O365 import Account, Connection, FileSystemTokenBackend
from datetime import datetime as dt
from datetime import timedelta
from conf.conf import CONFIG as conf
from fritzhome import FritzBox
import logging
class Core:
    """
    Syncs FRITZ!Box radiator thermostats with heating periods from an Office
    365 calendar: thermostats named by current calendar entries are set to the
    comfort temperature, all others are cooled down to the low temperature.

    Fixes over the previous revision: the per-method docstrings used to sit
    BETWEEN methods as no-op class-level strings; they are now real docstrings
    inside the methods. ``get_con_obj`` also reuses ``get_credentials()``
    instead of duplicating the config lookups.
    """

    @staticmethod
    def get_credentials():
        """Return the O365 (client_id, client_secret) tuple from the config."""
        return conf['OFFICE_CLIENT_ID'], conf['OFFICE_CLIENT_SECRET']

    @staticmethod
    def get_account():
        """Return an O365 Account built from the configured credentials."""
        return Account(credentials=Core.get_credentials())

    @staticmethod
    def get_scopes():
        """Return the Microsoft Graph OAuth scopes this application requests."""
        return ['offline_access',
                'https://graph.microsoft.com/Mail.ReadWrite',
                'https://graph.microsoft.com/Mail.Send',
                'https://graph.microsoft.com/Calendars.Read',
                'https://graph.microsoft.com/Files.ReadWrite',
                'https://graph.microsoft.com/User.Read']

    @staticmethod
    def get_con_obj():
        """Return an O365 Connection backed by the on-disk token file."""
        credentials = Core.get_credentials()
        scopes = Core.get_scopes()
        return Connection(credentials, scopes=scopes,
                          token_backend=FileSystemTokenBackend(token_filename='o365_token.txt'))

    def run(self):
        """Perform one sync cycle: read the calendar and adjust the thermostats."""
        con = Core.get_con_obj()
        if not con.token_backend.check_token():
            logging.error("You have to generate your token file with python -m radiator_fritz_o365_sync.gen_token first!")
            exit(1)
        con.refresh_token()

        heating = self.query_for_heating_periods()

        # Cool down if no heating entries found in calendar
        if len(heating) == 0:
            logging.debug('No heating entry in calendar found. Cooling down all thermostats if they are heating. ')
            self.cool_down_all()

        # For each heating entry in calendar heat up
        subjects = []
        for heat in heating:
            logging.info('Found entry "%s"', heat.subject)
            self.heat_up(heat.subject)
            subjects.append(heat.subject)

        # Cool down thermostats if they are not heated
        self.cool_down_unless(subjects)

        # Auto reset only when no entry is active, so it cannot fight a period
        if len(heating) == 0:
            self.auto_reset()

        # Every night refresh the token (keeps the offline token alive)
        if dt.now().time().strftime('%H:%M') == '00:00':
            con.refresh_token()

    def get_thermostats(self):
        """Return all FRITZ!Box actors that have a heating controller."""
        if conf['FRITZ_TLS']:
            fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'],
                                use_tls=conf['FRITZ_TLS'], tls_cert_path='conf/fritz.crt')
        else:
            fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'])
        fritzbox.login()
        actors = fritzbox.get_actors()
        thermostats = []
        for actor in actors:
            if actor.has_heating_controller:
                thermostats.append(actor)
        return thermostats

    def thermostat_heatup(self, actor):
        """Set one thermostat to comfort temperature if it is currently on low."""
        if actor.target_temperature == conf['HEATING_LOW_TEMP']:
            logging.info('Heating up %s ...', actor.name)
            actor.set_temperature(conf['HEATING_COMFORT_TEMP'])

    def heat_up(self, sub):
        """
        Set thermostats whose name matches ``sub`` — or all thermostats when
        ``sub`` is the configured wildcard subject — to comfort temperature.
        """
        thermostats = self.get_thermostats()
        for thermostat in thermostats:
            if sub == conf['CALENDAR_HEAT_ALL_SUBJECT']:
                self.thermostat_heatup(thermostat)
            else:
                if thermostat.name == sub:
                    self.thermostat_heatup(thermostat)

    def cool_down_unless(self, unless):
        """Cool down every thermostat whose name is not in ``unless``."""
        # Nothing to do when the wildcard subject is active: everything heats.
        if conf['CALENDAR_HEAT_ALL_SUBJECT'] in unless:
            return
        thermostats = self.get_thermostats()
        for thermostat in thermostats:
            if thermostat.name not in unless:
                self.cool_down(thermostat)

    def cool_down_all(self):
        """Set all thermostats to LOW_TEMP if they are currently on COMFORT_TEMP."""
        thermostats = self.get_thermostats()
        for thermostat in thermostats:
            self.cool_down(thermostat)

    def cool_down(self, thermostat):
        """Set one thermostat to low temperature if it is on comfort temperature."""
        if thermostat.target_temperature == conf['HEATING_COMFORT_TEMP']:
            logging.info('Cooling down %s ...', thermostat.name)
            thermostat.set_temperature(conf['HEATING_LOW_TEMP'])

    def auto_reset(self):
        """
        If the temperature was changed manually (via app or on the thermostat
        itself), reset all thermostats to HEATING_LOW_TEMP at the configured
        time of day.
        """
        if conf['HEATING_AUTO_RESET']:
            current_time = dt.now().time()
            target_time = conf['HEATING_AUTO_RESET_TIME']
            if current_time.strftime('%H:%M') == target_time:
                logging.info('Resetting temperature on all thermostats now!')
                thermostats = self.get_thermostats()
                for thermostat in thermostats:
                    thermostat.set_temperature(conf['HEATING_LOW_TEMP'])

    def query_for_heating_periods(self):
        """
        Return calendar events whose start is >= now and whose end is
        <= now + 5 minutes, from the configured calendar.
        """
        account = Core.get_account()
        schedule = account.schedule()
        calendar = schedule.get_calendar(calendar_name=conf['CALENDAR_NAME'])
        if calendar is None:
            logging.error("Calendar with name '%s' does not exist!", conf['CALENDAR_NAME'])
            exit(1)
        q = calendar.new_query('start').greater_equal(dt.now())
        q.chain('and').on_attribute('end').less_equal(dt.now() + timedelta(minutes=5))
        return list(calendar.get_events(query=q))
# Script entry point: run a single sync cycle.
if __name__ == "__main__":
    app = Core()
    app.run()
| radiator_fritz_o365_sync/core.py | 5,848 | -*- coding: utf-8 -*- Cool down if no heating entries found in calendar For each heating entry in calendar heat up Cool down thermostats if they are not heated auto reset Every night refresh the token and cool down to reset manual changes on thermostats return if wildcard is found in subjects | 293 | en | 0.81157 |
from wagtailstreamforms.models import Form
def get_form_instance_from_request(request):
    """Return the Form referenced by the POSTed ``form_id``, or None.

    None is returned when the id is missing, non-numeric, or does not match
    an existing Form.
    """
    raw_id = request.POST.get("form_id")
    if not raw_id or not raw_id.isdigit():
        return None
    try:
        return Form.objects.get(pk=int(raw_id))
    except Form.DoesNotExist:
        return None
| wagtailstreamforms/utils/requests.py | 353 | Get the form class from the request. | 36 | en | 0.931851 |
# define BipIdb and some helper functions for easier scripting (at the end).
import ida_kernwin
import idaapi
import idc
class BipIdb(object):
    """
    Static helpers for querying the database (IDB) currently loaded in IDA.
    Everything here is stateless, so the class only exposes static methods.
    """

    @staticmethod
    def ptr_size():
        """
        Number of bits in a pointer for the loaded binary.

        :rtype: int
        """
        info = idaapi.get_inf_structure()
        if info.is_64bit():
            return 64
        return 32 if info.is_32bit() else 16

    @staticmethod
    def min_ea():
        """
        Lowest mapped address of the IDB.
        """
        return idc.get_inf_attr(idc.INF_MIN_EA)

    @staticmethod
    def max_ea():
        """
        Highest mapped address of the IDB.
        """
        return idc.get_inf_attr(idc.INF_MAX_EA)

    @staticmethod
    def image_base():
        """
        Base address of the image loaded in the IDB. Not the same thing as
        :meth:`~BipIdb.min_ea`, which is the lowest *mapped* address.
        """
        return idaapi.get_imagebase()

    @staticmethod
    def current_addr():
        """
        Address currently selected on screen.

        :return: The current address selected.
        """
        return ida_kernwin.get_screen_ea()

    @staticmethod
    def relea(addr):
        """
        Offset of an absolute address from the IDA image base, i.e.
        ``ADDR - IMGBASE``. Inverse of :func:`absea`.

        :param int addr: The absolute address to translate.
        :return: The offset from image base corresponding to ``addr``.
        :rtype: int
        """
        return addr - idaapi.get_imagebase()

    @staticmethod
    def absea(offset):
        """
        Absolute address for an offset from the image base, i.e.
        ``OFFSET + IMGBASE``. Inverse of :func:`relea`.

        :param int offset: The offset from the beginning of the image base
            to translate.
        :return: The absolute address corresponding to the offset.
        :rtype: int
        """
        return idaapi.get_imagebase() + offset
def min_ea():
    """
    Module-level shortcut for :meth:`BipIdb.min_ea`.

    :return: The lowest mapped address of the IDB.
    """
    return BipIdb.min_ea()
def max_ea():
    """
    Module-level shortcut for :meth:`BipIdb.max_ea`.

    :return: The highest mapped address of the IDB.
    """
    return BipIdb.max_ea()
def Here():
    """
    Module-level shortcut for :meth:`BipIdb.current_addr`.

    :return: The address currently selected on screen.
    """
    return BipIdb.current_addr()
| bip/base/bipidb.py | 2,975 | Class for representing the idb loaded by IDA, this has the goal to
provide access to things specific to the IDB.
Currently this contain only static methods.
Return current screen address.
:return: The current address.
Calculate the absolute address from an offset of the image base.
The calcul done is ``OFFSET + IMGBASE`` .
The opposite of this function is :func:`relea`.
:param int offset: The offset from the beginning of the image base
to translate.
:return: The absolute address corresponding to the offset.
:rtype: int
Return current screen address.
:return: The current address selected.
Return the base address of the image loaded in the IDB.
This is different from :meth:`~BipIdb.min_ea` which is the lowest
*mapped* address.
Return the highest mapped address of the IDB.
Wrapper on :meth:`BipIdb.max_ea`.
Return the highest mapped address of the IDB.
Return the lowest mapped address of the IDB.
Wrapper on :meth:`BipIdb.min_ea`.
Return the lowest mapped address of the IDB.
Return the number of bits in a pointer.
:rtype: int
Calculate the relative address compare to the IDA image base.
The calcul done is ``ADDR - IMGBASE``.
The opposite of this function is :func:`absea`.
:param int addr: The absolute address to translate.
:return: The offset from image base corresponding to ``addr``.
:rtype: int
define BipIdb and some helper functions for easier scripting (at the end). | 1,402 | en | 0.806107 |
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Modifications copyright (c) 2021 DocYard Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
from .self_attention import WrapEncoder, WrapEncoderForFeature
gradient_clip = 10
from functools import partial
class PVAM(nn.Module):
    """
    Parallel Visual Attention Module (PVAM) of the SRN text recognizer.

    Flattens the conv feature map, runs it through a transformer encoder, then
    scores every (reading slot, spatial position) pair with additive attention
    to produce one aligned feature vector per character position.

    Fix over the previous revision: ``F.tanh`` (deprecated in PyTorch) is
    replaced by ``torch.tanh``, which computes identical values.
    """

    def __init__(
        self,
        in_channels,
        char_num,
        max_text_length,
        num_heads,
        num_encoder_tus,
        hidden_dims,
    ):
        super(PVAM, self).__init__()
        self.char_num = char_num
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.hidden_dims = hidden_dims
        # Transformer encoder over the flattened visual features.
        t = 256  # maximum number of spatial positions (h * w) fed to the encoder
        self.wrap_encoder_for_feature = WrapEncoderForFeature(
            src_vocab_size=1,
            max_length=t,
            n_layer=self.num_encoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        # PVAM attention layers.
        # NOTE(review): flatten0/flatten1 are never used in forward(); they are
        # kept (parameter-free) so the module structure stays unchanged.
        self.flatten0 = nn.Flatten(start_dim=0, end_dim=1)
        self.fc0 = nn.Linear(
            in_features=in_channels,
            out_features=in_channels,
        )
        # Embedding of the character reading positions (0 .. max_length - 1).
        self.emb = nn.Embedding(
            num_embeddings=self.max_length, embedding_dim=in_channels
        )
        self.flatten1 = nn.Flatten(start_dim=0, end_dim=2)
        self.fc1 = nn.Linear(
            in_features=in_channels, out_features=1, bias=False
        )

    def forward(self, inputs, encoder_word_pos, gsrm_word_pos):
        """
        :param inputs: conv feature map of shape (b, c, h, w)
        :param encoder_word_pos: positional ids for the h*w visual positions
        :param gsrm_word_pos: positional ids for the max_length reading slots
        :return: aligned features of shape (b, max_length, c)
        """
        b, c, h, w = inputs.shape
        conv_features = torch.reshape(inputs, shape=(-1, c, h * w))
        conv_features = conv_features.permute(0, 2, 1)

        # Transformer encoder over the (b, h*w, c) sequence.
        b, t, c = conv_features.shape
        enc_inputs = [conv_features, encoder_word_pos, None]
        word_features = self.wrap_encoder_for_feature(enc_inputs)

        # PVAM additive attention over all (reading slot, position) pairs.
        b, t, c = word_features.shape
        word_features = self.fc0(word_features)
        word_features_ = torch.reshape(word_features, (-1, 1, t, c))
        word_features_ = torch.tile(word_features_, (1, self.max_length, 1, 1))
        word_pos_feature = self.emb(gsrm_word_pos)
        word_pos_feature_ = torch.reshape(
            word_pos_feature, (-1, self.max_length, 1, c)
        )
        word_pos_feature_ = torch.tile(word_pos_feature_, (1, 1, t, 1))
        y = word_pos_feature_ + word_features_
        # F.tanh is deprecated; torch.tanh is the drop-in equivalent.
        y = torch.tanh(y)
        attention_weight = self.fc1(y)
        attention_weight = torch.reshape(
            attention_weight, shape=(-1, self.max_length, t)
        )
        attention_weight = F.softmax(attention_weight, dim=-1)
        pvam_features = torch.bmm(
            attention_weight, word_features
        )  # [b, max_length, c]
        return pvam_features
class GSRM(nn.Module):
    """
    Global Semantic Reasoning Module (GSRM) of the SRN text recognizer.

    Projects the aligned visual features to per-position character logits,
    then reasons over the predicted character ids with two transformer
    streams (forward and backward) to produce refined semantic features and
    logits tied to the embedding matrix.

    Fix over the previous revision: ``F.softmax(word_out)`` relied on the
    deprecated implicit-dim behaviour; ``dim=1`` is now passed explicitly,
    which is exactly what the legacy resolution picks for this 2-D input.
    """

    def __init__(
        self,
        in_channels,
        char_num,
        max_text_length,
        num_heads,
        num_encoder_tus,
        num_decoder_tus,
        hidden_dims,
    ):
        super(GSRM, self).__init__()
        self.char_num = char_num
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.num_decoder_TUs = num_decoder_tus
        self.hidden_dims = hidden_dims
        # Visual-to-semantic projection: per-position character logits.
        self.fc0 = nn.Linear(
            in_features=in_channels, out_features=self.char_num
        )
        # Forward reasoning stream.
        self.wrap_encoder0 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        # Backward reasoning stream.
        self.wrap_encoder1 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        # Output projection tied to the forward stream's embedding weight.
        # (An earlier lambda referenced a non-existent 'weightk' attribute;
        # this partial uses the correct embedding weight.)
        self.mul = partial(
            self.f, self.wrap_encoder0.prepare_decoder.emb0.weight
        )

    @staticmethod
    def f(w, x):
        """Project features ``x`` onto the (tied) embedding matrix ``w``."""
        return torch.matmul(x, w.transpose(-2, -1))

    def forward(
        self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2
    ):
        """
        :param inputs: PVAM features of shape (b, max_length, c)
        :param gsrm_word_pos: positional ids for the reading slots
        :param gsrm_slf_attn_bias1: attention bias for the forward stream
        :param gsrm_slf_attn_bias2: attention bias for the backward stream
        :return: (gsrm_features, word_out logits, gsrm_out logits)
        """
        # ===== GSRM visual-to-semantic embedding block =====
        b, t, c = inputs.shape
        pvam_features = torch.reshape(inputs, (-1, c))
        word_out = self.fc0(pvam_features)
        # Explicit dim=1 (softmax over the char_num axis of this 2-D tensor);
        # argmax is unchanged, the explicit dim only silences the deprecation.
        word_ids = torch.argmax(F.softmax(word_out, dim=1), dim=1)
        word_ids = torch.reshape(word_ids, shape=(-1, t, 1))

        # ===== GSRM semantic reasoning block =====
        # Bi-transformer reasoning: gsrm_feature1 is the forward stream,
        # gsrm_feature2 the backward one.
        pad_idx = self.char_num

        # Forward stream input: ids shifted right by one, padded with pad_idx.
        word1 = word_ids.float()
        pad1 = nn.ConstantPad1d((1, 0), value=1.0 * pad_idx)
        word1 = pad1(word1.permute(0, 2, 1)).permute(0, 2, 1)
        word1 = word1.long()
        word1 = word1[:, :-1, :]
        word2 = word_ids

        enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1]
        enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2]
        gsrm_feature1 = self.wrap_encoder0(enc_inputs_1)
        gsrm_feature2 = self.wrap_encoder1(enc_inputs_2)

        # Backward stream output: shifted left by one position, zero padded.
        pad = nn.ConstantPad1d((0, 1), value=0.0)
        gsrm_feature2 = pad(gsrm_feature2.permute(0, 2, 1)).permute(0, 2, 1)
        gsrm_feature2 = gsrm_feature2[
            :,
            1:,
        ]
        gsrm_features = gsrm_feature1 + gsrm_feature2

        # Tied-embedding projection back to character logits.
        gsrm_out = self.mul(gsrm_features)

        b, t, c = gsrm_out.shape
        gsrm_out = torch.reshape(gsrm_out, (-1, c))

        return gsrm_features, word_out, gsrm_out
class VSFD(nn.Module):
def __init__(self, in_channels=512, pvam_ch=512, char_num=38):
super(VSFD, self).__init__()
self.char_num = char_num
self.fc0 = nn.Linear(in_features=in_channels * 2, out_features=pvam_ch)
self.fc1 = nn.Linear(in_features=pvam_ch, out_features=self.char_num)
def forward(self, pvam_feature, gsrm_feature):
b, t, c1 = pvam_feature.shape
b, t, c2 = gsrm_feature.shape
combine_feature_ = torch.cat([pvam_feature, gsrm_feature], dim=2)
img_comb_feature_ = torch.reshape(
combine_feature_, shape=(-1, c1 + c2)
)
img_comb_feature_map = self.fc0(img_comb_feature_)
img_comb_feature_map = torch.sigmoid(img_comb_feature_map)
img_comb_feature_map = torch.reshape(
img_comb_feature_map, shape=(-1, t, c1)
)
combine_feature = (
img_comb_feature_map * pvam_feature
+ (1.0 - img_comb_feature_map) * gsrm_feature
)
img_comb_feature = torch.reshape(combine_feature, shape=(-1, c1))
out = self.fc1(img_comb_feature)
return out
class SRNHead(nn.Module):
def __init__(
self,
in_channels,
out_channels,
max_text_length,
num_heads,
num_encoder_TUs,
num_decoder_TUs,
hidden_dims,
**kwargs
):
super(SRNHead, self).__init__()
self.char_num = out_channels
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_TUs
self.num_decoder_TUs = num_decoder_TUs
self.hidden_dims = hidden_dims
self.pvam = PVAM(
in_channels=in_channels,
char_num=self.char_num,
max_text_length=self.max_length,
num_heads=self.num_heads,
num_encoder_tus=self.num_encoder_TUs,
hidden_dims=self.hidden_dims,
)
self.gsrm = GSRM(
in_channels=in_channels,
char_num=self.char_num,
max_text_length=self.max_length,
num_heads=self.num_heads,
num_encoder_tus=self.num_encoder_TUs,
num_decoder_tus=self.num_decoder_TUs,
hidden_dims=self.hidden_dims,
)
self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num)
self.gsrm.wrap_encoder1.prepare_decoder.emb0 = (
self.gsrm.wrap_encoder0.prepare_decoder.emb0
)
def forward(self, inputs, others):
encoder_word_pos = others[0]
gsrm_word_pos = others[1]
gsrm_slf_attn_bias1 = others[2]
gsrm_slf_attn_bias2 = others[3]
pvam_feature = self.pvam(inputs, encoder_word_pos, gsrm_word_pos)
gsrm_feature, word_out, gsrm_out = self.gsrm(
pvam_feature,
gsrm_word_pos,
gsrm_slf_attn_bias1,
gsrm_slf_attn_bias2,
)
final_out = self.vsfd(pvam_feature, gsrm_feature)
if not self.training:
final_out = F.softmax(final_out, dim=1)
_, decoded_out = torch.topk(final_out, k=1)
predicts = OrderedDict(
[
("predict", final_out),
("pvam_feature", pvam_feature),
("decoded_out", decoded_out),
("word_out", word_out),
("gsrm_out", gsrm_out),
]
)
return predicts
| ucr/core/architecture/head/rec_srn_head.py | 11,162 | copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. Modifications copyright (c) 2021 DocYard Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Transformer encoder PVAM transformer encoder pvam [b, max_length, c] self.mul = lambda x: torch.matmul(x, (self.wrap_encoder0.prepare_decoder.emb0.weightk).transpose(-2, -1)) ! This is an error here, weightk is wrong correct it by visualizing torch model_dict ===== GSRM Visual-to-semantic embedding block ===== ===== GSRM Semantic reasoning block ===== | 1,042 | en | 0.796184 |
# coding: utf-8
"""
FINBOURNE Insights API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.0.238
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from finbourne_insights.configuration import Configuration
class AuditProcess(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'name': 'str',
'run_id': 'str',
'start_time': 'datetime',
'end_time': 'datetime',
'succeeded': 'bool'
}
attribute_map = {
'name': 'name',
'run_id': 'runId',
'start_time': 'startTime',
'end_time': 'endTime',
'succeeded': 'succeeded'
}
required_map = {
'name': 'required',
'run_id': 'required',
'start_time': 'required',
'end_time': 'optional',
'succeeded': 'optional'
}
def __init__(self, name=None, run_id=None, start_time=None, end_time=None, succeeded=None, local_vars_configuration=None): # noqa: E501
"""AuditProcess - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param run_id: (required)
:type run_id: str
:param start_time: (required)
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param succeeded:
:type succeeded: bool
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._run_id = None
self._start_time = None
self._end_time = None
self._succeeded = None
self.discriminator = None
self.name = name
self.run_id = run_id
self.start_time = start_time
self.end_time = end_time
self.succeeded = succeeded
@property
def name(self):
"""Gets the name of this AuditProcess. # noqa: E501
:return: The name of this AuditProcess. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AuditProcess.
:param name: The name of this AuditProcess. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) > 128):
raise ValueError("Invalid value for `name`, length must be less than or equal to `128`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 0):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
@property
def run_id(self):
"""Gets the run_id of this AuditProcess. # noqa: E501
:return: The run_id of this AuditProcess. # noqa: E501
:rtype: str
"""
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Sets the run_id of this AuditProcess.
:param run_id: The run_id of this AuditProcess. # noqa: E501
:type run_id: str
"""
if self.local_vars_configuration.client_side_validation and run_id is None: # noqa: E501
raise ValueError("Invalid value for `run_id`, must not be `None`") # noqa: E501
self._run_id = run_id
@property
def start_time(self):
"""Gets the start_time of this AuditProcess. # noqa: E501
:return: The start_time of this AuditProcess. # noqa: E501
:rtype: datetime
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this AuditProcess.
:param start_time: The start_time of this AuditProcess. # noqa: E501
:type start_time: datetime
"""
if self.local_vars_configuration.client_side_validation and start_time is None: # noqa: E501
raise ValueError("Invalid value for `start_time`, must not be `None`") # noqa: E501
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this AuditProcess. # noqa: E501
:return: The end_time of this AuditProcess. # noqa: E501
:rtype: datetime
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this AuditProcess.
:param end_time: The end_time of this AuditProcess. # noqa: E501
:type end_time: datetime
"""
self._end_time = end_time
@property
def succeeded(self):
"""Gets the succeeded of this AuditProcess. # noqa: E501
:return: The succeeded of this AuditProcess. # noqa: E501
:rtype: bool
"""
return self._succeeded
@succeeded.setter
def succeeded(self, succeeded):
"""Sets the succeeded of this AuditProcess.
:param succeeded: The succeeded of this AuditProcess. # noqa: E501
:type succeeded: bool
"""
self._succeeded = succeeded
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuditProcess):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AuditProcess):
return True
return self.to_dict() != other.to_dict()
| sdk/finbourne_insights/models/audit_process.py | 7,817 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Returns true if both objects are equal
AuditProcess - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param run_id: (required)
:type run_id: str
:param start_time: (required)
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param succeeded:
:type succeeded: bool
Returns true if both objects are not equal
For `print` and `pprint`
Gets the end_time of this AuditProcess. # noqa: E501
:return: The end_time of this AuditProcess. # noqa: E501
:rtype: datetime
Sets the end_time of this AuditProcess.
:param end_time: The end_time of this AuditProcess. # noqa: E501
:type end_time: datetime
Gets the name of this AuditProcess. # noqa: E501
:return: The name of this AuditProcess. # noqa: E501
:rtype: str
Sets the name of this AuditProcess.
:param name: The name of this AuditProcess. # noqa: E501
:type name: str
Gets the run_id of this AuditProcess. # noqa: E501
:return: The run_id of this AuditProcess. # noqa: E501
:rtype: str
Sets the run_id of this AuditProcess.
:param run_id: The run_id of this AuditProcess. # noqa: E501
:type run_id: str
Gets the start_time of this AuditProcess. # noqa: E501
:return: The start_time of this AuditProcess. # noqa: E501
:rtype: datetime
Sets the start_time of this AuditProcess.
:param start_time: The start_time of this AuditProcess. # noqa: E501
:type start_time: datetime
Gets the succeeded of this AuditProcess. # noqa: E501
:return: The succeeded of this AuditProcess. # noqa: E501
:rtype: bool
Sets the succeeded of this AuditProcess.
:param succeeded: The succeeded of this AuditProcess. # noqa: E501
:type succeeded: bool
Returns the model properties as a dict
Returns the string representation of the model
FINBOURNE Insights API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.0.238
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 | 2,179 | en | 0.671187 |
#! /usr/bin/env python
# PuLP : Python LP Modeler
# Version 1.5.1
# Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org)
# Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz)
# $Id: pulp.py 1791 2008-04-23 22:54:34Z smit023 $
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
PuLP is an LP modeler written in python. PuLP can generate MPS or LP files
and call GLPK[1], COIN CLP/CBC[2], CPLEX[3], and GUROBI[4] to solve linear
problems.
See the examples directory for examples.
PuLP requires Python >= 2.5.
The examples require at least a solver in your PATH or a shared library file.
Documentation is found on https://www.coin-or.org/PuLP/.
A comprehensive wiki can be found at https://www.coin-or.org/PuLP/
Use LpVariable() to create new variables. To create a variable 0 <= x <= 3
>>> x = LpVariable("x", 0, 3)
To create a variable 0 <= y <= 1
>>> y = LpVariable("y", 0, 1)
Use LpProblem() to create new problems. Create "myProblem"
>>> prob = LpProblem("myProblem", LpMinimize)
Combine variables to create expressions and constraints and add them to the
problem.
>>> prob += x + y <= 2
If you add an expression (not a constraint), it will
become the objective.
>>> prob += -4*x + y
Choose a solver and solve the problem. ex:
>>> status = prob.solve(GLPK(msg = 0))
Display the status of the solution
>>> LpStatus[status]
'Optimal'
You can get the value of the variables using value(). ex:
>>> value(x)
2.0
Exported Classes:
- LpProblem -- Container class for a Linear programming problem
- LpVariable -- Variables that are added to constraints in the LP
- LpConstraint -- A constraint of the general form
a1x1+a2x2 ...anxn (<=, =, >=) b
- LpConstraintVar -- Used to construct a column of the model in column-wise
modelling
Exported Functions:
- value() -- Finds the value of a variable or expression
- lpSum() -- given a list of the form [a1*x1, a2x2, ..., anxn] will construct
a linear expression to be used as a constraint or variable
- lpDot() --given two lists of the form [a1, a2, ..., an] and
           [ x1, x2, ..., xn] will construct a linear expression to be used
           as a constraint or variable
Comments, bug reports, patches and suggestions are welcome.
pulp-or-discuss@googlegroups.com
References:
[1] http://www.gnu.org/software/glpk/glpk.html
[2] http://www.coin-or.org/
[3] http://www.cplex.com/
[4] http://www.gurobi.com/
"""
import types
import string
import itertools
from constants import *
from solvers import *
from types import GeneratorType
# Expression/constraint classes below subclass _DICT_TYPE; an ordered dict
# keeps terms in insertion order so generated LP/MPS files are stable.
_DICT_TYPE = dict
if sys.platform not in ['cli']:
    # iron python does not like an OrderedDict
    try:
        # third-party odict backport, used when nothing better is available
        from odict import OrderedDict
        _DICT_TYPE = OrderedDict
    except ImportError:
        pass
    try:
        #python 2.7 or 3.1
        # the stdlib OrderedDict wins over odict when both import cleanly
        from collections import OrderedDict
        _DICT_TYPE = OrderedDict
    except ImportError:
        pass
def setConfigInformation(**keywords):
    """Persist the given keyword/value pairs into the PuLP config file.

    Only the [locations] section is touched; each keyword argument becomes
    (or replaces) an option of that name in that section.
    """
    #TODO: extend if we ever add another section in the config file
    # Load whatever configuration already exists on disk.
    parser = ConfigParser.SafeConfigParser()
    parser.read(config_filename)
    # Insert/overwrite each requested option in the [locations] section.
    for option, value in keywords.items():
        parser.set("locations", option, value)
    # Flush the merged configuration back to the same file.
    out_file = open(config_filename, "w")
    parser.write(out_file)
    out_file.close()
# Default solver selection
# Pick the first solver whose backend binary is actually usable on this
# machine, in order of preference: bundled CBC, then GLPK, then COIN CLP/CBC.
if PULP_CBC_CMD().available():
    LpSolverDefault = PULP_CBC_CMD()
elif GLPK_CMD().available():
    LpSolverDefault = GLPK_CMD()
elif COIN_CMD().available():
    LpSolverDefault = COIN_CMD()
else:
    # No usable solver was found; callers of LpProblem.solve() must
    # supply one explicitly.
    LpSolverDefault = None
class LpElement(object):
    """Base class for LpVariable and LpConstraintVar

    Provides name sanitisation, identity-based hashing, and the operator
    overloads that promote an element into an :class:`LpAffineExpression`.
    Note that ``==``, ``<=`` and ``>=`` build constraints/expressions
    rather than returning booleans.
    """
    #to remove illegal characters from the names
    trans = string.maketrans("-+[] ->/","________")
    def setName(self,name):
        # Sanitise the name so it is safe to emit in LP/MPS files;
        # a false-y name is normalised to None.
        if name:
            self.__name = str(name).translate(self.trans)
        else:
            self.__name = None
    def getName(self):
        return self.__name
    name = property(fget = getName,fset = setName)
    def __init__(self, name):
        self.name = name
        # self.hash MUST be different for each variable
        # else dict() will call the comparison operators that are overloaded
        self.hash = id(self)
        self.modified = True
    def __hash__(self):
        return self.hash
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.name
    def __neg__(self):
        # Promote to an expression so the result stays composable.
        return - LpAffineExpression(self)
    def __pos__(self):
        return self
    def __nonzero__(self):
        # An element is always truthy (Python 2 truth protocol).
        return 1
    def __add__(self, other):
        return LpAffineExpression(self) + other
    def __radd__(self, other):
        return LpAffineExpression(self) + other
    def __sub__(self, other):
        return LpAffineExpression(self) - other
    def __rsub__(self, other):
        return other - LpAffineExpression(self)
    def __mul__(self, other):
        return LpAffineExpression(self) * other
    def __rmul__(self, other):
        return LpAffineExpression(self) * other
    def __div__(self, other):
        return LpAffineExpression(self)/other
    def __rdiv__(self, other):
        # Dividing by a variable would be non-linear.
        raise TypeError, "Expressions cannot be divided by a variable"
    def __le__(self, other):
        # Builds an LpConstraint, not a boolean.
        return LpAffineExpression(self) <= other
    def __ge__(self, other):
        return LpAffineExpression(self) >= other
    def __eq__(self, other):
        # Builds an LpConstraint, not a boolean; __ne__ below is the only
        # "real" comparison these objects support.
        return LpAffineExpression(self) == other
    def __ne__(self, other):
        # NOTE(review): name comparison uses identity ("is not"), not
        # equality -- distinct-but-equal name strings compare as unequal.
        if isinstance(other, LpVariable):
            return self.name is not other.name
        elif isinstance(other, LpAffineExpression):
            if other.isAtomic():
                return self is not other.atom()
            else:
                return 1
        else:
            return 1
class LpVariable(LpElement):
    """
    This class models an LP Variable with the specified associated parameters

    :param name: The name of the variable used in the output .lp file
    :param lowbound: The lower bound on this variable's range.
        Default is negative infinity
    :param upBound: The upper bound on this variable's range.
        Default is positive infinity
    :param cat: The category this variable is in, Integer, Binary or
        Continuous(default)
    :param e: Used for column based modelling: relates to the variable's
        existence in the objective function and constraints
    """
    def __init__(self, name, lowBound = None, upBound = None,
          cat = LpContinuous, e = None):
        LpElement.__init__(self,name)
        self.lowBound = lowBound
        self.upBound = upBound
        self.cat = cat
        self.varValue = None
        self.init = 0
        #code to add a variable to constraints for column based
        # modelling
        if cat == LpBinary:
            # Binary is represented internally as an Integer in [0, 1].
            self.lowBound = 0
            self.upBound = 1
            self.cat = LpInteger
        if e:
            self.add_expression(e)
    def add_expression(self,e):
        # Column-wise modelling: e maps LpConstraintVars to coefficients.
        self.expression = e
        self.addVariableToConstraints(e)
    @classmethod
    def matrix(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
            indexStart = []):
        """Return a (possibly nested) list of LpVariables, one per element
        of the cartesian product of the index sets in ``indexs``.

        NOTE: the mutable default for ``indexStart`` is safe here because
        it is never mutated (only ``indexStart + [i]`` copies are made).
        """
        if not isinstance(indexs, tuple): indexs = (indexs,)
        # Grow the name template with one "%s" placeholder per index level.
        if "%" not in name: name += "_%s" * len(indexs)
        index = indexs[0]
        indexs = indexs[1:]
        if len(indexs) == 0:
            # Innermost level: create the actual variables.
            return [
                LpVariable(name % tuple(indexStart + [i]), lowBound, upBound, cat)
                for i in index
            ]
        else:
            # Recurse, accumulating the indices chosen so far.
            return [
                LpVariable.matrix(name, indexs, lowBound, upBound, cat, indexStart + [i])
                for i in index
            ]
    @classmethod
    def dicts(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous,
            indexStart = []):
        """
        Creates a dictionary of LP variables

        This function creates a dictionary of LP Variables with the specified
        associated parameters.

        :param name: The prefix to the name of each LP variable created
        :param indexs: A list of strings of the keys to the dictionary of LP
            variables, and the main part of the variable name itself
        :param lowbound: The lower bound on these variables' range. Default is
            negative infinity
        :param upBound: The upper bound on these variables' range. Default is
            positive infinity
        :param cat: The category these variables are in, Integer or
            Continuous(default)
        :return: A dictionary of LP Variables
        """
        if not isinstance(indexs, tuple): indexs = (indexs,)
        if "%" not in name: name += "_%s" * len(indexs)
        index = indexs[0]
        indexs = indexs[1:]
        d = {}
        if len(indexs) == 0:
            # Innermost level: one variable per key.
            for i in index:
                d[i] = LpVariable(name % tuple(indexStart + [str(i)]), lowBound, upBound, cat)
        else:
            # Recurse to build nested dictionaries.
            for i in index:
                d[i] = LpVariable.dicts(name, indexs, lowBound, upBound, cat, indexStart + [i])
        return d
    @classmethod
    def dict(cls, name, indexs, lowBound = None, upBound = None, cat = LpContinuous):
        """Like :meth:`dicts` but returns one flat dictionary keyed by
        tuples from the cartesian product of the index sets."""
        if not isinstance(indexs, tuple): indexs = (indexs,)
        if "%" not in name: name += "_%s" * len(indexs)
        lists = indexs
        if len(indexs)>1:
            # Cartesian product
            # Built right-to-left: res holds partial index suffixes.
            res = []
            while len(lists):
                first = lists[-1]
                nres = []
                if res:
                    if first:
                        for f in first:
                            nres.extend([[f]+r for r in res])
                    else:
                        nres = res
                    res = nres
                else:
                    res = [[f] for f in first]
                lists = lists[:-1]
            index = [tuple(r) for r in res]
        elif len(indexs) == 1:
            index = indexs[0]
        else:
            return {}
        d = dict((i, cls(name % i, lowBound, upBound, cat)) for i in index)
        return d
    def getLb(self):
        return self.lowBound
    def getUb(self):
        return self.upBound
    def bounds(self, low, up):
        """Reset both bounds at once."""
        self.lowBound = low
        self.upBound = up
    def positive(self):
        """Constrain the variable to be non-negative and unbounded above."""
        self.lowBound = 0
        self.upBound = None
    def value(self):
        # The value assigned by the most recent solve (None before solving).
        return self.varValue
    def round(self, epsInt = 1e-5, eps = 1e-7):
        """Snap varValue onto a bound or an integer that it only just
        violates, within the given tolerances (mutates varValue)."""
        if self.varValue is not None:
            if self.upBound != None and self.varValue > self.upBound and self.varValue <= self.upBound + eps:
                self.varValue = self.upBound
            elif self.lowBound != None and self.varValue < self.lowBound and self.varValue >= self.lowBound - eps:
                self.varValue = self.lowBound
            if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) <= epsInt:
                self.varValue = round(self.varValue)
    def roundedValue(self, eps = 1e-5):
        # Integer variables within eps of an integer report that integer;
        # everything else reports the raw solver value.
        if self.cat == LpInteger and self.varValue != None \
            and abs(self.varValue - round(self.varValue)) <= eps:
            return round(self.varValue)
        else:
            return self.varValue
    def valueOrDefault(self):
        """Return varValue if set, otherwise the feasible value nearest 0."""
        if self.varValue != None:
            return self.varValue
        elif self.lowBound != None:
            if self.upBound != None:
                if 0 >= self.lowBound and 0 <= self.upBound:
                    return 0
                else:
                    if self.lowBound >= 0:
                        return self.lowBound
                    else:
                        return self.upBound
            else:
                if 0 >= self.lowBound:
                    return 0
                else:
                    return self.lowBound
        elif self.upBound != None:
            if 0 <= self.upBound:
                return 0
            else:
                return self.upBound
        else:
            return 0
    def valid(self, eps):
        """True when varValue is set and satisfies the bounds (and, for
        integer variables, integrality) to within eps."""
        if self.varValue == None: return False
        if self.upBound != None and self.varValue > self.upBound + eps:
            return False
        if self.lowBound != None and self.varValue < self.lowBound - eps:
            return False
        if self.cat == LpInteger and abs(round(self.varValue) - self.varValue) > eps:
            return False
        return True
    def infeasibilityGap(self, mip = 1):
        """Signed amount by which varValue violates its bounds (or, when
        ``mip`` is true, integrality); 0 when feasible.

        :raises ValueError: if the variable has no value yet.
        """
        if self.varValue == None: raise ValueError, "variable value is None"
        if self.upBound != None and self.varValue > self.upBound:
            return self.varValue - self.upBound
        if self.lowBound != None and self.varValue < self.lowBound:
            return self.varValue - self.lowBound
        if mip and self.cat == LpInteger and round(self.varValue) - self.varValue != 0:
            return round(self.varValue) - self.varValue
        return 0
    def isBinary(self):
        return self.cat == LpInteger and self.lowBound == 0 and self.upBound == 1
    def isInteger(self):
        return self.cat == LpInteger
    def isFree(self):
        return self.lowBound == None and self.upBound == None
    def isConstant(self):
        return self.lowBound != None and self.upBound == self.lowBound
    def isPositive(self):
        return self.lowBound == 0 and self.upBound == None
    def asCplexLpVariable(self):
        """Render this variable's bounds declaration in CPLEX LP format."""
        if self.isFree(): return self.name + " free"
        if self.isConstant(): return self.name + " = %.12g" % self.lowBound
        if self.lowBound == None:
            s= "-inf <= "
        # Note: XPRESS and CPLEX do not interpret integer variables without
        # explicit bounds
        elif (self.lowBound == 0 and self.cat == LpContinuous):
            s = ""
        else:
            s= "%.12g <= " % self.lowBound
        s += self.name
        if self.upBound != None:
            s+= " <= %.12g" % self.upBound
        return s
    def asCplexLpAffineExpression(self, name, constant = 1):
        # Delegate to the expression renderer after wrapping self as 1*self.
        return LpAffineExpression(self).asCplexLpAffineExpression(name, constant)
    def __ne__(self, other):
        # NOTE(review): like LpElement.__ne__, this compares names by
        # identity ("is not"), not equality.
        if isinstance(other, LpElement):
            return self.name is not other.name
        elif isinstance(other, LpAffineExpression):
            if other.isAtomic():
                return self is not other.atom()
            else:
                return 1
        else:
            return 1
    def addVariableToConstraints(self,e):
        """adds a variable to the constraints indicated by
        the LpConstraintVars in e
        """
        for constraint, coeff in e.items():
            constraint.addVariable(self,coeff)
    def setInitialValue(self,val):
        """sets the initial value of the Variable to val
        may or may not be supported by the solver
        """
        raise NotImplementedError
class LpAffineExpression(_DICT_TYPE):
"""
A linear combination of :class:`LpVariables<LpVariable>`.
Can be initialised with the following:
#. e = None: an empty Expression
#. e = dict: gives an expression with the values being the coefficients of the keys (order of terms is undetermined)
#. e = list or generator of 2-tuples: equivalent to dict.items()
#. e = LpElement: an expression of length 1 with the coefficient 1
#. e = other: the constant is initialised as e
Examples:
>>> f=LpAffineExpression(LpElement('x'))
>>> f
1*x + 0
>>> x_name = ['x_0', 'x_1', 'x_2']
>>> x = [LpVariable(x_name[i], lowBound = 0, upBound = 10) for i in range(3) ]
>>> c = LpAffineExpression([ (x[0],1), (x[1],-3), (x[2],4)])
>>> c
1*x_0 + -3*x_1 + 4*x_2 + 0
"""
#to remove illegal characters from the names
trans = string.maketrans("-+[] ","_____")
def setName(self,name):
if name:
self.__name = str(name).translate(self.trans)
else:
self.__name = None
def getName(self):
return self.__name
name = property(fget=getName, fset=setName)
def __init__(self, e = None, constant = 0, name = None):
self.name = name
#TODO remove isinstance usage
if e is None:
e = {}
if isinstance(e, LpAffineExpression):
# Will not copy the name
self.constant = e.constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, dict):
self.constant = constant
super(LpAffineExpression, self).__init__(e.items())
elif isinstance(e, list) or isinstance(e, GeneratorType):
self.constant = constant
super(LpAffineExpression, self).__init__(e)
elif isinstance(e,LpElement):
self.constant = 0
super(LpAffineExpression, self).__init__( [(e, 1)])
else:
self.constant = e
super(LpAffineExpression, self).__init__()
# Proxy functions for variables
def isAtomic(self):
return len(self) == 1 and self.constant == 0 and self.values()[0] == 1
def isNumericalConstant(self):
return len(self) == 0
def atom(self):
return self.keys()[0]
# Functions on expressions
def __nonzero__(self):
return float(self.constant) != 0 or len(self)
def value(self):
s = self.constant
for v,x in self.iteritems():
if v.varValue is None:
return None
s += v.varValue * x
return s
def valueOrDefault(self):
s = self.constant
for v,x in self.iteritems():
s += v.valueOrDefault() * x
return s
def addterm(self, key, value):
y = self.get(key, 0)
if y:
y += value
self[key] = y
else:
self[key] = value
def emptyCopy(self):
return LpAffineExpression()
def copy(self):
"""Make a copy of self except the name which is reset"""
# Will not copy the name
return LpAffineExpression(self)
def __str__(self, constant = 1):
s = ""
for v in self.sorted_keys():
val = self[v]
if val<0:
if s != "": s += " - "
else: s += "-"
val = -val
elif s != "": s += " + "
if val == 1: s += str(v)
else: s += str(val) + "*" + str(v)
if constant:
if s == "":
s = str(self.constant)
else:
if self.constant < 0: s += " - " + str(-self.constant)
elif self.constant > 0: s += " + " + str(self.constant)
elif s == "":
s = "0"
return s
def sorted_keys(self):
"""
returns the list of keys sorted by name
"""
result = [(v.name, v) for v in self.keys()]
result.sort()
result = [v for _, v in result]
return result
def __repr__(self):
l = [str(self[v]) + "*" + str(v)
for v in self.sorted_keys()]
l.append(str(self.constant))
s = " + ".join(l)
return s
@staticmethod
def _count_characters(line):
#counts the characters in a list of strings
return sum(len(t) for t in line)
def asCplexVariablesOnly(self, name):
"""
helper for asCplexLpAffineExpression
"""
result = []
line = ["%s:" % name]
notFirst = 0
variables = self.sorted_keys()
for v in variables:
val = self[v]
if val < 0:
sign = " -"
val = -val
elif notFirst:
sign = " +"
else:
sign = ""
notFirst = 1
if val == 1:
term = "%s %s" %(sign, v.name)
else:
term = "%s %.12g %s" % (sign, val, v.name)
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line = [term]
else:
line += [term]
return result, line
def asCplexLpAffineExpression(self, name, constant = 1):
"""
returns a string that represents the Affine Expression in lp format
"""
#refactored to use a list for speed in iron python
result, line = self.asCplexVariablesOnly(name)
if not self:
term = " %s" % self.constant
else:
term = ""
if constant:
if self.constant < 0:
term = " - %s" % (-self.constant)
elif self.constant > 0:
term = " + %s" % self.constant
if self._count_characters(line) + len(term) > LpCplexLPLineSize:
result += ["".join(line)]
line += [term]
else:
line += [term]
result += ["".join(line)]
result = "%s\n" % "\n".join(result)
return result
def addInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, 1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.addInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant += other.constant
for v,x in other.iteritems():
self.addterm(v, x)
elif isinstance(other,dict):
for e in other.itervalues():
self.addInPlace(e)
else:
self.constant += other
return self
def subInPlace(self, other):
if other is 0: return self
if other is None: return self
if isinstance(other,LpElement):
self.addterm(other, -1)
elif (isinstance(other,list)
or isinstance(other,types.GeneratorType)):
for e in other:
self.subInPlace(e)
elif isinstance(other,LpAffineExpression):
self.constant -= other.constant
for v,x in other.iteritems():
self.addterm(v, -x)
elif isinstance(other,dict):
for e in other.itervalues():
self.subInPlace(e)
else:
self.constant -= other
return self
def __neg__(self):
e = self.emptyCopy()
e.constant = - self.constant
for v,x in self.iteritems():
e[v] = - x
return e
def __pos__(self):
return self
def __add__(self, other):
return self.copy().addInPlace(other)
def __radd__(self, other):
return self.copy().addInPlace(other)
def __sub__(self, other):
return self.copy().subInPlace(other)
def __rsub__(self, other):
return (-self).addInPlace(other)
def __mul__(self, other):
e = self.emptyCopy()
if isinstance(other,LpAffineExpression):
e.constant = self.constant * other.constant
if len(other):
if len(self):
raise TypeError, "Non-constant expressions cannot be multiplied"
else:
c = self.constant
if c != 0:
for v,x in other.iteritems():
e[v] = c * x
else:
c = other.constant
if c != 0:
for v,x in self.iteritems():
e[v] = c * x
elif isinstance(other,LpVariable):
return self * LpAffineExpression(other)
else:
if other != 0:
e.constant = self.constant * other
for v,x in self.iteritems():
e[v] = other * x
return e
def __rmul__(self, other):
return self * other
    def __div__(self, other):
        # self / other: only division by a constant (an expression or
        # variable with no variable terms) or a plain number is defined.
        if isinstance(other,LpAffineExpression) or isinstance(other,LpVariable):
            if len(other):
                raise TypeError, "Expressions cannot be divided by a non-constant expression"
            # reduce the constant expression to its numeric value
            other = other.constant
        e = self.emptyCopy()
        e.constant = self.constant / other
        for v,x in self.iteritems():
            e[v] = x / other
        return e
    def __rdiv__(self, other):
        # other / self: self must be constant (no variable terms).
        e = self.emptyCopy()
        if len(self):
            raise TypeError, "Expressions cannot be divided by a non-constant expression"
        c = self.constant
        if isinstance(other,LpAffineExpression):
            e.constant = other.constant / c
            for v,x in other.iteritems():
                e[v] = x / c
        else:
            # plain number divided by this constant expression
            e.constant = other / c
        return e
    # NOTE: comparisons do not return booleans -- they build constraints.
    # In particular ``==`` is overloaded to create an equality constraint,
    # so ordinary equality testing between expressions is unavailable.
    def __le__(self, other):
        return LpConstraint(self - other, LpConstraintLE)
    def __ge__(self, other):
        return LpConstraint(self - other, LpConstraintGE)
    def __eq__(self, other):
        return LpConstraint(self - other, LpConstraintEQ)
class LpConstraint(LpAffineExpression):
    """An LP constraint.

    A constraint is an affine expression plus a ``sense``
    (LE = -1, EQ = 0, GE = 1).  The right-hand side is folded (negated)
    into the inherited ``constant``, so the constraint is always stored
    in the normalised form ``expression + constant (sense) 0``.
    """
    def __init__(self, e = None, sense = LpConstraintEQ,
                  name = None, rhs = None):
        """
        :param e: an instance of :class:`LpAffineExpression`
        :param sense: one of :data:`~pulp.constants.LpConstraintEQ`, :data:`~pulp.constants.LpConstraintGE`, :data:`~pulp.constants.LpConstraintLE` (0, 1, -1 respectively)
        :param name: identifying string
        :param rhs: numerical value of constraint target
        """
        LpAffineExpression.__init__(self, e, name = name)
        if rhs is not None:
            # fold the target into the constant: e - rhs (sense) 0
            self.constant = - rhs
        self.sense = sense
        self.modified = True
    def getLb(self):
        """Lower bound implied on the expression, or None if unbounded below."""
        if ( (self.sense == LpConstraintGE) or
             (self.sense == LpConstraintEQ) ):
            return -self.constant
        else:
            return None
    def getUb(self):
        """Upper bound implied on the expression, or None if unbounded above."""
        if ( (self.sense == LpConstraintLE) or
             (self.sense == LpConstraintEQ) ):
            return -self.constant
        else:
            return None
    def __str__(self):
        s = LpAffineExpression.__str__(self, 0)
        # BUGFIX: was ``if self.sense:`` which is False for LpConstraintEQ
        # (== 0), so equality constraints printed without their
        # " = rhs" part.  Test against None, as __repr__ already does.
        if self.sense is not None:
            s += " " + LpConstraintSenses[self.sense] + " " + str(-self.constant)
        return s
    def asCplexLpConstraint(self, name):
        """
        Returns a constraint as a string
        """
        result, line = self.asCplexVariablesOnly(name)
        if not self.keys():
            # a constraint with no terms still needs a LHS token
            line += ["0"]
        c = -self.constant
        if c == 0:
            c = 0 # Suppress the sign of negative zero ("-0") in output
        term = " %s %.12g" % (LpConstraintSenses[self.sense], c)
        if self._count_characters(line)+len(term) > LpCplexLPLineSize:
            result += ["".join(line)]
            line = [term]
        else:
            line += [term]
        result += ["".join(line)]
        result = "%s\n" % "\n".join(result)
        return result
    def changeRHS(self, RHS):
        """
        alters the RHS of a constraint so that it can be modified in a resolve
        """
        self.constant = -RHS
        self.modified = True
    def __repr__(self):
        s = LpAffineExpression.__repr__(self)
        if self.sense is not None:
            s += " " + LpConstraintSenses[self.sense] + " 0"
        return s
    def copy(self):
        """Make a copy of self (terms copied by the expression copy; sense kept)"""
        return LpConstraint(self, self.sense)
    def emptyCopy(self):
        # a constraint with no terms but the same sense
        return LpConstraint(sense = self.sense)
    def addInPlace(self, other):
        """Add ``other`` into this constraint in place and return self.

        When other is itself a constraint, like senses are merged with
        bitwise-or over {-1, 0, 1}; opposing senses are reconciled by
        subtracting the other expression instead.
        """
        if isinstance(other,LpConstraint):
            if self.sense * other.sense >= 0:
                LpAffineExpression.addInPlace(self, other)
                self.sense |= other.sense
            else:
                LpAffineExpression.subInPlace(self, other)
                self.sense |= - other.sense
        elif isinstance(other,list):
            for e in other:
                self.addInPlace(e)
        else:
            LpAffineExpression.addInPlace(self, other)
            #raise TypeError, "Constraints and Expressions cannot be added"
        return self
    def subInPlace(self, other):
        """Subtract ``other`` from this constraint in place and return self."""
        if isinstance(other,LpConstraint):
            if self.sense * other.sense <= 0:
                LpAffineExpression.subInPlace(self, other)
                self.sense |= - other.sense
            else:
                LpAffineExpression.addInPlace(self, other)
                self.sense |= other.sense
        elif isinstance(other,list):
            for e in other:
                self.subInPlace(e)
        else:
            LpAffineExpression.subInPlace(self, other)
            #raise TypeError, "Constraints and Expressions cannot be added"
        return self
    def __neg__(self):
        # negating the expression flips the sense (<= becomes >=)
        c = LpAffineExpression.__neg__(self)
        c.sense = - c.sense
        return c
    def __add__(self, other):
        return self.copy().addInPlace(other)
    def __radd__(self, other):
        return self.copy().addInPlace(other)
    def __sub__(self, other):
        return self.copy().subInPlace(other)
    def __rsub__(self, other):
        return (-self).addInPlace(other)
    def __mul__(self, other):
        if isinstance(other,LpConstraint):
            c = LpAffineExpression.__mul__(self, other)
            if c.sense == 0:
                c.sense = other.sense
            elif other.sense != 0:
                c.sense *= other.sense
            return c
        else:
            return LpAffineExpression.__mul__(self, other)
    def __rmul__(self, other):
        return self * other
    def __div__(self, other):
        if isinstance(other,LpConstraint):
            c = LpAffineExpression.__div__(self, other)
            if c.sense == 0:
                c.sense = other.sense
            elif other.sense != 0:
                c.sense *= other.sense
            return c
        else:
            # BUGFIX: this branch previously delegated to
            # LpAffineExpression.__mul__, silently multiplying instead of
            # dividing when the divisor was a plain number or expression.
            return LpAffineExpression.__div__(self, other)
    def __rdiv__(self, other):
        if isinstance(other,LpConstraint):
            c = LpAffineExpression.__rdiv__(self, other)
            if c.sense == 0:
                c.sense = other.sense
            elif other.sense != 0:
                c.sense *= other.sense
            return c
        else:
            # BUGFIX: this branch also delegated to
            # LpAffineExpression.__mul__ instead of __rdiv__.
            return LpAffineExpression.__rdiv__(self, other)
    def valid(self, eps = 0):
        """True if the constraint holds at the current variable values,
        within tolerance ``eps``."""
        val = self.value()
        if self.sense == LpConstraintEQ: return abs(val) <= eps
        else: return val * self.sense >= - eps
    def makeElasticSubProblem(self, *args, **kwargs):
        """
        Builds an elastic subproblem by adding variables to a hard constraint
        uses FixedElasticSubProblem
        """
        return FixedElasticSubProblem(self, *args, **kwargs)
class LpFractionConstraint(LpConstraint):
    """
    Creates a constraint that enforces a fraction requirement a/b = c
    """
    def __init__(self, numerator, denominator = None, sense = LpConstraintEQ,
                 RHS = 1.0, name = None,
                 complement = None):
        """
        creates a fraction Constraint to model constraints of
        the nature
        numerator/denominator {==, >=, <=} RHS
        numerator/(numerator + complement) {==, >=, <=} RHS
        :param numerator: the top of the fraction
        :param denominator: as described above
        :param sense: the sense of the relation of the constraint
        :param RHS: the target fraction value
        :param complement: as described above
        """
        self.numerator = numerator
        if denominator is None and complement is not None:
            self.complement = complement
            self.denominator = numerator + complement
        elif denominator is not None and complement is None:
            self.denominator = denominator
            self.complement = denominator - numerator
        else:
            # both (or neither) supplied: stored verbatim with no validation.
            # NOTE(review): if neither is supplied, the ``lhs`` arithmetic
            # below will fail on None -- callers must pass one of them.
            self.denominator = denominator
            self.complement = complement
        # linearise numerator/denominator (sense) RHS into
        # numerator - RHS*denominator (sense) 0
        lhs = self.numerator - RHS * self.denominator
        LpConstraint.__init__(self, lhs,
              sense = sense, rhs = 0, name = name)
        self.RHS = RHS
    def findLHSValue(self):
        """
        Determines the value of the fraction in the constraint after solution
        """
        if abs(value(self.denominator))>= EPS:
            return value(self.numerator)/value(self.denominator)
        else:
            if abs(value(self.numerator))<= EPS:
                #zero divided by zero will return 1
                return 1.0
            else:
                raise ZeroDivisionError
    def makeElasticSubProblem(self, *args, **kwargs):
        """
        Builds an elastic subproblem by adding variables and splitting the
        hard constraint
        uses FractionElasticSubProblem
        """
        return FractionElasticSubProblem(self, *args, **kwargs)
class LpConstraintVar(LpElement):
    """A Constraint that can be treated as a variable when constructing
    a LpProblem by columns
    """
    def __init__(self, name = None ,sense = None,
            rhs = None, e = None):
        # Wraps an LpConstraint; the wrapper behaves like an LpElement so
        # it can be populated column-by-column via addVariable().
        LpElement.__init__(self,name)
        self.constraint = LpConstraint(name = self.name, sense = sense,
                                       rhs = rhs , e = e)
    def addVariable(self, var, coeff):
        """
        Adds a variable to the constraint with the
        activity coeff
        """
        self.constraint.addterm(var, coeff)
    def value(self):
        # value of the wrapped constraint's expression at the current solution
        return self.constraint.value()
class LpProblem(object):
    """An LP Problem"""
    def __init__(self, name = "NoName", sense = LpMinimize):
        """
        Creates an LP Problem
        This function creates a new LP Problem with the specified associated parameters
        :param name: name of the problem used in the output .lp file
        :param sense: of the LP problem objective. \
        Either :data:`~pulp.constants.LpMinimize` (default) \
        or :data:`~pulp.constants.LpMaximize`.
        :return: An LP Problem
        """
        self.objective = None
        self.constraints = _DICT_TYPE()  # name -> LpConstraint
        self.name = name
        self.sense = sense
        self.sos1 = {}
        self.sos2 = {}
        self.status = LpStatusNotSolved
        self.noOverlap = 1  # when true, a duplicate constraint name raises
        self.solver = None
        self.initialValues = {}
        self.resolveOK = False
        self._variables = []
        self._variable_ids = {} #old school using dict.keys() for a set
        self.dummyVar = None  # created lazily by get_dummyVar()
        # locals
        self.lastUnused = 0  # counter used to generate _C<n> constraint names
    def __repr__(self):
        # Render the whole problem in a CPLEX-LP-like textual form.
        string = self.name+":\n"
        if self.sense == 1:
            string += "MINIMIZE\n"
        else:
            string += "MAXIMIZE\n"
        string += repr(self.objective) +"\n"
        if self.constraints:
            string += "SUBJECT TO\n"
            for n, c in self.constraints.iteritems():
                string += c.asCplexLpConstraint(n) +"\n"
        string += "VARIABLES\n"
        for v in self.variables():
            string += v.asCplexLpVariable() + " " + LpCategories[v.cat] + "\n"
        return string
    def copy(self):
        """Make a copy of self. Expressions are copied by reference"""
        lpcopy = LpProblem(name = self.name, sense = self.sense)
        lpcopy.objective = self.objective
        lpcopy.constraints = self.constraints.copy()
        lpcopy.sos1 = self.sos1.copy()
        lpcopy.sos2 = self.sos2.copy()
        return lpcopy
    def deepcopy(self):
        """Make a copy of self. Expressions are copied by value"""
        lpcopy = LpProblem(name = self.name, sense = self.sense)
        if self.objective is not None:
            lpcopy.objective = self.objective.copy()
        lpcopy.constraints = {}
        for k,v in self.constraints.iteritems():
            lpcopy.constraints[k] = v.copy()
        lpcopy.sos1 = self.sos1.copy()
        lpcopy.sos2 = self.sos2.copy()
        return lpcopy
    def normalisedNames(self):
        # Uniform, fixed-width names used when writing files with rename=1:
        # constraints become C0000000.., variables X0000000..
        constraintsNames = {}
        i = 0
        for k in self.constraints:
            constraintsNames[k] = "C%07d" % i
            i += 1
        variablesNames = {}
        i = 0
        for k in self.variables():
            variablesNames[k.name] = "X%07d" % i
            i += 1
        return constraintsNames, variablesNames, "OBJ"
    def isMIP(self):
        # 1 if any variable is integer-categorised, else 0.
        for v in self.variables():
            if v.cat == LpInteger: return 1
        return 0
    def roundSolution(self, epsInt = 1e-5, eps = 1e-7):
        """
        Rounds the lp variables
        Inputs:
            - none
        Side Effects:
            - The lp variables are rounded
        """
        for v in self.variables():
            v.round(epsInt, eps)
    def unusedConstraintName(self):
        # Generate the next "_C<n>" name not already used by a constraint.
        self.lastUnused += 1
        while 1:
            s = "_C%d" % self.lastUnused
            if s not in self.constraints: break
            self.lastUnused += 1
        return s
    def valid(self, eps = 0):
        # True when every variable and constraint is valid within eps.
        for v in self.variables():
            if not v.valid(eps): return False
        for c in self.constraints.itervalues():
            if not c.valid(eps): return False
        else:
            # for/else: this runs when the loop completes without break,
            # i.e. no invalid constraint caused an early return.
            return True
    def infeasibilityGap(self, mip = 1):
        # Largest absolute violation over variables and constraints.
        gap = 0
        for v in self.variables():
            gap = max(abs(v.infeasibilityGap(mip)), gap)
        for c in self.constraints.itervalues():
            if not c.valid(0):
                gap = max(abs(c.value()), gap)
        return gap
    def addVariable(self, variable):
        """
        Adds a variable to the problem before a constraint is added
        @param variable: the variable to be added
        """
        if id(variable) not in self._variable_ids:
            self._variables.append(variable)
            self._variable_ids[id(variable)] = variable
    def addVariables(self, variables):
        """
        Adds variables to the problem before a constraint is added
        @param variables: the variables to be added
        """
        for v in variables:
            self.addVariable(v)
    def variables(self):
        """
        Returns a list of the problem variables
        Inputs:
            - none
        Returns:
            - A list of the problem variables
        """
        # Side effect: registers any variables referenced by the objective
        # or constraints that have not been added explicitly.
        if self.objective:
            self.addVariables(self.objective.keys())
        for c in self.constraints.itervalues():
            self.addVariables(c.keys())
        variables = self._variables
        #sort the varibles DSU
        variables = [[v.name, v] for v in variables]
        variables.sort()
        variables = [v for _, v in variables]
        return variables
    def variablesDict(self):
        # Map of variable name -> variable, gathered from the objective
        # and all constraints.
        variables = {}
        if self.objective:
            for v in self.objective:
                variables[v.name] = v
        for c in self.constraints.values():
            for v in c:
                variables[v.name] = v
        return variables
    def add(self, constraint, name = None):
        # Alias for addConstraint.
        self.addConstraint(constraint, name)
    def addConstraint(self, constraint, name = None):
        # Register a constraint, naming it if necessary; its variables are
        # added to the problem as a side effect.
        if not isinstance(constraint, LpConstraint):
            raise TypeError, "Can only add LpConstraint objects"
        if name:
            constraint.name = name
        try:
            if constraint.name:
                name = constraint.name
            else:
                name = self.unusedConstraintName()
        except AttributeError:
            raise TypeError, "Can only add LpConstraint objects"
        #removed as this test fails for empty constraints
        # if len(constraint) == 0:
        # if not constraint.valid():
        # raise ValueError, "Cannot add false constraints"
        if name in self.constraints:
            if self.noOverlap:
                raise PulpError, "overlapping constraint names: " + name
            else:
                print "Warning: overlapping constraint names:", name
        self.constraints[name] = constraint
        self.addVariables(constraint.keys())
    def setObjective(self,obj):
        """
        Sets the input variable as the objective function. Used in Columnwise Modelling
        :param obj: the objective function of type :class:`LpConstraintVar`
        Side Effects:
            - The objective function is set
        """
        if isinstance(obj, LpVariable):
            # allows the user to add a LpVariable as an objective
            obj = obj + 0.0
        try:
            # unwrap an LpConstraintVar into its underlying constraint
            obj = obj.constraint
            name = obj.name
        except AttributeError:
            name = None
        self.objective = obj
        self.objective.name = name
        self.resolveOK = False
    def __iadd__(self, other):
        # Dispatch on type: constraints are added as constraints, affine
        # expressions/variables/numbers become the objective, True is a no-op.
        if isinstance(other, tuple):
            # a (thing, name) pair
            other, name = other
        else:
            name = None
        if other is True:
            return self
        if isinstance(other, LpConstraintVar):
            self.addConstraint(other.constraint)
        elif isinstance(other, LpConstraint):
            self.addConstraint(other, name)
        elif isinstance(other, LpAffineExpression):
            self.objective = other
            self.objective.name = name
        elif isinstance(other, LpVariable) or type(other) in [int, float]:
            self.objective = LpAffineExpression(other)
            self.objective.name = name
        else:
            raise TypeError, "Can only add LpConstraintVar, LpConstraint, LpAffineExpression or True objects"
        return self
    def extend(self, other, use_objective = True):
        """
        extends an LpProblem by adding constraints either from a dictionary
        a tuple or another LpProblem object.
        @param use_objective: determines whether the objective is imported from
        the other problem
        For dictionaries the constraints will be named with the keys
        For tuples an unique name will be generated
        For LpProblems the name of the problem will be added to the constraints
        name
        """
        if isinstance(other, dict):
            for name in other:
                self.constraints[name] = other[name]
        elif isinstance(other, LpProblem):
            # prefix imported names with the other problem's name
            for v in set(other.variables()).difference(self.variables()):
                v.name = other.name + v.name
            for name,c in other.constraints.iteritems():
                c.name = other.name + name
                self.addConstraint(c)
            if use_objective:
                self.objective += other.objective
        else:
            # an iterable of constraints or (name, constraint) tuples
            for c in other:
                if isinstance(c,tuple):
                    name = c[0]
                    c = c[1]
                else:
                    name = None
                if not name: name = c.name
                if not name: name = self.unusedConstraintName()
                self.constraints[name] = c
    def coefficients(self, translation = None):
        # Flatten the constraint matrix into (variable, constraint, coeff)
        # triples, optionally renaming through `translation`.
        coefs = []
        if translation == None:
            for c in self.constraints:
                cst = self.constraints[c]
                coefs.extend([(v.name, c, cst[v]) for v in cst])
        else:
            for c in self.constraints:
                ctr = translation[c]
                cst = self.constraints[c]
                coefs.extend([(translation[v.name], ctr, cst[v]) for v in cst])
        return coefs
    def writeMPS(self, filename, mpsSense = 0, rename = 0, mip = 1):
        # Write the problem in MPS format. If mpsSense differs from the
        # problem's sense the objective is negated for output only.
        wasNone, dummyVar = self.fixObjective()
        f = file(filename, "w")
        if mpsSense == 0: mpsSense = self.sense
        cobj = self.objective
        if mpsSense != self.sense:
            n = cobj.name
            cobj = - cobj
            cobj.name = n
        if rename:
            constraintsNames, variablesNames, cobj.name = self.normalisedNames()
        f.write("*SENSE:"+LpSenses[mpsSense]+"\n")
        n = self.name
        if rename: n = "MODEL"
        f.write("NAME "+n+"\n")
        vs = self.variables()
        # constraints
        f.write("ROWS\n")
        objName = cobj.name
        if not objName: objName = "OBJ"
        f.write(" N %s\n" % objName)
        mpsConstraintType = {LpConstraintLE:"L", LpConstraintEQ:"E", LpConstraintGE:"G"}
        for k,c in self.constraints.iteritems():
            if rename: k = constraintsNames[k]
            f.write(" "+mpsConstraintType[c.sense]+" "+k+"\n")
        # matrix
        f.write("COLUMNS\n")
        # Creation of a dict of dict:
        # coefs[nomVariable][nomContrainte] = coefficient
        coefs = {}
        for k,c in self.constraints.iteritems():
            if rename: k = constraintsNames[k]
            for v in c:
                n = v.name
                if rename: n = variablesNames[n]
                if n in coefs:
                    coefs[n][k] = c[v]
                else:
                    coefs[n] = {k:c[v]}
        for v in vs:
            if mip and v.cat == LpInteger:
                f.write(" MARK 'MARKER' 'INTORG'\n")
            n = v.name
            if rename: n = variablesNames[n]
            if n in coefs:
                cv = coefs[n]
                # Most of the work is done here
                for k in cv: f.write(" %-8s %-8s % .5e\n" % (n,k,cv[k]))
            # objective function
            if v in cobj: f.write(" %-8s %-8s % .5e\n" % (n,objName,cobj[v]))
            if mip and v.cat == LpInteger:
                f.write(" MARK 'MARKER' 'INTEND'\n")
        # right hand side
        f.write("RHS\n")
        for k,c in self.constraints.iteritems():
            c = -c.constant
            if rename: k = constraintsNames[k]
            if c == 0: c = 0
            f.write(" RHS %-8s % .5e\n" % (k,c))
        # bounds
        f.write("BOUNDS\n")
        for v in vs:
            n = v.name
            if rename: n = variablesNames[n]
            if v.lowBound != None and v.lowBound == v.upBound:
                f.write(" FX BND %-8s % .5e\n" % (n, v.lowBound))
            elif v.lowBound == 0 and v.upBound == 1 and mip and v.cat == LpInteger:
                f.write(" BV BND %-8s\n" % n)
            else:
                if v.lowBound != None:
                    # In MPS files, variables with no bounds (i.e. >= 0)
                    # are assumed BV by COIN and CPLEX.
                    # So we explicitly write a 0 lower bound in this case.
                    if v.lowBound != 0 or (mip and v.cat == LpInteger and v.upBound == None):
                        f.write(" LO BND %-8s % .5e\n" % (n, v.lowBound))
                else:
                    if v.upBound != None:
                        f.write(" MI BND %-8s\n" % n)
                    else:
                        f.write(" FR BND %-8s\n" % n)
                if v.upBound != None:
                    f.write(" UP BND %-8s % .5e\n" % (n, v.upBound))
        f.write("ENDATA\n")
        f.close()
        self.restoreObjective(wasNone, dummyVar)
        # returns the variables, in writing order
        if rename == 0:
            return vs
        else:
            return vs, variablesNames, constraintsNames, cobj.name
    def writeLP(self, filename, writeSOS = 1, mip = 1):
        """
        Write the given Lp problem to a .lp file.
        This function writes the specifications (objective function,
        constraints, variables) of the defined Lp problem to a file.
        :param filename: the name of the file to be created.
        Side Effects:
            - The file is created.
        """
        f = file(filename, "w")
        f.write("\\* "+self.name+" *\\\n")
        if self.sense == 1:
            f.write("Minimize\n")
        else:
            f.write("Maximize\n")
        wasNone, dummyVar = self.fixObjective()
        objName = self.objective.name
        if not objName: objName = "OBJ"
        f.write(self.objective.asCplexLpAffineExpression(objName, constant = 0))
        f.write("Subject To\n")
        ks = self.constraints.keys()
        ks.sort()
        for k in ks:
            constraint = self.constraints[k]
            if not constraint.keys():
                #empty constraint add the dummyVar
                constraint += self.get_dummyVar()
            f.write(constraint.asCplexLpConstraint(k))
        vs = self.variables()
        # check if any names are longer than 100 characters
        long_names = [v.name for v in vs if len(v.name) > 100]
        if long_names:
            raise PulpError('Variable names too long for Lp format\n'
                            + str(long_names))
        # check for repeated names
        repeated_names = {}
        for v in vs:
            repeated_names[v.name] = repeated_names.get(v.name, 0) + 1
        repeated_names = [(key, value) for key, value in repeated_names.items()
                            if value >= 2]
        if repeated_names:
            raise PulpError('Repeated variable names in Lp format\n'
                            + str(repeated_names))
        # Bounds on non-"positive" variables
        # Note: XPRESS and CPLEX do not interpret integer variables without
        # explicit bounds
        if mip:
            vg = [v for v in vs if not (v.isPositive() and v.cat == LpContinuous) \
                and not v.isBinary()]
        else:
            vg = [v for v in vs if not v.isPositive()]
        if vg:
            f.write("Bounds\n")
            for v in vg:
                f.write("%s\n" % v.asCplexLpVariable())
        # Integer non-binary variables
        if mip:
            vg = [v for v in vs if v.cat == LpInteger and not v.isBinary()]
            if vg:
                f.write("Generals\n")
                for v in vg: f.write("%s\n" % v.name)
            # Binary variables
            vg = [v for v in vs if v.isBinary()]
            if vg:
                f.write("Binaries\n")
                for v in vg: f.write("%s\n" % v.name)
        # Special Ordered Sets
        if writeSOS and (self.sos1 or self.sos2):
            f.write("SOS\n")
            if self.sos1:
                for sos in self.sos1.itervalues():
                    f.write("S1:: \n")
                    for v,val in sos.iteritems():
                        f.write(" %s: %.12g\n" % (v.name, val))
            if self.sos2:
                for sos in self.sos2.itervalues():
                    f.write("S2:: \n")
                    for v,val in sos.iteritems():
                        f.write(" %s: %.12g\n" % (v.name, val))
        f.write("End\n")
        f.close()
        self.restoreObjective(wasNone, dummyVar)
    def assignVarsVals(self, values):
        # Copy solver-reported values onto the variables by name.
        variables = self.variablesDict()
        for name in values:
            if name != '__dummy':
                variables[name].varValue = values[name]
    def assignVarsDj(self,values):
        # Copy solver-reported reduced costs onto the variables by name.
        variables = self.variablesDict()
        for name in values:
            if name != '__dummy':
                variables[name].dj = values[name]
    def assignConsPi(self, values):
        # Copy solver-reported dual values onto the constraints by name.
        for name in values:
            self.constraints[name].pi = values[name]
    def assignConsSlack(self, values, activity=False):
        for name in values:
            if activity:
                #reports the activitynot the slack
                self.constraints[name].slack = -(self.constraints[name].constant + float(values[name]))
            else:
                self.constraints[name].slack = float(values[name])
    def get_dummyVar(self):
        # Lazily create a fixed-at-zero variable used to pad empty
        # constraints/objectives in file output and solving.
        if self.dummyVar is None:
            self.dummyVar = LpVariable("__dummy", 0, 0)
        return self.dummyVar
    def fixObjective(self):
        # Ensure the objective is a non-constant LpAffineExpression,
        # remembering what was changed so restoreObjective can undo it.
        if self.objective is None:
            self.objective = 0
            wasNone = 1
        else:
            wasNone = 0
        if not isinstance(self.objective, LpAffineExpression):
            self.objective = LpAffineExpression(self.objective)
        if self.objective.isNumericalConstant():
            dummyVar = self.get_dummyVar()
            self.objective += dummyVar
        else:
            dummyVar = None
        return wasNone, dummyVar
    def restoreObjective(self, wasNone, dummyVar):
        # Undo the changes made by fixObjective.
        if wasNone:
            self.objective = None
        elif not dummyVar is None:
            self.objective -= dummyVar
    def solve(self, solver = None, **kwargs):
        """
        Solve the given Lp problem.
        This function changes the problem to make it suitable for solving
        then calls the solver.actualSolve() method to find the solution
        :param solver: Optional: the specific solver to be used, defaults to the
        default solver.
        Side Effects:
            - The attributes of the problem object are changed in
        :meth:`~pulp.solver.LpSolver.actualSolve()` to reflect the Lp solution
        """
        if not(solver): solver = self.solver
        if not(solver): solver = LpSolverDefault
        wasNone, dummyVar = self.fixObjective()
        #time it
        self.solutionTime = -clock()
        status = solver.actualSolve(self, **kwargs)
        self.solutionTime += clock()
        self.restoreObjective(wasNone, dummyVar)
        self.solver = solver
        return status
    def sequentialSolve(self, objectives, absoluteTols = None,
                        relativeTols = None, solver = None, debug = False):
        """
        Solve the given Lp problem with several objective functions.
        This function sequentially changes the objective of the problem
        and then adds the objective function as a constraint
        :param objectives: the list of objectives to be used to solve the problem
        :param absoluteTols: the list of absolute tolerances to be applied to
        the constraints should be +ve for a minimise objective
        :param relativeTols: the list of relative tolerances applied to the constraints
        :param solver: the specific solver to be used, defaults to the default solver.
        """
        #TODO Add a penalty variable to make problems elastic
        #TODO add the ability to accept different status values i.e. infeasible etc
        if not(solver): solver = self.solver
        if not(solver): solver = LpSolverDefault
        if not(absoluteTols):
            absoluteTols = [0] * len(objectives)
        if not(relativeTols):
            relativeTols = [1] * len(objectives)
        #time it
        self.solutionTime = -clock()
        statuses = []
        for i,(obj,absol,rel) in enumerate(zip(objectives, absoluteTols, relativeTols)):
            self.setObjective(obj)
            status = solver.actualSolve(self)
            statuses.append(status)
            if debug: self.writeLP("%sSequence.lp"%i)
            # lock in the objective just optimised as a constraint before
            # moving on to the next one
            if self.sense == LpMinimize:
                self += obj <= value(obj)*rel + absol,"%s_Sequence_Objective"%i
            elif self.sense == LpMaximize:
                self += obj >= value(obj)*rel + absol,"%s_Sequence_Objective"%i
        self.solutionTime += clock()
        self.solver = solver
        return statuses
    def resolve(self, solver = None, **kwargs):
        """
        resolves an Problem using the same solver as previously
        """
        if not(solver): solver = self.solver
        if self.resolveOK:
            return self.solver.actualResolve(self, **kwargs)
        else:
            logging.warn('resolve not ok. solving instead')
            return self.solve(solver=solver, **kwargs)
    def setSolver(self,solver = LpSolverDefault):
        """Sets the Solver for this problem useful if you are using
        resolve
        """
        self.solver = solver
    def setInitial(self,values):
        # Store an initial (warm-start) assignment for solvers that use it.
        self.initialValues = values
class FixedElasticSubProblem(LpProblem):
    """
    Contains the subproblem generated by converting a fixed constraint
    :math:`\sum_{i}a_i x_i = b` into an elastic constraint.
    :param constraint: The LpConstraint that the elastic constraint is based on
    :param penalty: penalty applied for violation (+ve or -ve) of the constraints
    :param proportionFreeBound:
        the proportional bound (+ve and -ve) on
        constraint violation that is free from penalty
    :param proportionFreeBoundList: the proportional bound on \
        constraint violation that is free from penalty, expressed as a list\
        where [-ve, +ve]
    """
    def __init__(self, constraint, penalty = None,
                                        proportionFreeBound = None,
                                        proportionFreeBoundList = None):
        subProblemName = "%s_elastic_SubProblem" % constraint.name
        LpProblem.__init__(self, subProblemName, LpMinimize)
        self.objective = LpAffineExpression()
        self.constraint = constraint
        self.constant = constraint.constant
        self.RHS = - constraint.constant
        # NOTE(review): this second assignment duplicates the one above.
        self.objective = LpAffineExpression()
        self += constraint, "_Constraint"
        #create and add these variables but disabled
        self.freeVar = LpVariable("_free_bound",
                                  upBound = 0, lowBound = 0)
        self.upVar = LpVariable("_pos_penalty_var",
                                upBound = 0, lowBound = 0)
        self.lowVar = LpVariable("_neg_penalty_var",
                                 upBound = 0, lowBound = 0)
        # NOTE: this mutates the caller's constraint object in place,
        # adding the elastic variables to it.
        constraint.addInPlace(self.freeVar + self.lowVar + self.upVar)
        if proportionFreeBound:
            proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
        if proportionFreeBoundList:
            #add a costless variable
            self.freeVar.upBound = abs(constraint.constant *
                proportionFreeBoundList[0])
            self.freeVar.lowBound = -abs(constraint.constant *
                proportionFreeBoundList[1])
            # Note the reversal of the upbound and lowbound due to the nature of the
            # variable
        if penalty is not None:
            #activate these variables
            self.upVar.upBound = None
            self.lowVar.lowBound = None
            self.objective = penalty*self.upVar - penalty*self.lowVar
    def _findValue(self, attrib):
        """
        safe way to get the value of a variable that may not exist
        """
        var = getattr(self, attrib, 0)
        if var:
            if value(var) is not None:
                return value(var)
            else:
                return 0.0
        else:
            return 0.0
    def isViolated(self):
        """
        returns true if the penalty variables are non-zero
        """
        upVar = self._findValue("upVar")
        lowVar = self._findValue("lowVar")
        freeVar = self._findValue("freeVar")
        result = abs(upVar + lowVar) >= EPS
        if result:
            logging.debug("isViolated %s, upVar %s, lowVar %s, freeVar %s result %s"%(
                self.name, upVar, lowVar, freeVar, result))
            logging.debug("isViolated value lhs %s constant %s"%(
                self.findLHSValue(), self.RHS))
        return result
    def findDifferenceFromRHS(self):
        """
        The amount the actual value varies from the RHS (sense: LHS - RHS)
        """
        return self.findLHSValue() - self.RHS
    def findLHSValue(self):
        """
        for elastic constraints finds the LHS value of the constraint without
        the free variable and or penalty variable assumes the constant is on the
        rhs
        """
        upVar = self._findValue("upVar")
        lowVar = self._findValue("lowVar")
        freeVar = self._findValue("freeVar")
        # subtract the elastic variables back out of the constraint value
        return self.constraint.value() - self.constant - \
                upVar - lowVar - freeVar
    def deElasticize(self):
        """ de-elasticize constraint """
        self.upVar.upBound = 0
        self.lowVar.lowBound = 0
    def reElasticize(self):
        """
        Make the Subproblem elastic again after deElasticize
        """
        self.upVar.lowBound = 0
        self.upVar.upBound = None
        self.lowVar.upBound = 0
        self.lowVar.lowBound = None
    def alterName(self, name):
        """
        Alters the name of anonymous parts of the problem
        """
        self.name = "%s_elastic_SubProblem" % name
        if hasattr(self, 'freeVar'):
            self.freeVar.name = self.name + "_free_bound"
        if hasattr(self, 'upVar'):
            self.upVar.name = self.name + "_pos_penalty_var"
        if hasattr(self, 'lowVar'):
            self.lowVar.name = self.name + "_neg_penalty_var"
class FractionElasticSubProblem(FixedElasticSubProblem):
    """
    Contains the subproblem generated by converting a Fraction constraint
    numerator/(numerator+complement) = b
    into an elastic constraint
    :param name: The name of the elastic subproblem
    :param penalty: penalty applied for violation (+ve or -ve) of the constraints
    :param proportionFreeBound: the proportional bound (+ve and -ve) on
    constraint violation that is free from penalty
    :param proportionFreeBoundList: the proportional bound on
    constraint violation that is free from penalty, expressed as a list
    where [-ve, +ve]
    """
    def __init__(self, name, numerator, RHS, sense,
                 complement = None,
                 denominator = None,
                 penalty = None,
                 proportionFreeBound = None,
                 proportionFreeBoundList = None):
        # NOTE(review): bypasses FixedElasticSubProblem.__init__ and calls
        # LpProblem.__init__ directly; the elastic structure is rebuilt here.
        subProblemName = "%s_elastic_SubProblem" % name
        self.numerator = numerator
        if denominator is None and complement is not None:
            self.complement = complement
            self.denominator = numerator + complement
        elif denominator is not None and complement is None:
            self.denominator = denominator
            self.complement = denominator - numerator
        else:
            raise PulpError, 'only one of denominator and complement must be specified'
        self.RHS = RHS
        self.lowTarget = self.upTarget = None
        LpProblem.__init__(self, subProblemName, LpMinimize)
        self.freeVar = LpVariable("_free_bound",
                                  upBound = 0, lowBound = 0)
        self.upVar = LpVariable("_pos_penalty_var",
                                upBound = 0, lowBound = 0)
        self.lowVar = LpVariable("_neg_penalty_var",
                                 upBound = 0, lowBound = 0)
        if proportionFreeBound:
            proportionFreeBoundList = [proportionFreeBound, proportionFreeBound]
        if proportionFreeBoundList:
            upProportionFreeBound, lowProportionFreeBound = \
                    proportionFreeBoundList
        else:
            upProportionFreeBound, lowProportionFreeBound = (0, 0)
        #create an objective
        self += LpAffineExpression()
        #There are three cases if the constraint.sense is ==, <=, >=
        if sense in [LpConstraintEQ, LpConstraintLE]:
            #create a constraint the sets the upper bound of target
            self.upTarget = RHS + upProportionFreeBound
            # NOTE(review): self.complement is passed positionally into the
            # `denominator` slot of LpFractionConstraint while `denominator`
            # is also passed by keyword -- this looks like it would raise
            # TypeError ("multiple values") when executed; confirm against
            # LpFractionConstraint's signature.
            self.upConstraint = LpFractionConstraint(self.numerator,
                                 self.complement,
                                 LpConstraintLE,
                                 self.upTarget,
                                 denominator = self.denominator)
            if penalty is not None:
                self.lowVar.lowBound = None
                self.objective += -1* penalty * self.lowVar
                self.upConstraint += self.lowVar
            self += self.upConstraint, '_upper_constraint'
        if sense in [LpConstraintEQ, LpConstraintGE]:
            #create a constraint the sets the lower bound of target
            self.lowTarget = RHS - lowProportionFreeBound
            self.lowConstraint = LpFractionConstraint(self.numerator,
                                  self.complement,
                                  LpConstraintGE,
                                  self.lowTarget,
                                  denominator = self.denominator)
            if penalty is not None:
                self.upVar.upBound = None
                self.objective += penalty * self.upVar
                self.lowConstraint += self.upVar
            self += self.lowConstraint, '_lower_constraint'
    def findLHSValue(self):
        """
        for elastic constraints finds the LHS value of the constraint without
        the free variable and or penalty variable assumes the constant is on the
        rhs
        """
        # uses code from LpFractionConstraint
        if abs(value(self.denominator))>= EPS:
            return value(self.numerator)/value(self.denominator)
        else:
            if abs(value(self.numerator))<= EPS:
                #zero divided by zero will return 1
                return 1.0
            else:
                raise ZeroDivisionError
    def isViolated(self):
        """
        returns true if the penalty variables are non-zero
        """
        # NOTE(review): when the denominator is non-zero and neither target
        # is violated, the function falls through and returns None (falsy),
        # not an explicit False.
        if abs(value(self.denominator))>= EPS:
            if self.lowTarget is not None:
                if self.lowTarget > self.findLHSValue():
                    return True
            if self.upTarget is not None:
                if self.findLHSValue() > self.upTarget:
                    return True
        else:
            #if the denominator is zero the constraint is satisfied
            return False
class LpVariableDict(dict):
    """An LP variable generator.

    A dict that lazily creates an LpVariable the first time a missing
    key is requested: the variable is named ``name % key`` and built
    with the bounds/category supplied at construction time.
    """
    def __init__(self, name, data = None, lowBound = None, upBound = None, cat = LpContinuous):
        self.name = name
        # BUGFIX: the construction parameters were previously discarded,
        # so __getitem__ raised NameError on any missing key; store them.
        self.lowBound = lowBound
        self.upBound = upBound
        self.cat = cat
        # avoid a shared mutable default; an explicit dict still works
        dict.__init__(self, data if data is not None else {})
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            # BUGFIX: was ``LpVariable(name % key, lowBound, upBound, cat)``
            # which referenced names undefined in this scope.
            self[key] = LpVariable(self.name % key, self.lowBound,
                                   self.upBound, self.cat)
            return self[key]
# Utility functions
def lpSum(vector):
    """Sum a collection of linear expressions into one expression.

    :param vector: an iterable (or single item) of linear expressions
    """
    total = LpAffineExpression()
    return total.addInPlace(vector)
def lpDot(v1, v2):
    """Calculate the dot product of two lists of linear expressions."""
    v1_is_vector = isiterable(v1)
    v2_is_vector = isiterable(v2)
    # scalar . scalar: a plain product
    if not v1_is_vector and not v2_is_vector:
        return v1 * v2
    # broadcast a scalar operand against the other vector
    if not v1_is_vector:
        return lpDot([v1] * len(v2), v2)
    if not v2_is_vector:
        return lpDot(v1, [v2] * len(v1))
    # elementwise products, summed
    return lpSum([lpDot(a, b) for a, b in zip(v1, v2)])
def isNumber(x):
    """Returns true if x is an int or a float.

    Uses an exact type() test, so subclasses such as bool do not count.
    """
    return type(x) in (int, float)
def value(x):
    """Returns the value of the variable/expression x, or x if it is a number"""
    return x if isNumber(x) else x.value()
def valueOrDefault(x):
    """Returns the value of the variable/expression x, or x if it is a number
    Variable without value (None) are affected a possible value (within their
    bounds)."""
    return x if isNumber(x) else x.valueOrDefault()
def combination(orgset, k = None):
    """
    returns an iterator that lists the combinations of orgset of
    length k
    :param orgset: the list to be iterated
    :param k: the cardinality of the subsets
    :return: an iterator of the subsets
    example:
    >>> c = combination([1,2,3,4],2)
    >>> for s in c:
    ...     print s
    (1, 2)
    (1, 3)
    (1, 4)
    (2, 3)
    (2, 4)
    (3, 4)
    """
    # prefer the C implementation from probstat when installed; fall back
    # to the pure-python generator otherwise
    try:
        import probstat
    except ImportError:
        return __combination(orgset, k)
    return probstat.Combination(orgset, k)
def __combination(orgset, k):
    """
    fall back if probstat is not installed note it is GPL so cannot
    be included
    """
    if k == 1:
        # base case: each element alone forms a 1-combination
        for element in orgset:
            yield (element,)
    elif k > 1:
        for index, element in enumerate(orgset):
            # recurse on the tail so each element pairs only with later ones
            for tail in __combination(orgset[index + 1:], k - 1):
                yield (element,) + tail
def permutation(orgset, k = None):
    """
    returns an iterator that lists the permutations of orgset of
    length k
    :param orgset: the list to be iterated
    :param k: the cardinality of the subsets
    :return: an iterator of the subsets
    example:
    >>> c = permutation([1,2,3,4],2)
    >>> for s in c:
    ...     print s
    (1, 2)
    (1, 3)
    (1, 4)
    (2, 1)
    (2, 3)
    (2, 4)
    (3, 1)
    (3, 2)
    (3, 4)
    (4, 1)
    (4, 2)
    (4, 3)
    """
    # prefer the C implementation from probstat when installed; fall back
    # to the pure-python generator otherwise
    try:
        import probstat
    except ImportError:
        return __permutation(orgset, k)
    return probstat.Permutation(orgset, k)
def __permutation(orgset, k):
    """
    fall back if probstat is not installed note it is GPL so cannot
    be included
    """
    if k == 1:
        # base case: each element alone forms a 1-permutation
        for element in orgset:
            yield (element,)
    elif k > 1:
        for index, element in enumerate(orgset):
            # recurse on everything except the chosen element, which may
            # therefore appear in any position
            remaining = orgset[:index] + orgset[index + 1:]
            for tail in __permutation(remaining, k - 1):
                yield (element,) + tail
def allpermutations(orgset,k):
    """
    returns all permutations of orgset with up to k items
    :param orgset: the list to be iterated
    :param k: the maxcardinality of the subsets
    :return: an iterator of the subsets
    example:
    >>> c = allpermutations([1,2,3,4],2)
    >>> for s in c:
    ...     print s
    (1,)
    (2,)
    (3,)
    (4,)
    (1, 2)
    (1, 3)
    (1, 4)
    (2, 1)
    (2, 3)
    (2, 4)
    (3, 1)
    (3, 2)
    (3, 4)
    (4, 1)
    (4, 2)
    (4, 3)
    """
    # chain the permutations of every size from 1 up to k, in order
    per_size = [permutation(orgset, size) for size in range(1, k + 1)]
    return itertools.chain(*per_size)
def allcombinations(orgset,k):
    """
    returns all combinations of orgset with up to k items
    :param orgset: the list to be iterated
    :param k: the maxcardinality of the subsets
    :return: an iterator of the subsets
    example:
    >>> c = allcombinations([1,2,3,4],2)
    >>> for s in c:
    ...     print s
    (1,)
    (2,)
    (3,)
    (4,)
    (1, 2)
    (1, 3)
    (1, 4)
    (2, 3)
    (2, 4)
    (3, 4)
    """
    # chain the combinations of every size from 1 up to k, in order
    return itertools.chain(*[combination(orgset,i) for i in range(1,k+1)])
def makeDict(headers, array, default = None):
    """
    makes a list into a dictionary with the headings given in headings
    headers is a list of header lists
    array is a list with the data
    """
    # __makeDict also returns a default-value dictionary used only by the
    # recursion; callers of the public function do not need it
    result, _ = __makeDict(headers, array, default)
    return result
def __makeDict(headers, array, default = None):
    """Recursive worker for makeDict.

    Returns ``(result, returndefaultvalue)`` where ``result`` maps the
    current header level to sub-dicts (or leaf values) and
    ``returndefaultvalue`` is the defaultdict a parent level should use as
    its own default, or None when no default was requested.
    """
    #this is a recursive function so end the recursion as follows
    result ={}
    returndefaultvalue = None
    if len(headers) == 1:
        # base case: one header list left, pair it directly with the data
        result.update(dict(zip(headers[0],array)))
        defaultvalue = default
    else:
        # recurse one header level down for each key at this level
        # NOTE(review): defaultvalue keeps only the last child's value here;
        # since every child receives the same ``default`` this appears benign
        for i,h in enumerate(headers[0]):
            result[h],defaultvalue = __makeDict(headers[1:],array[i],default)
    if default != None:
        # wrap the result so missing keys yield the default value
        f = lambda :defaultvalue
        defresult = collections.defaultdict(f)
        defresult.update(result)
        result = defresult
        returndefaultvalue = collections.defaultdict(f)
    return result, returndefaultvalue
def splitDict(Data):
    """
    Split a dictionary with lists as the data, into smaller dictionaries

    :param Data: A dictionary with lists as the values
    :return: A tuple of dictionaries each containing the data separately,
            with the same dictionary keys
    """
    # the number of output dictionaries equals the longest value list
    maxitems = max(len(values) for values in Data.values())
    output = [{} for _ in range(maxitems)]
    for key, values in Data.items():
        for position, item in enumerate(values):
            output[position][key] = item
    return tuple(output)
def read_table(data, coerce_type, transpose=False):
    '''
    Reads in data from a simple table and forces it to be a particular type
    This is a helper function that allows data to be easily constained in a
    simple script
    ::return: a dictionary of with the keys being a tuple of the strings
       in the first row and colum of the table
    ::param data: the multiline string containing the table data
    ::param coerce_type: the type that the table data is converted to
    ::param transpose: reverses the data if needed
    Example:
    >>> table_data = """
    ...         L1      L2      L3      L4      L5      L6
    ... C1      6736    42658   70414   45170   184679  111569
    ... C2      217266  227190  249640  203029  153531  117487
    ... C3      35936   28768   126316  2498    130317  74034
    ... C4      73446   52077   108368  75011   49827   62850
    ... C5      174664  177461  151589  153300  59916   135162
    ... C6      186302  189099  147026  164938  149836  286307
    ... """
    >>> table = read_table(table_data, int)
    >>> table[("C1","L1")]
    6736
    >>> table[("C6","L5")]
    149836
    '''
    lines = data.splitlines()
    # the first line of the (leading-newline) table string is empty, so the
    # column headings live on the second line
    headings = lines[1].split()
    result = {}
    for row in lines[2:]:
        items = row.split()
        row_label = items[0]
        for col_index, cell in enumerate(items[1:]):
            if transpose:
                key = (headings[col_index], row_label)
            else:
                key = (row_label, headings[col_index])
            result[key] = coerce_type(cell)
    return result
def configSolvers():
    """
    Configure the path to the solvers on the command line
    Designed to configure the file locations of the solvers from the
    command line after installation
    """
    # candidate solvers: (current default path, config key, prompt text)
    configlist = [(cplex_dll_path,"cplexpath","CPLEX: "),
                  (coinMP_path, "coinmppath","CoinMP dll (windows only): ")]
    print ("Please type the full path including filename and extension \n" +
           "for each solver available")
    configdict = {}
    for (default, key, msg) in configlist:
        # raw_input: this module targets Python 2; an empty answer keeps
        # the existing default
        value = raw_input(msg + "[" + str(default) +"]")
        if value:
            configdict[key] = value
    # persist only the keys the user actually changed
    setConfigInformation(**configdict)
def pulpTestAll():
    """Run the solver smoke tests for every solver that is available.

    Each solver class below is instantiated; if it reports itself available
    it is exercised via ``pulpTestSolver``, otherwise it is reported as
    unavailable.  (Python 2 ``print`` statements.)
    """
    from tests import pulpTestSolver
    solvers = [PULP_CBC_CMD,
               CPLEX_DLL,
               CPLEX_CMD,
               CPLEX_PY,
               COIN_CMD,
               COINMP_DLL,
               GLPK_CMD,
               XPRESS,
               GUROBI,
               GUROBI_CMD,
               PYGLPK,
               YAPOSIB
               ]
    for s in solvers:
        if s().available():
            #~ try:
            pulpTestSolver(s)
            print "* Solver", s, "passed."
            #~ except Exception, e:
            #~ print e
            #~ print "* Solver", s, "failed."
        else:
            print "Solver", s, "unavailable."
def pulpDoctest():
    """
    runs all doctests
    """
    import doctest
    if __name__ == '__main__':
        # executed as a script: run the doctests of this module directly
        doctest.testmod()
    else:
        # imported: run the doctests of the installed pulp package
        import pulp
        doctest.testmod(pulp)
if __name__ == '__main__':
    # Tests
    # Running ``python pulp.py`` directly exercises the solver smoke tests
    # and then the module's doctests.
    pulpTestAll()
    pulpDoctest()
| src/pulp/pulp.py | 75,990 | ! /usr/bin/env python PuLP : Python LP Modeler Version 1.5.1 Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz) $Id: pulp.py 1791 2008-04-23 22:54:34Z smit023 $ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
iron python does not like an OrderedDictpython 2.7 or 3.1TODO: extend if we ever add another section in the config fileread the old configurationset the new keyswrite the new configuration Default solver selectionto remove illegal characters from the names self.hash MUST be different for each variable else dict() will call the comparison operators that are overloadedcode to add a variable to constraints for column based modelling Cartesian product Note: XPRESS and CPLEX do not interpret integer variables without explicit boundsto remove illegal characters from the namesTODO remove isinstance usage Will not copy the name Proxy functions for variables Functions on expressions Will not copy the namecounts the characters in a list of stringsrefactored to use a list for speed in iron python Supress signraise TypeError, "Constraints and Expressions cannot be added"raise TypeError, "Constraints and Expressions cannot be added"zero divided by zero will return 1old school using dict.keys() for a set localssort the varibles DSUremoved as this test fails for empty constraints if len(constraint) == 0: if not constraint.valid(): raise ValueError, "Cannot add false constraints" allows the user to add a LpVariable as an objective constraints matrix Creation of a dict of dict: coefs[nomVariable][nomContrainte] = coefficient Most of the work is done here objective function right hand side bounds In MPS files, variables with no bounds (i.e. >= 0) are assumed BV by COIN and CPLEX. So we explicitly write a 0 lower bound in this case. 
returns the variables, in writing orderempty constraint add the dummyVar check if any names are longer than 100 characters check for repeated names Bounds on non-"positive" variables Note: XPRESS and CPLEX do not interpret integer variables without explicit bounds Integer non-binary variables Binary variables Special Ordered Setsreports the activitynot the slacktime itTODO Add a penalty variable to make problems elasticTODO add the ability to accept different status values i.e. infeasible etctime itcreate and add these variables but disabledadd a costless variable Note the reversal of the upbound and lowbound due to the nature of the variableactivate these variablescreate an objectiveThere are three cases if the constraint.sense is ==, <=, >=create a constraint the sets the upper bound of targetcreate a constraint the sets the lower bound of target uses code from LpFractionConstraintzero divided by zero will return 1if the denominator is zero the constraint is satisfied Utility functionsiterates though to near the enditerates though to near the endthis is a recursive function so end the recursion as follows find the maximum number of items in the dictionary~ try:~ except Exception, e:~ print e~ print "* Solver", s, "failed." Tests | 4,101 | en | 0.781879 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TargetDescription(Model):
    """Information about a Target. A target is the component that can process a
    specific type of Job.

    :param id: Unique target id.
    :type id: str
    :param name: Display name of this target.
    :type name: str
    :param description: A description about this target.
    :type description: str
    :param accepted_data_formats: List of data formats accepted by this
     target.
    :type accepted_data_formats: list[str]
    :param accepted_content_encodings: List of content encodings accepted by
     this target.
    :type accepted_content_encodings: list[str]
    """

    # msrest (de)serialization map: attribute name -> wire-format key and
    # msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'accepted_data_formats': {'key': 'acceptedDataFormats', 'type': '[str]'},
        'accepted_content_encodings': {'key': 'acceptedContentEncodings', 'type': '[str]'},
    }

    def __init__(self, *, id: str=None, name: str=None, description: str=None, accepted_data_formats=None, accepted_content_encodings=None, **kwargs) -> None:
        # NOTE: AutoRest-generated code; manual edits are lost on regeneration.
        super(TargetDescription, self).__init__(**kwargs)
        self.id = id
        self.name = name
        self.description = description
        self.accepted_data_formats = accepted_data_formats
        self.accepted_content_encodings = accepted_content_encodings
| src/quantum/azext_quantum/vendored_sdks/azure_mgmt_quantum/models/target_description_py3.py | 1,923 | Information about a Target. A target is the component that can process a
specific type of Job.
:param id: Unique target id.
:type id: str
:param name: Display name of this target.
:type name: str
:param description: A description about this target.
:type description: str
:param accepted_data_formats: List of data formats accepted by this
target.
:type accepted_data_formats: list[str]
:param accepted_content_encodings: List of content encodings accepted by
this target.
:type accepted_content_encodings: list[str]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 974 | en | 0.598483 |
# - * - encoding : utf - 8 - * -
# pylint: disable=fixme, line-too-long
"""
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
    '''Create csc, csr and coo sparse matrix from any of the above

    Arguments:
        X {array-like, csc, csr or coo sparse matrix}

    Returns:
        csc, csr, coo
    '''

    X_coo = X_csc = X_csr = None
    if scipy.sparse.isspmatrix_coo(X):
        X_coo = X
        X_csr = X_coo.tocsr(True)
        X_csc = X_coo.tocsc(True)
    elif scipy.sparse.isspmatrix_csr(X):
        X_csr = X
        # bug fix: the two conversions below were swapped in the original
        # (tocoo was assigned to X_csc and tocsc to X_coo), so CSR input
        # produced wrongly-formatted matrices
        X_csc = X_csr.tocsc(True)
        X_coo = X_csr.tocoo(True)
    elif scipy.sparse.isspmatrix_csc(X):
        X_csc = X
        X_csr = X_csc.tocsr(True)
        X_coo = X_csc.tocoo(True)
    else:
        assert False, "only coo, csc and csr sparse matrixes are supported"
    return X_csc, X_csr, X_coo
class FactorizationH2O(object):
    '''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.

    Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
    into a m-by-f and a f-by-n matrices.

    Parameters
    ----------
    f int
        decomposition size
    lambda_ float
        lambda regularization
    max_iter int, default: 100
        number of training iterations
    double_precision bool, default: False
        use double precision, not yet supported
    thetaT {array-like} shape (n, f), default: None
        initial theta matrix
    XT {array-like} shape (m, f), default: None
        initial XT matrix
    random_state int, default: 1234

    Attributes
    ----------
    XT {array-like} shape (m, f)
        XT matrix contains user's features
    thetaT {array-like} shape (n, f)
        transposed theta matrix, item's features

    Warnings
    --------
    Matrixes ``XT`` and ``thetaT`` may contain nan elements. This is because in some datasets,
    there are users or items with no ratings in training set. That results in solutions of
    a system of linear equations becomes nan. Such elements can be easily removed with numpy
    functions like numpy.nan_to_num, but existence of them may be useful for troubleshooting
    purposes.

    '''

    def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
        assert not double_precision, 'double precision is not yet supported'
        # the CUDA kernels tile over the factor dimension in chunks of 10
        assert f % 10 == 0, 'f has to be a multiple of 10'
        self.f = f
        self.lambda_ = lambda_
        self.double_precision = double_precision
        self.dtype = np.float64 if self.double_precision else np.float32
        self.thetaT = thetaT
        self.XT = XT
        self.max_iter = max_iter
        self.random_state = random_state

    def _load_lib(self):
        # lazy import so the GPU shared library is only required when the
        # solver is actually used
        from ..libs.lib_utils import GPUlib

        gpu_lib = GPUlib().get(1)
        return gpu_lib

    def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
        #pylint: disable=unused-argument
        '''Learn model from rating matrix X.

        Parameters
        ----------
        X {array-like, sparse matrix}, shape (m, n)
            Data matrix to be decomposed.
        y None
            Ignored
        X_test {array-like, coo sparse matrix}, shape (m, n)
            Data matrix for cross validation.
        X_BATCHES int, default: 1
            Batches to split XT, increase this parameter in case out of memory error.
        THETA_BATCHES int, default: 1
            Batches to split theta, increase this parameter in case out of memory error.
        early_stopping_rounds int, default: None
            Activates early stopping. Cross validation error needs to decrease
            at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
            Returns the model from the last iteration (not the best one). If early stopping occurs,
            the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
        verbose bool, default: False
            Prints training and validation score(if applicable) on each iteration.
        scores {list}
            List of tuples with train, cv score for every iteration.

        Returns
        -------
        self : returns an instance of self.

        '''
        csc_X, csr_X, coo_X = _get_sparse_matrixes(X)

        if early_stopping_rounds is not None:
            assert X_test is not None, 'X_test is mandatory with early stopping'
        # bug fix: initialize coo_X_test unconditionally.  The original only
        # bound it inside the ``if X_test is not None`` branch, so calling
        # fit() without X_test crashed with UnboundLocalError at the
        # ``if coo_X_test is None`` check below.
        coo_X_test = None
        if X_test is not None:
            assert scipy.sparse.isspmatrix_coo(
                X_test), 'X_test must be a coo sparse scipy matrix'
            assert X.shape == X_test.shape
            assert X_test.dtype == self.dtype
            assert X.dtype == self.dtype
            coo_X_test = X_test

        lib = self._load_lib()
        # bind the precision-specific entry points of the native library once
        if self.double_precision:
            make_data = lib.make_factorization_data_double
            run_step = lib.run_factorization_step_double
            factorization_score = lib.factorization_score_double
            copy_fecatorization_result = lib.copy_fecatorization_result_double
            free_data = lib.free_data_double
        else:
            make_data = lib.make_factorization_data_float
            run_step = lib.run_factorization_step_float
            factorization_score = lib.factorization_score_float
            copy_fecatorization_result = lib.copy_fecatorization_result_float
            free_data = lib.free_data_float

        m = coo_X.shape[0]
        n = coo_X.shape[1]
        nnz = csc_X.nnz
        if coo_X_test is None:
            nnz_test = 0
        else:
            nnz_test = coo_X_test.nnz

        rs = np.random.RandomState(self.random_state)

        # random initialization of the factor matrices unless supplied
        if self.thetaT is None:
            self.thetaT = rs.rand(n, self.f).astype(self.dtype)
        else:
            assert self.thetaT.dtype == self.dtype

        if self.XT is None:
            self.XT = rs.rand(m, self.f).astype(self.dtype)
        else:
            assert self.XT.dtype == self.dtype

        # device-side handles, filled in by make_data below
        csrRowIndexDevicePtr = None
        csrColIndexDevicePtr = None
        csrValDevicePtr = None
        cscRowIndexDevicePtr = None
        cscColIndexDevicePtr = None
        cscValDevicePtr = None
        cooRowIndexDevicePtr = None
        cooColIndexDevicePtr = None
        cooValDevicePtr = None
        thetaTDevice = None
        XTDevice = None
        cooRowIndexTestDevicePtr = None
        cooColIndexTestDevicePtr = None
        cooValTestDevicePtr = None

        status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \
            cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \
            cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \
            thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \
            cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data( # pylint: disable=W0212
                m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,
                csc_X.indices, csc_X.indptr, csc_X.data,
                coo_X.row, coo_X.col, coo_X.data,
                self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,
                coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,
                csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,
                cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,
                thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,
                cooColIndexTestDevicePtr, cooValTestDevicePtr)

        assert status == 0, 'Failure uploading the data'

        self.best_train_score = np.inf
        self.best_cv_score = np.inf
        self.best_iteration = -1
        cv_score = train_score = np.inf

        for i in range(self.max_iter):
            # one ALS update of both factor matrices
            # NOTE(review): the returned status is not checked here, matching
            # the original behavior
            status = run_step(m,
                              n,
                              self.f,
                              nnz,
                              self.lambda_,
                              csrRowIndexDevicePtr,
                              csrColIndexDevicePtr,
                              csrValDevicePtr,
                              cscRowIndexDevicePtr,
                              cscColIndexDevicePtr,
                              cscValDevicePtr,
                              thetaTDevice,
                              XTDevice,
                              X_BATCHES,
                              THETA_BATCHES)
            if verbose or scores is not None:
                result = factorization_score(m,
                                             n,
                                             self.f,
                                             nnz,
                                             self.lambda_,
                                             thetaTDevice,
                                             XTDevice,
                                             cooRowIndexDevicePtr,
                                             cooColIndexDevicePtr,
                                             cooValDevicePtr)
                train_score = result[0]
            if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):
                result = factorization_score(m,
                                             n,
                                             self.f,
                                             nnz_test,
                                             self.lambda_,
                                             thetaTDevice,
                                             XTDevice,
                                             cooRowIndexTestDevicePtr,
                                             cooColIndexTestDevicePtr,
                                             cooValTestDevicePtr)
                cv_score = result[0]
            if verbose:
                print("iteration {0} train: {1} cv: {2}".format(
                    i, train_score, cv_score))
            if scores is not None:
                scores.append((train_score, cv_score))

            if early_stopping_rounds is not None:
                if self.best_cv_score > cv_score:
                    self.best_cv_score = cv_score
                    self.best_train_score = train_score
                    self.best_iteration = i
                if (i - self.best_iteration) > early_stopping_rounds:
                    if verbose:
                        print('best iteration:{0} train: {1} cv: {2}'.format(
                            self.best_iteration, self.best_train_score, self.best_cv_score))
                    break

        # release all device-side buffers
        lib.free_data_int(csrRowIndexDevicePtr)
        lib.free_data_int(csrColIndexDevicePtr)
        free_data(csrValDevicePtr)
        lib.free_data_int(cscRowIndexDevicePtr)
        lib.free_data_int(cscColIndexDevicePtr)
        free_data(cscValDevicePtr)
        lib.free_data_int(cooRowIndexDevicePtr)
        lib.free_data_int(cooColIndexDevicePtr)
        free_data(cooValDevicePtr)
        lib.free_data_int(cooRowIndexTestDevicePtr)
        lib.free_data_int(cooColIndexTestDevicePtr)
        free_data(cooValTestDevicePtr)
        # copy the learned factors back to host memory before freeing them
        copy_fecatorization_result(self.XT, XTDevice, m * self.f)
        copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)
        free_data(thetaTDevice)
        free_data(XTDevice)
        return self

    def predict(self, X):
        '''Predict none zero elements of coo sparse matrix X according to the fitted model.

        Parameters
        ----------
        X {array-like, sparse coo matrix} shape (m, n)
            Data matrix in coo format. Values are ignored.

        Returns
        -------
        {array-like, sparse coo matrix} shape (m, n)
            Predicted values.

        '''
        assert self.XT is not None and self.thetaT is not None, 'tranform is invoked on an unfitted model'
        assert scipy.sparse.isspmatrix_coo(
            X), 'convert X to coo sparse matrix'
        assert X.dtype == self.dtype
        # gather the user and item feature rows for every requested cell and
        # take their dot products
        a = np.take(self.XT, X.row, axis=0)
        b = np.take(self.thetaT, X.col, axis=0)
        val = np.sum(a * b, axis=1)
        return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)
| src/interface_py/h2o4gpu/solvers/factorization.py | 12,493 | Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.
Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
into a m-by-f and a f-by-n matrices.
Parameters
----------
f int
decomposition size
lambda_ float
lambda regularization
max_iter int, default: 100
number of training iterations
double_precision bool, default: False
use double precision, not yet supported
thetaT {array-like} shape (n, f), default: None
initial theta matrix
XT {array-like} shape (m, f), default: None
initial XT matrix
random_state int, default: 1234
Attributes
----------
XT {array-like} shape (m, f)
XT matrix contains user's features
thetaT {array-like} shape (n, f)
transposed theta matrix, item's features
Warnings
--------
Matrixes ``XT`` and ``thetaT`` may contain nan elements. This is because in some datasets,
there are users or items with no ratings in training set. That results in solutions of
a system of linear equations becomes nan. Such elements can be easily removed with numpy
functions like numpy.nan_to_num, but existence of them may be useful for troubleshooting
purposes.
Create csc, csr and coo sparse matrix from any of the above
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo
Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self.
Predict none zero elements of coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values.
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
- * - encoding : utf - 8 - * - pylint: disable=fixme, line-too-longpylint: disable=unused-argument pylint: disable=W0212 | 2,938 | en | 0.681597 |
# Copyright 2021 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
import importlib
import logging
import os
import sys
from cinfo.config import Config
from cinfo.exceptions import usage as usage_exc
LOG = logging.getLogger(__name__)
class Triager(object):
    """Pulls CI job information from a configured source and publishes it
    to a configured target via pluggable driver modules."""

    def __init__(self, config_file, source_name=None, target_name=None):
        self.config_file = config_file
        self.source_name = source_name
        self.target_name = target_name
        self.workspace = os.path.join(os.path.expanduser('~'), '.cinfo')

    def load_config(self):
        """Load the configuration file and cache its sources and targets."""
        self.config = Config(file=self.config_file)
        self.config.load()
        self.sources = self.config.data['sources']
        self.targets = self.config.data['targets']

    def pull(self):
        """Import the source driver and pull job data from the source."""
        LOG.info("{}: {}".format(
            crayons.yellow("pulling information from the source"),
            self.source_name))
        try:
            # driver module cinfo.drivers.<type> exposes a class <Type>
            driver = getattr(importlib.import_module(
                "cinfo.drivers.{}".format(self.source['type'])),
                self.source['type'].capitalize())()
        except KeyError:
            LOG.error("{}: {}...exiting".format(
                crayons.red("No such source"), self.source))
            sys.exit(2)
        self.data = driver.pull(self.source['url'],
                                jobs=self.source['jobs'])
        if not self.data:
            LOG.warning("{}".format(crayons.red(
                "I've pulled nothing! outrageous!")))
        self.write(self.data)

    def publish(self):
        """Import the target driver and publish the pulled data."""
        LOG.info("{}: {}".format(
            crayons.yellow("publishing data to target"),
            self.target['url']))
        try:
            publisher = getattr(importlib.import_module(
                "cinfo.drivers.{}".format(self.target['type'])),
                self.target['type'].capitalize())()
        except KeyError:
            LOG.error("{}: {}...exiting".format(
                crayons.red("No such target"), self.target))
            sys.exit(2)
        publisher.publish(self.data)

    def write(self, data):
        # placeholder: persisting pulled data into self.workspace is not
        # implemented yet
        pass

    def validate(self):
        """Resolve which source and target to use; exit on ambiguity or
        unknown names."""
        if len(self.sources.keys()) > 1 and not self.source_name:
            LOG.error(usage_exc.multiple_options("source"))
            sys.exit(2)
        elif not self.source_name:
            self.source = list(self.sources.values())[0]
        else:
            try:
                self.source = self.sources[self.source_name]
            except KeyError:
                LOG.error(usage_exc.missing_value(
                    self.source_name, [key for key in self.sources.keys()]))
                sys.exit(2)
        # bug fix: the original tested ``not self.target`` here, but
        # ``self.target`` is not defined until below (AttributeError when
        # multiple targets are configured); the user-supplied name is
        # ``self.target_name``
        if len(self.targets.keys()) > 1 and not self.target_name:
            LOG.error(usage_exc.multiple_options("target"))
            sys.exit(2)
        elif not self.target_name:
            self.target = list(self.targets.values())[0]
        else:
            try:
                self.target = self.targets[self.target_name]
            except KeyError:
                # mirror the source branch: report the valid target names
                LOG.error(usage_exc.missing_value(
                    self.target_name, [key for key in self.targets.keys()]))
                sys.exit(2)

    def run(self):
        """Full pipeline: load config, validate the selection, pull, publish."""
        self.load_config()
        self.validate()
        self.pull()
        self.publish()
| cinfo/triager.py | 3,590 | Copyright 2021 Arie Bregman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 577 | en | 0.860829 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
class TestAppsV1beta1DeploymentList(unittest.TestCase):
    """ AppsV1beta1DeploymentList unit test stubs """

    def setUp(self):
        # no fixtures needed for this auto-generated stub
        pass

    def tearDown(self):
        # nothing to clean up
        pass

    def testAppsV1beta1DeploymentList(self):
        """
        Test AppsV1beta1DeploymentList
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.apps_v1beta1_deployment_list.AppsV1beta1DeploymentList()
        pass
if __name__ == '__main__':
    # allow running this test module directly with the stdlib runner
    unittest.main()
| kubernetes/test/test_apps_v1beta1_deployment_list.py | 1,035 | AppsV1beta1DeploymentList unit test stubs
Test AppsV1beta1DeploymentList
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 FIXME: construct object with mandatory attributes with example valuesmodel = kubernetes.client.models.apps_v1beta1_deployment_list.AppsV1beta1DeploymentList() | 458 | en | 0.584026 |
"""Top-level package for stringdb. Imports the api module"""
from .api import *
__author__ = """Peter C DeWeirdt"""
__email__ = 'petedeweirdt@gmail.com'
__version__ = '0.1.5'
| stringdb/__init__.py | 176 | Top-level package for stringdb. Imports the api module | 54 | en | 0.339824 |
"""
python version compatibility code
"""
import functools
import inspect
import io
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
import attr
import py
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
# Sentinel used to distinguish "argument not provided" from ``None``.
NOTSET = object()

# Name of the exception raised for a missing module; ``ModuleNotFoundError``
# only exists from Python 3.6 onwards.
MODULE_NOT_FOUND_ERROR = (
    "ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)

# ``importlib.metadata`` entered the standard library in 3.8; older
# interpreters use the ``importlib_metadata`` backport under the same alias.
if sys.version_info >= (3, 8):
    from importlib import metadata as importlib_metadata  # noqa: F401
else:
    import importlib_metadata  # noqa: F401
def _format_args(func):
    """Render *func*'s call signature as a string, e.g. ``"(a, b=1)"``."""
    sig = signature(func)
    return str(sig)
# The type of re.compile objects is not exposed in Python.
# (The public ``re.Pattern`` name only exists from Python 3.8.)
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
    """Return True for generator functions that are not also coroutines."""
    if iscoroutinefunction(func):
        # async generators / decorated coroutines are not plain generators
        return False
    return inspect.isgeneratorfunction(func)
def iscoroutinefunction(func):
    """
    Return True if func is a coroutine function (a function defined with async
    def syntax, and doesn't contain yield), or a function decorated with
    @asyncio.coroutine.

    Note: copied and modified from Python 3.5's builtin couroutines.py to avoid
    importing asyncio directly, which in turns also initializes the "logging"
    module as a side-effect (see issue #8).
    """
    if inspect.iscoroutinefunction(func):
        return True
    # functions decorated with @asyncio.coroutine carry this marker
    return getattr(func, "_is_coroutine", False)
def getlocation(function, curdir=None):
    """Return the definition location of *function* as ``"path:lineno"``.

    The line number is 1-based; the path is made relative to *curdir*
    when the file lives below it.
    """
    real = get_real_func(function)
    path = py.path.local(inspect.getfile(real))
    first_line = real.__code__.co_firstlineno
    if curdir is not None:
        relative = path.relto(curdir)
        if relative:
            path = relative
    return "%s:%d" % (path, first_line + 1)
def num_mock_patch_args(function):
    """Return the number of arguments consumed by ``mock.patch`` decorations."""
    patchings = getattr(function, "patchings", None)
    if not patchings:
        return 0
    # DEFAULT sentinels from whichever mock module(s) happen to be imported.
    sentinels = [
        getattr(sys.modules.get(mod), "DEFAULT", object())
        for mod in ("mock", "unittest.mock")
    ]
    count = 0
    for patching in patchings:
        if not patching.attribute_name and any(
            patching.new is sentinel for sentinel in sentinels
        ):
            count += 1
    return count
def getfuncargnames(function, *, name: str = "", is_method=False, cls=None):
    """Return the names of a function's mandatory arguments.

    Mandatory means every argument that:
    * isn't bound to an instance or type as in instance or class methods;
    * has no default value;
    * isn't bound with functools.partial;
    * isn't replaced with a mock.

    The ``is_method`` and ``cls`` arguments indicate that the function should
    be treated as a bound method even though it's not, unless (in the case of
    ``cls`` only) the function is a static method.

    ``name`` should be the original name under which the function was collected.
    """
    # @RonnyPfannschmidt: This function should be refactored when we
    # revisit fixtures. The fixture mechanism should ask the node for
    # the fixture names, and not try to obtain directly from the
    # function object well after collection has occurred.
    try:
        parameters = signature(function).parameters
    except (ValueError, TypeError) as e:
        fail(
            "Could not determine arguments of {!r}: {}".format(function, e),
            pytrace=False,
        )
    # Keep only positional-or-keyword / keyword-only parameters without defaults.
    wanted_kinds = (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    arg_names = tuple(
        param.name
        for param in parameters.values()
        if param.kind in wanted_kinds and param.default is Parameter.empty
    )
    if not name:
        name = function.__name__
    # When the callable is (to be treated as) a bound method, drop the
    # implicit first parameter -- except when it is a static method.
    if is_method or (
        cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
    ):
        arg_names = arg_names[1:]
    # Remove any leading names that will be replaced with mocks.
    if hasattr(function, "__wrapped__"):
        arg_names = arg_names[num_mock_patch_args(function):]
    return arg_names
# contextlib.nullcontext only exists from Python 3.7; provide a minimal
# stand-in (a no-op context manager) on older interpreters.
if sys.version_info < (3, 7):
    @contextmanager
    def nullcontext():
        yield
else:
    from contextlib import nullcontext # noqa
def get_default_arg_names(function):
    """Return the names of *function*'s arguments that carry default values.

    Intentionally mirrors the filtering at the beginning of
    ``getfuncargnames``, but selects the parameters excluded there because
    they have defaults.
    """
    accepted_kinds = (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    names = []
    for param in signature(function).parameters.values():
        if param.kind in accepted_kinds and param.default is not Parameter.empty:
            names.append(param.name)
    return tuple(names)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
# Types treated as "string-like" throughout the code base.
STRING_TYPES = bytes, str
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val):
    """If *val* is pure ASCII, return it as a str(). Otherwise, escape
    bytes objects into a sequence of escaped bytes:

        b'\\xc3\\xb4\\xc5\\xd6' -> '\\\\xc3\\\\xb4\\\\xc5\\\\xd6'

    and escape unicode objects into a sequence of escaped unicode ids, e.g.:

        '4\\\\nV\\\\U00043efa\\\\x0eMXWB\\\\x1e\\\\u3028\\\\u15fd\\\\xcd\\\\U0007d944'

    Note: the obvious ``v.decode('unicode-escape')`` would return valid
    utf-8 unicode if it finds it in the bytes, but we want escaped bytes for
    any byte, even if they happen to form a valid utf-8 string.
    """
    escaped = (
        _bytes_to_ascii(val)
        if isinstance(val, bytes)
        else val.encode("unicode_escape").decode("ascii")
    )
    return _translate_non_printable(escaped)
@attr.s
class _PytestWrapper:
    """Dummy wrapper around a function object for internal use only.
    Used to correctly unwrap the underlying function object
    when we are creating fixtures, because we wrap the function object ourselves with a decorator
    to issue warnings when the fixture function is called directly.
    """
    obj = attr.ib()  # the original, unwrapped function object
def get_real_func(obj):
    """Return the real function object behind *obj*, unwrapping
    functools.wraps / functools.partial layers.
    """
    start_obj = obj
    # Bound number of unwrap steps so a pathological __wrapped__ chain
    # cannot loop forever.
    for _ in range(100):
        # __pytest_wrapped__ is set by @pytest.fixture when wrapping the
        # fixture function to trigger a warning if it gets called directly
        # instead of by pytest: we don't want to unwrap further than this,
        # otherwise we lose useful wrappings like @mock.patch (#3774).
        candidate = getattr(obj, "__pytest_wrapped__", None)
        if isinstance(candidate, _PytestWrapper):
            obj = candidate.obj
            break
        candidate = getattr(obj, "__wrapped__", None)
        if candidate is None:
            break
        obj = candidate
    else:
        raise ValueError(
            "could not find real function of {start}\nstopped at {current}".format(
                start=saferepr(start_obj), current=saferepr(obj)
            )
        )
    if isinstance(obj, functools.partial):
        obj = obj.func
    return obj
def get_real_method(obj, holder):
    """Attempt to obtain the real function object that might be wrapping
    ``obj``, while at the same time returning a bound method to ``holder``
    if the original object was a bound method.
    """
    try:
        was_bound = hasattr(obj, "__func__")
        obj = get_real_func(obj)
    except Exception:  # pragma: no cover
        return obj
    if was_bound and callable(getattr(obj, "__get__", None)):
        # Re-bind the unwrapped function to the holder instance.
        obj = obj.__get__(holder)
    return obj
def getfslineno(obj):
    """Return the (filesystem path, line number) pair for *obj*."""
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    # Decorators may set "place_as" to report a different definition site.
    if hasattr(obj, "place_as"):
        obj = obj.place_as
    result = _pytest._code.getfslineno(obj)
    assert isinstance(result[1], int), obj
    return result
def getimfunc(func):
    """Return the underlying function of a bound method, or *func* itself."""
    return getattr(func, "__func__", func)
def safe_getattr(object, name, default):
    """Like getattr but return *default* upon any Exception or any
    OutcomeException.

    Attribute access can potentially fail for 'evil' Python objects; see
    issue #214. OutcomeException is caught because of #2490 (issue #580):
    new outcomes are derived from BaseException instead of Exception (for
    more details check #2707).
    """
    try:
        value = getattr(object, name, default)
    except TEST_OUTCOME:
        return default
    return value
def safe_isclass(obj):
    """Like ``inspect.isclass`` but never propagates an exception."""
    try:
        result = inspect.isclass(obj)
    except Exception:
        return False
    return result
# Names copied from the top-level pytest namespace onto the deprecated
# "pytest.collect" pseudo-module (see _setup_collect_fakemodule below).
COLLECT_FAKEMODULE_ATTRIBUTES = (
    "Collector",
    "Module",
    "Function",
    "Instance",
    "Session",
    "Item",
    "Class",
    "File",
    "_fillfuncargs",
)
def _setup_collect_fakemodule():
    """Create the ``pytest.collect`` pseudo-module, mirroring a fixed set of
    attributes from the top-level ``pytest`` namespace.
    """
    from types import ModuleType
    import pytest
    collect = ModuleType("pytest.collect")
    collect.__all__ = []  # used for setns
    for attr_name in COLLECT_FAKEMODULE_ATTRIBUTES:
        setattr(collect, attr_name, getattr(pytest, attr_name))
    pytest.collect = collect
class CaptureIO(io.TextIOWrapper):
    """In-memory text stream that records everything written to it."""

    def __init__(self):
        super().__init__(
            io.BytesIO(), encoding="UTF-8", newline="", write_through=True
        )

    def getvalue(self):
        """Return all text written so far, decoded from UTF-8."""
        return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr:
    """ helper class so that Metafunc, Function and FixtureRequest
    don't need to each define the "funcargnames" compatibility attribute.
    """
    @property
    def funcargnames(self):
        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
        # Imported lazily to avoid a module-load-time dependency cycle.
        import warnings
        from _pytest.deprecated import FUNCARGNAMES
        # stacklevel=2 points the warning at the attribute access site.
        warnings.warn(FUNCARGNAMES, stacklevel=2)
        return self.fixturenames
| src/_pytest/compat.py | 10,389 | helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'ôÅÖ' -> '\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
alias attribute for ``fixturenames`` for pre-2.3 compatibility
gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
importing asyncio directly, which in turns also initializes the "logging"
module as a side-effect (see issue #8).
return number of arguments used up by mock arguments (if any)
Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707)
Ignore any exception via isinstance on Python 3.
python version compatibility code
noqa: F401 noqa: F401 The type of re.compile objects is not exposed in Python. The parameters attribute of a Signature object contains an ordered mapping of parameter names to Parameter instances. This creates a tuple of the names of the parameters that don't have defaults. If this function should be treated as a bound method even though it's passed as an unbound method or function, remove the first parameter name. Remove any names that will be replaced with mocks. noqa Note: this code intentionally mirrors the code at the beginning of getfuncargnames, to get the arguments which were excluded from its result because they had default values __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function to trigger a warning if it gets called directly instead of by pytest: we don't want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (3774) pragma: no cover xxx let decorators etc specify a sane ordering used for setns | 3,836 | en | 0.795628 |
from neo4j import GraphDatabase
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor,as_completed,thread
import sys
import csv
from time import time
PRACTICAL = 'practical'
LOGICAL = 'logical'
NETONLY = 'netonly'
ALL = 'all'
PRIVS = 'privileged'
rans = None
def time_to_str(total_time):
    """Format a duration in seconds as ``HH:MM:SS.ss``."""
    total_minutes, seconds = divmod(total_time, 60)
    hours, minutes = divmod(int(total_minutes), 60)
    return "{:0>2}:{:0>2}:{:05.2f}".format(hours, minutes, seconds)
class ransomulator(object):
    """Simulates ransomware infection "waves" over a BloodHound Neo4j graph.

    Each wave expands the set of compromised computers by following the
    lateral-movement edges selected by the chosen simulation mode
    (logical / netonly / practical). Queries are fanned out over a thread
    pool, one starting computer per task.
    """
    def __init__(self,user,password,url,maxwaves,edges,simulate,start_hosts,workers=25):
        self.url = url
        self.username = user
        self.password = password
        self.use_encryption = False
        self.driver = None
        self.connected = False
        # Logical mode uses shortestPath, so a single wave already covers
        # all transitively reachable hosts.
        self.maxwaves = 1 if LOGICAL in simulate else maxwaves
        self.session = None
        self.edges = edges
        self.simulate = simulate
        self.workers = workers
        self.executor = ThreadPoolExecutor(max_workers=workers)
        self.start_hosts = start_hosts
    def connect(self):
        """Open (or re-open) the Neo4j driver; return True on success."""
        self.connected = False
        if self.driver is not None:
            self.driver.close()
        try:
            self.driver = GraphDatabase.driver(
                self.url, auth=(self.username, self.password), encrypted=self.use_encryption)
            self.connected = True
            print("Database Connection Successful.")
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to Exception would be safer.
        except:
            self.connected = False
            print("Database Connection Failed.")
        return self.connected
    def get_start_computers(self):
        """Return the computer names used as infection starting points.

        Either every computer node, or only those with a privileged user
        session, depending on ``start_hosts``.
        """
        if(self.start_hosts == ALL):
            print("Collecting all computer nodes from database...")
            result = self.session.run("MATCH (c:Computer) RETURN DISTINCT id(c) AS computer_id, c.name AS computer_name")
        else:
            print("Collecting computer nodes who have privileged user session from database...")
            result = self.session.run("MATCH(g:Group)-[:AdminTo]->(c:Computer) WITH DISTINCT g MATCH ShortestPath((u:User)-[:MemberOf*0..]->(g)) WITH DISTINCT u as privU MATCH(c: Computer)-[: HasSession]->(privU) RETURN DISTINCT c.name AS computer_name")
        computers = []
        for record in result:
            computers.append(record["computer_name"])
        return computers
    def count_computers(self):
        """Return the total number of computer nodes in the database."""
        result = self.session.run("MATCH (c:Computer) RETURN count(DISTINCT id(c)) as num_computers")
        for record in result:
            return record['num_computers']
    def generate_wave_query_string(self):
        """Return the Cypher query for one wave, per the simulation mode.

        The query takes a ``$last_wave`` list parameter and returns the
        newly reachable computers as ``next_wave``. Returns None for an
        unknown mode.
        """
        if LOGICAL in self.simulate:
            return 'MATCH shortestPath((src:Computer)-[: HasSession | MemberOf | AdminTo * 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
        elif NETONLY in self.simulate:
            return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
        elif PRACTICAL in self.simulate:
            return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
        else:
            return None
    def simulate_wave_for_computer(self,computer_name):
        """Simulate up to ``maxwaves`` infection waves starting at one host.

        Returns (total number of compromised hosts, list of per-wave sizes
        as strings). Runs in a worker thread, using its own session.
        """
        last_wave = [computer_name]
        computer_waves = [computer_name]
        waves = []
        total = 0
        for wave in range(self.maxwaves):
            w_str = self.generate_wave_query_string()
            mysession = self.driver.session()
            result = mysession.run(w_str,last_wave=last_wave)
            for record in result:
                next_wave = record["next_wave"]
                wave_size = len(next_wave)
                total += wave_size
                waves.append(str(wave_size))
                last_wave += next_wave
                # An empty wave means the infection cannot spread further.
                if wave_size == 0:
                    mysession.close()
                    return total,waves
            # NOTE(review): computer_waves is accumulated but never returned.
            computer_waves.append(last_wave.copy())
            mysession.close()
        return total,waves
    def somulate(self):
        """Run the simulation for every start host and aggregate statistics.

        (Name kept for backward compatibility -- presumably a typo of
        "simulate".) Returns a 7-tuple: (waves sorted by total desc,
        max wave length, avg wave length, max total, number of hosts with
        paths, number of computers in environment, fragility score %).
        Returns None if an unexpected error occurs.
        """
        waves_dict = {}
        max_wavelen = 0
        avg_wavelen = 0
        max_total = 0
        total_comps= 0
        computers_in_environment = 0
        score = 0
        try:
            if not self.connected:
                print("Can't simulate without a valid DB connection!")
            else:
                self.session = self.driver.session()
                computers = self.get_start_computers()
                print("Running simulation...")
                computers_in_environment = self.count_computers()
                # Fan out one simulation task per start computer.
                future_to_totals_waves_pairs = {self.executor.submit(self.simulate_wave_for_computer,computer): computer for computer in computers}
                for future in as_completed(future_to_totals_waves_pairs):
                    computer = future_to_totals_waves_pairs[future]
                    try:
                        total_waves_pair = future.result()
                        total = total_waves_pair[0]
                        waves = total_waves_pair[1]
                        score += total
                        if total > 0:
                            total_comps += 1
                            if len(waves) > max_wavelen:
                                max_wavelen = len(waves)
                            if total > max_total: max_total = total
                            avg_wavelen += len(waves)
                            waves_dict[computer] = {"total":total,"waves":waves}
                            print("{},{},{}".format(computer,str(total),",".join(waves)))
                        else:
                            waves_dict[computer] = {"total": 0, "waves": ['0']}
                            print("{} - no waves".format(computer))
                    except Exception as exc:
                        print('Exception while processing %s: %s' % (computer, exc))
            if total_comps > 0:
                avg_wavelen = avg_wavelen / total_comps
                # Fragility: compromised-host pairs relative to the maximum possible.
                score = round((score / (computers_in_environment**2))*100)
            else:
                avg_wavelen = 0
            sorted_waves = {k: v for k,v in sorted(waves_dict.items(),key=lambda item: item[1]["total"],reverse=True)}
            return sorted_waves,max_wavelen,avg_wavelen,max_total,total_comps,computers_in_environment,score
        except Exception as err:
            print("Error during simulation: {}".format(err))
    def get_waves_for_computer(self, computer):
        """Return the accumulated waves for a single computer.

        NOTE(review): simulate_wave_for_computer returns a 2-tuple, so the
        3-name unpack below always raises ValueError, which is caught and
        printed -- this method currently always returns None.
        """
        try:
            if not self.connected:
                print("Can't create query without a valid DB connection!")
            else:
                self.session = self.driver.session()
                total,waves,computer_waves = self.simulate_wave_for_computer(computer)
                return computer_waves
        except Exception as err:
            print("Error during simulation: {}".format(err))
    def stop(self):
        """Abort outstanding worker threads on user interrupt.

        NOTE(review): clears ThreadPoolExecutor/thread private internals
        (_threads, _threads_queues) -- fragile across Python versions.
        """
        print("Stopping execution...")
        self.executor._threads.clear()
        thread._threads_queues.clear()
        print("Execution stopped...")
def output_csv(file_path, wv_dict, max_wave_len):
    """Write simulation results to *file_path* as CSV.

    One row per host: hostname, total compromised count, then one column
    per wave ("wave_1" .. "wave_<max_wave_len>").
    """
    print("Writing results to file {}".format(file_path))
    header = ["Hostname", "Total"] + [
        "wave_{}".format(index + 1) for index in range(max_wave_len)
    ]
    with open(file_path, "w", encoding="utf-8", newline="") as csvfile:
        writer = csv.writer(csvfile, delimiter=",")
        writer.writerow(header)
        for hostname, info in wv_dict.items():
            writer.writerow([hostname, info["total"]] + info["waves"])
def simulate(user,password,url,maxwaves,edges,simulate,workers,start_hosts):
    """Run a full infection simulation and print a summary report.

    Connects to the Neo4j database, simulates infection waves from every
    start host, optionally writes a CSV report (reads the module-global
    ``outfile`` set in ``__main__``), and prints aggregate statistics.

    Fix: previously the summary block ran even when the DB connection
    failed, raising NameError on the then-undefined statistics variables;
    we now return early on connection failure.
    """
    global rans
    start_time = time()
    rans = ransomulator(user, password, url, maxwaves, edges, simulate, start_hosts, workers)
    if not rans.connect():
        print("Error during connection...")
        return
    sorted_waves, max_wavelen, avg_wavelen, max_total, total_comps, num_of_computers, score = rans.somulate()
    # NOTE: "outfile" is a module-level global assigned in __main__ --
    # TODO: pass it explicitly as a parameter.
    if outfile:
        output_csv(outfile, sorted_waves, max_wavelen)
    elapsed = time_to_str(time() - start_time)
    print("Ransomulator done: {}".format(elapsed))
    print("-----------------------------")
    print("Fragility score:\t{}%".format(score))
    print("Max number of computers:\t{}".format(num_of_computers))
    print("Total computers with paths:\t{}".format(total_comps))
    print("Max compromised :\t{}".format(max_total))
    print("Avg wave length:\t{}".format(round(avg_wavelen, 1)))
    print("Max wave length:\t{}".format(max_wavelen))
def create_query(computer,user, password, url, maxwaves, edges, simulate):
    """Return the raw Cypher query string for the chosen simulation mode.

    Returns None for an unrecognized mode. Only ``simulate`` is consulted;
    the other parameters are accepted for interface symmetry with
    ``simulate()``.
    """
    mode_to_query = {
        LOGICAL: 'MATCH shortestPath((src:Computer)-[:HasSession|MemberOf|AdminTo* 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave',
        NETONLY: 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave',
        PRACTICAL: 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave',
    }
    # Insertion order matches the original elif chain: logical, netonly, practical.
    for mode, query in mode_to_query.items():
        if mode in simulate:
            return query
    return None
def parse_args():
    """Parse command-line options.

    Supports both "-" and "/" prefix styles. The optional ``query``
    subcommand switches the program into query-generation mode (prints a
    Cypher query for one computer instead of running a simulation).
    """
    parser = ArgumentParser(prog=ArgumentParser().prog,prefix_chars="-/",add_help=False,description="Simulate ransomware infection through Bloodhound's database")
    parser.add_argument('-h', '--help', '/?', '/h', '/help', action='help', help='show this help message and exit')
    parser.add_argument('-s', '--simulate', metavar='', dest='simulate', choices=[PRACTICAL, LOGICAL, NETONLY],default=LOGICAL,help='type of lateral movement to simulate. choices: [%(choices)s], (default: logical).')
    parser.add_argument('-c', '--computers', metavar='', dest='computers', choices=[ALL,PRIVS], default=ALL, help='which computer edges should be considered as the starting point. choices: [%(choices)s], (default: all)')
    parser.add_argument("-u", "--user", dest='user', metavar='', help="Neo4j DB user name", type=str, default="neo4j")
    parser.add_argument("-p", "--pass", dest='password', metavar='', help="Neo4j DB password", type=str,default="neo4j")
    parser.add_argument("-l", "--url", dest="url", metavar="", help="Neo4j URL", default="bolt://localhost:7687",type=str)
    parser.add_argument("-m", "--maxwaves", dest="maxwaves", type=int, default=3,help="maximal number of simulated attack waves")
    parser.add_argument("-o", "--output", dest='out_file', metavar='', help="output file name", type=str,default=None)
    parser.add_argument("-e","--edges", dest="edges", type=str,default="MemberOf",help="Logical edges between hosts")
    parser.add_argument("-w","--workers",dest="workers",type=int,default=25,help="Number of paraller queries to the database")
    subprasers = parser.add_subparsers(dest="command")
    # sim_parser = subprasers.add_parser('simulate',help='simulate infection waves')
    q_parser = subprasers.add_parser('query',help='generate Cypher query')
    q_parser.add_argument("computer", type=str, help="starting from computer name")
    # parser.add_argument("-a", "--all", dest="do_all", action="store_true", help="Run through all nodes")
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    try:
        args = parse_args()
        command = args.command
        sim = args.simulate
        user = args.user
        password = args.password
        url = args.url
        maxwaves = args.maxwaves
        edges = args.edges
        # NOTE: "outfile" is read as a module-level global by simulate().
        outfile = args.out_file
        workers = args.workers
        start_hosts = args.computers
        # "query" subcommand: print the Cypher query instead of simulating.
        if command and "query" in command:
            computer = args.computer
            print(create_query(computer,user, password, url, maxwaves, edges, sim))
        else:
            simulate(user, password, url, maxwaves, edges, sim,workers,start_hosts)
    except KeyboardInterrupt:
        print("Interrupted! exiting...")
        # Best-effort shutdown of outstanding worker threads.
        if rans:
            rans.stop()
    except Exception as err:
        print("Exception thrown: {}".format(err))
    finally:
        sys.exit()
| Ransomulator/ransomulator.py | 12,634 | sim_parser = subprasers.add_parser('simulate',help='simulate infection waves') parser.add_argument("-a", "--all", dest="do_all", action="store_true", help="Run through all nodes") | 179 | en | 0.122901 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
import pytest
import aiohttp
import asyncio
import requests
import time
import unittest
import os
from datetime import datetime, timedelta
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError,
ClientAuthenticationError)
from azure.core.pipeline.transport import AsyncioRequestsTransport
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob.aio import (
BlobServiceClient,
ContainerClient,
BlobClient,
upload_blob_to_url,
download_blob_from_url,
)
from azure.storage.blob import (
generate_blob_sas,
generate_account_sas,
generate_container_sas,
BlobType,
StorageErrorCode,
BlobSasPermissions,
ContainerSasPermissions,
ContentSettings,
BlobProperties,
RetentionPolicy,
AccessPolicy,
ResourceTypes,
AccountSasPermissions,
StandardBlobTier)
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from _shared.testcase import GlobalStorageAccountPreparer
from _shared.asynctestcase import AsyncStorageTestCase
# ------------------------------------------------------------------------------
# Name prefixes passed to get_resource_name() when generating test resources.
TEST_CONTAINER_PREFIX = 'container'
TEST_BLOB_PREFIX = 'blob'
# ------------------------------------------------------------------------------
class AiohttpTestTransport(AioHttpTransport):
    """Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
    """
    async def send(self, request, **config):
        response = await super(AiohttpTestTransport, self).send(request, **config)
        # Replayed responses may carry plain-dict headers; normalize them back
        # to the case-insensitive multidict the SDK pipeline expects.
        if not isinstance(response.headers, CIMultiDictProxy):
            response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
            response.content_type = response.headers.get("content-type")
        return response
class StorageCommonBlobTestAsync(AsyncStorageTestCase):
    # --Helpers-----------------------------------------------------------------
    async def _setup(self, name, key):
        """Create the service client, a test container and 1KB of test data."""
        self.bsc = BlobServiceClient(self.account_url(name, "blob"), credential=key, transport=AiohttpTestTransport())
        self.container_name = self.get_resource_name('utcontainer')
        self.byte_data = self.get_random_bytes(1024)
        # Only touch the live service when not replaying recordings.
        if self.is_live:
            container = self.bsc.get_container_client(self.container_name)
            try:
                await container.create_container(timeout=5)
            except ResourceExistsError:
                pass
    async def _setup_remote(self, name, key):
        """Create a second service client used as the "remote" copy source."""
        self.bsc2 = BlobServiceClient(self.account_url(name, "blob"), credential=key)
        self.remote_container_name = 'rmt'
def _teardown(self, FILE_PATH):
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
    def _get_container_reference(self):
        """Return a fresh, uniquely-named container name."""
        return self.get_resource_name(TEST_CONTAINER_PREFIX)
    def _get_blob_reference(self):
        """Return a fresh, uniquely-named blob name."""
        return self.get_resource_name(TEST_BLOB_PREFIX)
    async def _create_block_blob(self):
        """Upload self.byte_data as a new block blob; return its name."""
        blob_name = self._get_blob_reference()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        await blob.upload_blob(self.byte_data, length=len(self.byte_data))
        return blob_name
    async def _create_remote_container(self):
        """Create (idempotently) a container on the secondary account."""
        self.remote_container_name = self.get_resource_name('remotectnr')
        remote_container = self.bsc2.get_container_client(self.remote_container_name)
        try:
            await remote_container.create_container()
        except ResourceExistsError:
            pass
    async def _create_remote_block_blob(self, blob_data=None):
        """Upload *blob_data* (default: 8MB pattern) to the secondary account."""
        if not blob_data:
            blob_data = b'12345678' * 1024 * 1024
        source_blob_name = self._get_blob_reference()
        source_blob = self.bsc2.get_blob_client(self.remote_container_name, source_blob_name)
        await source_blob.upload_blob(blob_data, overwrite=True)
        return source_blob
    async def _wait_for_async_copy(self, blob):
        """Poll until a server-side copy leaves 'pending'; fail after ~60s."""
        count = 0
        props = await blob.get_blob_properties()
        while props.copy.status == 'pending':
            count = count + 1
            if count > 10:
                self.fail('Timed out waiting for async copy to complete.')
            self.sleep(6)
            props = await blob.get_blob_properties()
        return props
    async def _enable_soft_delete(self):
        """Turn on blob soft-delete (2-day retention) for the account."""
        delete_retention_policy = RetentionPolicy(enabled=True, days=2)
        await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
        # wait until the policy has gone into effect
        if self.is_live:
            time.sleep(30)
    async def _disable_soft_delete(self):
        """Turn blob soft-delete back off for the account."""
        delete_retention_policy = RetentionPolicy(enabled=False)
        await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
    def _assert_blob_is_soft_deleted(self, blob):
        """Assert *blob* properties reflect a soft-deleted state."""
        self.assertTrue(blob.deleted)
        self.assertIsNotNone(blob.deleted_time)
        self.assertIsNotNone(blob.remaining_retention_days)
    def _assert_blob_not_soft_deleted(self, blob):
        """Assert *blob* properties reflect a live (not soft-deleted) state."""
        self.assertFalse(blob.deleted)
        self.assertIsNone(blob.deleted_time)
        self.assertIsNone(blob.remaining_retention_days)
    # -- Common test cases for blobs ----------------------------------------------
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_blob_exists(self, resource_group, location, storage_account, storage_account_key):
        """Fetching properties of an existing blob succeeds."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = await self._create_block_blob()
        # Act
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        exists = await blob.get_blob_properties()
        # Assert
        self.assertTrue(exists)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_blob_not_exists(self, resource_group, location, storage_account, storage_account_key):
        """Fetching properties of a missing blob raises ResourceNotFoundError."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = self._get_blob_reference()
        # Act
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        with self.assertRaises(ResourceNotFoundError):
            await blob.get_blob_properties()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_blob_snapshot_exists(self, resource_group, location, storage_account, storage_account_key):
        """A freshly created snapshot can be addressed and queried."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = await self._create_block_blob()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        snapshot = await blob.create_snapshot()
        # Act
        blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=snapshot)
        exists = await blob.get_blob_properties()
        # Assert
        self.assertTrue(exists)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_blob_snapshot_not_exists(self, resource_group, location, storage_account, storage_account_key):
        """Addressing a nonexistent snapshot raises ResourceNotFoundError."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = await self._create_block_blob()
        # Act
        blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot="1988-08-18T07:52:31.6690068Z")
        with self.assertRaises(ResourceNotFoundError):
            await blob.get_blob_properties()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_blob_container_not_exists(self, resource_group, location, storage_account, storage_account_key):
        """A blob lookup in a missing container raises ResourceNotFoundError."""
        # In this case both the blob and container do not exist
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = self._get_blob_reference()
        # Act
        blob = self.bsc.get_blob_client(self._get_container_reference(), blob_name)
        with self.assertRaises(ResourceNotFoundError):
            await blob.get_blob_properties()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_question_mark(self, resource_group, location, storage_account, storage_account_key):
        """Blob names and contents containing '?' round-trip correctly."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = '?ques?tion?'
        blob_data = u'???'
        # Act
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        await blob.upload_blob(blob_data)
        # Assert
        stream = await blob.download_blob()
        data = await stream.readall()
        self.assertIsNotNone(data)
        content = data.decode('utf-8')
        self.assertEqual(content, blob_data)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_special_chars(self, resource_group, location, storage_account, storage_account_key):
        """Blob names containing URL-special characters round-trip correctly."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        # Act
        for c in '-._ /()$=\',~':
            blob_name = '{0}a{0}a{0}'.format(c)
            blob_data = c
            blob = self.bsc.get_blob_client(self.container_name, blob_name)
            await blob.upload_blob(blob_data, length=len(blob_data))
            data = await (await blob.download_blob()).readall()
            content = data.decode('utf-8')
            self.assertEqual(content, blob_data)
        # Assert
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_lease_id(self, resource_group, location, storage_account, storage_account_key):
        """Uploading/downloading with an acquired lease id succeeds."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = await self._create_block_blob()
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        lease = await blob.acquire_lease()
        # Act
        data = b'hello world again'
        resp = await blob.upload_blob(data, length=len(data), lease=lease)
        # Assert
        self.assertIsNotNone(resp.get('etag'))
        stream = await blob.download_blob(lease=lease)
        content = await stream.readall()
        self.assertEqual(content, data)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_metadata(self, resource_group, location, storage_account, storage_account_key):
        """Metadata supplied at upload is returned by get_blob_properties."""
        # Arrange
        await self._setup(storage_account.name, storage_account_key)
        blob_name = self._get_blob_reference()
        metadata = {'hello': 'world', 'number': '42'}
        # Act
        data = b'hello world'
        blob = self.bsc.get_blob_client(self.container_name, blob_name)
        resp = await blob.upload_blob(data, length=len(data), metadata=metadata)
        # Assert
        self.assertIsNotNone(resp.get('etag'))
        md = (await blob.get_blob_properties()).metadata
        self.assertDictEqual(md, metadata)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_generator_async(self, resource_group, location, storage_account, storage_account_key):
        """A plain generator can be used as the upload data source."""
        await self._setup(storage_account.name, storage_account_key)
        # Act
        def gen():
            yield "hello"
            yield "world!"
            yield " eom"
        blob = self.bsc.get_blob_client(self.container_name, "gen_blob")
        resp = await blob.upload_blob(data=gen())
        # Assert
        self.assertIsNotNone(resp.get('etag'))
        content = await (await blob.download_blob()).readall()
        self.assertEqual(content, b"helloworld! eom")
    @pytest.mark.live_test_only
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_requests_async(self, resource_group, location, storage_account, storage_account_key):
        """A streaming requests response body can be uploaded directly."""
        await self._setup(storage_account.name, storage_account_key)
        # Act
        uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
        data = requests.get(uri, stream=True)
        blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
        resp = await blob.upload_blob(data=data.raw)
        self.assertIsNotNone(resp.get('etag'))
    @pytest.mark.live_test_only
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_create_blob_with_aiohttp_async(self, resource_group, location, storage_account, storage_account_key):
        """Chunks from an aiohttp response stream can be uploaded."""
        await self._setup(storage_account.name, storage_account_key)
        blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
        # Act
        uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
        async with aiohttp.ClientSession() as session:
            async with session.get(uri) as data:
                async for text, _ in data.content.iter_chunks():
                    resp = await blob.upload_blob(data=text)
        self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
stream = await snapshot.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot_previous(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
upload_data = b'hello world again'
await blob.upload_blob(upload_data, length=len(upload_data), overwrite=True)
# Act
blob_previous = await snapshot.download_blob()
blob_previous_bytes = await blob_previous.readall()
blob_latest = await blob.download_blob()
blob_latest_bytes = await blob_latest.readall()
# Assert
self.assertEqual(blob_previous_bytes, self.byte_data)
self.assertEqual(blob_latest_bytes, b'hello world again')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_range(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob(offset=0, length=5)
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data[:5])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_lease(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
stream = await blob.download_blob(lease=lease)
content = await stream.readall()
await lease.release()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.download_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_http_headers(
content_settings=ContentSettings(
content_language='spanish',
content_disposition='inline'),
)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_blob_settings_param(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Act
props.content_settings.content_language = 'spanish'
props.content_settings.content_disposition = 'inline'
await blob.set_http_headers(content_settings=props.content_settings)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'unlocked')
self.assertIsNotNone(props.creation_time)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
await blob.get_blob_properties() # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
# This test is to validate that the ErrorCode is retrieved from the header during a
# GET request. This is preferred to relying on the ErrorCode in the body.
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
(await blob.get_blob_properties()).metadata # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = await blob.download_blob()
# Assert
self.assertTrue(data.properties.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertTrue(props.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_list_blobs_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# test can only run live
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Act
# Assert
for blob in blob_list:
self.assertTrue(blob.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_server_encryption(self, resource_group, location, storage_account, storage_account_key):
pytest.skip("Aiohttp headers dict (CIMultiDictProxy) is immutable.")
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
def callback(response):
response.http_response.headers['x-ms-server-encrypted'] = 'false'
props = await blob.get_blob_properties(raw_response_hook=callback)
# Assert
self.assertFalse(props.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = self.bsc.get_blob_client(self.container_name, blob_name)
res = await blob.create_snapshot()
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 2)
# Act
snapshot = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=res)
props = await snapshot.get_blob_properties()
# Assert
self.assertIsNotNone(blob)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'locked')
self.assertEqual(props.lease.state, 'leased')
self.assertEqual(props.lease.duration, 'infinite')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
md = (await blob.get_blob_properties()).metadata
# Assert
self.assertIsNotNone(md)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_metadata_with_upper_case(self, resource_group, location, storage_account, storage_account_key):
# bug in devtools...converts upper case header to lowercase
# passes live.
# Arrange
await self._setup(storage_account.name, storage_account_key)
metadata = {'hello': 'world', 'number': '42', 'UP': 'UPval'}
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_blob_metadata(metadata)
# Assert
md = (await blob.get_blob_properties()).metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['UP'], 'UPval')
self.assertFalse('up' in md)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.delete_blob()
# Assert
self.assertIsNone(resp)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.delete_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
await snapshot.delete_blob()
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0].name, blob_name)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
await blob.delete_blob(delete_snapshots='only')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
# with self.assertRaises(HttpResponseError):
# blob.delete_blob()
await blob.delete_blob(delete_snapshots='include')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 0)
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_soft_delete_blob_without_snapshots(self, resource_group, location, storage_account, storage_account_key):
        """Soft-delete a blob, verify it only shows with include='deleted', then undelete it."""
        try:
            # Arrange
            await self._setup(storage_account.name, storage_account_key)
            await self._enable_soft_delete()
            blob_name = await self._create_block_blob()
            container = self.bsc.get_container_client(self.container_name)
            blob = container.get_blob_client(blob_name)
            # Soft delete the blob
            await blob.delete_blob()
            blob_list = []
            async for b in container.list_blobs(include='deleted'):
                blob_list.append(b)
            # Assert: the soft-deleted blob is visible when deleted blobs are included
            self.assertEqual(len(blob_list), 1)
            self._assert_blob_is_soft_deleted(blob_list[0])
            # list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
            blob_list = []
            async for b in container.list_blobs():
                blob_list.append(b)
            # Assert
            self.assertEqual(len(blob_list), 0)
            # Restore blob with undelete
            await blob.undelete_blob()
            blob_list = []
            async for b in container.list_blobs(include='deleted'):
                blob_list.append(b)
            # Assert: the restored blob is listed and no longer marked deleted
            self.assertEqual(len(blob_list), 1)
            self._assert_blob_not_soft_deleted(blob_list[0])
        finally:
            # Always turn soft delete back off so later tests are unaffected.
            await self._disable_soft_delete()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_soft_delete_single_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
        """Soft-delete exactly one of two snapshots and verify listing/undelete behavior."""
        try:
            # Arrange
            await self._setup(storage_account.name, storage_account_key)
            await self._enable_soft_delete()
            blob_name = await self._create_block_blob()
            blob = self.bsc.get_blob_client(self.container_name, blob_name)
            # Two snapshots plus the base blob -> 3 entries expected in full listings.
            blob_snapshot_1 = await blob.create_snapshot()
            blob_snapshot_2 = await blob.create_snapshot()
            # Soft delete blob_snapshot_1
            snapshot_1 = self.bsc.get_blob_client(
                self.container_name, blob_name, snapshot=blob_snapshot_1)
            await snapshot_1.delete_blob()
            # delete_snapshots is not valid on a snapshot client.
            with self.assertRaises(ValueError):
                await snapshot_1.delete_blob(delete_snapshots='only')
            container = self.bsc.get_container_client(self.container_name)
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: only the first snapshot is marked soft-deleted
            self.assertEqual(len(blob_list), 3)
            for listedblob in blob_list:
                if listedblob.snapshot == blob_snapshot_1['snapshot']:
                    self._assert_blob_is_soft_deleted(listedblob)
                else:
                    self._assert_blob_not_soft_deleted(listedblob)
            # list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
            blob_list = []
            async for b in container.list_blobs(include='snapshots'):
                blob_list.append(b)
            # Assert
            self.assertEqual(len(blob_list), 2)
            # Restore snapshot with undelete
            await blob.undelete_blob()
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: everything restored, nothing soft-deleted anymore
            self.assertEqual(len(blob_list), 3)
            for blob in blob_list:
                self._assert_blob_not_soft_deleted(blob)
        finally:
            # Always turn soft delete back off so later tests are unaffected.
            await self._disable_soft_delete()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_soft_delete_only_snapshots_of_blob(self, resource_group, location, storage_account, storage_account_key):
        """delete_snapshots='only' soft-deletes all snapshots but leaves the base blob."""
        try:
            # Arrange
            await self._setup(storage_account.name, storage_account_key)
            await self._enable_soft_delete()
            blob_name = await self._create_block_blob()
            blob = self.bsc.get_blob_client(self.container_name, blob_name)
            blob_snapshot_1 = await blob.create_snapshot()
            blob_snapshot_2 = await blob.create_snapshot()
            # Soft delete all snapshots
            await blob.delete_blob(delete_snapshots='only')
            container = self.bsc.get_container_client(self.container_name)
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: both snapshots soft-deleted, base blob untouched
            self.assertEqual(len(blob_list), 3)
            for listedblob in blob_list:
                if listedblob.snapshot == blob_snapshot_1['snapshot']:
                    self._assert_blob_is_soft_deleted(listedblob)
                elif listedblob.snapshot == blob_snapshot_2['snapshot']:
                    self._assert_blob_is_soft_deleted(listedblob)
                else:
                    self._assert_blob_not_soft_deleted(listedblob)
            # list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
            blob_list = []
            async for b in container.list_blobs(include="snapshots"):
                blob_list.append(b)
            # Assert: only the base blob remains visible
            self.assertEqual(len(blob_list), 1)
            # Restore snapshots with undelete
            await blob.undelete_blob()
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: all three entries restored
            self.assertEqual(len(blob_list), 3)
            for blob in blob_list:
                self._assert_blob_not_soft_deleted(blob)
        finally:
            # Always turn soft delete back off so later tests are unaffected.
            await self._disable_soft_delete()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_soft_delete_blob_including_all_snapshots(self, resource_group, location, storage_account, storage_account_key):
        """delete_snapshots='include' soft-deletes the base blob and every snapshot."""
        try:
            # Arrange
            await self._setup(storage_account.name, storage_account_key)
            await self._enable_soft_delete()
            blob_name = await self._create_block_blob()
            blob = self.bsc.get_blob_client(self.container_name, blob_name)
            blob_snapshot_1 = await blob.create_snapshot()
            blob_snapshot_2 = await blob.create_snapshot()
            # Soft delete blob and all snapshots
            await blob.delete_blob(delete_snapshots='include')
            container = self.bsc.get_container_client(self.container_name)
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: base blob and both snapshots are all soft-deleted
            self.assertEqual(len(blob_list), 3)
            for listedblob in blob_list:
                self._assert_blob_is_soft_deleted(listedblob)
            # list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
            blob_list = []
            async for b in container.list_blobs(include=["snapshots"]):
                blob_list.append(b)
            # Assert
            self.assertEqual(len(blob_list), 0)
            # Restore blob and snapshots with undelete
            await blob.undelete_blob()
            blob_list = []
            async for b in container.list_blobs(include=["snapshots", "deleted"]):
                blob_list.append(b)
            # Assert: all three entries restored
            self.assertEqual(len(blob_list), 3)
            for blob in blob_list:
                self._assert_blob_not_soft_deleted(blob)
        finally:
            # Always turn soft delete back off so later tests are unaffected.
            await self._disable_soft_delete()
    @GlobalStorageAccountPreparer()
    @AsyncStorageTestCase.await_prepared_test
    async def test_soft_delete_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
        """Soft-deleting a leased blob requires the lease; undelete clears the lease."""
        try:
            # Arrange
            await self._setup(storage_account.name, storage_account_key)
            await self._enable_soft_delete()
            blob_name = await self._create_block_blob()
            blob = self.bsc.get_blob_client(self.container_name, blob_name)
            lease = await blob.acquire_lease()
            # Soft delete the blob without lease_id should fail
            with self.assertRaises(HttpResponseError):
                await blob.delete_blob()
            # Soft delete the blob
            await blob.delete_blob(lease=lease)
            container = self.bsc.get_container_client(self.container_name)
            blob_list = []
            async for b in container.list_blobs(include="deleted"):
                blob_list.append(b)
            # Assert: blob is soft-deleted and visible only with include="deleted"
            self.assertEqual(len(blob_list), 1)
            self._assert_blob_is_soft_deleted(blob_list[0])
            # list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
            blob_list = []
            async for b in container.list_blobs():
                blob_list.append(b)
            # Assert
            self.assertEqual(len(blob_list), 0)
            # Restore blob with undelete, this also gets rid of the lease
            await blob.undelete_blob()
            blob_list = []
            async for b in container.list_blobs(include="deleted"):
                blob_list.append(b)
            # Assert: restored and no longer marked deleted
            self.assertEqual(len(blob_list), 1)
            self._assert_blob_not_soft_deleted(blob_list[0])
        finally:
            # Always turn soft delete back off so later tests are unaffected.
            await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
sourceblob = '{0}/{1}/{2}'.format(
self.account_url(storage_account.name, "blob"), self.container_name, blob_name)
copyblob = self.bsc.get_blob_client(self.container_name, 'blob1copy')
copy = await copyblob.start_copy_from_url(sourceblob)
# Assert
self.assertIsNotNone(copy)
self.assertEqual(copy['copy_status'], 'success')
self.assertFalse(isinstance(copy['copy_status'], Enum))
self.assertIsNotNone(copy['copy_id'])
copy_content = await (await copyblob.download_blob()).readall()
self.assertEqual(copy_content, self.byte_data)
# @GlobalStorageAccountPreparer()
# @AsyncStorageTestCase.await_prepared_test
# TODO: external copy was supported since 2019-02-02
# async def test_copy_blob_with_external_blob_fails(self):
# # Arrange
# await self._setup()
# source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
# copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
#
# # Act
# copy = await copied_blob.start_copy_from_url(source_blob)
# self.assertEqual(copy['copy_status'], 'pending')
# props = await self._wait_for_async_copy(copied_blob)
#
# # Assert
# self.assertEqual(props.copy.status_description, '500 InternalServerError "Copy failed."')
# self.assertEqual(props.copy.status, 'failed')
# self.assertIsNotNone(props.copy.id)
#
# @record
# def test_copy_blob_with_external_blob_fails(self):
# loop = asyncio.get_event_loop()
# loop.run_until_complete(self._test_copy_blob_with_external_blob_fails())
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_no_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob()
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
# Assert
with self.assertRaises(ResourceNotFoundError):
await target_blob.start_copy_from_url(source_blob.url)
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
sas_token = generate_blob_sas(
source_blob.account_name,
source_blob.container_name,
source_blob.blob_name,
snapshot=source_blob.snapshot,
account_key=source_blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(blob.url)
# Assert
props = await self._wait_for_async_copy(target_blob)
self.assertEqual(props.copy.status, 'success')
actual_data = await (await target_blob.download_blob()).readall()
self.assertEqual(actual_data, data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
# Act
copy = await copied_blob.start_copy_from_url(source_blob)
self.assertEqual(copy['copy_status'], 'pending')
await copied_blob.abort_copy(copy)
props = await self._wait_for_async_copy(copied_blob)
self.assertEqual(props.copy.status, 'aborted')
# Assert
actual_data = await copied_blob.download_blob()
bytes_data = await (await copied_blob.download_blob()).readall()
self.assertEqual(bytes_data, b"")
self.assertEqual(actual_data.properties.copy.status, 'aborted')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob_with_synchronous_copy_fails(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob_name = await self._create_block_blob()
source_blob = self.bsc.get_blob_client(self.container_name, source_blob_name)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(source_blob.url)
with self.assertRaises(HttpResponseError):
await target_blob.abort_copy(copy_resp)
# Assert
self.assertEqual(copy_resp['copy_status'], 'success')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_snapshot_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.create_snapshot()
# Assert
self.assertIsNotNone(resp)
self.assertIsNotNone(resp['snapshot'])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_release(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
await lease.release()
lease2 = await blob.acquire_lease()
# Assert
self.assertIsNotNone(lease)
self.assertIsNotNone(lease2)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_duration(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease(lease_duration=15)
resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
self.sleep(15)
# Assert
with self.assertRaises(HttpResponseError):
await blob.upload_blob(b'hello 3', length=7, lease=lease)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_proposed_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
lease = await blob.acquire_lease(lease_id=lease_id)
# Assert
self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_change_lease_id(self, resource_group, location, storage_account, storage_account_key):
    """An active lease's id can be changed, and the lease renewed under the new id."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()

    # Act: capture the auto-assigned id, change to the fixed id, then renew
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
    lease = await blob.acquire_lease()
    first_lease_id = lease.id
    await lease.change(lease_id)
    await lease.renew()

    # Assert: the id actually changed to the requested value
    self.assertNotEqual(first_lease_id, lease.id)
    self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_break_period(self, resource_group, location, storage_account, storage_account_key):
    """Breaking a lease with a break period allows writes until the period elapses."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()

    # Act: break with a 5s grace period -- the lease is still usable until it lapses
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    lease = await blob.acquire_lease(lease_duration=15)
    lease_time = await lease.break_lease(lease_break_period=5)
    resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
    self.sleep(5)
    # After the break period the lease id is no longer valid for writes
    with self.assertRaises(HttpResponseError):
        await blob.upload_blob(b'hello 3', length=7, lease=lease)

    # Assert
    self.assertIsNotNone(lease.id)
    self.assertIsNotNone(lease_time)
    self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_renew(self, resource_group, location, storage_account, storage_account_key):
    """Renewing a lease keeps the same lease id."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()

    # Act
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    lease = await blob.acquire_lease()
    first_id = lease.id
    await lease.renew()

    # Assert
    self.assertEqual(first_id, lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_twice_fails(self, resource_group, location, storage_account, storage_account_key):
    """A second acquire on an already-leased blob is rejected by the service."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    lease = await blob.acquire_lease()

    # Act
    with self.assertRaises(HttpResponseError):
        await blob.acquire_lease()

    # Assert: the original lease is unaffected
    self.assertIsNotNone(lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_unicode_get_blob_unicode_name(self, resource_group, location, storage_account, storage_account_key):
    """A blob whose name contains non-ASCII characters can be written and read back."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = '啊齄丂狛狜'
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    await blob.upload_blob(b'hello world')

    # Act
    downloader = await blob.download_blob()
    downloaded_bytes = await downloader.readall()

    # Assert
    self.assertEqual(downloaded_bytes, b'hello world')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_blob_unicode_data(self, resource_group, location, storage_account, storage_account_key):
    """Uploading unicode text data succeeds and returns an etag."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Act
    data = u'hello world啊齄丂狛狜'
    resp = await blob.upload_blob(data)

    # Assert
    self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_private_blob(self, resource_group, location, storage_account, storage_account_key):
    """An anonymous GET of a private blob URL fails with ResourceNotFound."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Act: raw HTTP request with no credentials at all
    response = requests.get(blob.url)

    # Assert
    self.assertFalse(response.ok)
    self.assertNotEqual(-1, response.text.find('ResourceNotFound'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_public_blob(self, resource_group, location, storage_account, storage_account_key):
    """A blob in a container with public 'blob' access is readable without a SAS."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'a public blob can be read without a shared access signature'
    blob_name = 'blob1.txt'
    container_name = self._get_container_reference()
    try:
        container = await self.bsc.create_container(container_name, public_access='blob')
    except ResourceExistsError:
        # Container may survive from a previous (failed) run -- reuse it
        container = self.bsc.get_container_client(container_name)
    blob = await container.upload_blob(blob_name, data)

    # Act: anonymous HTTP GET
    response = requests.get(blob.url)

    # Assert
    self.assertTrue(response.ok)
    self.assertEqual(data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_public_access_blob(self, resource_group, location, storage_account, storage_account_key):
    """A credential-less BlobClient can download from a public-access container."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'public access blob'
    blob_name = 'blob1.txt'
    container_name = self._get_container_reference()
    try:
        container = await self.bsc.create_container(container_name, public_access='blob')
    except ResourceExistsError:
        # Container may survive from a previous (failed) run -- reuse it
        container = self.bsc.get_container_client(container_name)
    blob = await container.upload_blob(blob_name, data)

    # Act: build a client from the bare URL, with no credential
    service = BlobClient.from_blob_url(blob.url)
    # self._set_test_proxy(service, self.settings)
    content = await (await service.download_blob()).readall()

    # Assert
    self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_access_blob(self, resource_group, location, storage_account, storage_account_key):
    """A read-permission blob SAS token grants download access."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Act: authenticate with only the SAS token
    service = BlobClient.from_blob_url(blob.url, credential=token)
    # self._set_test_proxy(service, self.settings)
    content = await (await service.download_blob()).readall()

    # Assert
    self.assertEqual(self.byte_data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_signed_identifier(self, resource_group, location, storage_account, storage_account_key):
    """A SAS token referencing a stored access policy (signed identifier) grants access."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    container = self.bsc.get_container_client(self.container_name)
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Install a stored access policy on the container; the SAS below carries
    # only its id ('testid') -- start/expiry/permission come from the policy.
    access_policy = AccessPolicy()
    access_policy.start = datetime.utcnow() - timedelta(hours=1)
    access_policy.expiry = datetime.utcnow() + timedelta(hours=1)
    access_policy.permission = BlobSasPermissions(read=True)
    identifiers = {'testid': access_policy}

    resp = await container.set_container_access_policy(identifiers)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        policy_id='testid')

    # Act
    service = BlobClient.from_blob_url(blob.url, credential=token)
    # self._set_test_proxy(service, self.settings)
    result = await (await service.download_blob()).readall()

    # Assert
    self.assertEqual(self.byte_data, result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_account_sas(self, resource_group, location, storage_account, storage_account_key):
    """An account-level SAS grants read access to both container and blob resources."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()

    token = generate_account_sas(
        self.bsc.account_name,
        self.bsc.credential.account_key,
        ResourceTypes(container=True, object=True),
        AccountSasPermissions(read=True),
        datetime.utcnow() + timedelta(hours=1),
    )

    # Act: the same token authenticates both client types and raw HTTP GETs
    blob = BlobClient(
        self.bsc.url, container_name=self.container_name, blob_name=blob_name, credential=token)
    container = ContainerClient(
        self.bsc.url, container_name=self.container_name, credential=token)
    await container.get_container_properties()
    blob_response = requests.get(blob.url)
    container_response = requests.get(container.url, params={'restype': 'container'})

    # Assert
    self.assertTrue(blob_response.ok)
    self.assertEqual(self.byte_data, blob_response.content)
    self.assertTrue(container_response.ok)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_token_credential(self, resource_group, location, storage_account, storage_account_key):
    """OAuth token credentials authenticate requests; a bad token is rejected."""
    await self._setup(storage_account.name, storage_account_key)
    token_credential = self.generate_oauth_token()

    # Action 1: make sure token works
    service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
    result = await service.get_service_properties()
    self.assertIsNotNone(result)

    # Action 2: change token value to make request fail
    fake_credential = self.generate_fake_token()
    service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=fake_credential, transport=AiohttpTestTransport())
    with self.assertRaises(ClientAuthenticationError):
        await service.get_service_properties()

    # Action 3: update token to make it working again
    service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
    result = await service.get_service_properties()
    self.assertIsNotNone(result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob(self, resource_group, location, storage_account, storage_account_key):
    """A read-permission blob SAS URL is fetchable with a plain HTTP GET."""
    # SAS URL is calculated from storage key, so this test runs live only
    await self._setup(storage_account.name, storage_account_key)
    # Arrange
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Act: the SAS-bearing URL needs no further authentication
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
    response = requests.get(sas_blob.url)

    # Assert
    response.raise_for_status()
    self.assertTrue(response.ok)
    self.assertEqual(self.byte_data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob_with_content_query_params(self, resource_group, location, storage_account, storage_account_key):
    """Content-* override parameters baked into a SAS are reflected in response headers."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
        cache_control='no-cache',
        content_disposition='inline',
        content_encoding='utf-8',
        content_language='fr',
        content_type='text',
    )
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)

    # Act
    response = requests.get(sas_blob.url)

    # Assert: each SAS override appears verbatim as a response header
    response.raise_for_status()
    self.assertEqual(self.byte_data, response.content)
    self.assertEqual(response.headers['cache-control'], 'no-cache')
    self.assertEqual(response.headers['content-disposition'], 'inline')
    self.assertEqual(response.headers['content-encoding'], 'utf-8')
    self.assertEqual(response.headers['content-language'], 'fr')
    self.assertEqual(response.headers['content-type'], 'text')
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_write_access_blob(self, resource_group, location, storage_account, storage_account_key):
    """A write-permission blob SAS allows replacing blob content via raw HTTP PUT."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    updated_data = b'updated blob data'
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(write=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)

    # Act: a raw Put Blob requires the x-ms-blob-type header
    headers = {'x-ms-blob-type': 'BlockBlob'}
    response = requests.put(sas_blob.url, headers=headers, data=updated_data)

    # Assert: the write went through and is visible via the keyed client
    response.raise_for_status()
    self.assertTrue(response.ok)
    data = await (await blob.download_blob()).readall()
    self.assertEqual(updated_data, data)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_delete_access_blob(self, resource_group, location, storage_account, storage_account_key):
    """A delete-permission blob SAS allows deleting the blob via raw HTTP DELETE."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(delete=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)

    # Act
    response = requests.delete(sas_blob.url)

    # Assert: delete succeeded, so a subsequent download must fail
    response.raise_for_status()
    self.assertTrue(response.ok)
    with self.assertRaises(HttpResponseError):
        await sas_blob.download_blob()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information(self, resource_group, location, storage_account, storage_account_key):
    """Account information (SKU and account kind) is retrievable from the service client."""
    # Act
    await self._setup(storage_account.name, storage_account_key)
    account_info = await self.bsc.get_account_information()

    # Assert
    self.assertIsNotNone(account_info.get('sku_name'))
    self.assertIsNotNone(account_info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_name(self, resource_group, location, storage_account, storage_account_key):
    """Account information works on a container client even for a nonexistent container."""
    # Act
    # Container name gets ignored
    await self._setup(storage_account.name, storage_account_key)
    missing_container = self.bsc.get_container_client("missing")
    account_info = await missing_container.get_account_information()

    # Assert
    self.assertIsNotNone(account_info.get('sku_name'))
    self.assertIsNotNone(account_info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_name(self, resource_group, location, storage_account, storage_account_key):
    """Account information works on a blob client even for a nonexistent blob."""
    # Act
    # Both container and blob names get ignored
    await self._setup(storage_account.name, storage_account_key)
    missing_blob = self.bsc.get_blob_client("missing", "missing")
    account_info = await missing_blob.get_account_information()

    # Assert
    self.assertIsNotNone(account_info.get('sku_name'))
    self.assertIsNotNone(account_info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_sas(self, resource_group, location, storage_account, storage_account_key):
    """Account information is retrievable with only a container-level SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    container = self.bsc.get_container_client(self.container_name)

    token = generate_container_sas(
        container.account_name,
        container.container_name,
        account_key=container.credential.account_key,
        permission=ContainerSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_container = ContainerClient.from_container_url(container.url, credential=token)

    # Act
    info = await sas_container.get_account_information()

    # Assert
    self.assertIsNotNone(info.get('sku_name'))
    self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_sas(self, resource_group, location, storage_account, storage_account_key):
    """Account information is retrievable with only a blob-level SAS."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    blob_name = await self._create_block_blob()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)

    # Act
    info = await sas_blob.get_account_information()

    # Assert
    self.assertIsNotNone(info.get('sku_name'))
    self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
    """download_blob_from_url writes a SAS-authenticated remote blob to a local file."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    await self._setup_remote(rmt.name, rmt_key)
    await self._create_remote_container()
    source_blob = await self._create_remote_block_blob(blob_data=data)
    sas_token = generate_blob_sas(
        source_blob.account_name,
        source_blob.container_name,
        source_blob.blob_name,
        snapshot=source_blob.snapshot,
        account_key=source_blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    FILE_PATH = '_to_file_with_sas.async.dat'
    blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)

    # Act: the SAS is embedded in blob.url, so no separate credential is passed
    await download_blob_from_url(blob.url, FILE_PATH)

    # Assert
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data, actual)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
    """download_blob_from_url accepts an account key credential and a file path target."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    await self._setup_remote(rmt.name, rmt_key)
    await self._create_remote_container()
    source_blob = await self._create_remote_block_blob(blob_data=data)
    FILE_PATH = 'to_file_with_credential.async.dat'

    # Act
    await download_blob_from_url(
        source_blob.url, FILE_PATH,
        max_concurrency=2,
        credential=rmt_key)

    # Assert
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data, actual)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_stream_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
    """download_blob_from_url accepts an open writable stream as the target."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    await self._setup_remote(rmt.name, rmt_key)
    await self._create_remote_container()
    source_blob = await self._create_remote_block_blob(blob_data=data)
    FILE_PATH = 'to_stream_with_credential.async.dat'

    # Act: pass the file object rather than a path
    with open(FILE_PATH, 'wb') as stream:
        await download_blob_from_url(
            source_blob.url, stream,
            max_concurrency=2,
            credential=rmt_key)

    # Assert
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data, actual)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
    """Downloading onto an existing file without overwrite raises and leaves it intact."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    await self._setup_remote(rmt.name, rmt_key)
    await self._create_remote_container()
    source_blob = await self._create_remote_block_blob(blob_data=data)
    FILE_PATH = 'with_existing_file.async.dat'

    # Act: first download creates the file; second must refuse to clobber it
    await download_blob_from_url(
        source_blob.url, FILE_PATH,
        credential=rmt_key)
    with self.assertRaises(ValueError):
        await download_blob_from_url(source_blob.url, FILE_PATH)

    # Assert: original content untouched
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data, actual)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file_overwrite(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
    """With overwrite=True a second download replaces the existing file's content."""
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    await self._setup_remote(rmt.name, rmt_key)
    await self._create_remote_container()
    source_blob = await self._create_remote_block_blob(blob_data=data)
    FILE_PATH = 'existing_file_overwrite.async.dat'

    # Act: download once, then overwrite with a different payload
    await download_blob_from_url(
        source_blob.url, FILE_PATH,
        credential=rmt_key)

    data2 = b'ABCDEFGH' * 1024 * 1024
    source_blob = await self._create_remote_block_blob(blob_data=data2)
    await download_blob_from_url(
        source_blob.url, FILE_PATH, overwrite=True,
        credential=rmt_key)

    # Assert: the file now holds the second payload
    with open(FILE_PATH, 'rb') as stream:
        actual = stream.read()
        self.assertEqual(data2, actual)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_sas(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url uploads bytes to a write-permission SAS URL."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    token = generate_blob_sas(
        blob.account_name,
        blob.container_name,
        blob.blob_name,
        snapshot=blob.snapshot,
        account_key=blob.credential.account_key,
        permission=BlobSasPermissions(write=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_blob = BlobClient.from_blob_url(blob.url, credential=token)

    # Act
    uploaded = await upload_blob_to_url(sas_blob.url, data)

    # Assert: verify content through the keyed client
    self.assertIsNotNone(uploaded)
    content = await (await blob.download_blob()).readall()
    self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_credential(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url uploads bytes using an explicit account-key credential."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Act
    uploaded = await upload_blob_to_url(
        blob.url, data, credential=storage_account_key)

    # Assert
    self.assertIsNotNone(uploaded)
    content = await (await blob.download_blob()).readall()
    self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url refuses to replace an existing blob without overwrite."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    await blob.upload_blob(b"existing_data")

    # Act
    with self.assertRaises(ResourceExistsError):
        await upload_blob_to_url(
            blob.url, data, credential=storage_account_key)

    # Assert: original content untouched
    content = await (await blob.download_blob()).readall()
    self.assertEqual(b"existing_data", content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob_overwrite(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url with overwrite=True replaces an existing blob."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)
    await blob.upload_blob(b"existing_data")

    # Act
    uploaded = await upload_blob_to_url(
        blob.url, data,
        overwrite=True,
        credential=storage_account_key)

    # Assert: old content fully replaced
    self.assertIsNotNone(uploaded)
    content = await (await blob.download_blob()).readall()
    self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_text_with_credential(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url uploads text data, which round-trips via UTF-8 decoding."""
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = '12345678' * 1024 * 1024  # 8 MiB of text
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Act
    uploaded = await upload_blob_to_url(
        blob.url, data, credential=storage_account_key)

    # Assert: download with encoding to compare as str
    self.assertIsNotNone(uploaded)
    stream = await blob.download_blob(encoding='UTF-8')
    content = await stream.readall()
    self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_file_with_credential(self, resource_group, location, storage_account, storage_account_key):
    """upload_blob_to_url uploads from an open file stream.

    Previously the file was opened but the handle was discarded and the
    in-memory ``data`` bytes were uploaded instead, so this "file" variant
    duplicated the bytes test. Upload the stream itself; the file holds
    exactly ``data``, so the assertions are unchanged.
    """
    # SAS URL is calculated from storage key, so this test runs live only
    # Arrange
    await self._setup(storage_account.name, storage_account_key)
    data = b'12345678' * 1024 * 1024  # 8 MiB payload
    FILE_PATH = 'url_file_with_credential.async.dat'
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    blob_name = self._get_blob_reference()
    blob = self.bsc.get_blob_client(self.container_name, blob_name)

    # Act: upload the open file object, not the in-memory bytes
    with open(FILE_PATH, 'rb') as stream:
        uploaded = await upload_blob_to_url(
            blob.url, stream, credential=storage_account_key)

    # Assert
    self.assertIsNotNone(uploaded)
    content = await (await blob.download_blob()).readall()
    self.assertEqual(data, content)
    self._teardown(FILE_PATH)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_transport_closed_only_once(self, resource_group, location, storage_account, storage_account_key):
    """A shared transport stays open after a child client's context manager exits."""
    container_name = self.get_resource_name('utcontainerasync')
    transport = AioHttpTransport()
    bsc = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=storage_account_key, transport=transport)
    blob_name = self._get_blob_reference()
    async with bsc:
        await bsc.get_service_properties()
        assert transport.session is not None
        # Entering/exiting the blob client must not close the shared session
        async with bsc.get_blob_client(container_name, blob_name) as bc:
            assert transport.session is not None
        await bsc.get_service_properties()
        assert transport.session is not None
# ------------------------------------------------------------------------------
| sdk/storage/azure-storage-blob/tests/test_common_blob_async.py | 83,924 | Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
coding: utf-8 ------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------- ------------------------------------------------------------------------------ ------------------------------------------------------------------------------ --Helpers----------------------------------------------------------------- wait until the policy has gone into effect -- Common test cases for blobs ---------------------------------------------- Arrange Act Assert Arrange Act Arrange Act Assert Arrange Act In this case both the blob and container do not exist Arrange Act Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Act Assert Act Act Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Invalid snapshot value of 1 Assert TODO: No error code returned self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code) This test is to validate that the ErrorCode is retrieved from the header during a GET request. This is preferred to relying on the ErrorCode in the body. Arrange Act Invalid snapshot value of 1 Assert TODO: No error code returned self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code) Arrange Act Assert Arrange Act Assert test can only run live Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert bug in devtools...converts upper case header to lowercase passes live. 
Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act with self.assertRaises(HttpResponseError): blob.delete_blob() Assert Arrange Soft delete the blob Assert list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified Assert Restore blob with undelete Assert Arrange Soft delete blob_snapshot_1 Assert list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified Assert Restore snapshot with undelete Assert Arrange Soft delete all snapshots Assert list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified Assert Restore snapshots with undelete Assert Arrange Soft delete blob and all snapshots Assert list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified Assert Restore blob and snapshots with undelete Assert Arrange Soft delete the blob without lease_id should fail Soft delete the blob Assert list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified Assert Restore blob with undelete, this also gets rid of the lease Assert Arrange Act Assert @GlobalStorageAccountPreparer() @AsyncStorageTestCase.await_prepared_test TODO: external copy was supported since 2019-02-02 async def test_copy_blob_with_external_blob_fails(self): Arrange await self._setup() source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt" copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt') Act copy = await copied_blob.start_copy_from_url(source_blob) self.assertEqual(copy['copy_status'], 'pending') props = await self._wait_for_async_copy(copied_blob) Assert self.assertEqual(props.copy.status_description, '500 InternalServerError "Copy failed."') self.assertEqual(props.copy.status, 'failed') self.assertIsNotNone(props.copy.id) @record def test_copy_blob_with_external_blob_fails(self): loop = asyncio.get_event_loop() 
loop.run_until_complete(self._test_copy_blob_with_external_blob_fails()) Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act self._set_test_proxy(service, self.settings) Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act self._set_test_proxy(service, self.settings) Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act self._set_test_proxy(service, self.settings) Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert Action 1: make sure token works Action 2: change token value to make request fail Action 3: update token to make it working again SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert Act Assert Act Container name gets ignored Assert Act Both container and blob names get ignored Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL 
is calculated from storage key, so this test runs live only Arrange Act Assert SAS URL is calculated from storage key, so this test runs live only Arrange Act Assert ------------------------------------------------------------------------------ | 6,116 | en | 0.711492 |
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import glob
import logging
import os
import sys
import devil_chromium
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
def main():
  """Parse arguments, resolve the APK path, and install it (plus any
  matching split APKs) on the selected devices in parallel.

  Devices that fail or time out during install are added to the blacklist
  (when a blacklist file is configured) instead of aborting the run.
  """
  parser = argparse.ArgumentParser()
  apk_group = parser.add_mutually_exclusive_group(required=True)
  apk_group.add_argument('--apk', dest='apk_name',
                         help='DEPRECATED The name of the apk containing the'
                              ' application (with the .apk extension).')
  apk_group.add_argument('apk_path', nargs='?',
                         help='The path to the APK to install.')

  # TODO(jbudorick): Remove once no clients pass --apk_package
  parser.add_argument('--apk_package', help='DEPRECATED unused')
  parser.add_argument('--split',
                      action='append',
                      dest='splits',
                      help='A glob matching the apk splits. '
                           'Can be specified multiple times.')
  parser.add_argument('--keep_data',
                      action='store_true',
                      default=False,
                      help='Keep the package data when installing '
                           'the application.')
  parser.add_argument('--debug', action='store_const', const='Debug',
                      dest='build_type',
                      default=os.environ.get('BUILDTYPE', 'Debug'),
                      help='If set, run test suites under out/Debug. '
                           'Default is env var BUILDTYPE or Debug')
  parser.add_argument('--release', action='store_const', const='Release',
                      dest='build_type',
                      help='If set, run test suites under out/Release. '
                           'Default is env var BUILDTYPE or Debug.')
  parser.add_argument('-d', '--device', dest='devices', action='append',
                      default=[],
                      help='Target device for apk to install on. Enter multiple'
                           ' times for multiple devices.')
  parser.add_argument('--adb-path', type=os.path.abspath,
                      help='Absolute path to the adb binary to use.')
  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
  parser.add_argument('-v', '--verbose', action='count',
                      help='Enable verbose logging.')
  parser.add_argument('--downgrade', action='store_true',
                      help='If set, allows downgrading of apk.')
  parser.add_argument('--timeout', type=int,
                      default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
                      help='Seconds to wait for APK installation. '
                           '(default: %(default)s)')

  args = parser.parse_args()

  run_tests_helper.SetLogLevel(args.verbose)
  constants.SetBuildType(args.build_type)
  devil_chromium.Initialize(
      output_directory=constants.GetOutDirectory(),
      adb_path=args.adb_path)

  # Resolve the APK: accept an explicit path or a bare name looked up under
  # the output directory's 'apks' folder.
  apk = args.apk_path or args.apk_name
  if not apk.endswith('.apk'):
    apk += '.apk'
  if not os.path.exists(apk):
    apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
    if not os.path.exists(apk):
      parser.error('%s not found.' % apk)

  if args.splits:
    splits = []
    base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
    for split_glob in args.splits:
      apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
      if not apks:
        logging.warning('No apks matched for %s.', split_glob)
      for f in apks:
        helper = apk_helper.ApkHelper(f)
        # Only keep splits that belong to the same package as the base APK.
        if (helper.GetPackageName() == base_apk_package
            and helper.GetSplitName()):
          splits.append(f)

  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
               if args.blacklist_file
               else None)
  devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
                                                    device_arg=args.devices)

  def blacklisting_install(device):
    try:
      if args.splits:
        device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
                               allow_downgrade=args.downgrade)
      else:
        device.Install(apk, reinstall=args.keep_data,
                       allow_downgrade=args.downgrade,
                       timeout=args.timeout)
    except device_errors.CommandFailedError:
      # BUG FIX: args.apk_name is None when the positional apk_path form is
      # used; log the resolved APK path instead.
      logging.exception('Failed to install %s', apk)
      if blacklist:
        blacklist.Extend([str(device)], reason='install_failure')
        logging.warning('Blacklisting %s', str(device))
    except device_errors.CommandTimeoutError:
      logging.exception('Timed out while installing %s', apk)
      if blacklist:
        blacklist.Extend([str(device)], reason='install_timeout')
        logging.warning('Blacklisting %s', str(device))

  device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)


if __name__ == '__main__':
  sys.exit(main())
| build/android/adb_install_apk.py | 5,348 | Utility script to install APKs from the command line quickly.
!/usr/bin/env python Copyright (c) 2012 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. TODO(jbudorick): Remove once no clients pass --apk_package | 302 | en | 0.81547 |
"""
Reinforcement learning maze example.
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example. The RL is in RL_brain.py.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40 # pixels
MAZE_H = 4 # grid height
MAZE_W = 4 # grid width
class Maze(tk.Tk, object):
    """A MAZE_W x MAZE_H grid world rendered with Tkinter.

    Observations are the canvas coordinates of the red explorer rectangle.
    Rewards: +1 on the yellow oval (paradise), -1 on either black cell
    (hell), 0 everywhere else.
    """

    def __init__(self):
        super(Maze, self).__init__()
        # NOTE(review): step() maps indices 0/1/2/3 to up/down/right/left,
        # which disagrees with the 'u','d','l','r' ordering here. Only the
        # length of this list is used in this class — confirm against the
        # RL code before relying on the labels.
        self.action_space = ['u', 'd', 'l', 'r']
        self.n_actions = len(self.action_space)
        self.title('maze')
        # BUG FIX: the Tk geometry string is 'WIDTHxHEIGHT'; the width was
        # computed from MAZE_H, which is only correct for square mazes.
        self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))
        self._build_maze()

    def _build_maze(self):
        """Create the canvas, grid lines, hazard cells, goal, and agent."""
        self.canvas = tk.Canvas(self, bg='white',
                                height=MAZE_H * UNIT,
                                width=MAZE_W * UNIT)
        # create grids
        for c in range(0, MAZE_W * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, MAZE_H * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)

        # origin = centre of the top-left cell (cells are UNIT px wide)
        origin = np.array([20, 20])

        # hell cell 1: two cells right, one cell down
        hell1_center = origin + np.array([UNIT * 2, UNIT])
        self.hell1 = self.canvas.create_rectangle(
            hell1_center[0] - 15, hell1_center[1] - 15,
            hell1_center[0] + 15, hell1_center[1] + 15,
            fill='black')
        # hell cell 2: one cell right, two cells down
        hell2_center = origin + np.array([UNIT, UNIT * 2])
        self.hell2 = self.canvas.create_rectangle(
            hell2_center[0] - 15, hell2_center[1] - 15,
            hell2_center[0] + 15, hell2_center[1] + 15,
            fill='black')

        # paradise: yellow oval two cells diagonally from the origin
        oval_center = origin + UNIT * 2
        self.oval = self.canvas.create_oval(
            oval_center[0] - 15, oval_center[1] - 15,
            oval_center[0] + 15, oval_center[1] + 15,
            fill='yellow')

        # the explorer starts at the origin cell
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')

        self.canvas.pack()

    def reset(self):
        """Move the explorer back to the start cell and return the initial
        observation (the rectangle's canvas coordinates)."""
        self.update()
        time.sleep(0.1)
        self.canvas.delete(self.rect)
        origin = np.array([20, 20])
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')
        # return observation
        return self.canvas.coords(self.rect)

    def step(self, action):
        """Apply one action and return (next_state, reward, done)."""
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if action == 0:    # up
            if s[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:  # down
            if s[1] < (MAZE_H - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:  # right
            if s[0] < (MAZE_W - 1) * UNIT:
                base_action[0] += UNIT
        elif action == 3:  # left
            if s[0] > UNIT:
                base_action[0] -= UNIT

        self.canvas.move(self.rect, base_action[0], base_action[1])  # move agent
        s_ = self.canvas.coords(self.rect)  # next state

        # reward function
        if s_ == self.canvas.coords(self.oval):
            reward = 1
            done = True
        elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
            reward = -1
            done = True
        else:
            reward = 0
            done = False
        return s_, reward, done

    def render(self):
        """Refresh the Tk window (throttled slightly so moves are visible)."""
        time.sleep(0.01)
        self.update()
def update():
    """Demo driver scheduled on the Tk event loop: run 10 episodes, always
    taking action 1 (mapped to 'down' in Maze.step()).

    NOTE(review): relies on the module-level ``env`` instance created in
    the ``__main__`` block of this file.
    """
    for t in range(10):
        s = env.reset()
        while True:
            env.render()
            a = 1  # always move down; episode ends on hell/paradise or runs on
            s, r, done = env.step(a)
            if done:
                break
if __name__ == '__main__':
    env = Maze()
    # Schedule the demo policy 100 ms after startup, then enter the Tk loop.
    env.after(100, update)
    # BUG FIX: this line had dataset-extraction junk fused onto it; restore
    # the plain mainloop() call so the script runs.
    env.mainloop()
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example. The RL is in RL_brain.py.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
pixels grid height grid width create grids create origin hell hell create oval create red rect pack all return observation up down right left move agent next state reward functions_ = 'terminal's_ = 'terminal' | 587 | en | 0.700193 |
import sys
from os.path import dirname, abspath

# Make the repository root importable so the shared utils module resolves.
sys.path.append(dirname(dirname(abspath(__file__))))
from SCZ_RNAseq.syn4590909.utils import *

path = "../../data/SCZ_RNAseq/output/syn4590909/"
dataset = "PPI"

# Gene-expression feature matrix and per-sample clinical labels.
features = np.genfromtxt("{}{}.GE_Features.txt".format(path, dataset), dtype=np.dtype(np.float32))
labels = get_clinical_status_syn4590909()

clusters = open("{}{}.clusters_individual_gene.txt".format(path, dataset), encoding="utf-8")

# Candidate clusters that pass the minimal discriminative threshold.
total_clusters = get_top_clusters_without_network(path, dataset, features, labels, clusters)
print("The complete set of clusters that passed the minimal threshold is \n {}".format(total_clusters))
with open("{}{}.top_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
    w_top_clusters = csv.writer(f, delimiter='\t')
    w_top_clusters.writerow(total_clusters)

# Sequential forward selection (SFS): greedily keep each candidate cluster
# only if it improves classification accuracy by at least `eps`.
clust = []
nb_columns = len(labels)
baseline_accuracy = 0
eps = 0.01  # minimum accuracy improvement to consider new cluster (1%)
tmp_Data = object
for i in range(len(total_clusters)):
    clust.append(total_clusters[i])
    nb_rows = len(clust)
    Data = np.zeros((nb_rows, nb_columns), dtype=object)
    if i > 0:  # if temporary Data matrix exists, copy all rows except the last
        for j in range(nb_rows - 1):
            Data[j, :] = tmp_Data[j, :]
    # Just compute the activity score of the newly added cluster.
    Data[-1, :] = prepare_activity_score_feature_vector(features, labels, clust[nb_rows - 1], clusters)
    accuracy = logistic_regression_classification_aggregate_activity_scores(np.transpose(Data), labels)
    if accuracy < baseline_accuracy + eps:
        # Not enough improvement: drop the candidate cluster and its row.
        clust = clust[:-1]
        tmp_Data = Data
        tmp_Data = np.delete(tmp_Data, tmp_Data.shape[0] - 1, axis=0)
        print("SFS: feature {}/{} checked and rejected".format(i, len(total_clusters) - 1))
    else:
        baseline_accuracy = accuracy
        tmp_Data = Data
        print("SFS: feature {}/{} checked and retained".format(i, len(total_clusters) - 1))

print("The set of clusters to be used in classification is \n {}".format(clust))
with open("{}{}.final_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
    w_final_clusters = csv.writer(f, delimiter='\t')
    w_final_clusters.writerow(clust)
print("Logistic regression accuracy: {}".format(accuracy))
#accuracy = LDA_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("LDA accuracy: {}".format(accuracy))
#accuracy = SVM_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("SVM(Linear Kernel) accuracy: {}".format(accuracy))

# BUG FIX: this line had dataset-extraction junk fused onto it; restore the
# file-handle close.
clusters.close()
"""An filter that removes operators based on regular expressions.
"""
from argparse import Namespace
import logging
import re
import sys
from cosmic_ray.config import load_config
from cosmic_ray.work_db import WorkDB
from cosmic_ray.work_item import WorkerOutcome, WorkResult
from cosmic_ray.tools.filters.filter_app import FilterApp
log = logging.getLogger()
class OperatorsFilter(FilterApp):
    """Implements the operators-filter."""

    def description(self):
        return __doc__

    def _skip_filtered(self, work_db, exclude_operators):
        """Mark as SKIPPED every pending work item whose operator name
        matches any of the ``exclude_operators`` regular expressions.

        Args:
            work_db: the WorkDB whose pending items are filtered in place.
            exclude_operators: iterable of regex strings; empty/falsy means
                nothing is filtered.
        """
        if not exclude_operators:
            return

        # BUG FIX: the original built '(:?%s)', a capturing group matching
        # an optional literal ':' — the intended non-capturing group
        # syntax is '(?:%s)'.
        re_exclude_operators = re.compile(
            '|'.join('(?:%s)' % e for e in exclude_operators))

        for item in work_db.pending_work_items:
            if re_exclude_operators.match(item.operator_name):
                log.info(
                    "operator skipping %s %s %s %s %s %s",
                    item.job_id,
                    item.operator_name,
                    item.occurrence,
                    item.module_path,
                    item.start_pos,
                    item.end_pos,
                )
                work_db.set_result(
                    item.job_id,
                    WorkResult(
                        output="Filtered operator",
                        worker_outcome=WorkerOutcome.SKIPPED,
                    ),
                )

    def filter(self, work_db: WorkDB, args: Namespace):
        """Mark as skipped all work items with a filtered operator."""
        if args.config is None:
            config = work_db.get_config()
        else:
            config = load_config(args.config)

        exclude_operators = config.sub('filters', 'operators-filter').get('exclude-operators', ())
        self._skip_filtered(work_db, exclude_operators)

    def add_args(self, parser):
        parser.add_argument('--config', help='Config file to use')
def main(argv=None):
    """Run the operators-filter with the specified command line arguments.

    Args:
        argv: optional argument list; ``None`` lets FilterApp fall back to
            ``sys.argv``.

    Returns:
        The exit status produced by ``FilterApp.main``.
    """
    return OperatorsFilter().main(argv)


if __name__ == '__main__':
    sys.exit(main())
| src/cosmic_ray/tools/filters/operators_filter.py | 2,105 | Implemenents the operators-filter.
Mark as skipped all work item with filtered operator
Run the operators-filter with the specified command line arguments.
An filter that removes operators based on regular expressions. | 232 | en | 0.882354 |
from userinput import userinput
from ..utils import load_repository_author_name
def get_package_author_name() -> str:
    """Return the package author name to be used.

    Prompts the user interactively via ``userinput``, defaulting to the
    author name read from the repository metadata. The answer is required
    to be non-empty, is stripped of surrounding whitespace, and is not
    cached between runs (presumably so each invocation re-asks — confirm
    against the userinput package docs).
    """
    return userinput(
        name="python_package_author_name",
        label="Enter the python package author name to use.",
        default=load_repository_author_name(),
        validator="non_empty",
        sanitizer=[
            "strip"
        ],
        cache=False
    )
| setup_python_package/queries/get_package_author_name.py | 455 | Return the package author name to be used. | 42 | en | 0.673607 |
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvison resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
    """Return a bias-free 3x3 ``nn.Conv2d`` layer.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        stride: convolution stride.
        padding: zero-padding added to both sides.
        dilation: spacing between kernel elements.
    """
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
    return layer
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 ``nn.Conv2d`` layer (channel projection)."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Two-conv residual block used by ResNet-18/34.

    Channel expansion factor is 1 (output channels == ``planes``).
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride, dilation, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: optionally projected when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Residual addition followed by the final activation.
        y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block used by ResNet-50/101/152.

    The final 1x1 conv expands channels by ``expansion`` (default 4).
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
        super(Bottleneck, self).__init__()
        self.expansion = expansion
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: optionally projected when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch: reduce, spatial conv, expand.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        # Residual addition followed by the final activation.
        y = y + shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet backbone with dilated convolutions in the later stages.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        output_stride: overall downsampling factor of the deepest feature
            map; must be 8 or 16.
        num_classes: size of the final classification layer.
        input_channels: number of channels in the input image.

    Raises:
        ValueError: if ``output_stride`` is not 8 or 16.
    """

    def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
        super(ResNet, self).__init__()
        if output_stride == 8:
            stride = [1, 2, 1, 1]
            dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            # BUG FIX: an unsupported output_stride previously left `stride`
            # and `dilation` unbound, crashing later with an opaque
            # UnboundLocalError instead of a clear error.
            raise ValueError(
                'output_stride must be 8 or 16, got {!r}'.format(output_stride))

        self.inplanes = 64
        self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride, dilation):
        """Build one stage of ``blocks`` residual blocks."""
        downsample = None
        # Project the shortcut when spatial or channel dimensions change.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, dilation))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state_dict)
    return model
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state_dict)
    return model
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state_dict)
    return model
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state_dict)
    return model
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state_dict)
    return model
| libs/networks/resnet_dilation.py | 7,486 | 1x1 convolution
3x3 convolution with padding
Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
!/usr/bin/env python coding: utf-8 This code is based on torchvison resnet URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py | 742 | en | 0.651947 |
import unittest
import numpy as np
from overcooked_ai_py.agents.agent import AgentPair, FixedPlanAgent, GreedyHumanModel, RandomAgent, SampleAgent
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, OvercookedState, PlayerState, ObjectState
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
# Deterministic runs for any numpy-driven randomness in the tests below.
np.random.seed(42)

# Short aliases for directions/actions and state classes used throughout.
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState

# Planner flags: force recomputation of MLAMs instead of loading pickles.
force_compute_large = False
force_compute = True
DISPLAY = False

# Shared test layouts. NOTE(review): simple_mdp appears unused in this
# chunk — it may be used elsewhere in the file.
simple_mdp = OvercookedGridworld.from_layout_name('cramped_room')
large_mdp = OvercookedGridworld.from_layout_name('corridor')
class TestAgentEvaluator(unittest.TestCase):
    """Smoke tests for AgentEvaluator rollouts and trajectory format."""

    def setUp(self):
        self.agent_eval = AgentEvaluator.from_layout_name({"layout_name": "cramped_room"}, {"horizon": 100})

    def test_human_model_pair(self):
        # Built-in greedy human model vs itself must produce trajectories
        # in the standard format.
        trajs = self.agent_eval.evaluate_human_model_pair()
        try:
            AgentEvaluator.check_trajectories(trajs, verbose=False)
        except AssertionError as e:
            self.fail("Trajectories were not returned in standard format:\n{}".format(e))

    def test_rollouts(self):
        # Random-vs-random rollouts must also produce standard trajectories.
        ap = AgentPair(RandomAgent(), RandomAgent())
        trajs = self.agent_eval.evaluate_agent_pair(ap, num_games=5)
        try:
            AgentEvaluator.check_trajectories(trajs, verbose=False)
        except AssertionError as e:
            self.fail("Trajectories were not returned in standard format:\n{}".format(e))

    def test_mlam_computation(self):
        # Accessing env.mlam triggers MediumLevelActionManager computation;
        # it should not raise.
        try:
            self.agent_eval.env.mlam
        except Exception as e:
            self.fail("Failed to compute MediumLevelActionManager:\n{}".format(e))
class TestBasicAgents(unittest.TestCase):
    """Behavioral tests for fixed-plan, greedy-human, and sample agents."""

    def setUp(self):
        self.mlam_large = MediumLevelActionManager.from_pickle_or_compute(large_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute_large)

    def test_fixed_plan_agents(self):
        # Each agent walks a 4-step closed loop; after horizon 10 both must
        # be back at the layout's standard start positions.
        a0 = FixedPlanAgent([s, e, n, w])
        a1 = FixedPlanAgent([s, w, n, e])
        agent_pair = AgentPair(a0, a1)
        env = OvercookedEnv.from_mdp(large_mdp, horizon=10)
        trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)
        end_state = trajectory[-1][0]
        self.assertEqual(time_taken, 10)
        self.assertEqual(env.mdp.get_standard_start_state().player_positions, end_state.player_positions)

    def test_two_greedy_human_open_map(self):
        # Two greedy human models on an open layout should complete a full
        # episode without raising (no assertions on the trajectory itself).
        scenario_2_mdp = OvercookedGridworld.from_layout_name('scenario2')
        mlam = MediumLevelActionManager.from_pickle_or_compute(scenario_2_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute)
        a0 = GreedyHumanModel(mlam)
        a1 = GreedyHumanModel(mlam)
        agent_pair = AgentPair(a0, a1)
        start_state = OvercookedState(
            [P((8, 1), s),
             P((1, 1), s)],
            {},
            all_orders=scenario_2_mdp.start_all_orders
        )
        env = OvercookedEnv.from_mdp(scenario_2_mdp, start_state_fn=lambda: start_state, horizon=100)
        trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)

    def test_sample_agent(self):
        # SampleAgent averages its sub-agents' action distributions: the
        # mean of uniform-over-5 (non-interact) and uniform-over-6 actions.
        agent = SampleAgent([RandomAgent(all_actions=False), RandomAgent(all_actions=True)])
        probs = agent.action(None)[1]["action_probs"]
        expected_probs = np.array([0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.08333333])
        self.assertTrue(np.allclose(probs, expected_probs))
class TestAgentEvaluatorStatic(unittest.TestCase):
    """Tests for the AgentEvaluator construction paths: from a concrete mdp,
    from a layout name, from mdp-generation params (finite/infinite), and
    from explicit mdp lists with sampling frequencies."""

    # Layouts used for the round-trip construction tests.
    layout_name_lst = ["asymmetric_advantages", "asymmetric_advantages_tomato", "bonus_order_test", "bottleneck",
                       "centre_objects", "centre_pots", "corridor", "forced_coordination_tomato", "unident",
                       "marshmallow_experiment", "marshmallow_experiment_coordination", "you_shall_not_pass"]

    def test_from_mdp(self):
        # from_mdp must wrap an mdp equal to the one it was given.
        for layout_name in self.layout_name_lst:
            orignal_mdp = OvercookedGridworld.from_layout_name(layout_name)
            ae = AgentEvaluator.from_mdp(mdp=orignal_mdp, env_params={"horizon": 400})
            ae_mdp = ae.env.mdp
            self.assertEqual(orignal_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")

    def test_from_mdp_params_layout(self):
        # from_layout_name must build the same mdp as building it directly.
        for layout_name in self.layout_name_lst:
            orignal_mdp = OvercookedGridworld.from_layout_name(layout_name)
            ae = AgentEvaluator.from_layout_name(mdp_params={"layout_name": layout_name}, env_params={"horizon": 400})
            ae_mdp = ae.env.mdp
            self.assertEqual(orignal_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")

    # Three random-layout generation configs with decreasing emptiness.
    mdp_gen_params_1 = {
        "inner_shape": (10, 7),
        "prop_empty": 0.95,
        "prop_feats": 0.1,
        "start_all_orders": [
            {"ingredients": ["onion", "onion", "onion"]}
        ],
        "display": False,
    }
    mdp_gen_params_2 = {
        "inner_shape": (10, 7),
        "prop_empty": 0.7,
        "prop_feats": 0.5,
        "start_all_orders": [
            {"ingredients": ["onion", "onion", "onion"]}
        ],
        "display": False,
    }
    mdp_gen_params_3 = {
        "inner_shape": (10, 7),
        "prop_empty": 0.5,
        "prop_feats": 0.4,
        "start_all_orders": [
            {"ingredients": ["onion", "onion", "onion"]}
        ],
        "display": False,
    }
    mdp_gen_params_lst = [mdp_gen_params_1, mdp_gen_params_2, mdp_gen_params_3]
    outer_shape = (10, 7)

    def test_from_mdp_params_variable_across(self):
        # Two independently constructed infinite-generator evaluators should
        # not produce the same random layout.
        for mdp_gen_params in self.mdp_gen_params_lst:
            ae0 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
                                                          env_params={"horizon": 400, "num_mdp": np.inf},
                                                          outer_shape=self.outer_shape)
            ae1 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
                                                          env_params={"horizon": 400, "num_mdp": np.inf},
                                                          outer_shape=self.outer_shape)
            self.assertFalse(ae0.env.mdp == ae1.env.mdp,
                             "2 randomly generated layouts across 2 evaluators are the same, which is wrong")

    def test_from_mdp_params_variable_infinite(self):
        # With an infinite generator, regen_mdp=True must change the layout.
        for mdp_gen_params in self.mdp_gen_params_lst:
            ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
                                                         env_params={"horizon": 400, "num_mdp": np.inf},
                                                         outer_shape=self.outer_shape)
            mdp_0 = ae.env.mdp.copy()
            for _ in range(5):
                ae.env.reset(regen_mdp=True)
                mdp_1 = ae.env.mdp
                self.assertFalse(mdp_0 == mdp_1,
                                 "with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")

    def test_from_mdp_params_variable_infinite_no_regen(self):
        # With regen_mdp=False the layout must stay fixed across resets.
        for mdp_gen_params in self.mdp_gen_params_lst:
            ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
                                                         env_params={"horizon": 400, "num_mdp": np.inf},
                                                         outer_shape=self.outer_shape)
            mdp_0 = ae.env.mdp.copy()
            for _ in range(5):
                ae.env.reset(regen_mdp=False)
                mdp_1 = ae.env.mdp
                self.assertTrue(mdp_0 == mdp_1,
                                "with infinite layout generator and regen_mdp=False, the 2 layouts should be the same")

    def test_from_mdp_params_variable_infinite_specified(self):
        # NOTE(review): this test body is identical to
        # test_from_mdp_params_variable_infinite above — confirm whether a
        # distinct "specified" scenario was intended here.
        for mdp_gen_params in self.mdp_gen_params_lst:
            ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
                                                         env_params={"horizon": 400, "num_mdp": np.inf},
                                                         outer_shape=self.outer_shape)
            mdp_0 = ae.env.mdp.copy()
            for _ in range(5):
                ae.env.reset(regen_mdp=True)
                mdp_1 = ae.env.mdp
                self.assertFalse(mdp_0 == mdp_1,
                                 "with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")

    def test_from_mdp_params_variable_finite(self):
        # With num_mdp=2 the evaluator should cycle among exactly two mdps.
        for mdp_gen_params in self.mdp_gen_params_lst:
            ae = AgentEvaluator.from_mdp_params_finite(mdp_params=mdp_gen_params,
                                                       env_params={"horizon": 400, "num_mdp": 2},
                                                       outer_shape=self.outer_shape)
            mdp_0 = ae.env.mdp.copy()
            seen = [mdp_0]
            for _ in range(20):
                ae.env.reset(regen_mdp=True)
                mdp_i = ae.env.mdp
                if len(seen) == 1:
                    if mdp_i != seen[0]:
                        seen.append(mdp_i.copy())
                elif len(seen) == 2:
                    mdp_0, mdp_1 = seen
                    self.assertTrue((mdp_i == mdp_0 or mdp_i == mdp_1),
                                    "more than 2 mdp was created, the function failed to perform")
                else:
                    self.assertTrue(False, "theoretically unreachable statement")

    # Layouts and sampling weights for the mdp-list sampling tests; the
    # large num_reset makes empirical frequencies stable to ~2 decimals.
    layout_name_short_lst = ["cramped_room", "cramped_room_tomato", "simple_o", "simple_tomato", "simple_o_t"]
    biased = [0.1, 0.15, 0.2, 0.25, 0.3]
    num_reset = 200000

    def test_from_mdp_lst_default(self):
        # Default sampling over an mdp list should be uniform (0.2 each).
        mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
        ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400})
        counts = {}
        for _ in range(self.num_reset):
            ae.env.reset(regen_mdp=True)
            if ae.env.mdp.layout_name in counts:
                counts[ae.env.mdp.layout_name] += 1
            else:
                counts[ae.env.mdp.layout_name] = 1
        for k, v in counts.items():
            self.assertAlmostEqual(0.2, v / self.num_reset, 2, "more than 2 places off for " + k)

    def test_from_mdp_lst_uniform(self):
        # Explicit uniform sampling frequencies should behave like default.
        mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
        ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=[0.2, 0.2, 0.2, 0.2, 0.2])
        counts = {}
        for _ in range(self.num_reset):
            ae.env.reset(regen_mdp=True)
            if ae.env.mdp.layout_name in counts:
                counts[ae.env.mdp.layout_name] += 1
            else:
                counts[ae.env.mdp.layout_name] = 1
        for k, v in counts.items():
            self.assertAlmostEqual(0.2, v / self.num_reset, 2, "more than 2 places off for " + k)

    def test_from_mdp_lst_biased(self):
        # Biased sampling frequencies should be reflected in the empirical
        # layout counts.
        mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
        ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=self.biased)
        counts = {}
        for _ in range(self.num_reset):
            ae.env.reset(regen_mdp=True)
            if ae.env.mdp.layout_name in counts:
                counts[ae.env.mdp.layout_name] += 1
            else:
                counts[ae.env.mdp.layout_name] = 1
        # construct the ground truth
        gt = {self.layout_name_short_lst[i]: self.biased[i] for i in range(len(self.layout_name_short_lst))}
        for k, v in counts.items():
            self.assertAlmostEqual(gt[k], v / self.num_reset, 2, "more than 2 places off for " + k)
# Run the full test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| testing/agent_test.py | 12,188 | construct the ground truth | 26 | en | 0.907598 |
import sympy
from sympy import *
def check_weak_prime(n):
    """Return True iff ``n`` is a weakly prime number.

    ``n`` is weakly prime when it is prime and replacing any single decimal
    digit with any other digit (including a leading zero) never yields a
    prime.
    """
    if not isprime(n):
        return False

    digits = [int(c) for c in str(n)]
    for position, original_digit in enumerate(digits):
        for replacement in range(10):
            if replacement == original_digit:
                continue
            # Rebuild the number with one digit swapped out.
            candidate_digits = digits[:position] + [replacement] + digits[position + 1:]
            candidate = int(''.join(str(d) for d in candidate_digits))
            if isprime(candidate):
                return False
    return True
def search_palindromic_weak_prime(nlow, nhigh):
    """Scan primes in [nlow, nhigh) for a weak prime whose decimal reversal
    is also a weak prime; print and return True on the first hit, else
    return False."""
    candidate = nlow if isprime(nlow) else nextprime(nlow)
    while candidate < nhigh:
        if check_weak_prime(candidate):
            print("Weak prime = ", candidate)
            reversed_candidate = int(str(candidate)[::-1])
            if check_weak_prime(reversed_candidate):
                print("Solution found:")
                print(" n = ", candidate)
                print(" n2 = ", reversed_candidate)
                return True
        candidate = nextprime(candidate)
    return False
| bent/weakprime.py | 1,058 | For each digit location - test all other values to see if the result is prime. If so - then this is not a weak prime | 117 | en | 0.746553 |
# Generated by Django 2.1.9 on 2020-03-20 00:50
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert the ``image`` field of each block model to a plain ``FileField``.

    Every field keeps its upload path and ``max_length`` and gains a
    ``FileExtensionValidator`` restricting uploads to jpg/jpeg/svg/gif/png.
    NOTE(review): moving off ``ImageField`` presumably enables SVG uploads,
    which a Pillow-backed ``ImageField`` rejects -- confirm intent.
    """

    dependencies = [
        ('cmsplugin_blocks', '0003_slideitem_title'),
    ]

    operations = [
        # AlbumItem.image: required at the form level (no blank=True),
        # but nullable in the database.
        migrations.AlterField(
            model_name='albumitem',
            name='image',
            field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/album/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
        ),
        # Card.image: optional (blank=True) image.
        migrations.AlterField(
            model_name='card',
            name='image',
            field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/card/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
        ),
        # Hero.image: optional (blank=True) image.
        migrations.AlterField(
            model_name='hero',
            name='image',
            field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/hero/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
        ),
        # SlideItem.image: required at the form level (no blank=True).
        migrations.AlterField(
            model_name='slideitem',
            name='image',
            field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/slider/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
        ),
    ]
| cmsplugin_blocks/migrations/0004_change_image_as_filefield_.py | 1,690 | Generated by Django 2.1.9 on 2020-03-20 00:50 | 45 | en | 0.596832 |
"""
================================
Time-related feature engineering
================================
This notebook introduces different strategies to leverage time-related features
for a bike sharing demand regression task that is highly dependent on business
cycles (days, weeks, months) and yearly season cycles.
In the process, we introduce how to perform periodic feature engineering using
the :class:`sklearn.preprocessing.SplineTransformer` class and its
`extrapolation="periodic"` option.
"""
# %%
# Data exploration on the Bike Sharing Demand dataset
# ---------------------------------------------------
#
# We start by loading the data from the OpenML repository.
from sklearn.datasets import fetch_openml
bike_sharing = fetch_openml("Bike_Sharing_Demand", version=2, as_frame=True)
df = bike_sharing.frame
# %%
# To get a quick understanding of the periodic patterns of the data, let us
# have a look at the average demand per hour during a week.
#
# Note that the week starts on a Sunday, during the weekend. We can clearly
# distinguish the commute patterns in the morning and evenings of the work days
# and the leisure use of the bikes on the weekends with a more spread peak
# demand around the middle of the days:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 4))
average_week_demand = df.groupby(["weekday", "hour"]).mean()["count"]
average_week_demand.plot(ax=ax)
_ = ax.set(
title="Average hourly bike demand during the week",
xticks=[i * 24 for i in range(7)],
xticklabels=["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
xlabel="Time of the week",
ylabel="Number of bike rentals",
)
# %%
#
# The target of the prediction problem is the absolute count of bike rentals on
# an hourly basis:
df["count"].max()
# %% [markdown]
#
# Let us rescale the target variable (number of hourly bike rentals) to predict
# a relative demand so that the mean absolute error is more easily interpreted
# as a fraction of the maximum demand.
#
# .. note::
#
# The fit method of the models used in this notebook all minimize the
# mean squared error to estimate the conditional mean instead of the mean
# absolute error that would fit an estimator of the conditional median.
#
# When reporting performance measure on the test set in the discussion, we
# instead choose to focus on the mean absolute error that is more
# intuitive than the (root) mean squared error. Note however that the best
# models for one metric are also the best for the other in this study.
y = df["count"] / 1000
# %%
fig, ax = plt.subplots(figsize=(12, 4))
y.hist(bins=30, ax=ax)
_ = ax.set(
xlabel="Fraction of rented fleet demand",
ylabel="Number of hours",
)
# %%
# The input feature data frame is a time annotated hourly log of variables
# describing the weather conditions. It includes both numerical and categorical
# variables. Note that the time information has already been expanded into
# several complementary columns.
#
X = df.drop("count", axis="columns")
X
# %%
# .. note::
#
# If the time information was only present as a date or datetime column, we
# could have expanded it into hour-in-the-day, day-in-the-week,
# day-in-the-month, month-in-the-year using pandas:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components
#
# We now introspect the distribution of the categorical variables, starting
# with `"weather"`:
#
X["weather"].value_counts()
# %%
# Since there are only 3 `"heavy_rain"` events, we cannot use this category to
# train machine learning models with cross validation. Instead, we simplify the
# representation by collapsing those into the `"rain"` category.
#
X["weather"].replace(to_replace="heavy_rain", value="rain", inplace=True)
# %%
X["weather"].value_counts()
# %%
# As expected, the `"season"` variable is well balanced:
#
X["season"].value_counts()
# %%
# Time-based cross-validation
# ---------------------------
#
# Since the dataset is a time-ordered event log (hourly demand), we will use a
# time-sensitive cross-validation splitter to evaluate our demand forecasting
# model as realistically as possible. We use a gap of 2 days between the train
# and test side of the splits. We also limit the training set size to make the
# performance of the CV folds more stable.
#
# 1000 test datapoints should be enough to quantify the performance of the
# model. This represents a bit less than a month and a half of contiguous test
# data:
from sklearn.model_selection import TimeSeriesSplit
ts_cv = TimeSeriesSplit(
n_splits=5,
gap=48,
max_train_size=10000,
test_size=1000,
)
# %%
# Let us manually inspect the various splits to check that the
# `TimeSeriesSplit` works as we expect, starting with the first split:
all_splits = list(ts_cv.split(X, y))
train_0, test_0 = all_splits[0]
# %%
X.iloc[test_0]
# %%
X.iloc[train_0]
# %%
# We now inspect the last split:
train_4, test_4 = all_splits[4]
# %%
X.iloc[test_4]
# %%
X.iloc[train_4]
# %%
# All is well. We are now ready to do some predictive modeling!
#
# Gradient Boosting
# -----------------
#
# Gradient Boosting Regression with decision trees is often flexible enough to
# efficiently handle heterogeneous tabular data with a mix of categorical and
# numerical features as long as the number of samples is large enough.
#
# Here, we do minimal ordinal encoding for the categorical variables and then
# let the model know that it should treat those as categorical variables by
# using a dedicated tree splitting rule. Since we use an ordinal encoder, we
# pass the list of categorical values explicitly to use a logical order when
# encoding the categories as integer instead of the lexicographical order. This
# also has the added benefit of preventing any issue with unknown categories
# when using cross-validation.
#
# The numerical variable need no preprocessing and, for the sake of simplicity,
# we only try the default hyper-parameters for this model:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_validate
categorical_columns = [
"weather",
"season",
"holiday",
"workingday",
]
categories = [
["clear", "misty", "rain"],
["spring", "summer", "fall", "winter"],
["False", "True"],
["False", "True"],
]
ordinal_encoder = OrdinalEncoder(categories=categories)
gbrt_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", ordinal_encoder, categorical_columns),
],
remainder="passthrough",
),
HistGradientBoostingRegressor(
categorical_features=range(4),
),
)
# %%
#
# Let's evaluate our gradient boosting model with the mean absolute error of the
# relative demand averaged across our 5 time-based cross-validation splits:
def evaluate(model, X, y, cv):
    """Cross-validate *model* and print MAE / RMSE (mean +/- std across folds).

    Parameters
    ----------
    model : estimator
        Pipeline or estimator to evaluate.
    X, y
        Feature frame and target passed to ``cross_validate``.
    cv
        Cross-validation splitter to use.
    """
    cv_results = cross_validate(
        model,
        X,
        y,
        cv=cv,  # bug fix: previously hard-coded the global ``ts_cv``, ignoring this parameter
        scoring=["neg_mean_absolute_error", "neg_root_mean_squared_error"],
    )
    # Scores are negated errors; flip the sign to report positive errors.
    mae = -cv_results["test_neg_mean_absolute_error"]
    rmse = -cv_results["test_neg_root_mean_squared_error"]
    print(
        f"Mean Absolute Error: {mae.mean():.3f} +/- {mae.std():.3f}\n"
        f"Root Mean Squared Error: {rmse.mean():.3f} +/- {rmse.std():.3f}"
    )
evaluate(gbrt_pipeline, X, y, cv=ts_cv)
# %%
# This model has an average error around 4 to 5% of the maximum demand. This is
# quite good for a first trial without any hyper-parameter tuning! We just had
# to make the categorical variables explicit. Note that the time related
# features are passed as is, i.e. without processing them. But this is not much
# of a problem for tree-based models as they can learn a non-monotonic
# relationship between ordinal input features and the target.
#
# This is not the case for linear regression model as we will see in the
# following.
#
# Naive linear regression
# -----------------------
#
# As usual for linear models, categorical variables need to be one-hot encoded.
# For consistency, we scale the numerical features to the same 0-1 range using
# :class:`sklearn.preprocessing.MinMaxScaler`, although in this case it does not
# impact the results much because they are already on comparable scales:
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import RidgeCV
import numpy as np
one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
alphas = np.logspace(-6, 6, 25)
naive_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(naive_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance is not good: the average error is around 14% of the maximum
# demand. This is more than three times higher than the average error of the
# gradient boosting model. We can suspect that the naive original encoding of
# the periodic time-related features might prevent the linear regression model
# to properly leverage the time information: linear regression does not model
# non-monotonic relationships between the input features and the target.
# Non-linear terms have to be engineered in the input.
#
# For example, the raw numerical encoding of the `"hour"` feature prevents the
# linear model from recognizing that an increase of hour in the morning from 6
# to 8 should have a strong positive impact on the number of bike rentals while
# an increase of similar magnitude in the evening from 18 to 20 should have a
# strong negative impact on the predicted number of bike rentals.
#
# Time-steps as categories
# ------------------------
#
# Since the time features are encoded in a discrete manner using integers (24
# unique values in the "hours" feature), we could decide to treat those as
# categorical variables and ignore any assumption implied by the ordering of
# the hour values using a one-hot encoding.
#
# Using one-hot encoding for the time features gives the linear model a lot
# more flexibility as we introduce one additional feature per discrete time
# level.
one_hot_linear_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder=MinMaxScaler(),
),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_linear_pipeline, X, y, cv=ts_cv)
# %%
# The average error rate of this model is 10% which is much better than using
# the original ordinal encoding of the time feature, confirming our intuition
# that the linear regression model benefit from the added flexibility to not
# treat time progression in a monotonic manner.
#
# However, this introduces a very large number of new features. If the time of
# the day was represented in minutes since the start of the day instead of
# hours, one-hot encoding would have introduced 1440 features instead of 24.
# This could cause some significant overfitting. To avoid this we could use
# :class:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number
# of levels of fine-grained ordinal or numerical variables while still
# benefitting from the non-monotonic expressivity advantages of one-hot
# encoding.
#
# Finally, we also observe that one-hot encoding completely ignores the
# ordering of the hour levels while this could be an interesting inductive bias
# to preserve to some level. In the following we try to explore smooth,
# non-monotonic encoding that locally preserves the relative ordering of time
# features.
#
# Trigonometric features
# ----------------------
#
# As a first attempt, we can try to encode each of those periodic features
# using a sine and cosine transform with the matching period.
#
# Each ordinal time feature is transformed into 2 features that together encode
# equivalent information in a non-monotonic way, and more importantly without
# any jump between the first and the last value of the periodic range.
from sklearn.preprocessing import FunctionTransformer
def sin_transformer(period):
    """Return a FunctionTransformer mapping x to sin(2*pi*x / period)."""
    def _to_sin(x):
        return np.sin(x / period * 2 * np.pi)

    return FunctionTransformer(_to_sin)
def cos_transformer(period):
    """Return a FunctionTransformer mapping x to cos(2*pi*x / period)."""
    def _to_cos(x):
        return np.cos(x / period * 2 * np.pi)

    return FunctionTransformer(_to_cos)
# %%
#
# Let us visualize the effect of this feature expansion on some synthetic hour
# data with a bit of extrapolation beyond hour=23:
import pandas as pd
hour_df = pd.DataFrame(
np.arange(26).reshape(-1, 1),
columns=["hour"],
)
hour_df["hour_sin"] = sin_transformer(24).fit_transform(hour_df)["hour"]
hour_df["hour_cos"] = cos_transformer(24).fit_transform(hour_df)["hour"]
hour_df.plot(x="hour")
_ = plt.title("Trigonometric encoding for the 'hour' feature")
# %%
#
# Let's use a 2D scatter plot with the hours encoded as colors to better see
# how this representation maps the 24 hours of the day to a 2D space, akin to
# some sort of 24 hour version of an analog clock. Note that the "25th" hour is
# mapped back to the 1st hour because of the periodic nature of the sine/cosine
# representation.
fig, ax = plt.subplots(figsize=(7, 5))
sp = ax.scatter(hour_df["hour_sin"], hour_df["hour_cos"], c=hour_df["hour"])
ax.set(
xlabel="sin(hour)",
ylabel="cos(hour)",
)
_ = fig.colorbar(sp)
# %%
#
# We can now build a feature extraction pipeline using this strategy:
cyclic_cossin_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("month_sin", sin_transformer(12), ["month"]),
("month_cos", cos_transformer(12), ["month"]),
("weekday_sin", sin_transformer(7), ["weekday"]),
("weekday_cos", cos_transformer(7), ["weekday"]),
("hour_sin", sin_transformer(24), ["hour"]),
("hour_cos", cos_transformer(24), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_cossin_linear_pipeline = make_pipeline(
cyclic_cossin_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_cossin_linear_pipeline, X, y, cv=ts_cv)
# %%
#
# The performance of our linear regression model with this simple feature
# engineering is a bit better than using the original ordinal time features but
# worse than using the one-hot encoded time features. We will further analyze
# possible reasons for this disappointing outcome at the end of this notebook.
#
# Periodic spline features
# ------------------------
#
# We can try an alternative encoding of the periodic time-related features
# using spline transformations with a large enough number of splines, and as a
# result a larger number of expanded features:
from sklearn.preprocessing import SplineTransformer
def periodic_spline_transformer(period, n_splines=None, degree=3):
    """Build a SplineTransformer with periodic extrapolation over [0, period].

    When *n_splines* is omitted, one spline per unit of the period is used.
    """
    if n_splines is None:
        n_splines = period
    # With periodic extrapolation and include_bias=True, n_splines splines
    # require n_splines + 1 knots spanning the full period.
    knot_count = n_splines + 1
    knot_positions = np.linspace(0, period, knot_count).reshape(knot_count, 1)
    return SplineTransformer(
        degree=degree,
        n_knots=knot_count,
        knots=knot_positions,
        extrapolation="periodic",
        include_bias=True,
    )
# %%
#
# Again, let us visualize the effect of this feature expansion on some
# synthetic hour data with a bit of extrapolation beyond hour=23:
hour_df = pd.DataFrame(
np.linspace(0, 26, 1000).reshape(-1, 1),
columns=["hour"],
)
splines = periodic_spline_transformer(24, n_splines=12).fit_transform(hour_df)
splines_df = pd.DataFrame(
splines,
columns=[f"spline_{i}" for i in range(splines.shape[1])],
)
pd.concat([hour_df, splines_df], axis="columns").plot(x="hour", cmap=plt.cm.tab20b)
_ = plt.title("Periodic spline-based encoding for the 'hour' feature")
# %%
# Thanks to the use of the `extrapolation="periodic"` parameter, we observe
# that the feature encoding stays smooth when extrapolating beyond midnight.
#
# We can now build a predictive pipeline using this alternative periodic
# feature engineering strategy.
#
# It is possible to use fewer splines than discrete levels for those ordinal
# values. This makes spline-based encoding more efficient than one-hot encoding
# while preserving most of the expressivity:
cyclic_spline_transformer = ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("cyclic_month", periodic_spline_transformer(12, n_splines=6), ["month"]),
("cyclic_weekday", periodic_spline_transformer(7, n_splines=3), ["weekday"]),
("cyclic_hour", periodic_spline_transformer(24, n_splines=12), ["hour"]),
],
remainder=MinMaxScaler(),
)
cyclic_spline_linear_pipeline = make_pipeline(
cyclic_spline_transformer,
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_linear_pipeline, X, y, cv=ts_cv)
# %%
# Spline features make it possible for the linear model to successfully
# leverage the periodic time-related features and reduce the error from ~14% to
# ~10% of the maximum demand, which is similar to what we observed with the
# one-hot encoded features.
#
# Qualitative analysis of the impact of features on linear models predictions
# ---------------------------------------------------------------------------
#
# Here, we want to visualize the impact of the feature engineering choices on
# the time related shape of the predictions.
#
# To do so we consider an arbitrary time-based split to compare the predictions
# on a range of held out data points.
naive_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
naive_linear_predictions = naive_linear_pipeline.predict(X.iloc[test_0])
one_hot_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_linear_predictions = one_hot_linear_pipeline.predict(X.iloc[test_0])
cyclic_cossin_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_cossin_linear_predictions = cyclic_cossin_linear_pipeline.predict(X.iloc[test_0])
cyclic_spline_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_linear_predictions = cyclic_spline_linear_pipeline.predict(X.iloc[test_0])
# %%
# We visualize those predictions by zooming on the last 96 hours (4 days) of
# the test set to get some qualitative insights:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by linear models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(naive_linear_predictions[last_hours], "x-", label="Ordinal time features")
ax.plot(
cyclic_cossin_linear_predictions[last_hours],
"x-",
label="Trigonometric time features",
)
ax.plot(
cyclic_spline_linear_predictions[last_hours],
"x-",
label="Spline-based time features",
)
ax.plot(
one_hot_linear_predictions[last_hours],
"x-",
label="One-hot time features",
)
_ = ax.legend()
# %%
# We can draw the following conclusions from the above plot:
#
# - the **raw ordinal time-related features** are problematic because they do
# not capture the natural periodicity: we observe a big jump in the
# predictions at the end of each day when the hour features goes from 23 back
# to 0. We can expect similar artifacts at the end of each week or each year.
#
# - as expected, the **trigonometric features** (sine and cosine) do not have
# these discontinuities at midnight but the linear regression model fails to
# leverage those features to properly model intra-day variations.
# Using trigonometric features for higher harmonics or additional
# trigonometric features for the natural period with different phases could
# potentially fix this problem.
#
# - the **periodic spline-based features** fix those two problems at once: they
# give more expressivity to the linear model by making it possible to focus
# on specific hours thanks to the use of 12 splines. Furthermore the
# `extrapolation="periodic"` option enforces a smooth representation between
# `hour=23` and `hour=0`.
#
# - the **one-hot encoded features** behave similarly to the periodic
# spline-based features but are more spiky: for instance they can better
# model the morning peak during the week days since this peak lasts shorter
# than an hour. However, we will see in the following that what can be an
# advantage for linear models is not necessarily one for more expressive
# models.
# %%
# We can also compare the number of features extracted by each feature
# engineering pipeline:
naive_linear_pipeline[:-1].transform(X).shape
# %%
one_hot_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_cossin_linear_pipeline[:-1].transform(X).shape
# %%
cyclic_spline_linear_pipeline[:-1].transform(X).shape
# %%
# This confirms that the one-hot encoding and the spline encoding strategies
# create a lot more features for the time representation than the alternatives,
# which in turn gives the downstream linear model more flexibility (degrees of
# freedom) to avoid underfitting.
#
# Finally, we observe that none of the linear models can approximate the true
# bike rentals demand, especially for the peaks that can be very sharp at rush
# hours during the working days but much flatter during the week-ends: the most
# accurate linear models based on splines or one-hot encoding tend to forecast
# peaks of commuting-related bike rentals even on the week-ends and
# under-estimate the commuting-related events during the working days.
#
# These systematic prediction errors reveal a form of under-fitting and can be
# explained by the lack of non-additive modeling of the interactions between
# features (in this case "workingday" and features derived from "hours"). This
# issue will be addressed in the following section.
# %%
# Modeling pairwise interactions with splines and polynomial features
# -------------------------------------------------------------------
#
# Linear models alone cannot model interaction effects between input features.
# It does not help that some features are marginally non-linear as is the case
# with features constructed by `SplineTransformer` (or one-hot encoding or
# binning).
#
# However, it is possible to use the `PolynomialFeatures` class on coarse
# grained splined encoded hours to model the "workingday"/"hours" interaction
# explicitly without introducing too many new variables:
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import FeatureUnion
hour_workday_interaction = make_pipeline(
ColumnTransformer(
[
("cyclic_hour", periodic_spline_transformer(24, n_splines=8), ["hour"]),
("workingday", FunctionTransformer(lambda x: x == "True"), ["workingday"]),
]
),
PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),
)
# %%
# Those features are then combined with the ones already computed in the
# previous spline-based pipeline. We can observe a nice performance improvement
# by modeling this pairwise interaction explicitly:
cyclic_spline_interactions_pipeline = make_pipeline(
FeatureUnion(
[
("marginal", cyclic_spline_transformer),
("interactions", hour_workday_interaction),
]
),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_interactions_pipeline, X, y, cv=ts_cv)
# %%
# Modeling non-linear feature interactions with kernels
# -----------------------------------------------------
#
# The previous analysis highlighted the need to model the interactions between
# `"workingday"` and `"hours"`. Another example of such a non-linear
# interactions that we would like to model could be the impact of the rain that
# might not be the same during the working days and the week-ends and holidays
# for instance.
#
# To model all such interactions, we could either use a polynomial expansion on
# all marginal features at once, after their spline-based expansion. However
# this would create a quadratic number of features which can cause overfitting
# and computational tractability issues.
#
# Alternatively we can use the Nyström method to compute an approximate
# polynomial kernel expansion. Let us try the latter:
from sklearn.kernel_approximation import Nystroem
cyclic_spline_poly_pipeline = make_pipeline(
cyclic_spline_transformer,
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_poly_pipeline, X, y, cv=ts_cv)
# %%
#
# We observe that this model can almost rival the performance of the gradient
# boosted trees with an average error around 6% of the maximum demand.
#
# Note that while the final step of this pipeline is a linear regression model,
# the intermediate steps such as the spline feature extraction and the Nyström
# kernel approximation are highly non-linear. As a result the compound pipeline
# is much more expressive than a simple linear regression model with raw features.
#
# For the sake of completeness, we also evaluate the combination of one-hot
# encoding and kernel approximation:
one_hot_poly_pipeline = make_pipeline(
ColumnTransformer(
transformers=[
("categorical", one_hot_encoder, categorical_columns),
("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]),
],
remainder="passthrough",
),
Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
RidgeCV(alphas=alphas),
)
evaluate(one_hot_poly_pipeline, X, y, cv=ts_cv)
# %%
# While one-hot features were competitive with spline-based features when using
# linear models, this is no longer the case when using a low-rank approximation
# of a non-linear kernel: this can be explained by the fact that spline
# features are smoother and allow the kernel approximation to find a more
# expressive decision function.
#
# Let us now have a qualitative look at the predictions of the kernel models
# and of the gradient boosted trees that should be able to better model
# non-linear interactions between features:
gbrt_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
gbrt_predictions = gbrt_pipeline.predict(X.iloc[test_0])
one_hot_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
one_hot_poly_predictions = one_hot_poly_pipeline.predict(X.iloc[test_0])
cyclic_spline_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0])
cyclic_spline_poly_predictions = cyclic_spline_poly_pipeline.predict(X.iloc[test_0])
# %%
# Again we zoom on the last 4 days of the test set:
last_hours = slice(-96, None)
fig, ax = plt.subplots(figsize=(12, 4))
fig.suptitle("Predictions by non-linear regression models")
ax.plot(
y.iloc[test_0].values[last_hours],
"x-",
alpha=0.2,
label="Actual demand",
color="black",
)
ax.plot(
gbrt_predictions[last_hours],
"x-",
label="Gradient Boosted Trees",
)
ax.plot(
one_hot_poly_predictions[last_hours],
"x-",
label="One-hot + polynomial kernel",
)
ax.plot(
cyclic_spline_poly_predictions[last_hours],
"x-",
label="Splines + polynomial kernel",
)
_ = ax.legend()
# %%
# First, note that trees can naturally model non-linear feature interactions
# since, by default, decision trees are allowed to grow beyond a depth of 2
# levels.
#
# Here we can observe that the combinations of spline features and non-linear
# kernels works quite well and can almost rival the accuracy of the gradient
# boosting regression trees.
#
# On the contrary, one-hot time features do not perform that well with the low
# rank kernel model. In particular they significantly over-estimate the low
# demand hours more than the competing models.
#
# We also observe that none of the models can successfully predict some of the
# peak rentals at the rush hours during the working days. It is possible that
# access to additional features would be required to further improve the
# accuracy of the predictions. For instance, it could be useful to have access
# to the geographical repartition of the fleet at any point in time or the
# fraction of bikes that are immobilized because they need servicing.
#
# Let us finally get a more quantitative look at the prediction errors of those
# three models using the true vs predicted demand scatter plots:
fig, axes = plt.subplots(ncols=3, figsize=(12, 4), sharey=True)
fig.suptitle("Non-linear regression models")
predictions = [
one_hot_poly_predictions,
cyclic_spline_poly_predictions,
gbrt_predictions,
]
labels = [
"One hot + polynomial kernel",
"Splines + polynomial kernel",
"Gradient Boosted Trees",
]
for ax, pred, label in zip(axes, predictions, labels):
ax.scatter(y.iloc[test_0].values, pred, alpha=0.3, label=label)
ax.plot([0, 1], [0, 1], "--", label="Perfect model")
ax.set(
xlim=(0, 1),
ylim=(0, 1),
xlabel="True demand",
ylabel="Predicted demand",
)
ax.legend()
# %%
# This visualization confirms the conclusions we draw on the previous plot.
#
# All models under-estimate the high demand events (working days rush hours),
# but gradient boosting a bit less so. The low demand events are well predicted
# on average by gradient boosting while the one-hot polynomial regression
# pipeline seems to systematically over-estimate demand in that regime. Overall
# the predictions of the gradient boosted trees are closer to the diagonal than
# for the kernel models.
#
# Concluding remarks
# ------------------
#
# We note that we could have obtained slightly better results for kernel models
# by using more components (higher rank kernel approximation) at the cost of
# longer fit and prediction durations. For large values of `n_components`, the
# performance of the one-hot features would even match the spline features.
#
# The `Nystroem` + `RidgeCV` classifier could also have been replaced by
# :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers
# and we would have obtained quite similar results.
#
# The dataset we used in this case study is sampled on a hourly basis. However
# cyclic spline-based features could model time-within-day or time-within-week
# very efficiently with finer-grained time resolutions (for instance with
# measurements taken every minute instead of every hours) without introducing
# more features. One-hot encoding time representations would not offer this
# flexibility.
#
# Finally, in this notebook we used `RidgeCV` because it is very efficient from
# a computational point of view. However it models the target variable as a
# Gaussian random variable with constant variance. For positive regression
# problems, it is likely that using a Poisson or Gamma distribution would make
# more sense. This could be achieved by using
# `GridSearchCV(TweedieRegressor(power=2), param_grid({"alpha": alphas}))`
# instead of `RidgeCV`.
| examples/applications/plot_cyclical_feature_engineering.py | 30,894 | ================================
Time-related feature engineering
================================
This notebook introduces different strategies to leverage time-related features
for a bike sharing demand regression task that is highly dependent on business
cycles (days, weeks, months) and yearly season cycles.
In the process, we introduce how to perform periodic feature engineering using
the :class:`sklearn.preprocessing.SplineTransformer` class and its
`extrapolation="periodic"` option.
%% Data exploration on the Bike Sharing Demand dataset --------------------------------------------------- We start by loading the data from the OpenML repository. %% To get a quick understanding of the periodic patterns of the data, let us have a look at the average demand per hour during a week. Note that the week starts on a Sunday, during the weekend. We can clearly distinguish the commute patterns in the morning and evenings of the work days and the leisure use of the bikes on the weekends with a more spread peak demand around the middle of the days: %% The target of the prediction problem is the absolute count of bike rentals on a hourly basis: %% [markdown] Let us rescale the target variable (number of hourly bike rentals) to predict a relative demand so that the mean absolute error is more easily interpreted as a fraction of the maximum demand. .. note:: The fit method of the models used in this notebook all minimize the mean squared error to estimate the conditional mean instead of the mean absolute error that would fit an estimator of the conditional median. When reporting performance measure on the test set in the discussion, we instead choose to focus on the mean absolute error that is more intuitive than the (root) mean squared error. Note however that the best models for one metric are also the best for the other in this study. %% %% The input feature data frame is a time annotated hourly log of variables describing the weather conditions. It includes both numerical and categorical variables. Note that the time information has already been expanded into several complementary columns. %% .. 
note:: If the time information was only present as a date or datetime column, we could have expanded it into hour-in-the-day, day-in-the-week, day-in-the-month, month-in-the-year using pandas: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components We now introspect the distribution of the categorical variables, starting with `"weather"`: %% Since there are only 3 `"heavy_rain"` events, we cannot use this category to train machine learning models with cross validation. Instead, we simplify the representation by collapsing those into the `"rain"` category. %% %% As expected, the `"season"` variable is well balanced: %% Time-based cross-validation --------------------------- Since the dataset is a time-ordered event log (hourly demand), we will use a time-sensitive cross-validation splitter to evaluate our demand forecasting model as realistically as possible. We use a gap of 2 days between the train and test side of the splits. We also limit the training set size to make the performance of the CV folds more stable. 1000 test datapoints should be enough to quantify the performance of the model. This represents a bit less than a month and a half of contiguous test data: %% Let us manually inspect the various splits to check that the `TimeSeriesSplit` works as we expect, starting with the first split: %% %% %% We now inspect the last split: %% %% %% All is well. We are now ready to do some predictive modeling! Gradient Boosting ----------------- Gradient Boosting Regression with decision trees is often flexible enough to efficiently handle heterogeneous tabular data with a mix of categorical and numerical features as long as the number of samples is large enough. Here, we do minimal ordinal encoding for the categorical variables and then let the model know that it should treat those as categorical variables by using a dedicated tree splitting rule. 
Since we use an ordinal encoder, we pass the list of categorical values explicitly to use a logical order when encoding the categories as integer instead of the lexicographical order. This also has the added benefit of preventing any issue with unknown categories when using cross-validation. The numerical variables need no preprocessing and, for the sake of simplicity, we only try the default hyper-parameters for this model: %% Let's evaluate our gradient boosting model with the mean absolute error of the relative demand averaged across our 5 time-based cross-validation splits: %% This model has an average error around 4 to 5% of the maximum demand. This is quite good for a first trial without any hyper-parameter tuning! We just had to make the categorical variables explicit. Note that the time related features are passed as is, i.e. without processing them. But this is not much of a problem for tree-based models as they can learn a non-monotonic relationship between ordinal input features and the target. This is not the case for linear regression model as we will see in the following. Naive linear regression ----------------------- As usual for linear models, categorical variables need to be one-hot encoded. For consistency, we scale the numerical features to the same 0-1 range using :class:`sklearn.preprocessing.MinMaxScaler`, although in this case it does not impact the results much because they are already on comparable scales: %% The performance is not good: the average error is around 14% of the maximum demand. This is more than three times higher than the average error of the gradient boosting model. We can suspect that the naive original encoding of the periodic time-related features might prevent the linear regression model from properly leveraging the time information: linear regression does not model non-monotonic relationships between the input features and the target. Non-linear terms have to be engineered in the input. 
For example, the raw numerical encoding of the `"hour"` feature prevents the linear model from recognizing that an increase of hour in the morning from 6 to 8 should have a strong positive impact on the number of bike rentals while an increase of similar magnitude in the evening from 18 to 20 should have a strong negative impact on the predicted number of bike rentals. Time-steps as categories ------------------------ Since the time features are encoded in a discrete manner using integers (24 unique values in the "hours" feature), we could decide to treat those as categorical variables and ignore any assumption implied by the ordering of the hour values using a one-hot encoding. Using one-hot encoding for the time features gives the linear model a lot more flexibility as we introduce one additional feature per discrete time level. %% The average error rate of this model is 10% which is much better than using the original ordinal encoding of the time feature, confirming our intuition that the linear regression model benefits from the added flexibility to not treat time progression in a monotonic manner. However, this introduces a very large number of new features. If the time of the day was represented in minutes since the start of the day instead of hours, one-hot encoding would have introduced 1440 features instead of 24. This could cause some significant overfitting. To avoid this we could use :class:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number of levels of fine-grained ordinal or numerical variables while still benefitting from the non-monotonic expressivity advantages of one-hot encoding. Finally, we also observe that one-hot encoding completely ignores the ordering of the hour levels while this could be an interesting inductive bias to preserve to some level. In the following we try to explore smooth, non-monotonic encoding that locally preserves the relative ordering of time features. 
Trigonometric features ---------------------- As a first attempt, we can try to encode each of those periodic features using a sine and cosine transform with the matching period. Each ordinal time feature is transformed into 2 features that together encode equivalent information in a non-monotonic way, and more importantly without any jump between the first and the last value of the periodic range. %% Let us visualize the effect of this feature expansion on some synthetic hour data with a bit of extrapolation beyond hour=23: %% Let's use a 2D scatter plot with the hours encoded as colors to better see how this representation maps the 24 hours of the day to a 2D space, akin to some sort of 24 hour version of an analog clock. Note that the "25th" hour is mapped back to the 1st hour because of the periodic nature of the sine/cosine representation. %% We can now build a feature extraction pipeline using this strategy: %% The performance of our linear regression model with this simple feature engineering is a bit better than using the original ordinal time features but worse than using the one-hot encoded time features. We will further analyze possible reasons for this disappointing outcome at the end of this notebook. Periodic spline features ------------------------ We can try an alternative encoding of the periodic time-related features using spline transformations with a large enough number of splines, and as a result a larger number of expanded features: periodic and include_bias is True %% Again, let us visualize the effect of this feature expansion on some synthetic hour data with a bit of extrapolation beyond hour=23: %% Thanks to the use of the `extrapolation="periodic"` parameter, we observe that the feature encoding stays smooth when extrapolating beyond midnight. We can now build a predictive pipeline using this alternative periodic feature engineering strategy. It is possible to use fewer splines than discrete levels for those ordinal values. 
This makes spline-based encoding more efficient than one-hot encoding while preserving most of the expressivity: %% Spline features make it possible for the linear model to successfully leverage the periodic time-related features and reduce the error from ~14% to ~10% of the maximum demand, which is similar to what we observed with the one-hot encoded features. Qualitative analysis of the impact of features on linear models predictions --------------------------------------------------------------------------- Here, we want to visualize the impact of the feature engineering choices on the time related shape of the predictions. To do so we consider an arbitrary time-based split to compare the predictions on a range of held out data points. %% We visualize those predictions by zooming on the last 96 hours (4 days) of the test set to get some qualitative insights: %% We can draw the following conclusions from the above plot: - the **raw ordinal time-related features** are problematic because they do not capture the natural periodicity: we observe a big jump in the predictions at the end of each day when the hour features goes from 23 back to 0. We can expect similar artifacts at the end of each week or each year. - as expected, the **trigonometric features** (sine and cosine) do not have these discontinuities at midnight but the linear regression model fails to leverage those features to properly model intra-day variations. Using trigonometric features for higher harmonics or additional trigonometric features for the natural period with different phases could potentially fix this problem. - the **periodic spline-based features** fix those two problems at once: they give more expressivity to the linear model by making it possible to focus on specific hours thanks to the use of 12 splines. Furthermore the `extrapolation="periodic"` option enforces a smooth representation between `hour=23` and `hour=0`. 
- the **one-hot encoded features** behave similarly to the periodic spline-based features but are more spiky: for instance they can better model the morning peak during the week days since this peak lasts shorter than an hour. However, we will see in the following that what can be an advantage for linear models is not necessarily one for more expressive models. %% We can also compare the number of features extracted by each feature engineering pipeline: %% %% %% %% This confirms that the one-hot encoding and the spline encoding strategies create a lot more features for the time representation than the alternatives, which in turn gives the downstream linear model more flexibility (degrees of freedom) to avoid underfitting. Finally, we observe that none of the linear models can approximate the true bike rentals demand, especially for the peaks that can be very sharp at rush hours during the working days but much flatter during the week-ends: the most accurate linear models based on splines or one-hot encoding tend to forecast peaks of commuting-related bike rentals even on the week-ends and under-estimate the commuting-related events during the working days. These systematic prediction errors reveal a form of under-fitting and can be explained by the lack of non-additive modeling of the interactions between features (in this case "workingday" and features derived from "hours"). This issue will be addressed in the following section. %% Modeling pairwise interactions with splines and polynomial features ------------------------------------------------------------------- Linear models alone cannot model interaction effects between input features. It does not help that some features are marginally non-linear as is the case with features constructed by `SplineTransformer` (or one-hot encoding or binning). 
However, it is possible to use the `PolynomialFeatures` class on coarse grained splined encoded hours to model the "workingday"/"hours" interaction explicitly without introducing too many new variables: %% Those features are then combined with the ones already computed in the previous spline-based pipeline. We can observe a nice performance improvement by modeling this pairwise interaction explicitly: %% Modeling non-linear feature interactions with kernels ----------------------------------------------------- The previous analysis highlighted the need to model the interactions between `"workingday"` and `"hours"`. Another example of such a non-linear interaction that we would like to model could be the impact of the rain that might not be the same during the working days and the week-ends and holidays for instance. To model all such interactions, we could either use a polynomial expansion on all marginal features at once, after their spline-based expansion. However this would create a quadratic number of features which can cause overfitting and computational tractability issues. Alternatively we can use the Nyström method to compute an approximate polynomial kernel expansion. Let us try the latter: %% We observe that this model can almost rival the performance of the gradient boosted trees with an average error around 6% of the maximum demand. Note that while the final step of this pipeline is a linear regression model, the intermediate steps such as the spline feature extraction and the Nyström kernel approximation are highly non-linear. As a result the compound pipeline is much more expressive than a simple linear regression model with raw features. 
For the sake of completeness, we also evaluate the combination of one-hot encoding and kernel approximation: %% While one-hot features were competitive with spline-based features when using linear models, this is no longer the case when using a low-rank approximation of a non-linear kernel: this can be explained by the fact that spline features are smoother and allow the kernel approximation to find a more expressive decision function. Let us now have a qualitative look at the predictions of the kernel models and of the gradient boosted trees that should be able to better model non-linear interactions between features: %% Again we zoom on the last 4 days of the test set: %% First, note that trees can naturally model non-linear feature interactions since, by default, decision trees are allowed to grow beyond a depth of 2 levels. Here we can observe that the combinations of spline features and non-linear kernels work quite well and can almost rival the accuracy of the gradient boosting regression trees. On the contrary, one-hot time features do not perform that well with the low rank kernel model. In particular they significantly over-estimate the low demand hours more than the competing models. We also observe that none of the models can successfully predict some of the peak rentals at the rush hours during the working days. It is possible that access to additional features would be required to further improve the accuracy of the predictions. For instance, it could be useful to have access to the geographical repartition of the fleet at any point in time or the fraction of bikes that are immobilized because they need servicing. Let us finally get a more quantitative look at the prediction errors of those three models using the true vs predicted demand scatter plots: %% This visualization confirms the conclusions we drew on the previous plot. All models under-estimate the high demand events (working days rush hours), but gradient boosting a bit less so. 
The low demand events are well predicted on average by gradient boosting while the one-hot polynomial regression pipeline seems to systematically over-estimate demand in that regime. Overall the predictions of the gradient boosted trees are closer to the diagonal than for the kernel models. Concluding remarks ------------------ We note that we could have obtained slightly better results for kernel models by using more components (higher rank kernel approximation) at the cost of longer fit and prediction durations. For large values of `n_components`, the performance of the one-hot features would even match the spline features. The `Nystroem` + `RidgeCV` regressor could also have been replaced by :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers and we would have obtained quite similar results. The dataset we used in this case study is sampled on an hourly basis. However cyclic spline-based features could model time-within-day or time-within-week very efficiently with finer-grained time resolutions (for instance with measurements taken every minute instead of every hour) without introducing more features. One-hot encoding time representations would not offer this flexibility. Finally, in this notebook we used `RidgeCV` because it is very efficient from a computational point of view. However it models the target variable as a Gaussian random variable with constant variance. For positive regression problems, it is likely that using a Poisson or Gamma distribution would make more sense. This could be achieved by using `GridSearchCV(TweedieRegressor(power=2), param_grid={"alpha": alphas})` instead of `RidgeCV`. | 19,096 | en | 0.91318
from django.conf.urls import url
from api.views import movie_views
from api.views import auth_views
from api.views import rating_views
from api.views import recommend_views
from api.views import collabo_test
from api.views import content_based
from api.algorithms import kmeansClustering
# NOTE: Django resolves these regexes with re.search(), so a pattern without a
# leading '^' matches any path *ending* in that suffix (e.g. r'auth/getUsers/$'
# would also match '/foo/auth/getUsers/'). All patterns below are therefore
# consistently anchored as r'^...$', matching the style already used by the
# auth/loginmember group of routes.
urlpatterns = [
    # User access URLs
    url(r'^auth/signup-many/$', auth_views.signup_many, name='sign_up_many'),
    url(r'^auth/getUsers/$', auth_views.getUsers, name='get_users'),
    url(r'^auth/deleteUser/$', auth_views.deleteUser, name='delete_user'),
    url(r'^auth/similarUser/$', auth_views.similarUser, name='similarUser'),
    url(r'^auth/loginmember/$', auth_views.login, name='login_member'),
    url(r'^auth/registermember/$', auth_views.register, name='register_member'),
    url(r'^auth/logoutmember/$', auth_views.logout, name='logout_member'),
    url(r'^auth/session/$', auth_views.session_member, name="session_member"),
    url(r'^auth/updateUser/$', auth_views.updateUser, name="update_user"),
    url(r'^auth/predictRating/$', auth_views.predictMovieRating, name="predictRating"),
    # Duplicate-check inspection
    url(r'^auth/duplicateInspection/$', auth_views.duplicate_inspection, name="duplicate_inspection"),
    # Movie access URLs
    url(r'^movies/$', movie_views.movies, name='movie_list'),
    url(r'^movies/pref/$', movie_views.moviesPref, name='movie_pref'),
    url(r'^movies/views/$', movie_views.views, name='movie_views'),
    url(r'^movies/modify/$', movie_views.modify, name='movie_modify'),
    url(r'^movies/neverSeenMovies/$', movie_views.never_seen_movie_list, name='never_seen_movie_list'),
    url(r'^movies/faculites/$', movie_views.faculites, name='faculites'),
    url(r'^movies/rating/$', movie_views.get_rating_movie, name='get_rating_movie'),
    # Recommendation URL
    url(r'^auth/recommendMovie/$', recommend_views.RecommendMovie, name='recommendMovie'),
    # Rating access URLs
    url(r'^rateMovie/$', rating_views.rate_movie, name='rate_movie'),
    url(r'^getRatings/$', rating_views.get_ratings, name='get_ratings'),
    url(r'^getEvaluatedRating/$', rating_views.get_evaluate_rating, name='get_evaluate_rating'),
    url(r'^ratings/comment/$', rating_views.create_comment, name='create_comment'),
    # Clustering execution URL (was an unanchored plain string; now a raw,
    # anchored regex like every other route)
    url(r'^clustering/kmeansClustering/C/$', kmeansClustering.C_Cluster, name="c_Cluster"),
    # Content-Based Algorithm
    url(r'^preprocessing/$', content_based.preprocessing_for_cb, name='preprocessing'),
    url(r'^content_based/$', content_based.algorithm, name='content_based')
]
| django-vue/djangoAPI/api/urls.py | 2,547 | user 접근 URL 중복체크 검사 movie 접근 URL 추천 URL 평점정보 접근 URL clustering 실행 URL Content-Based Algorithm | 93 | ko | 0.682654 |
"""HelloWorld Integration for Cortex XSOAR (aka Demisto)
This integration is a good example on you can build a Cortex XSOAR Integration
using Python 3. Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
When building a Cortex XSOAR integration that is reusable, a lot of effort
must be placed in the design. We recommend to fill a Design Document template,
that allows you to capture Use Cases, Requirements and Inputs/Outputs.
Example Design document for the this Integration (HelloWorld):
https://docs.google.com/document/d/1wETtBEKg37PHNU8tYeB56M1LE314ux086z3HFeF_cX0
HelloWorld API
--------------
The HelloWorld API is a simple API that shows a realistic use case for an XSOAR
integration. It's actually a real API that is available to the following URL:
https://soar.mastersofhack.com - if you need an API Key to test it out please
reach out to your Cortex XSOAR contacts.
This API has a few basic functions:
- Alerts: the endpoint returns mocked alerts and allows you to search based on
a number of parameters, such as state (ACTIVE or CLOSED), type, timestamp. It
can also return a single alert by ID. This is used to create new Incidents in
XSOAR by using the ``fetch-incidents`` command, which is by default invoked
every minute.
There is also an endpoint that allows to retrieve additional details about a
specific alert by ID, and one to change the alert status to "CLOSED" once
it has been resolved.
- Reputation (ip and domain): these endpoints return, for an IP and
domain respectively, a WHOIS lookup of the entity as well as a reputation score
(from 0 to 100) that is used to determine whether the entity is malicious. This
endpoint is called by XSOAR reputation commands ``ip`` and ``domain`` that
are run automatically every time an indicator is extracted in XSOAR. As a best
practice of design, it is important to map and document the mapping between
a score in the original API format (0 to 100 in this case) to a score in XSOAR
format (0 to 3). This score is called ``DBotScore``, and is returned in the
context to allow automated handling of indicators based on their reputation.
More information: https://xsoar.pan.dev/docs/integrations/dbot
- Scan: to demonstrate how to run commands that are not returning instant data,
the API provides a scan endpoint that simulates scanning a host and generating
a report after the scan is completed. The API has endpoints to start a scan,
which returns a job ID, poll for the scan status and, if the scan is completed,
retrieved the job results.
This function is used in conjunction of the HelloWorld Scan playbook that uses
the GenericPolling mechanism to implement the job polling loop. The results
can be returned in JSON or attachment file format.
Info on GenericPolling: https://xsoar.pan.dev/docs/playbooks/generic-polling
Please check the HelloWorld Design Document referenced above for details about
the raw API responses as well as the design details for this integration.
This integration also has a ``say-hello`` command for backward compatibility,
that doesn't connect to an API and just returns a ``Hello {name}`` string,
where name is the input value provided.
Integration File Structure
--------------------------
An integration usually consists of the following parts:
- Imports
- Constants
- Client Class
- Helper Functions
- Command Functions
- Main Function
- Entry Point
Imports
-------
Here you can import Python module you need for your integration. If you need
a module that is not part of the default XSOAR Docker images, you can add
a custom one. More details: https://xsoar.pan.dev/docs/integrations/docker
There are also internal imports that are used by XSOAR:
- demistomock (imported as demisto): allows your code to work offline for
testing. The actual ``demisto`` module is provided at runtime when the
code runs in XSOAR.
- CommonServerPython.py: contains a set of helper functions, base classes
and other useful components that will make your integration code easier
to maintain.
- CommonServerUserPython.py: includes a set of user defined commands that
are specific to an XSOAR installation. Do not use it for integrations that
are meant to be shared externally.
These imports are automatically loaded at runtime within the XSOAR script
runner, so you shouldn't modify them
Constants
---------
Usually some constants that do not require user parameters or inputs, such
as the default API entry point for your service, or the maximum numbers of
incidents to fetch every time.
Client Class
------------
We recommend to use a Client class to wrap all the code that needs to interact
with your API. Moreover, we recommend, when possible, to inherit from the
BaseClient class, defined in CommonServerPython.py. This class already handles
a lot of the work, such as system proxy settings, SSL certificate verification
and exception handling for HTTP errors.
Note that the Client class should NOT contain any Cortex XSOAR specific code,
i.e. it shouldn't use anything in the ``demisto`` class (functions such as
``demisto.args()`` or ``demisto.results()`` or even ``return_results`` and
``return_error``.
You will use the Command Functions to handle XSOAR inputs and outputs.
When calling an API, you should use the ``_http_request()`` method and you
can return the raw data to the calling function (usually a Command function).
You should usually have one function for each API endpoint.
Look at the code and the comments of this specific class to better understand
the implementation details.
Helper Functions
----------------
Helper functions are usually used as utility functions that are used by several
command functions throughout your code. For example they map arguments to types
or convert severity formats from integration-specific to XSOAR.
Many helper functions are already defined in ``CommonServerPython.py`` and are
often very handy.
Command Functions
-----------------
Command functions perform the mapping between XSOAR inputs and outputs to the
Client class functions inputs and outputs. As a best practice, they shouldn't
contain calls to ``demisto.args()``, ``demisto.results()``, ``return_error``
and ``demisto.command()`` as those should be handled through the ``main()``
function.
However, in command functions, use ``demisto`` or ``CommonServerPython.py``
artifacts, such as ``demisto.debug()`` or the ``CommandResults`` class and the
``Common.*`` classes.
Usually you will have one command function for every specific XSOAR command
you want to implement in your integration, plus ``test-module``,
``fetch-incidents`` and ``fetch-indicators``(if the latter two are supported
by your integration). Each command function should invoke one specific function
of the Client class.
Command functions, when invoked through an XSOAR command usually return data
using the ``CommandResults`` class, that is then passed to ``return_results()``
in the ``main()`` function.
``return_results()`` is defined in ``CommonServerPython.py`` to return
the data to XSOAR. ``return_results()`` actually wraps ``demisto.results()``.
You should never use ``demisto.results()`` directly.
Sometimes you will need to return values in a format that is not compatible
with ``CommandResults`` (for example files): in that case you must return a
data structure that is then passed to ``return_results()``. (i.e.
check the ``scan_results_command`` function in this file that has the option
to return a file to Cortex XSOAR).
In any case you should never call ``return_results()`` directly from the
command functions.
When you create the CommandResults object in command functions, you
usually pass some types of data:
- Human Readable: usually in Markdown format. This is what is presented to the
analyst in the War Room. You can use ``tableToMarkdown()``, defined in
``CommonServerPython.py``, to convert lists and dicts in Markdown and pass it
to ``return_results()`` using the ``readable_output`` argument, or the
``return_results()`` function will call ``tableToMarkdown()`` automatically for
you.
- Context Output: this is the machine readable data, JSON based, that XSOAR can
parse and manage in the Playbooks or Incident's War Room. The Context Output
fields should be defined in your integration YML file and is important during
the design phase. Make sure you define the format and follow best practices.
You can use ``demisto-sdk json-to-outputs`` to autogenerate the YML file
outputs section. Context output is passed as the ``outputs`` argument of ``CommandResults``,
and the prefix (i.e. ``HelloWorld.Alert``) is passed via the ``outputs_prefix``
argument.
More information on Context Outputs, Standards, DBotScore and demisto-sdk:
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/context-standards
https://xsoar.pan.dev/docs/integrations/dbot
https://github.com/demisto/demisto-sdk/blob/master/demisto_sdk/commands/json_to_outputs/README.md
Also, when you write data in the Context, you want to make sure that if you
return updated information for an entity, to update it and not append to
the list of entities (i.e. in HelloWorld you want to update the status of an
existing ``HelloWorld.Alert`` in the context when you retrieve it, rather than
adding a new one if you already retrieved it). To update data in the Context,
you can define which is the key attribute to use, such as (using the example):
``outputs_key_field='alert_id'``. This means that you are using the ``alert_id``
key to determine whether adding a new entry in the context or updating an
existing one that has the same ID. You can look at the examples to understand
how it works.
More information here:
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/dt
- Raw Output: this is usually the raw result from your API and is used for
troubleshooting purposes or for invoking your command from Automation Scripts.
If not specified, ``return_results()`` will use the same data as ``outputs``.
Main Function
-------------
The ``main()`` function takes care of reading the integration parameters via
the ``demisto.params()`` function, initializes the Client class and checks the
different options provided to ``demisto.commands()``, to invoke the correct
command function passing to it ``demisto.args()`` and returning the data to
``return_results()``. If implemented, ``main()`` also invokes the function
``fetch_incidents()``with the right parameters and passes the outputs to the
``demisto.incidents()`` function. ``main()`` also catches exceptions and
returns an error message via ``return_error()``.
Entry Point
-----------
This is the integration code entry point. It checks whether the ``__name__``
variable is ``__main__`` , ``__builtin__`` (for Python 2) or ``builtins`` (for
Python 3) and then calls the ``main()`` function. Just keep this convention.
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, Tuple, List, Optional, Union, cast
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
# Timestamp format used for dates exchanged with the HelloWorld API
# (ISO 8601 with a literal 'Z' suffix, i.e. UTC)
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Hard upper bound on the number of incidents pulled per fetch-incidents run
MAX_INCIDENTS_TO_FETCH = 50
# Alert severity levels accepted by the HelloWorld API (see Client.search_alerts)
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client for the HelloWorld service API.

    Implements the raw API calls only — no Cortex XSOAR logic lives here;
    methods just issue requests and hand the response data back.
    Inherits from ``BaseClient`` (CommonServerPython), whose
    ``_http_request()`` takes care of proxy handling, SSL verification, etc.
    No extra attributes are needed for this HelloWorld implementation.
    """

    def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
        """Look up the reputation of an IP via the '/ip' API endpoint.

        :type ip: ``str``
        :param ip: IP address to get the reputation for

        :return: IP reputation as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/ip', params={'ip': ip})

    def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
        """Look up the reputation of a domain via the '/domain' API endpoint.

        :type domain: ``str``
        :param domain: domain name to get the reputation for

        :return: domain reputation as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/domain', params={'domain': domain})

    def search_alerts(self, alert_status: Optional[str], severity: Optional[str],
                      alert_type: Optional[str], max_results: Optional[int],
                      start_time: Optional[int]) -> List[Dict[str, Any]]:
        """Search HelloWorld alerts via the '/get_alerts' API endpoint.

        Every non-empty argument is forwarded to the API as a query
        parameter of the request.

        :type alert_status: ``Optional[str]``
        :param alert_status: alert status to search for; 'ACTIVE' or 'CLOSED'

        :type severity: ``Optional[str]``
        :param severity: comma-separated severities to search for; options
            are "Low", "Medium", "High", "Critical"

        :type alert_type: ``Optional[str]``
        :param alert_type: alert type to search for (no predefined list)

        :type max_results: ``Optional[int]``
        :param max_results: maximum number of results to return

        :type start_time: ``Optional[int]``
        :param start_time: start timestamp (epoch in seconds) for the search

        :return: matching HelloWorld alerts
        :rtype: ``List[Dict[str, Any]]``
        """
        # Forward only the parameters the caller actually provided
        # (falsy values are dropped).
        request_params: Dict[str, Any] = {
            name: value
            for name, value in (
                ('alert_status', alert_status),
                ('alert_type', alert_type),
                ('severity', severity),
                ('max_results', max_results),
                ('start_time', start_time),
            )
            if value
        }
        return self._http_request(method='GET', url_suffix='/get_alerts', params=request_params)

    def get_alert(self, alert_id: str) -> Dict[str, Any]:
        """Fetch a single HelloWorld alert by id.

        :type alert_id: ``str``
        :param alert_id: id of the alert to return

        :return: alert as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/get_alert_details',
                                  params={'alert_id': alert_id})

    def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[str, Any]:
        """Change the status of a specific HelloWorld alert.

        :type alert_id: ``str``
        :param alert_id: id of the alert to update

        :type alert_status: ``str``
        :param alert_status: new alert status; 'ACTIVE' or 'CLOSED'

        :return: updated alert as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(
            method='GET',
            url_suffix='/change_alert_status',
            params={'alert_id': alert_id, 'alert_status': alert_status}
        )

    def scan_start(self, hostname: str) -> Dict[str, Any]:
        """Start a HelloWorld scan against a hostname.

        :type hostname: ``str``
        :param hostname: hostname of the machine to scan

        :return: scan status as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/start_scan',
                                  params={'hostname': hostname})

    def scan_status(self, scan_id: str) -> Dict[str, Any]:
        """Get the status of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve status for

        :return: scan status as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/check_scan',
                                  params={'scan_id': scan_id})

    def scan_results(self, scan_id: str) -> Dict[str, Any]:
        """Get the results of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve results for

        :return: scan results as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/get_scan_results',
                                  params={'scan_id': scan_id})

    def say_hello(self, name: str) -> str:
        """Return the greeting 'Hello {name}'.

        :type name: ``str``
        :param name: name to append to the 'Hello' string

        :return: string containing 'Hello {name}'
        :rtype: ``str``
        """
        return f'Hello {name}'
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
    """Converts a WHOIS date value to an ISO8601 string.

    The HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS format) may
    arrive either as a single string or as a list of strings; when a list
    is given only the first element is considered.

    :type domain_date: ``Union[List[str], str]``
    :param domain_date: date value (or list of date values) to parse,
        in the format 'YYYY-mm-DD HH:MM:SS'

    :type date_format: ``str``
    :param date_format: ``strftime`` format for the returned string;
        defaults to ISO8601 with milliseconds

    :return: parsed time rendered with ``date_format``, or ``None`` when
        the input cannot be parsed
    :rtype: ``Optional[str]``
    """
    # Normalize the input to a single candidate string: the value itself,
    # or the first element when a list is provided.
    candidate: Optional[str] = None
    if isinstance(domain_date, str):
        candidate = domain_date
    elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
        candidate = domain_date[0]

    if candidate:
        domain_date_dt = dateparser.parse(candidate)
        if domain_date_dt:
            return domain_date_dt.strftime(date_format)

    # Unsupported type or unparseable value
    return None
def convert_to_demisto_severity(severity: str) -> int:
    """Translate a HelloWorld severity into a Cortex XSOAR severity.

    The HelloWorld API reports alert severity as one of 'Low', 'Medium',
    'High' or 'Critical'; XSOAR incidents use the numeric scale 1 to 4.

    :type severity: ``str``
    :param severity: severity as returned from the HelloWorld API (str)

    :return: Cortex XSOAR Severity (1 to 4)
    :rtype: ``int``
    """
    # The mapping here is one-to-one, but more complex integrations may
    # need more elaborate rules — hence the dedicated, documented function.
    severity_map = {
        'Low': IncidentSeverity.LOW,
        'Medium': IncidentSeverity.MEDIUM,
        'High': IncidentSeverity.HIGH,
        'Critical': IncidentSeverity.CRITICAL,
    }
    return severity_map[severity]
''' COMMAND FUNCTIONS '''
def test_module(client: Client, first_fetch_time: int) -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type first_fetch_time: ``int``
    :param first_fetch_time: start timestamp (epoch in seconds) used for
        the probing alert search

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    # INTEGRATION DEVELOPER TIP
    # Client class should raise the exceptions, but if the test fails
    # the exception text is printed to the Cortex XSOAR UI.
    # If you have some specific errors you want to capture (i.e. auth failure)
    # you should catch the exception here and return a string with a more
    # readable output (for example return 'Authentication Error, API Key
    # invalid').
    # Cortex XSOAR will print everything you return different than 'ok' as
    # an error
    try:
        # A minimal search is enough to prove connectivity and auth.
        client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
                             severity=None)
    except DemistoException as e:
        if 'Forbidden' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        else:
            raise e
    return 'ok'
def say_hello_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-say-hello command: Returns Hello {somename}

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['name']`` is used as input name

    :return: ``CommandResults`` containing the hello world message, to be
        passed to ``return_results``
    :rtype: ``CommandResults``
    """
    # 'name' is declared mandatory in HelloWorld.yml, so XSOAR validates it
    # before this code runs; the check below is defensive only.
    name = args.get('name')
    if not name:
        raise ValueError('name not specified')

    # Raw response from the service
    result = client.say_hello(name)

    # Human readable output in markdown format
    # (https://www.markdownguide.org/basic-syntax/). For tabular data use
    # ``tableToMarkDown()`` from ``CommonServerPython.py`` instead.
    readable_output = f'## {result}'

    # More information about Context:
    # https://xsoar.pan.dev/docs/integrations/context-and-outputs
    # When ``readable_output`` is omitted, ``CommandResults`` would build it
    # itself via ``tableToMarkdown()``; here we pass our custom markdown.
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='hello',
        outputs_key_field='',
        outputs=result
    )
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
                    first_fetch_time: Optional[int], alert_status: Optional[str],
                    min_severity: str, alert_type: Optional[str]
                    ) -> Tuple[Dict[str, int], List[dict]]:
    """Retrieve new HelloWorld alerts every fetch interval (default 1 minute).

    Guarantees that each incident is fetched exactly once and none are
    missed: the timestamp of the newest processed incident is stored in
    ``last_run`` and used as the lower bound on the next invocation. On the
    very first run (no ``last_run``) the ``first_fetch_time`` integration
    parameter determines where fetching starts.

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type max_results: ``int``
    :param max_results: Maximum numbers of incidents per fetch

    :type last_run: ``Optional[Dict[str, int]]``
    :param last_run: dict whose 'last_fetch' key holds the creation time of
        the latest incident from the previous fetch

    :type first_fetch_time: ``Optional[int]``
    :param first_fetch_time: timestamp (epoch seconds) to start from when
        there is no previous run

    :type alert_status: ``Optional[str]``
    :param alert_status: alert status to search for; 'ACTIVE' or 'CLOSED'

    :type min_severity: ``str``
    :param min_severity: minimum severity of the alerts to fetch; one of
        "Low", "Medium", "High", "Critical"

    :type alert_type: ``Optional[str]``
    :param alert_type: alert type to search for (no predefined list)

    :return: tuple of (next_run dict to store for the following fetch,
        list of incident dicts to create in XSOAR)
    :rtype: ``Tuple[Dict[str, int], List[dict]]``
    """
    # Resolve the starting timestamp: the stored value from the previous
    # run, or first_fetch_time on the very first fetch.
    stored_fetch = last_run.get('last_fetch')
    if stored_fetch is None:
        last_fetch = first_fetch_time
    else:
        last_fetch = int(stored_fetch)

    # cast() is for the type checker only: treated as int from here on.
    latest_created_time = cast(int, last_fetch)

    # Incidents to return; each one is a dict with string keys.
    incidents: List[Dict[str, Any]] = []

    # CSV of every severity at or above min_severity, relying on
    # HELLOWORLD_SEVERITIES being ordered lowest-to-highest.
    severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])

    alerts = client.search_alerts(
        alert_type=alert_type,
        alert_status=alert_status,
        max_results=max_results,
        start_time=last_fetch,
        severity=severity
    )

    for alert in alerts:
        # 'created' is epoch seconds from the API (0 when absent); XSOAR
        # works with milliseconds, so convert below.
        incident_created_time = int(alert.get('created', '0'))

        # Dedup guard: only keep alerts strictly newer than the last fetch.
        if last_fetch and incident_created_time <= last_fetch:
            continue

        # INTEGRATION DEVELOPER TIP
        # Mandatory incident fields: name, occurred (ISO8601, via
        # timestamp_to_datestring()), rawJSON (json.dumps of the raw alert,
        # used later for classification/mapping in XSOAR). 'severity' is
        # optional but recommended and must use the XSOAR 1-4 scale.
        # Out-of-the-box fields ('details', 'type') or 'CustomFields' can
        # also be mapped here in code instead of the mapping phase.
        incident = {
            'name': alert['name'],  # a missing name raises, by design
            'occurred': timestamp_to_datestring(incident_created_time * 1000),
            'rawJSON': json.dumps(alert),
            'severity': convert_to_demisto_severity(alert.get('severity', 'Low')),
        }
        incidents.append(incident)

        # Track the newest creation time seen so far for next_run.
        if incident_created_time > latest_created_time:
            latest_created_time = incident_created_time

    # Store the newest timestamp under the 'last_fetch' key for next time.
    next_run = {'last_fetch': latest_created_time}
    return next_run, incidents
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """ip command: Returns IP reputation for a list of IPs

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['ip']`` is a single IP or a list of IPs and
        ``args['threshold']`` optionally overrides the malicious threshold

    :type default_threshold: ``int``
    :param default_threshold: threshold used to classify an IP as malicious
        when none is given in the XSOAR arguments

    :return: list of ``CommandResults`` (one per IP) to be passed to
        ``return_results``
    :rtype: ``List[CommandResults]``
    """
    # Reputation commands conventionally accept arrays so they can be
    # invoked once from XSOAR even though this API takes a single IP per
    # call; argToList() (CommonServerPython.py) turns a scalar into a
    # one-element list so both forms are handled uniformly.
    ips = argToList(args.get('ip'))
    if not ips:
        raise ValueError('IP(s) not specified')

    # The maliciousness threshold is overridable per-invocation; document
    # the default in the integration docs.
    threshold = int(args.get('threshold', default_threshold))

    # One CommandResults (with IP context standard) per input IP.
    command_results: List[CommandResults] = []

    for ip in ips:
        ip_data = client.get_ip_reputation(ip)
        ip_data['ip'] = ip

        # HelloWorld score -> XSOAR DBotScore mapping
        # (https://xsoar.pan.dev/docs/integrations/dbot).
        reputation = int(ip_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE  # unknown
        elif reputation >= threshold:
            score = Common.DBotScore.BAD  # bad
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS  # suspicious
        else:
            score = Common.DBotScore.GOOD  # good

        # The context spans three parts here: the vendor-specific context
        # (HelloWorld), the IP context standard and the DBotScore. See:
        # https://xsoar.pan.dev/docs/integrations/context-and-outputs
        # https://xsoar.pan.dev/docs/integrations/context-standards
        dbot_score = Common.DBotScore(
            indicator=ip,
            indicator_type=DBotScoreType.IP,
            integration_name='HelloWorld',
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )

        # IP standard context carrying the DBotScore.
        ip_standard_context = Common.IP(
            ip=ip,
            asn=ip_data.get('asn'),
            dbot_score=dbot_score
        )

        # INTEGRATION DEVELOPER TIP
        # The API response is very verbose; keep the vendor context
        # (HelloWorld.IP) useful by dropping the noisiest keys. Use
        # ``demisto-sdk``'s ``json-to-outputs`` to generate the YML outputs.
        excluded_fields = ('objects', 'nir')
        ip_data = {key: value for key, value in ip_data.items() if key not in excluded_fields}

        # Custom table title; otherwise CommandResults would call
        # tableToMarkdown() itself.
        readable_output = tableToMarkdown('IP', ip_data)

        # Output key is HelloWorld.IP keyed on 'ip'; ``indicator`` supplies
        # the IP context standard.
        command_results.append(CommandResults(
            readable_output=readable_output,
            outputs_prefix='HelloWorld.IP',
            outputs_key_field='ip',
            outputs=ip_data,
            indicator=ip_standard_context
        ))
    return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """domain command: Returns domain reputation for a list of domains

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['domain']`` is a single domain or a list of domains and
        ``args['threshold']`` optionally overrides the malicious threshold

    :type default_threshold: ``int``
    :param default_threshold: threshold used to classify a domain as
        malicious when none is given in the XSOAR arguments

    :return: list of ``CommandResults`` (one per domain) to be passed to
        ``return_results``
    :rtype: ``List[CommandResults]``
    """
    # Reputation commands conventionally accept arrays; argToList()
    # (CommonServerPython.py) turns a scalar into a one-element list so
    # both forms are handled uniformly while the API takes one domain per call.
    domains = argToList(args.get('domain'))
    if not domains:
        raise ValueError('domain(s) not specified')

    threshold = int(args.get('threshold', default_threshold))

    # One CommandResults (with Domain context standard) per input domain.
    command_results: List[CommandResults] = []

    for domain in domains:
        domain_data = client.get_domain_reputation(domain)
        domain_data['domain'] = domain

        # Normalize the WHOIS dates to ISO8601, the format XSOAR customers
        # and integrations use by default.
        for date_key in ('creation_date', 'expiration_date', 'updated_date'):
            if date_key in domain_data:
                domain_data[date_key] = parse_domain_date(domain_data[date_key])

        # HelloWorld score -> XSOAR DBotScore mapping
        # (https://xsoar.pan.dev/docs/integrations/dbot).
        reputation = int(domain_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE  # unknown
        elif reputation >= threshold:
            score = Common.DBotScore.BAD  # bad
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS  # suspicious
        else:
            score = Common.DBotScore.GOOD  # good

        # The context spans three parts here: the vendor-specific context
        # (HelloWorld), the Domain context standard and the DBotScore. See:
        # https://xsoar.pan.dev/docs/integrations/context-and-outputs
        # https://xsoar.pan.dev/docs/integrations/context-standards
        dbot_score = Common.DBotScore(
            indicator=domain,
            integration_name='HelloWorld',
            indicator_type=DBotScoreType.DOMAIN,
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )

        # Domain standard context carrying the DBotScore.
        domain_standard_context = Common.Domain(
            domain=domain,
            creation_date=domain_data.get('creation_date'),
            expiration_date=domain_data.get('expiration_date'),
            updated_date=domain_data.get('updated_date'),
            organization=domain_data.get('org'),
            name_servers=domain_data.get('name_servers'),
            registrant_name=domain_data.get('name'),
            registrant_country=domain_data.get('country'),
            registrar_name=domain_data.get('registrar'),
            dbot_score=dbot_score
        )

        # Custom table title; otherwise CommandResults would call
        # tableToMarkdown() itself.
        readable_output = tableToMarkdown('Domain', domain_data)

        # Output key is HelloWorld.Domain keyed on 'domain'; ``indicator``
        # supplies the Domain context standard.
        command_results.append(CommandResults(
            readable_output=readable_output,
            outputs_prefix='HelloWorld.Domain',
            outputs_key_field='domain',
            outputs=domain_data,
            indicator=domain_standard_context
        ))
    return command_results
def search_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-search-alerts command: Search alerts in HelloWorld

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['status']`` alert status ('ACTIVE' or 'CLOSED'),
        ``args['severity']`` alert severity CSV,
        ``args['alert_type']`` alert type,
        ``args['start_time']`` start time as ISO8601 date or epoch seconds,
        ``args['max_results']`` maximum number of results to return

    :return: ``CommandResults`` containing the matching alerts, to be
        passed to ``return_results``
    :rtype: ``CommandResults``
    """
    status = args.get('status')

    # Validate the severity CSV against the allowed values; default to
    # every severity when the argument is absent.
    severity_arg = args.get('severity')
    if severity_arg:
        severities = severity_arg.split(',')
        if any(sev not in HELLOWORLD_SEVERITIES for sev in severities):
            raise ValueError(
                f'severity must be a comma-separated value '
                f'with the following options: {",".join(HELLOWORLD_SEVERITIES)}')
    else:
        severities = HELLOWORLD_SEVERITIES

    alert_type = args.get('alert_type')

    # Argument conversions via CommonServerPython helpers.
    start_time = arg_to_datetime(
        arg=args.get('start_time'),
        arg_name='start_time',
        required=False
    )
    max_results = arg_to_number(
        arg=args.get('max_results'),
        arg_name='max_results',
        required=False
    )

    # The API expects severity as a CSV.
    alerts = client.search_alerts(
        severity=','.join(severities),
        alert_status=status,
        alert_type=alert_type,
        start_time=int(start_time.timestamp()) if start_time else None,
        max_results=max_results
    )

    # Convert each alert's 'created' field from epoch seconds to ISO8601,
    # the default format for XSOAR customers and integrations.
    for alert in alerts:
        if 'created' in alert:
            alert['created'] = timestamp_to_datestring(int(alert.get('created', '0')) * 1000)

    # No custom markdown here: CommandResults generates the readable
    # output by default.
    return CommandResults(
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alerts
    )
def get_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-get-alert command: Returns a HelloWorld alert

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['alert_id']`` is the id of the alert to return

    :return: ``CommandResults`` containing the alert, to be passed to
        ``return_results``
    :rtype: ``CommandResults``
    """
    alert_id = args.get('alert_id')
    if not alert_id:
        raise ValueError('alert_id not specified')

    alert = client.get_alert(alert_id=alert_id)

    # Convert 'created' from epoch seconds to ISO8601, the default format
    # for XSOAR customers and integrations.
    if 'created' in alert:
        alert['created'] = timestamp_to_datestring(int(alert.get('created', '0')) * 1000)

    # tableToMarkdown() (CommonServerPython.py) renders dicts and lists as
    # a human readable markdown table.
    readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)

    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def update_alert_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-update-alert-status command: Changes the status of an alert

    Changes the status of a HelloWorld alert and returns the updated alert info.

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['alert_id']`` is the alert to update and ``args['status']``
        the new status, either ACTIVE or CLOSED

    :return: ``CommandResults`` containing the updated alert, to be passed
        to ``return_results``
    :rtype: ``CommandResults``
    """
    alert_id = args.get('alert_id')
    if not alert_id:
        raise ValueError('alert_id not specified')

    status = args.get('status')
    if status not in ('ACTIVE', 'CLOSED'):
        raise ValueError('status must be either ACTIVE or CLOSED')

    alert = client.update_alert_status(alert_id, status)

    # Convert 'updated' from epoch seconds to ISO8601, the default format
    # for XSOAR customers and integrations.
    if 'updated' in alert:
        alert['updated'] = timestamp_to_datestring(int(alert.get('updated', '0')) * 1000)

    # tableToMarkdown() (CommonServerPython.py) renders dicts and lists as
    # a human readable markdown table.
    readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)

    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def scan_start_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-start-scan command: Starts a HelloWorld scan

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['hostname']`` is the host to run the scan on

    :return: ``CommandResults`` describing the scan job, to be passed to
        ``return_results``
    :rtype: ``CommandResults``
    """
    hostname = args.get('hostname')
    if not hostname:
        raise ValueError('hostname not specified')

    scan = client.scan_start(hostname=hostname)

    # The API response does not echo back the hostname the scan was invoked
    # against; add it from the command input so it lands in the XSOAR
    # context, where it is useful to have.
    scan['hostname'] = hostname

    scan_id = scan.get('scan_id')
    readable_output = f'Started scan {scan_id}'

    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=scan
    )
def scan_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-scan-status command: Returns status for HelloWorld scans

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['scan_id']`` is a single scan ID or a list of scan IDs

    :return: ``CommandResults`` with the status of each scan, to be passed
        to ``return_results``
    :rtype: ``CommandResults``
    """
    scan_id_list = argToList(args.get('scan_id', []))
    if not scan_id_list:
        raise ValueError('scan_id(s) not specified')

    # Query the API once per scan id and collect every response.
    scan_list: List[Dict[str, Any]] = [
        client.scan_status(scan_id=scan_id) for scan_id in scan_id_list
    ]

    readable_output = tableToMarkdown('Scan status', scan_list)

    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=scan_list
    )
def scan_results_command(client: Client, args: Dict[str, Any]) -> Union[Dict[str, Any], CommandResults, List[CommandResults]]:
    """helloworld-scan-results command: Returns results for a HelloWorld scan

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args: command arguments (usually from ``demisto.args()``);
        ``args['scan_id']`` scan ID to retrieve results for,
        ``args['format']`` output format, 'file' or 'json'

    :return: ``CommandResults`` objects (compatible with
        ``return_results()``) holding the scan result when the json format
        is selected, or a raw entry dict (also ``return_results()``
        compatible) holding the output file when the file format is selected
    :rtype: ``Union[Dict[str, Any], CommandResults, List[CommandResults]]``
    """
    scan_id = args.get('scan_id')
    if not scan_id:
        raise ValueError('scan_id not specified')
    scan_format = args.get('format', 'file')

    # INTEGRATION DEVELOPER TIP
    # This function returns data in two shapes: the usual CommandResults
    # for the 'json' format, or a raw fileResult() entry for the 'file'
    # format. Prefer CommandResults whenever possible; raw entries are for
    # special returns like files. Either way, hand the value back to main()
    # which calls return_results().
    results = client.scan_results(scan_id=scan_id)

    if scan_format == 'file':
        # War-room file attachment entry instead of CommandResults.
        return fileResult(
            filename=f'{scan_id}.json',
            data=json.dumps(results, indent=4),
            file_type=entryTypes['entryInfoFile']
        )
    if scan_format == 'json':
        entities = results.get('entities', [])

        # The scan reports CVE information; CVE is part of the XSOAR
        # context standard, so collect every CVE id mentioned by the scan
        # entities to emit them as indicators as well. See:
        # https://xsoar.pan.dev/docs/integrations/context-standards#cve
        cves: List[Common.CVE] = []
        for entity in entities:
            vulns = entity.get('vulns')
            if isinstance(vulns, list):
                cves.extend(
                    Common.CVE(id=cve_id, cvss=None, published=None, modified=None, description=None)
                    for cve_id in vulns
                )

        # Entities result first, then one dedicated result per unique CVE
        # indicator (entities may share CVEs, hence the dedup via set()).
        command_results: List[CommandResults] = [CommandResults(
            readable_output=tableToMarkdown(f'Scan {scan_id} results', entities),
            outputs_prefix='HelloWorld.Scan',
            outputs_key_field='scan_id',
            outputs=results
        )]
        for cve in set(cves):
            command_results.append(CommandResults(
                readable_output=f"CVE {cve}",
                indicator=cve
            ))
        return command_results

    raise ValueError('Incorrect format, must be "json" or "file"')
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Builds the HelloWorld API client from the integration parameters and
    dispatches ``demisto.command()`` to the matching command function.
    Results are returned through ``return_results()`` and any exception is
    reported to the war room via ``return_error()``.

    :return: None
    :rtype: ``None``
    """
    # Read the XSOAR runtime context once instead of calling the demisto
    # accessors repeatedly throughout the function.
    params = demisto.params()
    command = demisto.command()
    args = demisto.args()

    api_key = params.get('apikey')

    # get the service API url
    base_url = urljoin(params['url'], '/api/v1')

    # if your Client class inherits from BaseClient, SSL verification is
    # handled out of the box by it, just pass ``verify_certificate`` to
    # the Client constructor
    verify_certificate = not params.get('insecure', False)

    # How much time before the first fetch to retrieve incidents
    first_fetch_time = arg_to_datetime(
        arg=params.get('first_fetch', '3 days'),
        arg_name='First fetch time',
        required=True
    )
    first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
    # Using assert as a type guard (since first_fetch_time is always an int when required=True)
    assert isinstance(first_fetch_timestamp, int)

    # if your Client class inherits from BaseClient, system proxy is handled
    # out of the box by it, just pass ``proxy`` to the Client constructor
    proxy = params.get('proxy', False)

    # INTEGRATION DEVELOPER TIP
    # You can use functions such as ``demisto.debug()``, ``demisto.info()``,
    # etc. to print information in the XSOAR server log. You can set the log
    # level on the server configuration
    # See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
    demisto.debug(f'Command being called is {command}')
    try:
        headers = {
            'Authorization': f'Bearer {api_key}'
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers=headers,
            proxy=proxy)

        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client, first_fetch_timestamp)
            return_results(result)

        elif command == 'fetch-incidents':
            # Set and define the fetch incidents command to run after activated via integration settings.
            alert_status = params.get('alert_status', None)
            alert_type = params.get('alert_type', None)
            min_severity = params.get('min_severity', None)

            # Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
            max_results = arg_to_number(
                arg=params.get('max_fetch'),
                arg_name='max_fetch',
                required=False
            )
            if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
                max_results = MAX_INCIDENTS_TO_FETCH

            next_run, incidents = fetch_incidents(
                client=client,
                max_results=max_results,
                last_run=demisto.getLastRun(),  # getLastRun() gets the last run dict
                first_fetch_time=first_fetch_timestamp,
                alert_status=alert_status,
                min_severity=min_severity,
                alert_type=alert_type
            )

            # saves next_run for the time fetch-incidents is invoked
            demisto.setLastRun(next_run)
            # fetch-incidents calls ``demisto.incidents()`` to provide the list
            # of incidents to create
            demisto.incidents(incidents)

        elif command == 'ip':
            default_threshold_ip = int(params.get('threshold_ip', '65'))
            return_results(ip_reputation_command(client, args, default_threshold_ip))

        elif command == 'domain':
            default_threshold_domain = int(params.get('threshold_domain', '65'))
            return_results(domain_reputation_command(client, args, default_threshold_domain))

        elif command == 'helloworld-say-hello':
            return_results(say_hello_command(client, args))

        elif command == 'helloworld-search-alerts':
            return_results(search_alerts_command(client, args))

        elif command == 'helloworld-get-alert':
            return_results(get_alert_command(client, args))

        elif command == 'helloworld-update-alert-status':
            return_results(update_alert_status_command(client, args))

        elif command == 'helloworld-scan-start':
            return_results(scan_start_command(client, args))

        elif command == 'helloworld-scan-status':
            return_results(scan_status_command(client, args))

        elif command == 'helloworld-scan-results':
            return_results(scan_results_command(client, args))

        else:
            # Fail loudly on an unrecognized command instead of silently doing
            # nothing; the exception is surfaced by return_error() below.
            raise NotImplementedError(f'Command {command} is not implemented')

    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
''' ENTRY POINT '''

# Standard XSOAR entry-point guard: ``__builtin__`` (Python 2) and
# ``builtins`` (Python 3) cover execution inside the XSOAR script runner,
# where ``__name__`` is not ``__main__``.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | 56,888 | Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this HelloWorld implementation, no special attributes defined
Maps HelloWorld severity to Cortex XSOAR severity
Converts the HelloWorld alert severity level ('Low', 'Medium',
'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)
for mapping.
:type severity: ``str``
:param severity: severity as returned from the HelloWorld API (str)
:return: Cortex XSOAR Severity (1 to 4)
:rtype: ``int``
domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
This function retrieves new alerts every interval (default is 1 minute).
This function has to implement the logic of making sure that incidents are
fetched only once and that no incidents are missed. By default it's invoked by
XSOAR every minute. It will use last_run to save the timestamp of the last
incident it processed. If last_run is not provided, it should use the
integration parameter first_fetch_time to determine when to start fetching
the first time.
:type client: ``Client``
:param Client: HelloWorld client to use
:type max_results: ``int``
:param max_results: Maximum numbers of incidents per fetch
:type last_run: ``Optional[Dict[str, int]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[int]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type alert_status: ``Optional[str]``
:param alert_status:
status of the alert to search for. Options are: 'ACTIVE'
or 'CLOSED'
:type min_severity: ``str``
:param min_severity:
minimum severity of the alert to search for.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type:
type of alerts to search for. There is no list of predefined types
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
Gets a specific HelloWorld alert by id
:type alert_id: ``str``
:param alert_id: id of the alert to return
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]``
helloworld-get-alert command: Returns a HelloWorld alert
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an alert
:rtype: ``CommandResults``
Gets the Domain reputation using the '/domain' API endpoint
:type domain: ``str``
:param domain: domain name to get the reputation for
:return: dict containing the domain reputation as returned from the API
:rtype: ``Dict[str, Any]``
Gets the IP reputation using the '/ip' API endpoint
:type ip: ``str``
:param ip: IP address to get the reputation for
:return: dict containing the IP reputation as returned from the API
:rtype: ``Dict[str, Any]``
ip command: Returns IP reputation for a list of IPs
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['ip']`` is a list of IPs or a single IP
``args['threshold']`` threshold to determine whether an IP is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an IP is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains IPs
:rtype: ``CommandResults``
main function, parses params and runs command functions
:return:
:rtype:
Converts whois date format to an ISO8601 string
Converts the HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS) format
in a datetime. If a list is returned with multiple elements, takes only
the first one.
:type domain_date: ``Union[List[str],str]``
:param domain_date:
a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'
:return: Parsed time in ISO8601 format
:rtype: ``Optional[str]``
Returns 'Hello {name}'
:type name: ``str``
:param name: name to append to the 'Hello' string
:return: string containing 'Hello {name}'
:rtype: ``str``
helloworld-say-hello command: Returns Hello {somename}
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``str``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['name']`` is used as input name
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains the hello world message
:rtype: ``CommandResults``
Gets the results of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve results for
:return: dict containing the scan results as returned from the API
:rtype: ``Dict[str, Any]``
helloworld-scan-results command: Returns results for a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` scan ID to retrieve results
``args['format']`` format of the results. Options are 'file' or 'json'
:return:
A ``CommandResults`` compatible to return ``return_results()``,
that contains a scan result when json format is selected, or
A Dict of entries also compatible to ``return_results()`` that
contains the output file when file format is selected.
:rtype: ``Union[Dict[str, Any],CommandResults]``
Starts a HelloWorld scan on a specific hostname
:type hostname: ``str``
:param hostname: hostname of the machine to scan
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]``
helloworld-start-scan command: Starts a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['hostname']`` hostname to run the scan on
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan job
:rtype: ``CommandResults``
Gets the status of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve status for
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]``
helloworld-scan-status command: Returns status for HelloWorld scans
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` list of scan IDs or single scan ID
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan status
:rtype: ``CommandResults``
Searches for HelloWorld alerts using the '/get_alerts' API endpoint
All the parameters are passed directly to the API as HTTP POST parameters in the request
:type alert_status: ``Optional[str]``
:param alert_status: status of the alert to search for. Options are: 'ACTIVE' or 'CLOSED'
:type severity: ``Optional[str]``
:param severity:
severity of the alert to search for. Comma-separated values.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type: type of alerts to search for. There is no list of predefined types
:type max_results: ``Optional[int]``
:param max_results: maximum number of results to return
:type start_time: ``Optional[int]``
:param start_time: start timestamp (epoch in seconds) for the alert search
:return: list containing the found HelloWorld alerts as dicts
:rtype: ``List[Dict[str, Any]]``
helloworld-search-alerts command: Search alerts in HelloWorld
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'
``args['severity']`` alert severity CSV
``args['alert_type']`` alert type
``args['start_time']`` start time as ISO8601 date or seconds since epoch
``args['max_results']`` maximum number of results to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains alerts
:rtype: ``CommandResults``
Tests API connectivity and authentication
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: HelloWorld client to use
:type name: ``str``
:param name: name to append to the 'Hello' string
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
Changes the status of a specific HelloWorld alert
:type alert_id: ``str``
:param alert_id: id of the alert to return
:type alert_status: ``str``
:param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED'
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]``
helloworld-update-alert-status command: Changes the status of an alert
Changes the status of a HelloWorld alert and returns the updated alert info
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to update
``args['status']`` new status, either ACTIVE or CLOSED
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an updated alert
:rtype: ``CommandResults``
HelloWorld Integration for Cortex XSOAR (aka Demisto)
This integration is a good example of how you can build a Cortex XSOAR Integration
using Python 3. Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
When building a Cortex XSOAR integration that is reusable, a lot of effort
must be placed in the design. We recommend to fill a Design Document template,
that allows you to capture Use Cases, Requirements and Inputs/Outputs.
Example Design document for the this Integration (HelloWorld):
https://docs.google.com/document/d/1wETtBEKg37PHNU8tYeB56M1LE314ux086z3HFeF_cX0
HelloWorld API
--------------
The HelloWorld API is a simple API that shows a realistic use case for an XSOAR
integration. It's actually a real API that is available to the following URL:
https://soar.mastersofhack.com - if you need an API Key to test it out please
reach out to your Cortex XSOAR contacts.
This API has a few basic functions:
- Alerts: the endpoint returns mocked alerts and allows you to search based on
a number of parameters, such as state (ACTIVE or CLOSED), type, timestamp. It
can also return a single alert by ID. This is used to create new Incidents in
XSOAR by using the ``fetch-incidents`` command, which is by default invoked
every minute.
There is also an endpoint that allows to retrieve additional details about a
specific alert by ID, and one to change the alert status to "CLOSED" once
it has been resolved.
- Reputation (ip and domain): these endpoints return, for an IP and
domain respectively, a WHOIS lookup of the entity as well as a reputation score
(from 0 to 100) that is used to determine whether the entity is malicious. This
endpoint is called by XSOAR reputation commands ``ip`` and ``domain`` that
are run automatically every time an indicator is extracted in XSOAR. As a best
practice of design, it is important to map and document the mapping between
a score in the original API format (0 to 100 in this case) to a score in XSOAR
format (0 to 3). This score is called ``DBotScore``, and is returned in the
context to allow automated handling of indicators based on their reputation.
More information: https://xsoar.pan.dev/docs/integrations/dbot
- Scan: to demonstrate how to run commands that are not returning instant data,
the API provides a scan endpoint that simulates scanning a host and generating
a report after the scan is completed. The API has endpoints to start a scan,
which returns a job ID, poll for the scan status and, if the scan is completed,
retrieve the job results.
This function is used in conjunction of the HelloWorld Scan playbook that uses
the GenericPolling mechanism to implement the job polling loop. The results
can be returned in JSON or attachment file format.
Info on GenericPolling: https://xsoar.pan.dev/docs/playbooks/generic-polling
Please check the HelloWorld Design Document referenced above for details about
the raw API responses as well as the design details for this integration.
This integration also has a ``say-hello`` command for backward compatibility,
that doesn't connect to an API and just returns a ``Hello {name}`` string,
where name is the input value provided.
Integration File Structure
--------------------------
An integration usually consists of the following parts:
- Imports
- Constants
- Client Class
- Helper Functions
- Command Functions
- Main Function
- Entry Point
Imports
-------
Here you can import Python module you need for your integration. If you need
a module that is not part of the default XSOAR Docker images, you can add
a custom one. More details: https://xsoar.pan.dev/docs/integrations/docker
There are also internal imports that are used by XSOAR:
- demistomock (imported as demisto): allows your code to work offline for
testing. The actual ``demisto`` module is provided at runtime when the
code runs in XSOAR.
- CommonServerPython.py: contains a set of helper functions, base classes
and other useful components that will make your integration code easier
to maintain.
- CommonServerUserPython.py: includes a set of user defined commands that
are specific to an XSOAR installation. Do not use it for integrations that
are meant to be shared externally.
These imports are automatically loaded at runtime within the XSOAR script
runner, so you shouldn't modify them
Constants
---------
Usually some constants that do not require user parameters or inputs, such
as the default API entry point for your service, or the maximum numbers of
incidents to fetch every time.
Client Class
------------
We recommend to use a Client class to wrap all the code that needs to interact
with your API. Moreover, we recommend, when possible, to inherit from the
BaseClient class, defined in CommonServerPython.py. This class already handles
a lot of the work, such as system proxy settings, SSL certificate verification
and exception handling for HTTP errors.
Note that the Client class should NOT contain any Cortex XSOAR specific code,
i.e. it shouldn't use anything in the ``demisto`` class (functions such as
``demisto.args()`` or ``demisto.results()`` or even ``return_results`` and
``return_error``.
You will use the Command Functions to handle XSOAR inputs and outputs.
When calling an API, you should use the ``_http.request()`` method and you
can return the raw data to the calling function (usually a Command function).
You should usually have one function for each API endpoint.
Look at the code and the commends of this specific class to better understand
the implementation details.
Helper Functions
----------------
Helper functions are usually used as utility functions that are used by several
command functions throughout your code. For example they map arguments to types
or convert severity formats from integration-specific to XSOAR.
Many helper functions are already defined in ``CommonServerPython.py`` and are
often very handy.
Command Functions
-----------------
Command functions perform the mapping between XSOAR inputs and outputs to the
Client class functions inputs and outputs. As a best practice, they shouldn't
contain calls to ``demisto.args()``, ``demisto.results()``, ``return_error``
and ``demisto.command()`` as those should be handled through the ``main()``
function.
However, in command functions, use ``demisto`` or ``CommonServerPython.py``
artifacts, such as ``demisto.debug()`` or the ``CommandResults`` class and the
``Common.*`` classes.
Usually you will have one command function for every specific XSOAR command
you want to implement in your integration, plus ``test-module``,
``fetch-incidents`` and ``fetch-indicators``(if the latter two are supported
by your integration). Each command function should invoke one specific function
of the Client class.
Command functions, when invoked through an XSOAR command usually return data
using the ``CommandResults`` class, that is then passed to ``return_results()``
in the ``main()`` function.
``return_results()`` is defined in ``CommonServerPython.py`` to return
the data to XSOAR. ``return_results()`` actually wraps ``demisto.results()``.
You should never use ``demisto.results()`` directly.
Sometimes you will need to return values in a format that is not compatible
with ``CommandResults`` (for example files): in that case you must return a
data structure that is then passed to ``return_results()``. (i.e.
check the ``scan_results_command`` function in this file that has the option
to return a file to Cortex XSOAR).
In any case you should never call ``return_results()`` directly from the
command functions.
When you create the CommandResults object in command functions, you
usually pass some types of data:
- Human Readable: usually in Markdown format. This is what is presented to the
analyst in the War Room. You can use ``tableToMarkdown()``, defined in
``CommonServerPython.py``, to convert lists and dicts in Markdown and pass it
to ``return_results()`` using the ``readable_output`` argument, or the
``return_results()`` function will call ``tableToMarkdown()`` automatically for
you.
- Context Output: this is the machine readable data, JSON based, that XSOAR can
parse and manage in the Playbooks or Incident's War Room. The Context Output
fields should be defined in your integration YML file and is important during
the design phase. Make sure you define the format and follow best practices.
You can use ``demisto-sdk json-to-outputs`` to autogenerate the YML file
outputs section. Context output is passed as the ``outputs`` argument in ``demisto_results()``,
and the prefix (i.e. ``HelloWorld.Alert``) is passed via the ``outputs_prefix``
argument.
More information on Context Outputs, Standards, DBotScore and demisto-sdk:
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/context-standards
https://xsoar.pan.dev/docs/integrations/dbot
https://github.com/demisto/demisto-sdk/blob/master/demisto_sdk/commands/json_to_outputs/README.md
Also, when you write data in the Context, you want to make sure that if you
return updated information for an entity, to update it and not append to
the list of entities (i.e. in HelloWorld you want to update the status of an
existing ``HelloWorld.Alert`` in the context when you retrieve it, rather than
adding a new one if you already retrieved it). To update data in the Context,
you can define which is the key attribute to use, such as (using the example):
``outputs_key_field='alert_id'``. This means that you are using the ``alert_id``
key to determine whether adding a new entry in the context or updating an
existing one that has the same ID. You can look at the examples to understand
how it works.
More information here:
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/dt
- Raw Output: this is usually the raw result from your API and is used for
troubleshooting purposes or for invoking your command from Automation Scripts.
If not specified, ``return_results()`` will use the same data as ``outputs``.
Main Function
-------------
The ``main()`` function takes care of reading the integration parameters via
the ``demisto.params()`` function, initializes the Client class and checks the
different options provided to ``demisto.commands()``, to invoke the correct
command function passing to it ``demisto.args()`` and returning the data to
``return_results()``. If implemented, ``main()`` also invokes the function
``fetch_incidents()`` with the right parameters and passes the outputs to the
``demisto.incidents()`` function. ``main()`` also catches exceptions and
returns an error message via ``return_error()``.
Entry Point
-----------
This is the integration code entry point. It checks whether the ``__name__``
variable is ``__main__`` , ``__builtin__`` (for Python 2) or ``builtins`` (for
Python 3) and then calls the ``main()`` function. Just keep this convention.
Disable insecure warnings if str parse the value if list with at least one element, parse the first element in any other case return nothing In this case the mapping is straightforward, but more complex mappings might be required in your integration, so a dedicated function is recommended. This mapping should also be documented. INTEGRATION DEVELOPER TIP Client class should raise the exceptions, but if the test fails the exception text is printed to the Cortex XSOAR UI. If you have some specific errors you want to capture (i.e. auth failure) you should catch the exception here and return a string with a more readable output (for example return 'Authentication Error, API Key invalid'). Cortex XSOAR will print everything you return different than 'ok' as an error INTEGRATION DEVELOPER TIP In this case 'name' is an argument set in the HelloWorld.yml file as mandatory, so the null check here as XSOAR will always check it before your code is called. Although it's not mandatory to check, you are welcome to do so. Call the Client function and get the raw response Create the human readable output. It will be in markdown format - https://www.markdownguide.org/basic-syntax/ More complex output can be formatted using ``tableToMarkDown()`` defined in ``CommonServerPython.py`` More information about Context: https://xsoar.pan.dev/docs/integrations/context-and-outputs We return a ``CommandResults`` object, and we want to pass a custom markdown here, so the argument ``readable_output`` is explicit. If not passed, ``CommandResults``` will do a ``tableToMarkdown()`` do the data to generate the readable output. 
Get the last fetch time, if exists last_run is a dict with a single key, called last_fetch Handle first fetch time if missing, use what provided via first_fetch_time otherwise use the stored last fetch for type checking, making sure that latest_created_time is int Initialize an empty list of incidents to return Each incident is a dict with a string as a key Get the CSV list of severities from min_severity If no created_time set is as epoch (0). We use time in ms so we must convert it from the HelloWorld API response to prevent duplicates, we are only adding incidents with creation_time > last fetched incident If no name is present it will throw an exception INTEGRATION DEVELOPER TIP The incident dict is initialized with a few mandatory fields: name: the incident name occurred: the time on when the incident occurred, in ISO8601 format we use timestamp_to_datestring() from CommonServerPython.py to handle the conversion. rawJSON: everything else is packed in a string via json.dumps() and is included in rawJSON. It will be used later for classification and mapping inside XSOAR. severity: it's not mandatory, but is recommended. It must be converted to XSOAR specific severity (int 1 to 4) Note that there are other fields commented out here. You can do some mapping of fields (either out of the box fields, like "details" and "type") or custom fields (like "helloworldid") directly here in the code, or they can be handled in the classification and mapping phase. In either case customers can override them. We leave the values commented out here, but you can use them if you want. 
'details': alert['name'], 'type': 'Hello World Alert', Map to a specific XSOAR incident Type 'CustomFields': { Map specific XSOAR Custom Fields 'helloworldid': alert.get('alert_id'), 'helloworldstatus': alert.get('alert_status'), 'helloworldtype': alert.get('alert_type') } Update last run and add incident if the incident is newer than last fetch Save the next_run as a dict with the last_fetch key to be stored INTEGRATION DEVELOPER TIP Reputation commands usually support multiple inputs (i.e. arrays), so they can be invoked once in XSOAR. In this case the API supports a single IP at a time, so we will cycle this for all the members of the array. We use argToList(), implemented in CommonServerPython.py to automatically return a list of a single element even if the provided input is a scalar. It's a good practice to document the threshold you use to determine if a score is malicious in your integration documentation. Thresholds should also be possible to override, as in this case, where threshold is an actual argument of the command. Initialize an empty list of CommandResults to return each CommandResult will contain context standard for IP HelloWorld score to XSOAR reputation mapping See: https://xsoar.pan.dev/docs/integrations/dbot We are using Common.DBotScore as macros to simplify the mapping. unknown bad suspicious good The context is bigger here than other commands, as it consists in 3 parts: the vendor-specific context (HelloWorld), the standard-context (IP) and the DBotScore. More information: https://xsoar.pan.dev/docs/integrations/context-and-outputs https://xsoar.pan.dev/docs/integrations/context-standards https://xsoar.pan.dev/docs/integrations/dbot Also check the HelloWorld Design Document Create the DBotScore structure first using the Common.DBotScore class. Create the IP Standard Context structure using Common.IP and add dbot_score to it. 
INTEGRATION DEVELOPER TIP In the integration specific Context output (HelloWorld.IP) in this example you want to provide a lot of information as it can be used programmatically from within Cortex XSOAR in playbooks and commands. On the other hand, this API is way to verbose, so we want to select only certain keys to be returned in order not to clog the context with useless information. What to actually return in the context and to define as a command output is subject to design considerations. INTEGRATION DEVELOPER TIP To generate the Context Outputs on the YML use ``demisto-sdk``'s ``json-to-outputs`` option. Define which fields we want to exclude from the context output as they are too verbose. In this case we want to use an custom markdown to specify the table title, but otherwise ``CommandResults()`` will call ``tableToMarkdown()`` automatically INTEGRATION DEVELOPER TIP The output key will be ``HelloWorld.IP``, using ``ip`` as the key field. ``indicator`` is used to provide the context standard (IP) INTEGRATION DEVELOPER TIP Reputation commands usually support multiple inputs (i.e. arrays), so they can be invoked once in XSOAR. In this case the API supports a single IP at a time, so we will cycle this for all the members of the array. We use argToList(), implemented in CommonServerPython.py to automatically return a list of a single element even if the provided input is a scalar. Initialize an empty list of CommandResults to return, each CommandResult will contain context standard for Domain INTEGRATION DEVELOPER TIP We want to convert the dates to ISO8601 as Cortex XSOAR customers and integrations use this format by default HelloWorld score to XSOAR reputation mapping See: https://xsoar.pan.dev/docs/integrations/dbot We are using Common.DBotScore as macros to simplify the mapping. 
unknown bad suspicious good INTEGRATION DEVELOPER TIP The context is bigger here than other commands, as it consists in 3 parts: the vendor-specific context (HelloWorld), the standard-context (Domain) and the DBotScore. More information: https://xsoar.pan.dev/docs/integrations/context-and-outputs https://xsoar.pan.dev/docs/integrations/context-standards https://xsoar.pan.dev/docs/integrations/dbot Also check the sample Design Document Create the Domain Standard Context structure using Common.Domain and add dbot_score to it. In this case we want to use an custom markdown to specify the table title, but otherwise ``CommandResults()`` will call ``tableToMarkdown()`` automatically INTEGRATION DEVELOPER TIP The output key will be ``HelloWorld.Domain``, using ``domain`` as the key field. ``indicator`` is used to provide the context standard (Domain) Check if severity contains allowed values, use all if default Convert the argument to a timestamp using helper function Convert the argument to an int using helper function Severity is passed to the API as a CSV INTEGRATION DEVELOPER TIP We want to convert the "created" time from timestamp(s) to ISO8601 as Cortex XSOAR customers and integrations use this format by default in this example we are not providing a custom markdown, we will let ``CommandResults`` generate it by default. 
INTEGRATION DEVELOPER TIP We want to convert the "created" time from timestamp(s) to ISO8601 as Cortex XSOAR customers and integrations use this format by default tableToMarkdown() is defined is CommonServerPython.py and is used very often to convert lists and dicts into a human readable format in markdown INTEGRATION DEVELOPER TIP We want to convert the "updated" time from timestamp(s) to ISO8601 as Cortex XSOAR customers and integrations use this format by default tableToMarkdown() is defined is CommonServerPython.py and is used very often to convert lists and dicts into a human readable format in markdown INTEGRATION DEVELOPER TIP The API doesn't return the hostname of the scan it was called against, which is the input. It could be useful to have that information in the XSOAR context, so we are adding it manually here, based on the command input argument. INTEGRATION DEVELOPER TIP This function supports returning data in multiple formats, either in a json format that is then mapped to a table, or as a file attachment. In this case, if the format is "file", the return value is different and uses a raw format and ``fileResult()`` directly instead of ``CommandResults``. In either case you should return data to main and call ``return_results()`` from there. Always use ``CommandResults`` when possible but, if you need to return anything special like a file, you can use this raw format. This scan returns CVE information. CVE is also part of the XSOAR context standard, so we must extract CVE IDs and return them also. See: https://xsoar.pan.dev/docs/integrations/context-standardscve INTEGRATION DEVELOPER TIP We want to provide a unique result for every CVE indicator. Since every entity may contain several CVE indicators, we will split the entities result and CVE indicator results. 
make the indicator list unique get the service API url if your Client class inherits from BaseClient, SSL verification is handled out of the box by it, just pass ``verify_certificate`` to the Client constructor How much time before the first fetch to retrieve incidents Using assert as a type guard (since first_fetch_time is always an int when required=True) if your Client class inherits from BaseClient, system proxy is handled out of the box by it, just pass ``proxy`` to the Client constructor INTEGRATION DEVELOPER TIP You can use functions such as ``demisto.debug()``, ``demisto.info()``, etc. to print information in the XSOAR server log. You can set the log level on the server configuration See: https://xsoar.pan.dev/docs/integrations/code-conventionslogging This is the call made when pressing the integration Test button. Set and define the fetch incidents command to run after activated via integration settings. Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH getLastRun() gets the last run dict saves next_run for the time fetch-incidents is invoked fetch-incidents calls ``demisto.incidents()`` to provide the list of incidents to crate Log exceptions and return errors print the traceback | 33,589 | en | 0.783395 |
# CMD
import torch
import torch.nn.functional as F
import cv2
def calculate_psnr(img1, img2):
    """Mean peak signal-to-noise ratio over a batch of images.

    Both inputs are (N, C, H, W) tensors with data range [0, 1]; values
    outside that range are clamped before comparison. Returns a scalar
    tensor with the PSNR (in dB) averaged over the batch. Note the result
    is +inf for identical images (zero MSE).
    """
    a = img1.clamp(0, 1)
    b = img2.clamp(0, 1)
    # Per-image MSE: reduce over channel and spatial dims, keep batch dim.
    per_image_mse = ((a - b) ** 2).mean(dim=[1, 2, 3])
    peak = 1  # PIXEL_MAX for data in [0, 1]
    return 20 * torch.log10(peak / per_image_mse.sqrt()).mean()
def calculate_ssim(img1, img2):
    """Mean SSIM between two image batches with data range [0, 1].

    Uses an 11x11 Gaussian window (sigma=1.5) and a 'valid' convolution
    (no padding), so H and W must both be >= 11. Works for any channel
    count (previously hard-coded to 3 channels).

    BUG FIXES vs. the original:
      * the second assert checked img1 twice instead of img2;
      * mu2 was computed with F.conv1d, which fails on 4-D input --
        it must be F.conv2d like every other filtering step.
    """
    assert isinstance(img1, torch.Tensor)
    assert isinstance(img2, torch.Tensor)
    img1 = img1.clamp(0, 1)
    img2 = img2.clamp(0, 1)
    # SSIM stabilization constants for data range 1 (K1=0.01, K2=0.03).
    C1 = (0.01 * 1) ** 2
    C2 = (0.03 * 1) ** 2
    channels = img1.size(1)
    # Inline normalized 1-D Gaussian (mirrors the module-level gaussian()).
    coords = torch.arange(11, dtype=torch.float32, device=img1.device)
    kernel = torch.exp(-(coords - 11 // 2) ** 2 / float(2 * 1.5 ** 2))
    kernel = (kernel / kernel.sum()).unsqueeze(1)
    # Separable 2-D window, one depthwise filter per channel.
    window = kernel.mm(kernel.t()).float().expand(channels, 1, 11, 11).contiguous()
    mu1 = F.conv2d(img1, window, groups=channels)  # 'valid' convolution
    mu2 = F.conv2d(img2, window, groups=channels)
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = F.conv2d(img1 ** 2, window, groups=channels) - mu1_sq
    sigma2_sq = F.conv2d(img2 ** 2, window, groups=channels) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, groups=channels) - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def gaussian(window_size, sigma):
    """Normalized 1-D Gaussian kernel of length ``window_size`` (sums to 1)."""
    half = window_size // 2
    exponents = torch.tensor(
        [-(x - half) ** 2 / float(2 * sigma ** 2) for x in range(window_size)],
        dtype=torch.float32)
    weights = torch.exp(exponents)
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a depthwise 2-D Gaussian window (sigma=1.5).

    Returns a (channel, 1, window_size, window_size) tensor suitable as a
    grouped-conv weight; each per-channel slice sums to 1.
    """
    # Inlined normalized 1-D Gaussian (formerly delegated to gaussian()).
    center = window_size // 2
    coords = torch.arange(window_size, dtype=torch.float32)
    one_d = torch.exp(-(coords - center) ** 2 / (2 * 1.5 ** 2))
    one_d = (one_d / one_d.sum()).unsqueeze(1)
    # Outer product -> separable 2-D kernel, then replicate per channel.
    two_d = one_d.mm(one_d.t()).float().unsqueeze(0).unsqueeze(0)
    return two_d.expand(channel, 1, window_size, window_size).contiguous()
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Module wrapper around :func:`_ssim` that caches the Gaussian window.

    The window is rebuilt lazily in :meth:`forward` whenever the input's
    channel count or dtype no longer matches the cached one.
    """

    def __init__(self, window_size = 11, size_average = True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        # Start with a single-channel window; forward() adapts it on demand.
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_hit = (channel == self.channel
                     and self.window.data.type() == img1.data.type())
        if cache_hit:
            window = self.window
        else:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Refresh the cache for subsequent calls with the same inputs.
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim2(img1, img2, window_size = 11, size_average = True):
    """Functional SSIM: builds a window matched to ``img1`` and delegates to
    :func:`_ssim`. ``img1`` and ``img2`` are (N, C, H, W) tensors.
    """
    channel = img1.size(1)
    window = create_window(window_size, channel)
    # Move/cast the window so it matches the input tensor's device and dtype.
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
if __name__ == "__main__":
    # Smoke test: constant image batches differing by 0.05 everywhere.
    img1 = torch.ones(1, 3, 256, 256)*0.95
    img2 = torch.ones(1, 3, 256, 256)
    print(ssim2(img1, img2))
    # BUG FIX: `ssim` and `psnr` were undefined names (NameError); this
    # module defines calculate_ssim and calculate_psnr.
    print(calculate_ssim(img1, img2))
    print(calculate_psnr(img1, img2))
CMD if mse == 0: return 100 implemented with pytorch img1 = img1.to(torch.float32) img2 = img2.to(torch.float32) valid mu1 = F.conv2d(img1, window, padding = 11//2, groups = 3) same mu2 = F.conv1d(img2, window, padding = 11//2, groups = 3) mu1_sq = mu1**2 mu2_sq = mu2**2 mu1_mu2 = mu1 * mu2 sigma1_sq = F.conv2d(img1**2, window, padding=11//2, groups=3) - mu1_sq sigma2_sq = F.conv2d(img2**2, window, padding=11//2, groups=3) - mu2_sq sigma12 = F.conv2d(img1 * img2, window, padding=11//2, groups=3) - mu1_mu2 | 536 | en | 0.331042 |
import sys
import time
import torch
import random
import argparse
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# new #
import torch.cuda.amp as amp
def printParaNum(model):
    '''
    function: print the number of total parameters and trainable parameters
    '''
    sizes = [p.numel() for p in model.parameters()]
    trainable_sizes = [p.numel() for p in model.parameters() if p.requires_grad]
    print('Total parameters: %d' % sum(sizes))
    print('Trainable parameters: %d' % sum(trainable_sizes))
def set_random_seed(seed, deterministic=False):
    '''
    function: Set random seed.
    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    '''
    # Seed every RNG the training loop touches: python, numpy, torch CPU.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # Seed all visible GPUs as well when CUDA is present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    if deterministic:
        # Trade speed for bitwise-reproducible CUDNN kernels.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
class Model(nn.Module):
    """Small CNN classifier for 1-channel images (e.g. MNIST) with 10 outputs.

    Four stride-2 downsampling stages, each followed by a stride-1 refinement
    conv; for 28x28 input the final feature map is 32x2x2 = 128, fed to the
    classifier head.
    """

    def __init__(self):
        super(Model, self).__init__()
        # (in_channels, out_channels, stride) per conv stage; order matters --
        # it preserves the original nn.Sequential indices (state_dict keys).
        stages = [(1, 3, 2), (3, 3, 1), (3, 8, 2), (8, 8, 1),
                  (8, 16, 2), (16, 16, 1), (16, 32, 2), (32, 32, 1)]
        layers = []
        for in_ch, out_ch, stride in stages:
            layers += [nn.ReflectionPad2d(1),
                       nn.Conv2d(in_ch, out_ch, 3, stride),
                       nn.BatchNorm2d(out_ch),
                       nn.LeakyReLU(0.2, inplace=True)]
        layers += [nn.Flatten(), nn.Linear(128, 10)]
        self.model = nn.Sequential(*layers)
        self.initialize_weights()

    def forward(self, img):
        return self.model(img)

    def initialize_weights(self):
        """Xavier init for convs, unit gamma for BN, small normal for linear."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_normal_(module.weight.data)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight.data, 0, 0.01)
                module.bias.data.zero_()
# Wall-clock checkpoints are taken after each stage so the log can report
# how long parameter parsing / data loading / training each took.
time_begin = time.time()
print('---------------------------------------- step 1/5 : parameters preparing... ----------------------------------------')
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--batch_size", type=int, default=2048, help="size of the batches")
parser.add_argument("--workers", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--dataset", type=str, default='../dataset/mnist', help="dataset root")
parser.add_argument("--result_dir", type=str, default='../result', help="dir for saving the results")
opt = parser.parse_args()
print(opt)
# Fixed seed + deterministic CUDNN so timing runs are comparable.
set_random_seed(1234, deterministic=True)
time_1 = time.time()
print('---------------------------------------- step 2/5 : data loading... ------------------------------------------------')
# MNIST train split, normalized to [-1, 1] (mean 0.5, std 0.5, one channel).
dataset = datasets.MNIST(opt.dataset, train=True, download=True,
                         transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]))
dataloader = DataLoader(dataset=dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
time_2 = time.time()
print('---------------------------------------- step 3/5 : model defining... ----------------------------------------------')
# Single machine, single GPU: model lives on the default CUDA device.
model = Model().cuda()
printParaNum(model)
time_3 = time.time()
print('---------------------------------------- step 4/5 : requisites defining... -----------------------------------------')
# Loss function
loss_func = nn.CrossEntropyLoss()
# Optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
# NEW: gradient scaler for automatic mixed precision (torch.cuda.amp).
scaler = amp.GradScaler()
time_4 = time.time()
print('---------------------------------------- step 5/5 : training... ----------------------------------------------------')
# Log file is named after this script (sys.argv[0] minus the ".py" suffix).
f = open(opt.result_dir + '/log_' + sys.argv[0][0:-3] + '.txt', 'w')
f.write('Type: single machine, single card, mixing precision' + '\n')
f.write('Parallel manner: none' + '\n')
f.write('Mixing manner: amp' + '\n')
f.write('Setting: epochs: {}, lr: {}, batch_size: {}, workers: {}'.format(opt.epochs, opt.lr, opt.batch_size, opt.workers) + '\n')
f.write('----------------------------' + '\n')
f.write('Training: ' + '\n')
f.write('----------------------------' + '\n')
# Separate accumulators for time spent fetching batches vs. computing.
time_4_dataloading = 0
time_4_computing = 0
for epoch in range(opt.epochs):
    time_4_begin = time.time()
    for i, (imgs, labels) in enumerate(dataloader):
        imgs = imgs.cuda()
        labels = labels.cuda()
        time_temp = time.time()
        time_4_dataloading += time_temp - time_4_begin
        optimizer.zero_grad()
        # NEW: run forward pass and loss in mixed precision under autocast.
        with amp.autocast():
            pred = model(imgs)
            loss = loss_func(pred, labels)
        # NEW: scale the loss to avoid fp16 gradient underflow, then
        # unscale-and-step the optimizer and update the scale factor.
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        _, pred = torch.max(pred, 1)
        acc = (pred == labels).sum().item() / len(labels)
        print('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
            epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc))
        f.write('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
            epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc) + '\n')
        time_4_computing += time.time() - time_temp
        time_4_begin = time.time()
time_5 = time.time()
f.write('\n')
f.write('TIME COST' + '\n')
f.write('Parameters preparing: {:.6f}(s)'.format(time_1 - time_begin) + '\n')
f.write('Data loading: {:.6f}(s)'.format(time_2 - time_1) + '\n')
f.write('Model defining: {:.6f}(s)'.format(time_3 - time_2) + '\n')
f.write('Requisites defining: {:.6f}(s)'.format(time_4 - time_3) + '\n')
f.write('Training: {:.6f}(s)'.format(time_5 - time_4) + '\n')
f.write('    Training (dataloading): {:.6f}(s)'.format(time_4_dataloading) + '\n')
f.write('    Training (computing): {:.6f}(s)'.format(time_4_computing) + '\n')
f.close()
# Checkpoint is also named after this script, e.g. model_<script>.pkl.
torch.save(model.state_dict(), opt.result_dir + '/model_' + sys.argv[0][0:-3] + '.pkl')
function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
new Loss function Optimizers NEW new | 406 | en | 0.454216 |
import csnd6
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from random import randint, random
import time
# For Directory Searching
import glob
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
class RandomLine(object):
    """A value drifting linearly toward random targets.

    Each segment lasts a random number of steps (256-512) and interpolates
    from the current internal value toward a random end point in [0, 1).
    getValue() returns base + range * <current internal value>.
    """

    def __init__(self, base, range):
        self.curVal = 0.0
        self.reset()
        self.base = base
        self.range = range

    def reset(self):
        # Pick a fresh segment: random duration, random target, linear slope.
        self.dur = randint(256, 512)
        self.end = random()
        self.slope = (self.end - self.curVal) / self.dur

    def getValue(self):
        self.dur -= 1
        if self.dur < 0:
            self.reset()
        # Scale the pre-step value, then advance along the current slope.
        scaled = self.base + (self.range * self.curVal)
        self.curVal += self.slope
        return scaled
def createChannel(csound, channelName):
    """Create (or look up) a csound control channel configured for input.

    Returns the one-element MYFLT array whose slot 0 backs the channel.
    """
    holder = csnd6.CsoundMYFLTArray(1)
    csound.GetChannelPtr(
        holder.GetPtr(), channelName,
        csnd6.CSOUND_CONTROL_CHANNEL | csnd6.CSOUND_INPUT_CHANNEL)
    return holder
class ChannelUpdater(object):
    """Binds a csound control channel to a value source.

    ``updater`` may be any object with a ``getValue()`` method (this script
    uses RandomLine and InputData); each ``update()`` call pushes the
    source's current value into the channel.
    """
    def __init__(self, csound, channelName, updater):
        self.updater = updater
        self.channel = createChannel(csound, channelName)
    def update(self):
        # Write into index 0 of the single-element MYFLT array channel.
        self.channel.SetValue(0, self.updater.getValue())
class InputData(object):
    """Reads a scaled control value from one MCP3008 ADC channel over SPI."""

    def __init__(self, channel):
        self.curVal = 0.0
        self.channel = channel
        # Hardware SPI device for the ADC (port/device from module constants).
        self.mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))

    def getValue(self):
        # Raw reading is 0..1023; normalize, offset, and scale by 4, giving
        # values in (0.04, 4.04] -- same formula as the original.
        raw = self.mcp.read_adc(self.channel)
        self.curVal = ((raw / 1023.0) + 0.01) * 4
        return self.curVal
class StoredFiles(object):
    """Tracks the .wav files found one directory above the script."""

    def __init__(self):
        self.reset()
        self.scanfiles()

    def reset(self):
        # Clear the cached file list and its count.
        self.numFiles = 0
        self.files = []

    def scanfiles(self):
        self.files = glob.glob("../*.wav")
        # BUG FIX: numFiles was never updated after a scan and stayed 0;
        # keep it in sync with the scanned list. (Also removed the unused
        # local `mypath`.)
        self.numFiles = len(self.files)
###############################
# Our Orchestra for our project: a single pass-through instrument that
# copies the stereo realtime input straight to the output.
orc = """
sr=44100
ksmps=64
nchnls=2
0dbfs=1
instr 1
ainl, ainr inch 1, 2
outs ainl, ainr
endin"""
inputFiles = StoredFiles()
inputFiles.reset()    # NOTE(review): redundant -- __init__ already resets/scans
inputFiles.scanfiles()
for f in inputFiles.files:
    print f
c = csnd6.Csound()    # create an instance of Csound
c.SetOption("-iadc")  # realtime audio input
c.SetOption("-odac")  # realtime audio output
c.SetOption("-b 64")  # software buffer size (frames)
c.SetOption("-B 128")  # hardware buffer size (frames)
c.SetOption("-+rtaudio=alsa") # use the ALSA realtime audio backend
c.SetOption("--realtime")
c.SetOption("--sched")
c.SetOption("-m7") # message level
c.CompileOrc(orc) # Compile Orchestra from String
# Set the Instrument to Play for 60 seconds. Change this to infinite later.
sco = "f0 $INF\n" + "i1 0 -10\n"
# Set the ftables based on the files within the specified directory.
#fsco = "f 1 0 0 1 \"" + inputFiles.files[0] + "\" 0 0 0\n" #sco = isco + fsco
c.ReadScore(sco) # Read in Score generated from notes
c.Start() # When compiling from strings, this call is necessary before doing any performing
# Create a set of ChannelUpdaters
#channels = [ChannelUpdater(c, "amp", RandomLine(-2.0, 2.0)),
#            ChannelUpdater(c, "freq", RandomLine(0.6, 8.0)),
#            ChannelUpdater(c, "resonance", RandomLine(0.4, .3))]
#freq_ctrl = InputData(0)
#amp_ctrl = InputData(1)
#res_ctrl = InputData(2)
# ADC channel 1 drives "amp", channel 0 drives "freq", and "resonance"
# wanders on its own via a RandomLine.
freq_ctrl = InputData(1)
amp_ctrl = InputData(0)
res_ctrl = RandomLine(0.6, 8.0)
channels = [ChannelUpdater(c, "amp", freq_ctrl),
            ChannelUpdater(c, "freq", amp_ctrl),
            ChannelUpdater(c, "resonance", res_ctrl)]
# Initialize all Channel Values
for chn in channels:
    chn.update()
# Main render loop: one ksmps block per iteration, refreshing every channel.
while (c.PerformKsmps() == 0):
    for chn in channels: # update all channel values
        chn.update()
c.Stop()
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
from st2common import log as logging
from st2common.runners.base import get_metadata as get_runner_metadata
from winrm_runner.winrm_base import WinRmBaseRunner
# Public API of this runner module.
__all__ = [
    'WinRmPsCommandRunner',
    'get_runner',
    'get_metadata'
]
# Module-level logger for this runner.
LOG = logging.getLogger(__name__)
# Runner-parameter key that holds the PowerShell command to execute.
RUNNER_COMMAND = 'cmd'
class WinRmPsCommandRunner(WinRmBaseRunner):
    """Runner that executes a single PowerShell command over WinRM."""

    def run(self, action_parameters):
        # The PowerShell snippet comes from the runner parameters, not from
        # the action parameters.
        command = self.runner_parameters[RUNNER_COMMAND]
        return self.run_ps(command)
def get_runner():
    """Instantiate a WinRmPsCommandRunner with a fresh unique runner id."""
    runner_id = str(uuid.uuid4())
    return WinRmPsCommandRunner(runner_id)
def get_metadata():
    """Return the runner-metadata entry matching this module's short name."""
    module_name = __name__.split('.')[-1]
    all_entries = get_runner_metadata('winrm_runner')
    matches = [entry for entry in all_entries
               if entry['runner_module'] == module_name]
    # Preserves original behavior: IndexError if no entry matches.
    return matches[0]
| contrib/runners/winrm_runner/winrm_runner/winrm_ps_command_runner.py | 1,635 | Licensed to the StackStorm, Inc ('StackStorm') under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. execute | 758 | en | 0.878367 |
import numpy as np
import pandas as pd
# BUG FIX: "%matplotlib auto" is an IPython magic and is a SyntaxError in a
# plain .py file; it only works inside an IPython/Jupyter session.
# %matplotlib auto
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
from sklearn.covariance import EllipticEnvelope
from sklearn.cluster import KMeans
# BUG FIX: removed the dangling "from sklearn." line (SyntaxError).
plt.style.use("fivethirtyeight")
fig = plt.figure(figsize=(12, 15))
# Drop the first two columns (id/gender); keep Age, EstimatedSalary, Purchased.
data = pd.read_csv("Social_Network_Ads.csv")
data = data.iloc[:, 2:]
treeclass = RandomForestClassifier(n_estimators=100, max_depth=10)
# Features are all columns but the last; the last column is the label.
X, y = data.iloc[:, :-1].values, data.iloc[:, -1].values
def plotting_decision_(X, Y, CL):
    """Fit classifier class ``CL`` on (X, Y) and plot its decision boundary.

    Parameters
    ----------
    X : array-like of shape (n_samples, 2) -- exactly two features assumed.
    Y : array-like of shape (n_samples,) -- class labels.
    CL : classifier class exposing fit / predict / decision_function,
         constructible with no arguments.
    """
    X = StandardScaler().fit_transform(X)
    # BUG FIX: split on the Y argument, not the module-level global y.
    X_train, x_test, Y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
    xx_min, xx_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.6
    # BUG FIX: the vertical grid previously reused the x-axis range; derive
    # it from the second feature instead.
    yy_min, yy_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.6
    xx, yy = np.meshgrid(np.arange(xx_min, xx_max, 0.2), np.arange(yy_min, yy_max, 0.2))
    cmap_bright = ListedColormap(["red", "azure"])
    cl = CL()
    cl.fit(X_train, Y_train)
    score = cl.predict(x_test)
    Z = cl.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contour(xx, yy, Z, cmap=plt.cm.jet)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=Y_train, cmap=cmap_bright)
    # NOTE(review): np.mean(score) is the mean predicted label, not an
    # accuracy -- consider cl.score(x_test, y_test); kept as-is otherwise.
    plt.text(xx.max() - .3, xx.min() + .3, (np.mean(score)), size=15, horizontalalignment="right")
#sns.relplot(x="Age",y="EstimatedSalary",data=data,hue="Purchased")
#sns.boxplot(x=data["Purchased"],y=data["EstimatedSalary"],whis=2,saturation=0.6)
#from sklearn.ensemble import IsolationForest
#IF=IsolationForest(n_estimators=100,bootstrap=False)
#IF.fit(X[:,0].reshape(-1,1))
#xx=np.linspace(X[:,0].min()-5,X[:,0].max()+5,len(data)).reshape(-1,1)
#outlier=IF.predict(xx)
#anomaly_score=IF.decision_function(xx)
#plt.plot(xx,anomaly_score,label="automated")
| AnomalyDetection/DB.py | 1,944 | sns.relplot(x="Age",y="EstimatedSalary",data=data,hue="Purchased")sns.boxplot(x=data["Purchased"],y=data["EstimatedSalary"],whis=2,saturation=0.6)from sklearn.ensemble import IsolationForestIF=IsolationForest(n_estimators=100,bootstrap=False)IF.fit(X[:,0].reshape(-1,1))xx=np.linspace(X[:,0].min()-5,X[:,0].max()+5,len(data)).reshape(-1,1)outlier=IF.predict(xx)anomaly_score=IF.decision_function(xx)plt.plot(xx,anomaly_score,label="automated") | 443 | en | 0.200193 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for automatic batching and unbatching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.batching.ops import gen_batch_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.batching.ops.gen_batch_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
# Load the compiled kernel library backing the ops used via gen_batch_ops.
_batch_ops = loader.load_op_library(
    resource_loader.get_path_to_datafile("_batch_ops.so"))
@ops.RegisterGradient("Batch")
def _BatchGrad(op, *out_grads):  # pylint: disable=invalid-name
  """Gradient for batch op.

  Each input's gradient is obtained by unbatching the corresponding output
  gradient using the batch_index and id tensors the Batch op produced
  (its last two outputs).
  """
  return [
      gen_batch_ops.unbatch(
          out_grads[i],
          op.outputs[-2],
          op.outputs[-1],
          timeout_micros=op.get_attr("grad_timeout_micros"),
          shared_name="batch_gradient_{}_{}".format(op.name, i))
      for i in range(len(op.inputs))
  ]
@ops.RegisterGradient("Unbatch")
def _UnbatchGrad(op, grad):  # pylint: disable=invalid-name
  """Gradient for unbatch op: only the first input (the batched tensor)
  receives a gradient; batch_index and id get None."""
  batched_grad = gen_batch_ops.unbatch_grad(
      op.inputs[0],
      op.inputs[1],
      grad,
      op.inputs[2],
      shared_name="unbatch_gradient_{}".format(op.name))
  return [batched_grad, None, None]
def batch_function(num_batch_threads, max_batch_size, batch_timeout_micros,
                   allowed_batch_sizes=None,
                   grad_timeout_micros=60 * 1000 * 1000,
                   unbatch_timeout_micros=60 * 1000 * 1000):
  """Batches the computation done by the decorated function.

  So, for example, in the following code

  ```python
  @batch_function(1, 2, 3)
  def layer(a):
    return tf.matmul(a, a)

  b = layer(w)
  ```

  if more than one session.run call is simultaneously trying to compute `b`
  the values of `w` will be gathered, non-deterministically concatenated
  along the first axis, and only one thread will run the computation. See the
  documentation of the `Batch` op for more details.

  Assumes that all arguments of the decorated function are Tensors which will
  be batched along their first dimension.

  SparseTensor is not supported. The return value of the decorated function
  must be a Tensor or a list/tuple of Tensors.

  Args:
    num_batch_threads: Number of scheduling threads for processing batches
     of work. Determines the number of batches processed in parallel.
    max_batch_size: Batch sizes will never be bigger than this.
    batch_timeout_micros: Maximum number of microseconds to wait before
     outputting an incomplete batch.
    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
     does nothing. Otherwise, supplies a list of batch sizes, causing the op
     to pad batches up to one of those sizes. The entries must increase
     monotonically, and the final entry must equal max_batch_size.
    grad_timeout_micros: The timeout to use for the gradient. See the
     documentation of the unbatch op for more details. Defaults to 60s.
    unbatch_timeout_micros: The timeout to use for unbatching. See the
     documentation of the unbatch op for more details. Defaults to 60s.

  Returns:
    The decorated function will return the unbatched computation output
    Tensors.
  """
  def decorator(f):  # pylint: disable=missing-docstring
    def decorated(*args):
      with ops.name_scope("batch") as name:
        for a in args:
          if not isinstance(a, ops.Tensor):
            raise ValueError("All arguments to functions decorated with "
                             "`batch_function` are supposed to be Tensors; "
                             "found %s" % repr(a))
        # The Batch op concatenates concurrent callers' inputs; batch_index
        # and id_t are needed below to route each caller's slice back out.
        batched_tensors, batch_index, id_t = gen_batch_ops.batch(
            args,
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            allowed_batch_sizes=allowed_batch_sizes,
            grad_timeout_micros=grad_timeout_micros,
            shared_name=name)
        outputs = f(*batched_tensors)
        # Normalize to a list so single-Tensor results are handled uniformly.
        if isinstance(outputs, ops.Tensor):
          outputs_list = [outputs]
        else:
          outputs_list = outputs
        with ops.name_scope("unbatch") as unbatch_name:
          unbatched = [
              gen_batch_ops.unbatch(t, batch_index, id_t,
                                    timeout_micros=unbatch_timeout_micros,
                                    shared_name=unbatch_name)
              for t in outputs_list]
        # Mirror the caller's output structure: Tensor in -> Tensor out.
        if isinstance(outputs, ops.Tensor):
          return unbatched[0]
        return unbatched
    return decorated
  return decorator
| tensorflow/contrib/batching/python/ops/batch_ops.py | 5,433 | Gradient for batch op.
Batches the computation done by the decorated function.
So, for example, in the following code
```python
@batch_function(1, 2, 3)
def layer(a):
return tf.matmul(a, a)
b = layer(w)
```
if more than one session.run call is simultaneously trying to compute `b`
the values of `w` will be gathered, non-deterministically concatenated
along the first axis, and only one thread will run the computation. See the
documentation of the `Batch` op for more details.
Assumes that all arguments of the decorated function are Tensors which will
be batched along their first dimension.
SparseTensor is not supported. The return value of the decorated function
must be a Tensor or a list/tuple of Tensors.
Args:
num_batch_threads: Number of scheduling threads for processing batches
of work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
grad_timeout_micros: The timeout to use for the gradient. See the
documentation of the unbatch op for more details. Defaults to 60s.
unbatch_timeout_micros: The timeout to use for unbatching. See the
documentation of the unbatch op for more details. Defaults to 60s.
Returns:
The decorated function will return the unbatched computation output Tensors.
Operations for automatic batching and unbatching.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== go/tf-wildcard-import pylint: disable=wildcard-import pylint: enable=wildcard-import pylint: disable=invalid-name pylint: disable=invalid-name pylint: disable=missing-docstring | 2,577 | en | 0.81486 |
'''
####################################################################
# author wudong
# date 20190816
# Test DDPG in a continuous PuckWorld environment.
# Both the state space and the action space are continuous:
#   state space:  x, y coordinates
#   action space: horizontal and vertical force magnitudes in [-1, 1]
# NOTE: training progresses very slowly -- unclear whether this is a
# machine limitation or an algorithm issue.
######################################################################
'''
import gym
from puckworld_continuous import PuckWorldEnv
from ddpg_agent import DDPGAgent
from utils import learning_curve
import numpy as np
# Build the environment and the DDPG agent.
env = PuckWorldEnv()
agent = DDPGAgent(env)
# Train the agent and save model checkpoints.
data = agent.learning(max_episode_num=200,display=True,explore=True)
# # To load a trained model and watch the agent's behavior instead:
# agent.load_models(300)
# data = agent.learning(max_episode_num=100,display=True,explore = False)
| DDPG/test_ddpg_puckWorld.py | 877 | ####################################################################
# author wudong
# date 20190816
# 在连续的puckworld空间中测试DDPG
# 状态空间和行为空间连续
# 状态空间:x,y
# 行为空间:水平和竖直方向上的力的大小[-1,1]
# ps 不知道是计算机的原因还是算法的原因,训练不动
######################################################################
建立env和DDPG agent 训练并保存模型 加载训练好的模型,观察angent的表现 agent.load_models(300) data = agent.learning(max_episode_num=100,display=True,explore = False) | 420 | zh | 0.31812 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains code to serve a web application to convert HTML to PDF.
This application uses a local install of the `wkhtmltopdf` binary for the conversion.
"""
import asyncio
import os
from subprocess import CalledProcessError, check_output
from tempfile import TemporaryDirectory

from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
async def execute_wkhtmltopdf(uri: str) -> bytes:
    """Convert *uri* (a URL or local file path) to PDF with wkhtmltopdf.

    Runs the ``wkhtmltopdf`` binary as a subprocess and returns the PDF
    bytes it writes to stdout.

    Raises:
        CalledProcessError: if wkhtmltopdf exits with a non-zero status.
    """
    cmd = [
        "wkhtmltopdf",
        "--log-level",
        "none",
        uri,
        "-",  # "-" makes wkhtmltopdf write the PDF to stdout
    ]
    # The original check_output() call blocked the event loop for the whole
    # conversion; asyncio's subprocess support lets other requests proceed.
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
    )
    stdout, _ = await proc.communicate()
    if proc.returncode != 0:
        # Mirror check_output()'s failure mode so callers see the same error.
        raise CalledProcessError(proc.returncode, cmd, output=stdout)
    return stdout
async def convert_body(request: Request):
    """Convert the raw HTML in the request body to a PDF response.

    The body is written to a temporary file first, since wkhtmltopdf is
    much easier to drive with a file argument than via STDIN.
    """
    body = await request.body()
    if not body:
        return Response("ERROR: No body", status_code=400)
    with TemporaryDirectory() as tmpdirname:
        html_path = os.path.join(tmpdirname, "out.html")
        with open(html_path, "w") as fh:
            fh.write(body.decode("utf-8"))
        pdf = await execute_wkhtmltopdf(html_path)
    return Response(pdf, media_type="application/pdf")
async def convert_uri(request: Request):
    """Convert the document at the JSON-supplied ``uri`` to a PDF response.

    Expects a JSON body of the form ``{"uri": "<url-or-path>"}``.
    """
    try:
        data = await request.json()
    except ValueError:
        # json raises ValueError (JSONDecodeError) on malformed input;
        # report a client error instead of crashing with a 500.
        return Response("Invalid JSON in request", status_code=400)
    if "uri" not in data:
        return Response("Invalid JSON in request", status_code=400)
    # renamed from "bytes" to avoid shadowing the builtin
    pdf = await execute_wkhtmltopdf(data["uri"])
    return Response(pdf, media_type="application/pdf")
# Wire the two conversion endpoints into the ASGI application.
app = Starlette(
    debug=True,  # NOTE(review): debug mode leaks tracebacks; disable in production
    routes=[
        Route("/uri", convert_uri, methods=["POST"]),   # JSON {"uri": ...}
        Route("/data", convert_body, methods=["POST"]),  # raw HTML body
    ],
)
| src/app.py | 1,800 | This file contains code to serve a web application to convert HTML to PDF.
This application uses a local install of the `wkhtmltopdf` binary for the conversion.
!/usr/bin/env python3 -*- coding: utf-8 -*- | 206 | en | 0.764365 |
import os
import shutil
import subprocess
import sys
from enum import Enum
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QApplication, QListWidgetItem, QFileDialog, QComboBox, QMessageBox, \
QAbstractItemView, QDialogButtonBox, QLabel, QWidget, QPushButton, QListWidget, QFrame, QProgressBar, QStatusBar
class Status(Enum):
    """Tri-state result code returned by the GUI button callbacks."""
    OK = 0    # operation completed (possibly a no-op)
    WARN = 1  # recoverable problem; an error dialog was shown to the user
    FAIL = 2  # hard failure (reserved; not returned by any visible callback)
def get_qt_data_keys(num_keys):
    """Return ``num_keys`` item-data role values not reserved by Qt.

    Qt item widgets reserve a set of data roles (DisplayRole, EditRole,
    ...).  This scans role values 0-255 and returns the first ones that
    are free, so callers can attach custom data to QListWidgetItems
    without clobbering a built-in role.

    Args:
        num_keys: number of free role values wanted; must be <= 255.

    Returns:
        list of ``num_keys`` integers usable as custom Qt data roles.
    """
    # Original read "assert cond and 'msg'", which never attaches the
    # message to the AssertionError; the comma form does.
    assert num_keys <= 255, "too many keys queried"
    # A set gives O(1) membership tests while scanning the candidate range.
    used_keys = {int(role) for role in (QtCore.Qt.CheckStateRole,
                                        QtCore.Qt.DecorationRole,
                                        QtCore.Qt.AccessibleDescriptionRole,
                                        QtCore.Qt.AccessibleTextRole,
                                        QtCore.Qt.BackgroundColorRole,
                                        QtCore.Qt.BackgroundRole,
                                        QtCore.Qt.DisplayRole,
                                        QtCore.Qt.EditRole,
                                        QtCore.Qt.FontRole,
                                        QtCore.Qt.ForegroundRole,
                                        QtCore.Qt.InitialSortOrderRole,
                                        QtCore.Qt.SizeHintRole,
                                        QtCore.Qt.StatusTipRole,
                                        QtCore.Qt.TextAlignmentRole,
                                        QtCore.Qt.TextColorRole,
                                        QtCore.Qt.ToolTipRole,
                                        QtCore.Qt.UserRole,
                                        QtCore.Qt.WhatsThisRole)}
    keys = []
    for key in range(256):
        if len(keys) == num_keys:
            break
        if key not in used_keys:
            keys.append(key)
    return keys
class ChooseStagePopupUI:
    """Builds the widgets for the "choose stage to replace" popup.

    Widget construction is kept separate from the QMainWindow subclass
    below, mirroring the designer-style UI/behavior split used elsewhere
    in this file.
    """
    def __init__(self):
        self._stage_select_combobox = None  # type: QComboBox
        self._dialog_button_box = None  # type: QDialogButtonBox
        self._choose_stage_label = None  # type: QLabel
        # Plain stage names as read from the stage list file, indexed by
        # combobox row; rows may later be decorated with "[replacement]".
        self._stage_base_names = []
    def _setup_ui(self, choose_stage_popup):
        """Create and position the combobox, label and OK/Cancel buttons."""
        choose_stage_popup.setObjectName("choose_stage_popupI")
        choose_stage_popup.resize(493, 108)
        self._stage_select_combobox = QComboBox(choose_stage_popup)
        self._stage_select_combobox.setGeometry(QtCore.QRect(10, 30, 471, 27))
        self._stage_select_combobox.setObjectName("stage_select_combobox")
        self._load_stages()
        self._dialog_button_box = QDialogButtonBox(choose_stage_popup)
        self._dialog_button_box.setGeometry(QtCore.QRect(150, 70, 176, 27))
        self._dialog_button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
        self._dialog_button_box.setObjectName("dialog_button_box")
        # Cancel simply closes the popup; OK is wired later via connect().
        self._dialog_button_box.rejected.connect(self.close)
        self._choose_stage_label = QLabel(choose_stage_popup)
        self._choose_stage_label.setGeometry(QtCore.QRect(10, 10, 461, 17))
        self._choose_stage_label.setObjectName("choose_stage_label")
        self._choose_stage_label.setText("Choose monkeyball stage to replace (Challenge Mode)")
        choose_stage_popup.setWindowTitle("Choose Stage to Replace")
    def _load_stages(self):
        """Populate the combobox from resources/challenge_stages_list.txt."""
        with open(os.path.join(get_mbreplacer_dir(), 'resources', 'challenge_stages_list.txt'), 'r') as f:
            for line in f:
                clean_line = line.strip()
                self._stage_select_combobox.addItem(clean_line)
                self._stage_base_names.append(clean_line)
class ChooseStagePopup(QMainWindow, ChooseStagePopupUI):
    """Popup window for picking which built-in stage a custom stage replaces.

    Wraps the combobox built by ChooseStagePopupUI with small accessor
    helpers used by the main window.
    """

    def __init__(self):
        QMainWindow.__init__(self)
        ChooseStagePopupUI.__init__(self)
        self._setup_ui(self)

    def connect(self, callback):
        """Invoke *callback* when the user accepts the dialog (OK)."""
        self._dialog_button_box.accepted.connect(callback)

    def get_selected_stage_index(self):
        """Return the currently selected combobox row as an int."""
        return int(self._stage_select_combobox.currentIndex())

    def set_associated_stage(self, index, associated_stage):
        """Tag row *index* with the name of its replacement stage."""
        label = "{} [{}]".format(self._stage_base_names[index], associated_stage)
        self._stage_select_combobox.setItemText(index, label)

    def remove_associated_stage(self, stage_index):
        """Restore row *stage_index* to its plain base name."""
        base_name = self._stage_base_names[stage_index]
        self._stage_select_combobox.setItemText(stage_index, base_name)

    def get_stage_name(self, index):
        """Return the human-readable stage name for row *index*."""
        # Base names look like "<id>: <name>"; drop the id and the space.
        return self._stage_base_names[index].split(":")[1][1:]

    def get_stage_id(self, index):
        """Return the stage id (text before the colon) for row *index*."""
        base_name = self._stage_base_names[index]
        return base_name.split(":")[0]

    def increment_stage_index(self):
        """Advance the combobox selection by one row, wrapping at the end."""
        idx = self._stage_select_combobox.currentIndex()
        last = self._stage_select_combobox.count() - 1
        next_idx = 0 if idx == last else idx + 1
        self._stage_select_combobox.setCurrentIndex(next_idx)
class MBReplacerUI:
    """Builds and lays out the widgets of the main mbreplacer window.

    Pure construction/geometry; all behavior lives in MBReplacer below.
    """
    def __init__(self):
        self._central_widget = None  # type: QWidget
        self._import_multiple_stages_btn = None  # type: QPushButton
        self._import_root_btn = None  # type: QPushButton
        self._imported_stages_list = None  # type: QListWidget
        self._imported_stages_label = None  # type: QLabel
        self._replace_queue_list = None  # type: QListWidget
        self._stages_to_be_replaced_label = None  # type: QLabel
        self._replace_btn = None  # type: QPushButton
        self._add_to_replace_btn = None  # type: QPushButton
        self._remove_from_replace_btn = None  # type: QPushButton
        self._progress_bar = None  # type: QProgressBar
        self._line = None  # type: QFrame
        self._add_single_stage_btn = None  # type: QPushButton
        self._remove_single_stage_btn = None  # type: QPushButton
        self._status_bar = None  # type: QStatusBar
    def _setup_ui(self, mbreplacer):
        """Create and position all widgets with fixed geometry.

        Layout (left to right): imported-stages list, the ->/<- transfer
        buttons, and the replace-queue list; import buttons at the top,
        action buttons and a status label at the bottom.
        """
        mbreplacer.setObjectName("mbreplacer")
        mbreplacer.resize(961, 545)
        self._central_widget = QWidget(mbreplacer)
        self._central_widget.setObjectName("centralWidget")
        self._import_multiple_stages_btn = QPushButton(self._central_widget)
        self._import_multiple_stages_btn.setGeometry(QtCore.QRect(150, 490, 151, 27))
        self._import_multiple_stages_btn.setObjectName("import_multiple_stages_btn")
        self._import_multiple_stages_btn.setText("import multiple from folder")
        self._import_root_btn = QPushButton(self._central_widget)
        self._import_root_btn.setGeometry(QtCore.QRect(10, 10, 161, 31))
        self._import_root_btn.setObjectName("import_root_btn")
        self._import_root_btn.setText("import root folder")
        self._imported_stages_list = QListWidget(self._central_widget)
        self._imported_stages_list.setGeometry(QtCore.QRect(10, 80, 431, 401))
        self._imported_stages_list.setObjectName("imported_stages_list")
        self._imported_stages_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self._imported_stages_label = QLabel(self._central_widget)
        self._imported_stages_label.setGeometry(QtCore.QRect(170, 50, 111, 31))
        self._imported_stages_label.setObjectName("imported_stages_label")
        self._imported_stages_label.setText("imported stages")
        self._replace_queue_list = QListWidget(self._central_widget)
        self._replace_queue_list.setGeometry(QtCore.QRect(520, 80, 431, 401))
        self._replace_queue_list.setObjectName("replace_queue_list")
        self._replace_queue_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self._stages_to_be_replaced_label = QLabel(self._central_widget)
        self._stages_to_be_replaced_label.setGeometry(QtCore.QRect(660, 50, 151, 31))
        self._stages_to_be_replaced_label.setObjectName("stages_to_be_replaced_label")
        self._stages_to_be_replaced_label.setText("stages to be replaced")
        self._replace_btn = QPushButton(self._central_widget)
        self._replace_btn.setGeometry(QtCore.QRect(670, 490, 131, 31))
        self._replace_btn.setObjectName("replace_btn")
        self._replace_btn.setText("replace!")
        self._add_to_replace_btn = QPushButton(self._central_widget)
        self._add_to_replace_btn.setGeometry(QtCore.QRect(460, 230, 41, 27))
        self._add_to_replace_btn.setObjectName("add_to_replace_btn")
        self._add_to_replace_btn.setText("->")
        self._remove_from_replace_btn = QPushButton(self._central_widget)
        self._remove_from_replace_btn.setGeometry(QtCore.QRect(460, 280, 41, 27))
        self._remove_from_replace_btn.setObjectName("remove_from_replace_btn")
        self._remove_from_replace_btn.setText("<-")
        self._line = QFrame(self._central_widget)
        self._line.setGeometry(QtCore.QRect(0, 40, 961, 20))
        self._line.setFrameShape(QFrame.HLine)
        self._line.setFrameShadow(QFrame.Sunken)
        self._line.setObjectName("line")
        self._add_single_stage_btn = QPushButton(self._central_widget)
        self._add_single_stage_btn.setGeometry(QtCore.QRect(310, 490, 31, 27))
        self._add_single_stage_btn.setObjectName("add_single_stage_btn")
        self._add_single_stage_btn.setText("+")
        self._remove_single_stage_btn = QPushButton(self._central_widget)
        self._remove_single_stage_btn.setGeometry(QtCore.QRect(110, 490, 31, 27))
        self._remove_single_stage_btn.setObjectName("remove_single_stage_btn")
        self._remove_single_stage_btn.setText("-")
        self._root_folder_label = QLabel(self._central_widget)
        self._root_folder_label.setGeometry(QtCore.QRect(220, 16, 341, 21))
        self._root_folder_label.setObjectName("root_folder_label")
        mbreplacer.setCentralWidget(self._central_widget)
        self._status_bar_label = QLabel(self._central_widget)
        self._status_bar_label.setGeometry(QtCore.QRect(5, 525, 961, 24))
        self._status_bar_label.setObjectName("status_bar_label")
        mbreplacer.setWindowTitle("mbreplacer: stage replacer")
class MBReplacer(QMainWindow, MBReplacerUI):
    """Main window: import custom stages, queue them against built-in stage
    slots, and write the converted files into an extracted game root folder.

    The conversion pipeline shells out to three external tools found by
    scanning the mbreplacer directory (see _required_tools).
    """
    def __init__(self):
        QMainWindow.__init__(self)
        MBReplacerUI.__init__(self)
        self._setup_ui(self)
        # Wire each button to its handler.
        self._import_multiple_stages_btn.clicked.connect(self._import_multiple_stages_btn_clicked)
        self._import_root_btn.clicked.connect(self._import_root_btn_clicked)
        self._add_to_replace_btn.clicked.connect(self._add_to_replace_btn_clicked)
        self._remove_from_replace_btn.clicked.connect(self._remove_from_replace_btn_clicked)
        self._replace_btn.clicked.connect(self._replace_btn_clicked)
        self._add_single_stage_btn.clicked.connect(self._add_single_stage_btn_clicked)
        self._remove_single_stage_btn.clicked.connect(self._remove_single_stage_btn_clicked)
        self._root_folder_path = None
        self._imported_stages = []
        self._stages_to_be_replaced = []
        self._choose_stage_popup = ChooseStagePopup()
        # Free Qt data roles used to stash per-item metadata on list items.
        self._input_filenames_key, self._output_stage_id_key, self._is_valid_input_key = get_qt_data_keys(3)
        # the tuple allows for replacement files for the given element. obj and mtl are required and have no replacement
        # but for config we can take xml lz or lz.raw. let the order of the tuple denote priority (we want xml over all)
        self._required_extensions = [("obj",), ("mtl",), ("xml", "lz", "lz.raw")]
        self._required_tools = ['GxModelViewer.exe', 'ws2lzfrontend.exe', 'SMB_LZ_Tool.exe']
        self._tool_filepaths = self._find_required_tools()
        self._imported_obj_filepaths = []
        # List of (replacement_stage_name, stage_index) tuples mirroring
        # the rows of self._replace_queue_list.
        self._replace_queue = []
        self._temp_dir = os.path.join(get_mbreplacer_dir(), 'temp')
    def _find_required_tools(self):
        """Walk the mbreplacer directory tree and map tool names to paths.

        Returns a dict {tool filename: absolute path} for every required
        tool that was found; missing tools are simply absent from the dict.
        """
        tool_filepaths = {}
        [tool_filepaths.update({f: os.path.join(dp, f)})
         for dp, dn, filenames in os.walk(get_mbreplacer_dir())
         for f in filenames if f in self._required_tools]
        return tool_filepaths
    # button callbacks:
    def _add_single_stage(self, obj_filepath):
        """Gather the .obj's companion files and add a row to the imported list.

        The row's custom data roles record the collected file paths and
        whether all required inputs (obj, mtl, config, textures) were found;
        the row text summarizes what is present plus vertex/face counts.
        """
        import_stage_directory = os.path.dirname(obj_filepath)
        import_stage_base_name = str(os.path.basename(obj_filepath).split(".")[0])
        all_filenames = os.listdir(import_stage_directory)
        collected_filepaths = {}
        item_string = import_stage_base_name + " | has: ["
        # For each requirement, take the first present alternative
        # (tuple order encodes priority).
        for required_extension in self._required_extensions:
            for extension in required_extension:
                filename = import_stage_base_name + "." + extension
                if filename in all_filenames:
                    collected_filepaths[os.path.splitext(filename)[1][1:]] = os.path.join(import_stage_directory, filename)
                    item_string += extension + ", "
                    break
        item_string = item_string[:-2] + "]"
        all_textures_present = False
        if 'mtl' in collected_filepaths:
            # map_Kd lines in the .mtl reference the texture image files.
            with open(collected_filepaths['mtl'], 'r') as f:
                required_textures = []
                for line in f:
                    split_line = line.strip().split()
                    if split_line and split_line[0] == 'map_Kd':
                        if os.path.isabs(split_line[1]):
                            required_textures.append(split_line[1])
                        else:
                            required_textures.append(os.path.join(import_stage_directory, split_line[1]))
                all_textures_present = all([os.path.exists(texture) for texture in required_textures])
        item_string += " | textures: " + ("yes" if all_textures_present else "no")
        with open(obj_filepath, 'r') as f:
            obj_lines = f.readlines()
        num_vertices = len([line for line in obj_lines if line.startswith('v ')])
        num_faces = len([line for line in obj_lines if line.startswith('f ')])
        item_string += " | v:" + str(num_vertices) + " f: " + str(num_faces)
        all_inputs_met = len(collected_filepaths.keys()) == len(self._required_extensions) and all_textures_present
        item = QListWidgetItem()
        item.setData(self._input_filenames_key, collected_filepaths)
        item.setData(self._is_valid_input_key, all_inputs_met)
        item.setText(item_string)
        item.setIcon(QIcon("resources/green_checkmark.png") if all_inputs_met else QIcon("resources/red_xmark.png"))
        self._imported_stages_list.addItem(item)
        return Status.OK
    def _add_single_stage_btn_clicked(self):
        """Prompt for one .obj file and (re)import it, replacing duplicates."""
        file_dialog = QFileDialog()
        obj_filepath = QFileDialog.getOpenFileName(file_dialog,
                                                   "import stage .obj file",
                                                   get_mbreplacer_dir(),
                                                   "*.obj")[0]
        # Re-importing an already-imported path replaces the old row.
        if obj_filepath in self._imported_obj_filepaths:
            duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
            duplicate_item = self._imported_stages_list.item(duplicate_idx)
            self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
            del self._imported_obj_filepaths[duplicate_idx]
        if obj_filepath:
            self._add_single_stage(obj_filepath)
            self._imported_obj_filepaths.append(obj_filepath)
            self._imported_stages_list.sortItems()
        return Status.OK
    def _remove_single_stage_btn_clicked(self):
        """Remove the selected rows from the imported-stages list."""
        # NOTE(review): self._imported_obj_filepaths is not pruned here, so
        # its indices drift out of sync with the list widget — verify.
        selected_items = self._imported_stages_list.selectedItems()
        for selected_item in selected_items:
            self._imported_stages_list.takeItem(self._imported_stages_list.row(selected_item))
        return Status.OK
    def _import_multiple_stages_btn_clicked(self):
        """Recursively import every .obj found under a chosen folder."""
        file_dialog = QFileDialog()
        file_dialog.setParent(self.sender())
        stages_folder_path = QFileDialog.getExistingDirectory(file_dialog,
                                                              "import folder with multiple objs/mtls/configs",
                                                              get_mbreplacer_dir())
        stages_folder_path = QtCore.QDir.toNativeSeparators(stages_folder_path)
        obj_filepaths = [os.path.join(dp, f)
                         for dp, dn, filenames in os.walk(stages_folder_path)
                         for f in filenames if os.path.splitext(f)[1] == '.obj']
        for obj_filepath in obj_filepaths:
            # Same duplicate-replacement policy as single import.
            if obj_filepath in self._imported_obj_filepaths:
                duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
                duplicate_item = self._imported_stages_list.item(duplicate_idx)
                self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
                del self._imported_obj_filepaths[duplicate_idx]
            self._add_single_stage(obj_filepath)
            self._imported_obj_filepaths.append(obj_filepath)
        if obj_filepaths:
            self._imported_stages_list.sortItems()
        return Status.OK
    def _import_root_btn_clicked(self):
        """Select the extracted game root; reject folders without 'stage/'."""
        file_dialog = QFileDialog()
        file_dialog.setParent(self.sender())
        self._root_folder_path = QFileDialog.getExistingDirectory(file_dialog,
                                                                  "import root folder extracted from .iso",
                                                                  get_mbreplacer_dir())
        self._root_folder_path = QtCore.QDir.toNativeSeparators(self._root_folder_path)
        if not os.path.exists(os.path.join(self._root_folder_path, 'stage')):
            self._root_folder_path = None
            self._give_error_message("root folder seems to be invalid, no 'stage' folder found")
            return
        self._root_folder_label.setText(self._root_folder_path)
    def _add_to_replace_btn_clicked(self):
        """Open the stage-chooser popup for the selected imported stages.

        Refuses (with an error dialog) when any selected row is missing a
        required input file.
        """
        selected_items = self._imported_stages_list.selectedItems()
        if not selected_items:
            return Status.OK
        elif not all([selected_item.data(self._is_valid_input_key) for selected_item in selected_items]):
            required = [', or '.join(required_extension) for required_extension in self._required_extensions]
            self._give_error_message("Could not find all required files for one of the selected stages!\n"
                                     "Please sure the required files are in the same directory as the .obj,\n"
                                     "then reimport the stage!\n\n"
                                     "Required Extensions: " + str(required) + "\n\n"
                                     "Also requires that all linked textures are found. "
                                     "(open the mtl file as txt to see the texture paths)\n\n"
                                     )
            return Status.WARN
        else:
            self._choose_stage_popup.setWindowModality(QtCore.Qt.WindowModal)
            self._choose_stage_popup.connect(self._on_choose_stage)
            self._choose_stage_popup.show()
        return Status.OK
    def _remove_from_replace_btn_clicked(self):
        """Remove the selected rows from the replace queue."""
        # NOTE(review): 'i' enumerates the *selection*, not _replace_queue,
        # and the queue entry itself is never deleted — looks buggy; verify.
        selected_items = self._replace_queue_list.selectedItems()
        for i, selected_item in enumerate(selected_items):
            self._replace_queue_list.takeItem(self._replace_queue_list.row(selected_item))
            self._choose_stage_popup.remove_associated_stage(self._replace_queue[i][1])
        return Status.OK
    def _replace_stage_in_root(self, obj_filepath, config_filepath, stage_id):
        """Convert one stage and copy its .gma/.tpl/.lz into the game root.

        Pipeline: GxModelViewer builds .gma/.tpl from the .obj (run in the
        background); ws2lzfrontend builds .lz.raw from an .xml config;
        SMB_LZ_Tool compresses .lz.raw to .lz.  Which steps run depends on
        the config file's extension (.xml does everything; .raw skips the
        first; .lz is used as-is).

        Returns Status.OK on success, Status.WARN after showing an error
        dialog on any failure.
        """
        config_ext = os.path.splitext(config_filepath)[1]
        base_filepath = os.path.splitext(obj_filepath)[0]
        gma_filepath = base_filepath + ".gma"
        tpl_filepath = base_filepath + ".tpl"
        lz_raw_filepath = base_filepath + ".lz.raw"
        lz_filepath = os.path.splitext(lz_raw_filepath)[0]
        needs_lz_raw_creation = config_ext == ".xml"
        needs_lz_compression = config_ext == ".xml" or config_ext == ".raw"
        if not needs_lz_compression and not needs_lz_raw_creation and not os.path.exists(lz_filepath):
            self._give_error_message(".lz file promised not found")
            return Status.WARN
        tool_id = 'GxModelViewer.exe'
        if tool_id not in self._tool_filepaths:
            self._give_error_message("Cannot find tool: " + tool_id +
                                     "\n\nPlease make sure the tool with this exact name "
                                     "is somewhere in the mbreplacer directory")
            return Status.WARN
        # make gma and tpl in another thread while we do other things
        gx_process = subprocess.Popen([self._tool_filepaths['GxModelViewer.exe'], obj_filepath])
        # make .lz.raw
        if needs_lz_raw_creation:
            tool_id = 'ws2lzfrontend.exe'
            if tool_id not in self._tool_filepaths:
                self._give_error_message("Cannot find tool: " + tool_id +
                                         "\n\nPlease make sure the tool with this exact name "
                                         "is somewhere in the mbreplacer directory")
                return Status.WARN
            subprocess.call([self._tool_filepaths[tool_id], '-c', config_filepath, '-o', lz_raw_filepath, "-g", '2'])
        if needs_lz_compression and not os.path.exists(lz_raw_filepath):
            self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
                                     "as well as the ws2lzfrontend.exe tool")
            return Status.WARN
        # make .lz
        if needs_lz_compression:
            tool_id = 'SMB_LZ_Tool.exe'
            if tool_id not in self._tool_filepaths:
                self._give_error_message("Cannot find tool: " + tool_id +
                                         "\n\nPlease make sure the tool with this exact name "
                                         "is somewhere in the mbreplacer directory")
                # NOTE(review): bare return (None) here, unlike the other
                # failure branches that return Status.WARN — verify intent.
                return
            subprocess.call([self._tool_filepaths[tool_id], lz_raw_filepath])
        # NOTE(review): this message says ".lz.raw" but the check is for the
        # compressed ".lz" output — message looks copy-pasted.
        if needs_lz_compression and not os.path.exists(lz_raw_filepath + '.lz'):
            self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
                                     "as well as the ws2lzfrontend.exe tool")
            return Status.WARN
        if needs_lz_compression:
            # Rename "<base>.lz.raw.lz" to "<base>.lz" and drop the raw file.
            if os.path.exists(lz_filepath):
                os.remove(lz_filepath)
            os.rename(lz_raw_filepath + '.lz', lz_filepath)
            os.remove(lz_raw_filepath)
        # wait for the gx process to finish
        gx_process.wait()
        if not os.path.exists(gma_filepath) or not os.path.exists(tpl_filepath):
            self._give_error_message("Failure to create gma and tpl files, ensure these files are correct, "
                                     "as well as the GxModelViewer.exe (No GUI) tool")
            return Status.WARN
        stage_gma_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.gma')
        stage_tpl_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.tpl')
        stage_lz_filepath = os.path.join(self._root_folder_path, 'stage', 'STAGE' + stage_id + '.lz')
        shutil.copy(gma_filepath, stage_gma_filepath)
        shutil.copy(tpl_filepath, stage_tpl_filepath)
        shutil.copy(lz_filepath, stage_lz_filepath)
        return Status.OK
    def _replace_btn_clicked(self):
        """Run the conversion for every queued stage, stopping on failure."""
        if self._root_folder_path is None:
            self._give_error_message("Please import your monkeyball root folder created by gamecube rebuilder")
            return
        # Re-scan in case the user dropped the tools in after startup.
        self._tool_filepaths = self._find_required_tools()
        for i in range(self._replace_queue_list.count()):
            item = self._replace_queue_list.item(i)
            input_filepaths = item.data(self._input_filenames_key)
            obj_filepath = input_filepaths['obj']
            # The config is whichever collected file is neither obj nor mtl.
            config_filepath = [value for key, value in input_filepaths.items() if key != 'obj' and key != 'mtl'][0]
            stage_id = item.data(self._output_stage_id_key)
            status = self._replace_stage_in_root(obj_filepath, config_filepath, stage_id)
            if status in (Status.WARN, Status.FAIL):
                item.setIcon(QIcon("resources/red_xmark.png"))
                return status
            item.setIcon(QIcon("resources/green_checkmark.png"))
            self._status_bar_label.setText("written " + os.path.basename(os.path.splitext(obj_filepath)[0]) + " to root")
        return Status.OK
    def _on_choose_stage(self):
        """Popup-OK handler: queue each selected stage against a target slot.

        Replaces any existing queue entry targeting the same slot, skips
        exact duplicates, and advances the popup's selection so repeated
        adds land on consecutive slots.
        """
        if not self._choose_stage_popup.isActiveWindow():
            return Status.OK
        self._choose_stage_popup.close()
        selected_items = self._imported_stages_list.selectedItems()
        for selected_item in selected_items:
            stage_index = self._choose_stage_popup.get_selected_stage_index()
            replacement_stage_name = selected_item.text().split("|")[0][:-1]
            # if theres a conflict or duplicate, remove it
            if self._replace_queue:
                stage_indices = list(zip(*self._replace_queue))[1]
                # conflict
                if stage_index in stage_indices:
                    conflict_index = stage_indices.index(stage_index)
                    conflict_item = self._replace_queue_list.item(conflict_index)
                    self._replace_queue_list.takeItem(self._replace_queue_list.row(conflict_item))
                    del self._replace_queue[conflict_index]
                # duplicate
                if (replacement_stage_name, stage_index) in self._replace_queue:
                    return Status.OK
            self._choose_stage_popup.set_associated_stage(stage_index, replacement_stage_name)
            item = QListWidgetItem()
            item.setData(self._output_stage_id_key, self._choose_stage_popup.get_stage_id(stage_index))
            item.setData(self._input_filenames_key, selected_item.data(self._input_filenames_key))
            item_text = replacement_stage_name + " -> " + self._choose_stage_popup.get_stage_name(stage_index)
            item.setText(item_text)
            item.setIcon(QIcon("resources/gray_dot.png"))
            self._replace_queue_list.addItem(item)
            self._replace_queue.append((replacement_stage_name, stage_index))
            self._choose_stage_popup.increment_stage_index()
        return Status.OK
    def _give_error_message(self, message, raise_exception=False):
        """Show *message* in a modal error dialog; optionally re-raise it."""
        error_message = QMessageBox()
        error_message.setParent(self.sender())
        error_message.setWindowTitle("ERROR")
        error_message.setText(message)
        error_message.setWindowModality(QtCore.Qt.WindowModal)
        error_message.exec_()
        if raise_exception:
            raise Exception(message)
def get_mbreplacer_dir():
    """Return the mbreplacer root directory.

    NOTE(review): this is the process working directory, so resources and
    tools are only found when the app is launched from the mbreplacer
    folder — confirm that is the intended invocation.

    :return str: mbreplacer root dir
    """
    return os.path.abspath(os.curdir)
if __name__ == "__main__":
    # Build the Qt application, show the main window, and hand control to
    # the event loop; exit with its return code.
    app = QApplication(sys.argv)
    window = MBReplacer()
    window.show()
    sys.exit(app.exec_())
| mbreplacer.py | 26,863 | Get the mbreplacer dir
:return str: mbreplacer root dir
type: QComboBox type: QDialogButtonBox type: QLabel type: QWidget type: QPushButton type: QPushButton type: QListWidget type: QLabel type: QListWidget type: QLabel type: QPushButton type: QPushButton type: QPushButton type: QProgressBar type: QFrame type: QPushButton type: QPushButton type: QStatusBar the tuple allows for replacement files for the given element. obj and mtl are required and have no replacement but for config we can take xml lz or lz.raw. let the order of the tuple denote priority (we want xml over all) button callbacks: make gma and tpl in another thread while we do other things make .lz.raw make .lz wait for the gx process to finish if theres a conflict or duplicate, remove it conflict duplicate | 780 | en | 0.585662 |
"""
This is the runner of the entire eva.jvc system.
Version 1,
the steps for the entire pipeline are as follows:
1. preprocessor -- get rep indices, save children metadata
2. encoder -- encode video by forcing i-frames (also modify the i-frame skip rate)
3. decoder -- using metadata, select the i-frames you want to decode.
If the user requests more frames than there are i-frames, the whole video may need to be decoded (open design question).
@Jaeho Bang
"""
import os
from eva_storage.jvc.preprocessor import *
from eva_storage.jvc.encoder import *
from eva_storage.jvc.decoder import *
from loaders.seattle_loader import SeattleLoader
from timer import Timer
class JVCRunner_v2:
    """Orchestrates the eva.jvc encode/decode pipeline.

    encode(): load frames -> pick representative (i-frame) indices -> compress.
    decode(): decompress a previously encoded video back into frames.
    """

    def __init__(self):
        self.preprocessor = Preprocessor()
        self.compressor = Compressor()
        self.decompressor = Decompressor()
        self.video_loader = SeattleLoader()

    def encode(self, path_to_video):
        """Encode the video at *path_to_video* with forced i-frames.

        The video's base name (extension stripped) keys the preprocessor's
        metadata for this clip.
        """
        video_filename = os.path.basename(path_to_video)
        # Resolves the old "TODO: eliminate the extension": splitext drops
        # only the final extension, so dotted names ("clip.v2.mp4") keep
        # their full stem, unlike split('.')[0].
        video_filename = os.path.splitext(video_filename)[0]
        # metadata carries stream properties (fps, frame size, fourcc, ...)
        # consumed by the compressor — TODO confirm exact contents.
        images, metadata = self.video_loader.load_images(path_to_video)
        rep_indices = self.preprocessor.run(images, video_filename)
        self.compressor.run(images, rep_indices, metadata)

    def decode(self, path_to_video, number_of_samples=None):
        """Return decoded frames; *number_of_samples* of None means all."""
        images = self.decompressor.run(path_to_video, number_of_samples)
        return images
if __name__ == "__main__":
    # Ad-hoc driver exercising the pipeline stages directly (bypasses
    # JVCRunner_v2 above).
    timer = Timer() ##TODO: use the timer to run the pipeline
    preprocessor = Preprocessor()
    compressor = Compressor()
    decompressor = Decompressor()
    video_loader = SeattleLoader()
    # Load frames, compute representative-frame metadata, then compress.
    images = video_loader.load_images()
    meta_data = preprocessor.run(images)
    save_directory = compressor.run(images, meta_data)
    number_of_frames = 100 ## we can change this to whatever number we want
    # Round-trip check: decode a sample of frames from the saved output.
    images_jvc = decompressor.run(save_directory, number_of_frames)
| eva_storage/jvc/jvc_runner_v2.py | 2,044 | This is the runner of the entire eva.jvc system.
Version 1,
the steps for the entire pipeline are as follows:
1. preprocessor -- get rep indices, save children metadata
2. encoder -- encode video by forcing i-frames (also modify the i-frame skip rate)
3. decoder -- using metadata, select the i-frames you want to decode.
If the user wants more frames than the number of i frames, then I guess we have to decode the entire video??
@Jaeho Bang
TODO: eliminate the extension we might need metadata such as fps, frame_width, frame_height, fourcc from hereTODO: use the timer to run the pipeline we can change this to whatever number we want | 654 | en | 0.825554 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class SspaceLongread(Package):
    """SSPACE-LongRead is a stand-alone program for scaffolding pre-assembled
    contigs using long reads

    Note: A manual download is required for SSPACE-LongRead.
    Spack will search your current directory for the download file.
    Alternatively, add this file to a mirror so that Spack can find it.
    For instructions on how to set up a mirror, see
    http://spack.readthedocs.io/en/latest/mirrors.html"""
    homepage = "https://www.baseclear.com/genomics/bioinformatics/basetools/SSPACE-longread"
    manual_download = True
    # 32-hex-digit checksum of the manually downloaded tarball (spack's
    # legacy positional checksum argument — presumably md5; verify).
    version('1.1', '0bb5d8603d7ead4ff1596135a520cc26')
    depends_on('perl', type=('build', 'run'))
    def url_for_version(self, version):
        """Point spack at the manually downloaded tarball in the CWD."""
        return "file://{0}/40SSPACE-LongRead_v{1}.tar.gz".format(
            os.getcwd(), version.dashed)
    def install(self, spec, prefix):
        """No build step: copy the two prebuilt executables into bin/."""
        mkdirp(prefix.bin)
        install('blasr', prefix.bin)
        install('SSPACE-LongRead.pl', prefix.bin)
| var/spack/repos/builtin/packages/sspace-longread/package.py | 1,223 | SSPACE-LongRead is a stand-alone program for scaffolding pre-assembled
contigs using long reads
Note: A manual download is required for SSPACE-LongRead.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
http://spack.readthedocs.io/en/latest/mirrors.html
Copyright 2013-2021 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 575 | en | 0.801468 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Oct 15 12:53:43 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x006x\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x006\x05IDATx\xda\xec\xddOl-Y~\x17\xf0\xaaIG\xf9G\xe27\xd2\xf0O#\xe2\xfbX0j\x08\xb2G\x10\xd1 F\xbe\x1d!\x116\xb1Gb\x93\xd5\xbb\xbd`\x91\xc5\xf0\xdc+f\xf7\xca\x12\x8b\xd9\xb5\x1f-$$\x16\xefzE\xc4\x22m/C\x82\xfaZ\x83\xa0\xc3\x1f\x8d\x1dF\x0aC \xcfF\x84\x89\x84F\xf3\x9c\x88\x10 \x89\xa9\xd3>\x9e\xf6\xbc\xb6OU\xdd\xbfUu?\x1f\xa9t\xdf\xf3-\xdf?\xa7\xae\xef\xfd\xdes\xea\xfcN~}}\x9d\x01i\x1b_yk\xaf\xbc\x18\x5c\xbd\xff\xd1\xa1\xd6\x00\xa0k>\xa3\x09\xa0\x96\xfd\xb8\x01\x80\xc0\x07}\xb3\xf1\x95\xb7\x06\xe5\xc5N\xb9m\xc6\x9e>\x00\x10\xf8\xa0g\x8a;\xff\xd6\xcb\x07@\xe7\xe4\xce\xe1\x83\x87m|\xe5\xadG\xe5\xc5E\xf8\xe7\x9d\x1f?\xbez\xff\xa3\x0b\xad\x03@W\xe8\xe1\x83\xb4\xbd\xd7\xc2^Ph\x16\x00\x04>\xe8\x8f\xfb\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x0cu\xc3\xf2b\xf3\xbe\xab\xb2\x9b\x9e?\x00\x10\xf8\xa0\xe3R\x134\x0a\xcd\x03@W\x98\xb4\x01\xf7\x88\xa5X^V\xec\xf6\xf6\xd5\xfb\x1fM\xb4\x16\x00m\xa7\x87\x0f\xee7\xaa\xb1\x8f\x12-\x00\x08|\xd0au\xc2\xdcn\xec\x09\x04\x00\x81\x0f\xba\xa4\x0cq\xa3\xec\xd3\xa5X\x1e2\xd2b\x00\x08|\xd0=\xfb\x0b\xda\x17\x00\x04>X\xb5X\x8ae\xab\xc9\xaf\xc4\x1eA\x00\x10\xf8\xa0#\xa6\x09oz\xf9\x00h5eY 
\x8a\xabg|w\xca_W\xa2\x05\x80\xd6\xd2\xc3\x07\x9f\x98\xa5\xa7n\xa4\xf9\x00\x10\xf8\xa0\xfdf\x09mO\xac\xaf\x0b\x80\xc0\x07-\x16'^l\xcex3\xce\xe5\x03@\xe0\x83\x16\x1b\xb5\xe46\x00@\xe0\x83y\x8b\xabe\xec\xcc\xe1\xa66\x95h\x01@\xe0\x83v*\xe6x[\x02\x1f\x00\xad\xa3,\x0bk-N\xb4\xb8\xc8\xea/\xa5V\xc7\xe3\xab\xf7?\xba\xd0\xba\x00\xb4\x85\x1e>\xd6\xddh\xcea/(4+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x86\xb2\xbdl\xf6R,\xf7\xdet\xe6\x5c>\x00\x04>h\x85E\x8625\xf9\x00\x10\xf8`\x95b)\x96\xdd\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96\x87(\xd1\x02\xc0\xca\xe9\xe1c\x1d\xed-)\xec\x05\xce\xe5\x03@\xe0\x83\x15Xf\x08\x1b)\xd1\x02\x80\xc0\x07KT\x86\xafay\xb1\xb5\xcc\xbb\xccnz\x14\x01@\xe0\x83%\x19\xad\xe0>\x0d\xeb\x02\xb0R&m\xb06b)\x96\x97+\xba\xfb\xb7\xaf\xde\xffh\xe2(\x00\xb0\x0az\xf8X'\xa35\xbdo\x00\x04>\x10\xf8\x96\xe0I\xeca\x04\x00\x81\x0f\x16\xa1\x0c[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6vy\xb1\xd3\x82\x87\xb2\x19{\x1a\x01@\xe0\x839kSY\x14\x81\x0f\x80\xa5S\x96\x85^\x8b\xab\x5c|\xb7\xe6\xeeW\xb7\xbf\xd6\xf0n\xc2\xef\xbd\xca\xea\x9f#\xf8\xc5\xab\xf7?:st\x00X\x16=|\xf4\xdd\xa8\xc6>\x97\xe5\xf6N\xb9\x0d\xcam\x9a vV\x06\xb8A\xbc\x8d\xd3\x1a\xfb+\xc4\x0c\x80\xc0\x07s\x94\x0aW'\xd9MA\xe4A\xb9\x8d\xcb\xed\xd5,w\x14ocX\xfe\xf3\x8b\xe5v\x94\xd8\xf5\x89\xf5u\x01\x10\xf8`\x0e\xcaP\x15\xd6\xb0}}\x985\x0c\xbf>/\xb7\xc7e8\xdb[\xc4\xea\x17a\xb8\xb6\xdcF\xe5??[n\x07\xd9M\x0f\xe2\xebF\x8e\x10\x00\xcb\xf2\x86&\xa0\xc7\xee\xf6\xee\x9d\x97\xdba\xe8\x85[\xd6\x9d\xc7\x1e\xc3\x22lqvn\xd8v\xee<\xb6C\x87\x08\x80e\xd0\xc3G/\xc5U-B\xb8\x0aC\xaba\xd8v{\x99a\xef\x9e\xf0w;\xdc\xfb8>\xa6G\xb1\x07\x12\x00\x16N\x0f\x1f}\x16\x86m/\xda\xf4\x80\xe2\xe3\x19\xc5s\xf8\x9c\xc7\x07\x80\xc0\x073\x06\xab6?\xbe0\xdc\xfb\xca\x91\x02`\x19\x0c\xe9\x02\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x02\x1f\x00,I\x9e\xe7\x8f\xcam\xa8%@\xe0\x03\xa0\x9fao\xbb\xbc\xb8\xd0\x12 
\xf0\x01\xd0\xcf\xb0\xb7_^|\xa3\xdc6\xb4\x06,\x96\xb5t\x01Xv\xd0{T^\x8c\xcbm\xf7\xf6g\xd7\xd7\xd7\x13-\x03\x02\x1f\x00\xfd\x08{a\x08\xf7\xb8\xdc6\xb5\x06,\x8f!]\x00\x96\x15\xf6n\x87p_\x0f{\xa7Z\x07\x16K\x0f\x1f\x00\x8b\x0ez\x9f\x1a\xc2\x05\x04>\x00\xfa\x13\xf6\xea\x0c\xe1N\xb4\x14,\x96!]\x00\x16\x15\xf6\x1e\x1a\xc2\x05\x96L\x0f\x1f\x00\xf3\x0ezM\x87p'Z\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b\xc0\xbc\xc2\xde(\xbb\xe9\xadk\x14\xf6\xd4\xe0\x83\xc5\xd3\xc3\x07\xc0\xacA/\x0c\xe1\x1e\x96\xdb\x13\xad\x01\x02\x1f\x00\xfd\x0b{a\x08w\x5cn[S\xde\x84\x1a|\xb0\x04\x86t\x01\x986\xec\x8d\xb2\x9b!\xdc-\xad\x01\xed\xa6\x87\x0f\x80\xa6Ao\x9eC\xb8\x13-\x0a\x02\x1f\x00\xed\x0a{\xb3\x0e\xe1\x02+`H\x17\x80\xbaao\x94\xcd\x7f\x08w\xa2ea\xf1\xf4\xf0\x01P\x15\xf4\xcc\xc2\x05\x81\x0f\x80\x1e\x87\xbdAvSHy!C\xb8j\xf0\xc1r\x18\xd2\x05\xe0\xa1\xb0\xb7W^\x9ce\xce\xd7\x03\x81\x0f\x80^\x86\xbd0\x84\xfbA\xb9m,\xf0n\xd4\xe0\x83%1\xa4\x0b\xc0\xdd\xa07\xc8\x168\x84\x0b\xac\x86\x1e>\x00n\xc3\xde\xb2\x87p'Z\x1d\x04>\x00\x96\x17\xf6\x961\x84\x0b\xac\x88!]\x80\xf5\x0ez\x83luC\xb8\x13G\x00\x96C\x0f\x1f\xc0\xfa\x86=\xb3pA\xe0\x03\xa0\xc7ao\xe5C\xb8j\xf0\xc1\xf2\x18\xd2\x05X\xaf\xa0\x17V\xcd\x08AK\xaf\x1e\xac\x11=|\x00\xeb\x13\xf6\x86\xe5\xc5EK\xc2\x9e\x1a| \xf0\x010\xe7\xb0W\x94\x17\x1fff\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\x9d\x96=\xb4\x89\xa3\x03\x02\x1f\x00\xf31\x88\xe1*\xcc\xc6\xdd\x8e?\xdb\xd1, \xf0\x01\xd0\x13\xd7\xd7\xd7g1\xec}J<\xa7/x\xfdr\x19\x81p\xe2\xe8\x80\xc0\x07\xc0\xe2\xc3\xe0$\x15\xbe^\x0b\x84\xc3L\xcf \x08|\x00\xf47\x10\x96\xe1\xefbA\xb7\x0d,\x81Y\xba\x00$\xc5\x19\xbe\x9bZ\x02\x04>\x00\xfa\x19\xf6\xc2,\xdf\xfd9\xdf\xac\x1a| \xf0\x01\xd0\x22a\x09\xb6T\xed\xbe\x83r\xbb\xd2L 
\xf0\x01\xd0Aq\xd2\xc6\x93\xc4.\xe7\xd7\xd7\xd7Ey9jx\xd3\x13\xad\x0b\x02\x1f\x00\xedPT\x5c\xff\xf1Po\x19\xfaBa\xe7\xe7\x9a\x0b\x04>\x00:$\xcf\xf3Q\x96.\xc3rrw\xa6m\xf9\xef\x10\xfe\xcek\xde\xfcD\x0b\xc3r)\xcb\x02p\x13p\x06\xd9\xcd\xaa\x14\xc3r\x0b\x13\x15\x92\xabR\x94\x01'\xefq[\x84\xe7_T\xecv\xdfD\x8e\xbd\xec\xa6\xc8\xb3\xf5zA\xe0\x03hE\xa8\xd9\xce>)(<lcH\x89\xe7\xd0}\x18\xff{\x1a\xc3\xd4E\xb9M\xe2\x0a\x1a\x8b\x12\xc2\x5c\xaa\x0c\xcbAy\xff\x17\xf7\x84\xe0\x8b\xd83\xf8A\xea\xc6\xd5\xe0\x03\x81\x0f`\xd1!/\x04\x92\xbd\xac\x1bu\xe5\x06w\xfe\xbd\x93\xdd\xe9m,\x9f\xcbm\x08<\x9eg\x00\x8c=\x9d\xa92,\x97\xd9\xcd\xcc\xdd\x87\xc2\xdcqy\x1b\xe1|\xbe\xa7^q \xf0\x01,+\xe4=\x8a\x01/\x84\x98\xad\x8e=\xfcA\xc5\xf5\xdf\x0b\x81\xe5\xf3\x0cAl\x12\x02`\x9cD1\xad\xaa2,Ey\xfb\xafR7\x10\xce\xe7\x8b\xe1\xfa\xbe\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11\x17\xe5\xf6\xa2\x83a/\x186\xd87\xf4X\x86\x12*\x1f\x94\xcf\xfbU\xb9\x8d\xcbm\xafa\x9b\x85\xfb\xdbM\xecrZ\x86\xb9q\xcd\x9b\x0b\xf7}_}\xbeW^\x9d \xf0\x01\xcc3\xe8=\xcb\xba=\x81`0\xe5\xefm\xdc\x09\x7f\xe1\xbc\xba\x22\x0e\xd5V9\xac\xb8\xbe\xa8\xfb\x00b/\xe0}\x81\xf3\xcc\xab\x14\x96\xcf\x90.\xd0G\xfb3\x04\xbd\xab\xec\x93\xc9\x11a{\xb5\xc2\x90\xb29\xa7\xdb\x08\xc1\xf7Y\x19\xfaNB\xa8\xbbo\xd2D\x9cl\x91\xea\x05=j:\xd9\x22\xec_\xde\xeeA\xbc\xff[z\xf8@\xe0\x03\x98M\xe8Y*CF8\x87\xedI\xcd_\x09\xe7\x94M\xe2vVu~\xda\x92\xbd\x9d}R\x22f\x18/g\xe9\xb1\x0c\xc3\xb5\xbb\xf1|\xbf\xe2vx6\x9e\xe7xX\x11\x82\xf7\xa7<\x1eE\x1c*\xbe=\x9fO\x0f\x1f\x08|\x00sq\x98\x08|!\xec\xdc\xcel=n\xf3\x93\xb8\xd3\xa3\xf6\xbd\xc7\x19\x87f\x87w\xb6iz\x01\xc3\xef\xbc\x88C\xdfE\x8d y8c\x10\x0eC\xbb\x17\xf1>\xf4\xf0\x81\xc0\x070\x97\xa0tV\x86\x99\xb0\xea\xc3\xdd!\xca\xa3r\x1bw\xbd\x06\x5c\xac\x7f7\x8e\xdb\xddz\x82\xa3\xac\xf9\xc4\x94\x10\xfc\xfea\xb9}>\xb1\xcfe\x5c/w\x96\xc7\xfc*N 
\xf9p\xc1\xf5\x03\x81\x07\x98\xb4\x01\xf4U\xe8\xe5\x0bC\x91\xe1\x1c\xb2\xcf\x96Ac\xd4\xc7\x82\xbf!@\x95[\xe8\x81\x0b\xc1\xefq\xb9\xbd\x9b\xd5_\xe2,\xa8\xeaq\xdb\x9f\xd3\xe3\x9c\xc4\xc7\x06\x08|\x00s\x0bB\xa17\xefQ\xe8\x9dj\xd9yy\x8b|\xce\x17\xaf\x85\xbfP\x00\xf92\xf1+!\x18\xfe\xa5\xc4\xf5\xa7\xf3\x1c\xf6\x0e\x8f\xcd+\x13\x04>\x00\xe6\x1b\xfe\xf6\xcbmP\xfe\xf7\xcb\xe5vr\xcfn\x9f\xab\xb8\x99\x91\x96\x04\x81\x0f\x80n\x84\xbf\xb0\xfaF8\x87\xee\xb6\xd7/\x0cu\x87\xd9\xc9\xa9s\xf7\x9e\xdf\xb7^. \xf0\x01\xd0\xee\xe0\xf7q\xaf_vS\xd0y;\xb1k\x08\x84\x85\x16\x03\x81\x0f\x80\xee\x9ay\xbd\x5c@\xe0\x03\xa0\xa5b\x19\x97TQ\xeas\x93+@\xe0\x03\xa0\xdb\xaa\xc2\xdc\xbe&\x02\x81\x0f\x80\x8e\x8a\xeb\xe5\xee$v9\xe9c\xadB@\xe0\x03X\x97\xb0\x17\xd6\xcb-*v\xd3\xbb\x07\x02\x1f\x00\x1d\x16\xc2\x5cj\xdd\xdd\x03eX@\xe0\x03\xa0\xa3\xf2<\x1fd\xe9\xde\xbb\xb0\x1a\x87\x89\x1a \xf0\x01,$\x88\x1c\x96\xdbY\x1cndq\x94a\x01\x81\x0f`%aoX^<-\xb7\xadr\xbb\x88\xe5BXL;\xef&v\x09\xeb\xe5\x8e\x17p\xbfca\x1e\x04>\x80\xbbC\x88\xa1\xf7\xe9\x1bq\x16)\x8bk\xe7\xfb\x14\x8b\x08{\xd9M\xad\xbf\x10\xe6'\xc2<\x08|\xc0\x1a*\x03\xc0~\x0c\x03\xaf{\x11\xc3\x02\xf3i\xe7\xd1\x03\xed|\xebh\xdeeX\xee\x84\xbd[B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 C\xad4\xb7vN\xf5\xee\xcd}\xbd\xdc{\xc2\xde\xad\x0d\xa1\x0f\x04>`\xbd\xecg\xe9\x09\x04#M\xb4\x94v>\x5c@\x19\x96T\xa0\x13\xfa@\xe0\x03\xd6A\xecuJ\x95\x079\xb2\xd2\xc3\x5c\xdayP^<K\xecrY\xb6s\xb1\x80\xbb\x1e\x96\xdb\xb9\xd0\x07\x02\x1f\xb0\xdeR\xbdNW\x99\x95\x1e\xe6e\x5c\xe38\xcc],\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xe2\xbaC\xb5\xe0f\x17\xcb\xb0\xa4\xd6\xcb\x0deX\x8e\x17u\xff\x0dC\x9f\x92- 
\xf0\x01=\x0b\x22!\xec=\xb4\xb4W\xe8\xdd\xb3\xd2\xc3|\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6\x12\xd7\xe9\xdd\x9bO\xa8\xaeZ/\xf7y\xd9\xceg\xcbx,\xf1x\x8eb\x98\x7fH(\xd9r\xec\xc8\x81\xc0\x07\xf4#\x88\x0c\xb2\x87W{\xd0\xbb7\x9f6\xae*w3\xf72,5B_\x08\x97\xc3\x8a\xd0\xb7\xa3\xf6\x22\x08|@?\xa4z\xf7\x8e\xf5\xee\xcdE+\xd7\xcb\x8d\xa1\xafj\x18\xf9I\xec\x9d\x04\x04>\xa0\xa7\x81O\xef\xde\x8c\xe2\x8c\xd7'\x89]B\x19\x96\x95\xb5s\x5c\xab\xf7\xa0b\xb7\xf7\xca\xe7\xb1\xe7h\x82\xc0\x07t3\x8c\x84\xa1\xc6\x87f\x8d\x9e/\xeb\x9c\xb2\x9e\xab\x0as\xa3U?\xc0X\xf7\xef\xa4b\xb7q\x1c\xfe\x07\x04>\xa0c\x86\xa9\x0fx\xcd3s\xa0\xde\xcb\xd2eXNZT\xcc:\x04\xcf\xaa\x99\xbb\xc7f\xee\x82\xc0\x07tO\xf2\xfc=\xcd3S\xd8\xabZ/7h\xcd\xb9q\x0df\xee\x1a\xe6\x07\x81\x0f\xe8\x98\xe1\x03??_\xc0Z\xae\xeb\xa6\xaa\x0c\xcbA\xdb\xda8\x0e\xe1\x17\x15\xbb=\x89u\x1b\x01\x81\x0fh\xbb\xd8\x03\xf5P \x99h\xa1\x99\xdav\x90\xa5{\xefZ[\xee&N \xa9:\x9f\xef\xd0\xf2k \xf0\x01\xdd\x90\xfa\xc0\x16\xf8fSd\xe92,\xfb-/w3\xca\xd2C\xbb\xe1\xb9\x8d\x1df\x10\xf8\x80\xf6\x1b\x0a|\xf3\x17\xd7\xcbM\x95a9\x8d\xa5PZ\xeb\xce\xf9|)\xce\xf1\x04\x81\x0f\xe8\x80\x87z\xf8\xce\x15[\x9eI\xd5Pm\xd1\x85'Q\xbe\x06B\xa0\xbboh7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcb=T^\xe3B\xd3L'Nf\xd8J\xecr\xd4\xa22,u\x84\xf3\x10\xef\x0e\xed\x86\x89&\xdb\xea3\x82\xc0\x07t\xc7C\xf5\xe1|\x98O\x17\xf6\xaa\xca\xb0,}\xbd\xdcY\xc5Y\xc4\xe19\xe9\xd5\x03\x81\x0f\xe8h8\xc9\x04\xbe\xb9\x0a\xbda\xa9\x89\x1a\x87],u\x13B\x9e^=X\xac74\x01\xb0@o\x97\xdb n\xc3x\x19\xca\xb48\x7f\xafy\x80\x0em\xf7,\xb1\xcb\xa5\xde1@\xe0\x03\x96*N\xca\x98h\x89\xb9\x19W\x5c\xbf\xaf\x89\x80\x87\x18\xd2\x05h\xb9X\x86%\xb5^\xeei\x9c\xf1\x0a \xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0Uy\x9eW\xad\x97{d\xb2\x03 \xf0\x01t7\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01tX\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa3\xf2<\x0f\xcb\xd2=M\xec\x12\xca\xb0\x1cj)@\xe0\x03\xe8\xae\xaa07\xd2D\x80\xc0\x07\xd0Qy\x9e\xefe\xd5eX&Z\x0a\x10\xf8\x00\xbaK\xef\x1e 
\xf0\x01\xf4U\x9e\xe7E\x96.\xc3r\xd0\xc5\xf5r\x01\x81\x0f\x80\xec{\xeb\xe5\xa6\xca\xac\x842,&j\x00\x02\x1f@\x87\x15Y\xba\x0c\xcb\xbe2,\x80\xc0\x07\xd0Qq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x80\xc0\x07\xd0]E\xc5\xf5V\xd4\x00\x04>\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x1c)\xc3\x02\x08|\x00\xdd\x0d{a\xbd\xdc\xd4D\x8c0Q\xa3\xd0R\x80\xc0\x07\xd0]a\xa865Q\xe3P\x19\x16@\xe0\x03\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x96\xaa6\xdc\x8e\xbd\xa4@\xc2\x1b\x9a\x00`e\xaa\xc2\x9c2,\xdf\x1f\xee\x86\xe5\xc5v\xdcBX\xbe=\xef\xf1\xedr\x9bh!\x10\xf8\x00\xda\x18^v\x13\xbb\x84\xf5r\x8f\xb5\xd4\xf7\xf9\xf0\x81\x9fo\x0b|\x90fH\x17`5\xc6\x15\xd7+\xc3rO\x08~\xe0\xe7\x86tA\xe0\x03h\x97<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ci\xa9O\xb9x\xe0\xe7CM\x03\x02\x1f@\x9b\xc2^\xe8\x8d*\x12\xbb\x842,z\xf7\x9a\x05>\xa0\x82s\xf8\x80e\x84\x9cavs\x92}\xd8n\xff\x1dz\xb8\xbe\xbc\x86\xe7\xa9\x85\xb0\x97*\xc3R\x98\xa8\xd18\xf0\xedh\x1a\x10\xf8\x80\xd5;~ \xe4\x0c\xe3u\xeb\x12|\xc3\xe4\x82\xa7\x89].\xcb\xb0\xa7\x0cK\xf3\xc0\x07T0\xa4\x0b,+\xf0\xddgo\xcd\xda\xa1*\xcc\x8d\xbcT\x00\x81\x0f\xe8[\xe0\xdb\x8c\xbd^\xbdW>\xcf\x10nSC\x8f\xa7\xd6\xcb\xadt\x91h\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xd5\x03W\x8f\xd6\xa4\x19\xf4\xee\xcd\xfe:\xba\xd0\x0a 
\xf0\x01\xed6^\xd7\xa0\x93\xe7y\x91\xa5\xcb\xb0<\x17f\x00\x81\x0f\xe8\x83\x87z\xb86\xca@\xd4\xdb\xd0\x17\xcb\xb0\xa4\xca\xac\x84\x9e\xcf\xc2\xcb\x03\x10\xf8\x80\xce\x8b=X\x0f\xad\x94\xd0\xe7\xc0\x13\x82n\xaa\x0c\x8b\xf5r\x01\x81\x0f\xe8]\xf8\xb9\xcff\x1f{\xf9\xe2D\x82'\x89]\xce\xcb\xb07\xf6\xb2h\xf4\xc5!\x7f`\x9bh\x1d\x10\xf8\x80v|X\x87\xc9\x1b\x97\x0f\x5c]\xf4\xf0)W='+j\x00\x02\x1f\xd0K\x0f\x85\xa0^\xf5\xf2\xc5\xe7\x92*\xc3r\xa2W\x0a\x10\xf8\x80^\x8aC\x98\x0f\xf6\xf2\xc5I\x0e]\x0f{\xd6\xcb\x05\x04>`\xed=\x14\x866{\x12\x84\xf6\xb3t\x19\x96CeX\x00\x81\x0f\xe8\xb5\x8a^\xbegy\x9e\x0f\xba\xfa\xdc\xe2c\x7f\x96\xd8%<o\xeb\xe5\x02\x02\x1f\xb0\x16R=y\xe3\x0e?\xaf\xaa0W(\xc3\x02\x08|\xc0Z\x883v\x1f\xaa\xcb\xb7\x93\xe7y\xe7\x86vc\x19\x96\xdd\xc4.\xa7\xca\xb0\x00\x02\x1f\xb0nF\x89\xeb\x8a\x0e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0z\x89\x13\x17\x0e\x1e\xb8:\xacN1\xee\xcas\x89=\x92[\x89]\x8e\xca\xe7{\xe6\xa8\x03\x02\x1f\xb0\x8e\xa1\xaf(/\xce\x1f\xb8\xba\x13C\xbb\xca\xb0\x00\x02\x1f@\xb5\xbd\x18\x8a\xee\xf3^\x19\xa8\xb6[\xfe\xf8C\xd8K\xad\x97{h\xa2\x06 \xf0\x01k-\x0e\xed\x8e\x12\xbb\x8c\xdb\xfa\xd8\xe3y\x86O\x13\xbb\x5c\xc6^L\x00\x81\x0fX\xfb\xd0\x17f\xed>\x7f\xe0\xea6\x9f\xfbV\x15FG\x8e. 
\xf0\x01|\x12\xfa\xc2ynGw~\x14\x86y\xdf)\x7f\xde\xca\xd0\x94\xe7y\x18\x8aN\xad\x97{j\xbd\x5c\xa0\x0d\xde\xd0\x04@\xcbB\xdf(N\x82\x18\x94\xdb\xa8\xe53[\xab\xca\xb0\x8c\x1cQ@\xe0\x03\xb8?\xf4\xed\xb5\xfd1\x96\xa1\xb4\xc8\xd2\xeb\xe5>\xb7^.\xd0\x16\x86t\x01\x9a\x87\xbd\xd0\x03\x99*\xb3\x12\x86\xa2\x0b-\x05\x08|\x00\xdd\x15\x86rSeX\xf6\x95a\x01\x04>\x80\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c@\xe0\x03\xe8\xb6\xa2\xe2z+j\x00\x02\x1f@W\xe5y>\xca\xd2eXN\x94a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\xa5\xca\xb0\x1c(\xc3\x02\x08|\x00\x1d\x15\xd7\xcbM\xf5\xde]f\xd5E\x98\x01\x04>\x80\x16\xab*\xc3R(\xc3\x02\x08|\x00\x1d\x15\xcb\xb0\xec&v9U\x86\x05\x10\xf8\x00\xba\xadj\xa8\xb6\xd0D\x80\xc0\x07\xd0Q\xb1\x0c\xcbVb\x97#eX\x00\x81\x0f\xa0}!n\x5cng\xe5\xb6]\xb1_(\xc3\x92\xea\xdd\x0b\xeb\xe5*\xc3\x02\x08|\x00-\x0b{\xa3\xecfY\xb4\xd0k7)\xff\x9f\x0alE\x96\x9e\xa8qh\xa2\x06 \xf0\x01\xb4+\xec\x85\x1e\xbd\x17w~\x14\xc2\xdc{\xe5\xcf'\xb1\xec\xca\xdd}\xc3\xff\x9f&n\xee\xb2\x0c{\x85V\x05\xba\xe2\x0dM\x00\xacA\xd8\x0b\xc3\xb3\x93\x07\xae\x0eK\xa5\xbd,\xf79\xc8>\xe9\xb5\x1bW\xdc\xe4\xfe\x94\x8fc/\xde\xf6E\xb9\xbd\xde;xV\xe7g\xce\x19\x04\x04>\x80\xfb\x85\x90\xb4Q\xb1\xcf\xb3\x10\xe4\xcaP\xf6O\xb2\xf4z\xb9\xa1\x0c\xcb\xf14\x0f\x22\xfc^,\xf3r\xdf\xe3\xd9\xa9\x19\x1a\xef}L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb0:\x08\x08|\x00}\x11\x02\xda\xa0F\xe8\x0b\xd7\xff\x83\x8a}F\xb3<\x902`\xddN\x18\x09\x8fikN\xcf\xef\xbe\xb0\xb8;e\x80\x0c\xab\x86\xbc\x1e\x02k\xf5>\x86\x9f9\xaf\x11\x04>\x80\x95\x08\xe7\xdb\x85\xd9\xb9\xd9\xcd\xac\xdb\xdd\x19n\xea\xf9<z\xc4\xc2m\xc4\x9e\xbe\xe3\xacf\xcf\xde\x12mf\x9f^3\xb8\xf6c\xbc'@\x9e\xde\xb3\xdb$\x86\xc3c\xafN\x10\xf8\x00\xe6\x19\xfaBP\xdb\x8bAk|O\xa8\xa9\xf2\xff\xca\xed?\xcf\xf1\xf1\x84\x9e\xb0a\x0c\xa2Oz\xdc\xf4\xf7\x85\xc5\xaa\x927\xc0\x9c\x99\xa5\x0b\xac[\xf0\x9b\x94\xdb\xa0\xfcg\x98\xa4q\xd5\xe0W\x7f\xb0\xdc\xfeq\xac\xe1\xf7h\x8e\x8fgT^\xbc\xbbF\x87 
\xf4\xf8\x0d\x0d\xfd\x82\xc0\x07\xb0\x8c\xe0Wd7\xe7\xf5\x1d5\xfc\xd5\xdb\x1a~\xdbs|,\xa1\xb7\xeb\x9d\x86\x01\xb4\x8b\xc2\xca$\xc2\x1e\x08|\x00K\x0d}\xafb\x0f\xdb\x7f\x98\x22\xf4}\xa3\x0c}\xc5\xbcz\xfb\xca\xc71./\x86=\x0e}\xcfc[\x03\x02\x1f\xc0r\xc5\xd57\xfe\xca\x94\xbf\x1eJ\xb9\x84\x09\x18\xc5\x9cB_\x98\xf9\x1az\x0e\xcf{\xd6\xcc\xef\x94\xcf\xcd2t \xf0\x01\xac$\xec\x85\xde\xb9Y\xc3Z(\xe52\xb70\x13'\x97\x0c\xb3\xfbg\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A-5[\xf7$\xbb\xa9KWe\xae\xe5E\xe2Ps\x08}G\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdcT\xcf\x5c\x08z\xa38\xa3\xf7\x9d\x8a\xe0\xb7\x90\x12#\x1d\x9e\xc1{\x1e\xc3\xde\x99W\x1a\x08|\x00\xab\x14BZj\xe5\x8d\xe2v6i\x18\x92\x8c\xc1\xef\xed\xec\xd3\xbdn\xe7\x8b\x0c6\x1d\x9c\xc1+\xec\x81\xc0\x07\xb0z\xb1\xf8rj\xc5\x8d\xd3\xfb\xce;\x8b5\xfcF\xe5?\x1fg\x9f\xd4\xf1[x\x01\xe1\x0e\xcd\xe0=\xc9\xd4\xd8\x03\x81\x0f\xa0%\xaaBZQ\x11\xc0.B\x1d\xbfr{\xb4\xac\x09\x09\x1d\x98\xc1\x1bj\xec\xed\x09{ \xf0\x01\xac\x5c,\xc3\xb2U\x11\x5c&m|\xec-\x9e\xc1\xfb\xae\x1a{\xd0n\xd6\xd2\x05\xd6)\xecU\xad\xe1\x1a\x86L\xf7[\xf8\xb8\xc3c\xbe\xed9\x0b=}E\xb9}\xb5\xdc\xfev\x0b\x1e\xde;\xca\xae\x80\xc0\x07\xd0&!\xcc\xa5&j\x1c\xb6tH2\x0c\xe5\xee\xb4\xec1\x85p\xbc\xd7\xd6\xdeP@\xe0\x03\xd6P,\xc3\xf2,\xb1\xcbe\x5c_\xb7\x8d\xb6[\xf6xnk\xec\x99\x89\x0b\x1d\xe1\x1c>`]\x8c+\xaeo\xf3\xd2_\x1b-z,a\xd2\xc8\xb6\xb0\x07\xdd\xa2\x87\x0f\xe8\xbdX\x86%5$z\xda\xd6\x15!\xe2coS\xd8Sv\x05:H\x0f\x1f\xb0\x0e\xc6\x15\xd7\xb7\xb9w\xefQK\x1e\xc7\x91\xb0\x07\xdd\xa5\x87\x0f\xe8\xb5<\xcf\xab\xd6\xcb}\xde\xf2\xe1\xc9\x10\xb0Nb\xf0\x0b\xdb\xd6*\xc2\x9e\xb2+ \xf0\x01\xb45\xec\x85\x80T$v\xb9\xaa\xb8~\xe5\xe2,\xd8I\xc5s\xfc\xee\x02\x1f\x82\xb2+\xd0\x03\x86t\x81>\xab\xbd^n\x87-r\x06\xaf\xb0\x07=\xa1\x87\x0f\xe8\xa5<\xcfC\x10z\x92\xd8%\x94a9\xec\xc1S]D\xe0Sv\x05zF\x0f\x1f\xd0WUan\xd4\x93\xe79\xef\xc0'\xec\x81\xc0\x07\xd0~y\x9e\xefe\xe92,'=Z!b\x9e\x81/\x94]\x19\x08{ \xf0\x01\xb4=\xecU\xad\x97\x1b\xec\xf7\xe8)\xcfk\xd6\xae\x1a{ 
\xf0\x01tFU\x19\x96\x832\xd4\x5c\xf4$\xdc\x0e\xe7tS\xa1\xec\xca\xb6\xb0\x07\x02\x1f@\x17\x02\xd0 K\xf7\xde\x85\xf3\xd3\x0e{\xf4\x94\xe71\x9c\xfb\x5c\x8d=\xe8?\xb3t\x81>)\xb2t\x19\x96\xfd\x9e\xf5b\x0df\xfc}eW`M\xe8\xe1\x03z!\x0eo\xa6\xca\xb0\x9c\xf60\xdcL\xdb\xc3w%\xec\xc1z\xd1\xc3\x07\xf4E\xd5Pm\xd1\xc3\xe7\xbc3e\xd8Sv\x05\xd6\x8c\x1e>\xa0\xf3\xf2<\x1fe\xe9\xd9\xaaG=*\xc3r\xfb\x9c\xa7\xe9\xdd\xbb\x14\xf6`=\xe9\xe1\x03\xba\x1e|\xaa\xca\xb0\xb4~\xbd\xdc)5\x0d|\xca\xae\xc0\x1a\xd3\xc3\x07t]\x98\x95\x9b\x9a\xa8q\xd8\x972,\xaf\x194\xd8\xf7D\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Yb\x97\xb0^n\xd1\xd3\xa7?\xac\xb9\xdf\x91\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\xef\xf7\xf8\xb9\xd7\x19\xd2=\x10\xf6\x80@\x0f\x1f\xd0I\xb1\x0cKj\x96j(\xc3r\xdc\xd3\xe7>\xc8\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07t^U\x98Y\xd7\xde\xbd0Ie\xd4\xd7\xb0\x0b\x08|\xc0\x9a\xc8\xf3\xbcj\xbd\xdc\xe7=/=\xb2\x9d\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0ka/\x94a)\x12\xbb\xf4\xb5\x0c\xcb]\xc3{~v.\xec\x01\x0f\xd1\xc3\x07tM\x08s\xa9\xf3\xd7\x8a5(?2x \xec)\xbb\x02\xdcK\x0f\x1f\xd0\x19qu\x89\xa7\x89]B\x19\x96\xc3\x9e\xb7A\xe8\xe1\xbc;\x9c}$\xec\x01U\xf4\xf0\x01]R\x15\xe6Fk\xd0\x06w\xcf\xdfSc\x0f\xa8E\x0f\x1f\xd0\x09y\x9e\xefe\xd5eX&k\xd0\x14\xc3x\xf9\xae\xb0\x07\xd4\xa5\x87\x0f\xe8\x0a\xbd{7B\x0f\x9f\x1a{\x80\xc0\x07\xf4K\x9e\xe7E\x96.\xc3r\xd0\xd3\xf5r\xef\xb3\xbfF\xcf\x15\x98\x13C\xba@\xdb\xc3\xde K\x17Q\x0eeX\x0e\xd7\xa5=\x84=@\xe0\x03\xfa\xa8\xc8\xd2eX\xf6\xcdP\x05\x10\xf8\x80\x8e\x8a\xeb\xe5>I\xecr\xee\x5c6\x00\x81\x0f\xe8\xb6\xa2\xe2\xfa}M\x04 
\xf0\x01\x1d\x95\xe7\xf9(K\x97a9Z\x932,\x00\x02\x1f\xd0\xcb\xb0\x17V\x93HM\xc4X\x87\xf5r\x01\x04>\xa0\xd7\xc2Pmj\xa2\xc6\xa1\xd9\xaa\x00\x02\x1f\xd0Q\xb1\x0c\xcb\xb3\xc4.\x97\xd9\x1a\x95a\x01\x10\xf8\x80>\xaa\x0as\xca\xb0\x00\x08|@W\xc52,\xbb\x89]\xc2z\xb9\xc7Z\x0a@\xe0\x03\xbak\x5cq\xbd2,\x00\x02\x1f\xd0Uy\x9e\x870\x97Z/7\x94a9\xd3R\x00\x02\x1f\xd0\xcd\xb0\x17\xca\xb0\x14\x89]B\x19\x16\xbd{\x00\x02\x1f\xd0a!\xec\xa5\xca\xb0\x14&j\x00\x08|@G\xe5y\xbe]^<M\xecrY\x86=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\xe5y\xbe\x97\xa5\xd7\xcb=\xb5^.\x80\xc0\x07t\x9b\xde=\x00\x81\x0f\xe8\xab<\xcf\x8b,]\x86\xe5\xc0z\xb9\x00\x02\x1f\xd0\xdd\xb07\xc8\xd2eVB\x19\x16\x135\x00\x04>\xa0\xc3\x8a,]\x86\xc5z\xb9\x00\x02\x1f\xd0Uq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x00\x02\x1f\xd0]E\xc5\xf5V\xd4\x00\x10\xf8\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x9c(\xc3\x02 \xf0\x01\xdd\x0d{\xd6\xcb\x05\x10\xf8\x80\x9e\x0ba.U\x86\xe5P\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x16\x00\x81\x0f\xe8\xb4\xaa0W(\xc3\x02 \xf0\x01\x1d\x15\xcb\xb0\xec&v9U\x86\x05@\xe0\x03\xba\xad\xaawom&j\x94\xe1wRn\xd7\x1d\xdd&^\xca\xf4\xe0opTng\xaf\xbd\xb6\x8f\xe3\x17S\x81\x0f`\xca7\xd7\x10\xe6\xb6\x12\xbb\x1c]__\x9fi)`\x09\xefG\xe3\xf2\xe2\xc5=\xefIa\x04\xe2\xc3\xb8\xbe\xb7\xc0\x07\xd0\xf0\xcdU\x19\x16\xa0-\xefG\xe1\xbd\xe8I\xc5n\xcf\xfa\xdc\xd3'\xf0\x01\x8b\x12\xde`S\xeb\xe5\x1e\x9a\xa8\x01,I\xdd/\x97\x85\xc0\x07P\xff\xdb\xf4\xa0\xbcx\x9a\xd8\xe5\xb2\x0c{\x85\x96\x02\x96\xf0~4\xac\xf8\xf2y\xd7\x8e\xc0\x07P\xdf\xb8\xe2\xfa\x91&\x02\x10\xf8\x80\xee~\x9b\xde\xab\xf8\x96|j\xbd\xdcN*4\x01t\xd7\x1b\x9a\x00\x98\xb3\xaa2,\xa35n\x9b6\xcdH\x0e\x93j\xb6j\xee\xfb\x5cH\xa7\xc3.\x1a\xec{%\xf0\x01T\x883\xe16+\x82\xc3\xc5\xba\xb6O\xf9\xdc\xf7[t\xac\x8ek\x06\xbe\xf36=n\x98\xe2\xef\xee\xa2|\xbd\x9ff\xf5\xce\xcf\xeb\xed\x12\x8f\x86t\x81y\x05\x88\xd0c\xb4_\xf1\xcd\xb9\xd0R\xad8V\xe18\xed\xd6\xdc}\xa4\xc5\xe8\x81QV\xdd{w.\xf0\x01\xd4\xfbf\x9c\x9a\x09\xb7\xaf\x0cK+\xc2\xde\xa0A\xf0>P\x18\x9b>\x88#\x0b\xc3r;}`\x97\xa3p}\x9f\xdf\xa3\x0c\xe9\x02
\xf3\x08\x11\xe1\x8d4U\xd4\xf4\xdcz\xb9\xad1\xce\xea\x95\xa88W:\x87\x9e\x85\xbe\xf0\xe5eX\xbe_m\x97\x97a\x0b_~&\xe5v\xb6\x0e_F\x05>`\x1e\xaa\x82\x81s\xc0\xda\x11\xcc\xc3q\xa8[gl\xa4\xc5\xe8q\xf0[\xbb\x9ekC\xba\xc0\xac!bT\x11\x22N\xcc\xf0l\xc5q\xda\xce\xea\x0f\xe5\xbek(\x17\x04>\x80\xdb\x10a\xbd\xdc\xee\x18g\xf5\x86rC\x9d\xc4C\xcd\x05\x02\x1f\xc0\xad\x10\xe6ReX\x0e\xd7\xb9\x0cK\x8b\x82y\x08\xe5uJ\xb0\x84\x80>\xd2b \xf0\x01\xdc\x86\x88Ay\xf1,\xb1\xcbe\xd6\xe3\x12\x07\x1d:N\xdb\x15\xc7\xe9\xaeB@\x07\x81\x0f\xe0\xae\xaa0W(\xc3\xd2\x0a\xe3\x9a\xfb\x19\xca\x85\x1e3K\x17h,\x96a\xd9\xad\x08\x0fc-\xb5\xf2\xe3\x14\x02\x5c\xdd\xa1\xdc=-\x06+\xf9;\x1dd7%b\xb2ENp\x13\xf8\x80iT\xf6\xeei\xa2V\x84\xf2\xa75w\x1f\xe9\x8d]\xd9q\x0a\x13\x9f\xc2\xb1\xba\xad\x0d\x17\xfe\xbfS\x11\xce\xcf\xeel\x13\xc3\xf0\x9d:\xd6{\xf1x\x0f\xee;\xce\xe5>\x0f\x1d\xe7\xe3Y\xffF\x05>\xa0\xe9\x9b\xd6(K\xf7\x1a\x1d)\xc3\xd2\x8a\x0f\x96q\xcd\xddC\xd9\x9cc\xad\xb6\xd4\xe3\x13>\xecG\xf1\xc3\x7f\xab\xe1\xafo\xc4\xa0\xb0s\xe7\xf6\xc2\xea\x11\xe3y\xf4\xaa\xc7/\x0a\x1f\xd6\xdc\xfd\xcb\x8bz\xed\xc4\xd7\xf0EVofy\xe5\xe3\xa8\xfb\xbc\xca\xdb\xc9\x17\xf4\xe5+\x1c\xef'3\x1c\xe7\x17\xe5\xed\xdc.\xfd6U\xf8s\x0e\x1f\xd0\xf4M\xf8\xb0\xa2\xf7A\x19\x96\xd5+\xb2\xf4\xec\xe9\xbb\xc7k\xa4\xb9\x96\xf7e\xa9\xdcBo\xcd\xcb\xecf\x22\xcd\xd6\x9cnz'\x06\x82\x8b\x18.\xa6\x16\xbf\xac=\xaf\xb9\xfb8\xbe',\xc2\xb8f\xd8{\xde\xd6/,!\xd8\x97\xdb$\x06\xcd's\xb8\xc9\xf0zy\x11\x82p\x98y\xdf\xb4\xed\x05>\xa0i\x90H\xbd\x09\x1f\x1a\x1a\x5c\xf9\x87L\xe852\x94\xdb\xbe\xa0w\x11?\xac\xb7\x16xW!\xe4\x7f\x18\xcf\xdd\x9c%\xf4\x85/m\xe75v\x0d\xef\x05\xc7\x0bz\x0d\xef\xd6\xd8\xf5<k\xe9\xe9#q$\xe4,\xab\xbf\xb2M\x13\x1b\xf1\x0b\xc3\xb6\xc0\x07,\xe4\xdbjE\x90\xb8\xb4\xf6\xea\xca\x8fQ\x93\xa1\xdc\xe7\x86r\x97rL\x0ec\xd0\xdb\x5c\xe2\xdd>\x0d=K3\xf6\xbe\x8dj\xee\xb7\x13\x97\xec[\xc5k\xb8\x95_X\xee\x1c\xf3\x8d\x05\xde\xcdU\xd3Sg\x04>\xa0\xae\xaa7aC\xb9\xed8Fu>dB\x8dD\xe1|9V\xb5D\xddN\x83\xe0\xf4)qi\xbdwk\xee^\xc4/\x84\xcb|\x0d\xb7r\xf9\xbfX\xe4\xfc\xe9\x12\xee\xaa\xf1\
x975\x81\x0f\xa8\xf3&6\xcc\xd2C\x13\xa7z\x8bV~\x8c\xea\x0e\x83\x05\x86r\x97g\x96\xbf\x8b\xf3\x19\xef{7\x06\x90iC_\xe8\xa9:\xad\xb1\xeb\xc6,\xe1r\x8a\xd7p+kF\xc6\xc7\xff\xac\xad\xaf+\x81\x0f\xa8\xfb\xad;\x19 4\xd1J?h\x9a\x0e\xe5N\xb4\xdar\xc4`}\x94\xd8%L\x9c9\xc9nz\xd3\xde.\xb7\xcf\x86\x99\xa2q\xdb\xbe\xfdw\xf9\xf3\xc7\xe5\xf6\xe5x[W\x0d\x1e\xc2\xb3\xb8\xda\xca\xb4F5\xefo\xa6\xa1\xdd\x06\xaf\xe1VN4\xaa1\xa1\xedS\xa1\xf5\xf6\x98\xdf9\xde\xb7\xc7\xf9\xedx\xddC\xc7\xfaj\x9a/\xd8\xca\xb2\x00Uod\xa1\x87`\xb3\x22@\x5ch\xa9\x95\x0ao\xfe\x86r\xdb}|\x9e\xbc\x16Z\xc2\xcf\x0e\xeb\x0eK\xc6\xbf\xb1\xb0\x1d\xc7p\x11\xc2U\xdd\xde\xa4\x10D\x86S\x06\xd6\x8b8\x01\xe1\x83\x1a\xbb\x87\xa1\xdd\xe3)\xdf\x0f\xc65_\xc3\xa3\x96\xbe\xdfT\xad+~+\xf4\xda\xee?\xf4\xa5\xeb\xceq\x9e\xdcy\x0f\x0e=\x87{w^C\xe3i\x1e\xa0\x1e>\xa0\xea[\xeb~\xc5\xb7m\x01b\xb5\xc7(\x1c\x9f\xba3\x01\xf7\x0c\xe5._\xec\x8d\xb9\x8a\xdbA\xb9\x0d\xca\x9f\x8d\xa6=\x07-\x1c\xc38A\xea\xed\xac~\xef\xdb\xf6\x8c\x8f\xff\xa4\xc6\xaeS\x0d\xed6\x18\xca=j\xf1\xa9#\xa3\x9aao\xd8\xb4\x87=<\xe7\xf0z\xc9nz\xffN\x04>`\x11\x0e+\xbeu[/w\xb5ao\xd0 p\x1f\xb4\xf1$\xf752\x8aAon\x7f318\x8cj\xee\xbe?\x87\xc7?\xf7\xa1\xdd\x06C\xb9\x97YK'\x86\xc5s\x9c\xabz\xf7\xaeb\xd8{5\xc3\xf1\xbe(\xb7\xbdi\xff\x8e\x05>\xe0\xa17\xb1\xd0#\x90*\x16z\xde\xc6\x13\xa7\xd7\xcc8\xab7\x0cv\xaed\xcej\xc5^\x9aW\x8b\xb8\xdd,}\x8e\xe0\xad\xbd\x19\xef\xe7U\x83\xdbh2k\xb7\xeek\xb8\xcd\xbd\xd3\xc3:\xcfs\xd5\x8f_\xe0\x03\x1eR\x15\xe6\x94aYm 
o2\x94;\xd2b\xbdV'\xcco,q\x15\x8eZC\xbb\xf1\xf1\xd4\x19\xcam{\xeft\x9dv]\xf9P\xb4\xc0\x07\xdc\xf7F<\xaa\x08\x13'fz\xae\xf4\xf8\x84\xde\xd7\xf7j\xeen(\xb7\xe7\xe2\x89\xfeu\xce\xb1\x1b\xce\xe1\xeeB\xb8\xacS.&9\xb4\xdb`(\xf7\xb4'\xbd\xd3+\xff\x1b\x14\xf8\x80\xfb\xde\x88\xab\xde`\xf5\xee\xad\xd6\xb8\xe6~\xa7\x86r\xd7F\x9d/`\xdb\xb3\xdeI\x1c\x96\x1c\xd5\x0d\x87\x89\xd5>\xea\xccj\xed\xd3Z\xcf\xdb\xab~\x00\x02\x1f\xd0\xf4\x8d\xf8@\x19\x96\x95\x06\xf2\x10\xe0\xea\xac\xc7\xda\xa7\x0fK\xaa\xd5\xe9Az4\x8f;\x8a=\xc6\x075v\xbdwh7\xf6P\xd7))3\xea\xd1{\xcd@\xe0\x03\xda\x14&\x06Y\xba\xf7.\xcc\x943Qcu\xc7\xa7\xee\x07eP\x08\xe6k\xa5\xce\x84\x80\x9dy\xddY\xec9\xae\xb3\x0a\xc7\xee=\xe7\x0e\xd6y\x0f9\xea\xd9\xea=\x85\xc0\x07\xb4\x892,\xed\x0d{MV\xd385\x83z\xbd\xac\xe8<\xcdQV\xafT\xcb\xf8\xce\xebxT#x\xb6\xb6\x04\xcb\x03\xea\xb4\xfdf\xf9\xdc\xc7\xab|\x90\x02\x1fp\xfbF\x1c\xbe\x85\xefV\x84\x88\xb1\x96Zi\x0f\x81\xa1\x5c\xda\x142/j\x06\xb3\x10v\x8a\x06\xcb\x8fu\xad@\xf8\xa4\xe6~OB\xe8K\x9c\xd7\xb8P\x96V\x03nU\xbd\x11\x17\x9ah\xa5a\xfci\xcd\xdd;q\xdeS\xf9\x9c&K\xb8\x9b\xb3\xb2-\xf6;z\xccC(\xd8\x8e[\xf8\xf70^5\xc8\xea-\xe1\xb5\xac\xd07\xae\xb9R\xc6~|\xecU5\xf7:7\xab<\x0c=\x97mpY\xf3\xb8\x84\xda\xa6\xc3\x10\x80\x97\xfd\x05Z\xe0\x03n\x87YR\xbdGG\xca\xb0\xac\xf4\x83\xbf\xee\x07\xc3I\x87\xce{\xdaqt?u\x9c\xf7b\xb0\x1b\xb6)\xd4\xd5\xf9\x92\x91\xdd\xac\xff\x9a\x0as\x1bY\xba\x90{\xd0\xe5Y\xe5\xe1q\xbf\xa8\xb9o8\xb6/\xe2\x04\xac\xb0\x1d/\xa3G\xd3\x90.\xf8\xa0\xa9\x1af\xb9\xca\x94aY\xf5\x07I\x9d\x0f\x7fC\xb9\x1d\xfd\xb2Un!\xa4\x7f7\x06\x86'\x1d\x0b{MW\xe1\xe8\xe5\xeb7\xf6\xd6\x9d6\xfc\xb5\xcdx\xcc/\xe2P\xefBK\xb7\x08|@Q\xf1\xcd\xfc\xd0D\x8d\x95\x85\x81\xf0!\xdad(\xd7q\xea\xc8\x97\xacxN\xdb\xab\xf8\x81\xbf\xdb\xf5\xe7\xd4`\x15\x8e\x87\xec\xf7`Vy\xf8{=\x9f\xe2\xf7n{?\xbfQ\xbe&\xce\xe2\x97\x80\xb9\x9f\xe7'\xf0\xc1z\x7f\xf0\x0c*\x02\xc5\xa5\xc2\xbd\xab\x0b\x05Y\xfd\xa1\xdc\xbe\x95\xb0\xe8\xf3q\x0d\xbd\xe5!\xd8<\xcb\xea\xad!\xdb\xb5/\x8f\xd3\x04\x9e\x93>L\x08\x8b_\xb8\x86S\xb6\xc1\xad\xad\xec\x93^\xbfb\x9e\x
c1O\xe0\x83\xf5V\xf5&k(w\xb5\xc7\xa6N \xb8t\x9c\xba\x11\xe0\xe3D\x95\xf7z\x18\xf4\xee\x06\x9eQ\xc3_\xbb\xcczt*\xc2\x9d\xd0w:\xe3Mm\xc4/\x05\x17\xf1\x5c\xbf\x99\x99\xb4\x01\xeb\xfb\x01\x14\xde\x94R'\xce\x9f\xea5Z\xd9\xb1\xa93\xeb\xf1\x96\xa1\xdc\xf6\x1f\xcfpn\xd6d\xc6\xa0\x17z\x8d\xc2q>\xcb\xbe\xbf\xc8\xf2\xe4\xce\xbf\xc3\xb9\xb8[\xab|\xaea\x86m\xf9|\x8f\xb2\xea\x09\x1a\xb7\xce\xfa\xf6\xfa\xbd\x0d}1\xa8=\x9b\xf1\xe6>\x0e~\xf1=a4\xcb\x0cf\x81\x0f\xd6\xd7\xb8\xe2z\xbdF\xab\x09\x07M\x86r\x9fwu\xf6t\xf9\xb8sa/)\xf4\x10\x1d\xc7@4\xa9y_\xafZ\xf2|\x9f4\xf8\x95\xb0\x12\xc7^\x1f\xbf\x5c\x86\xd3ab\xb1\xe5\xc3l\xf6\xf34C\x90\x9f\x84S\x02\xa6\x1d\xfe\x16\xf8`=CE\xd5z\xb9\xcf\xbbV\x0b\xabG\x8e\xb3\xfaC\xb9\x85\xe6j}xo\x12\xf6\xaeb8\x18wq\x02C|\xbe\xd3\x04\xb7\x8fg\xa8\xf6q)\xc0\xf8\x9c\xf6\xe2\x88\xca\xa8a\x18~]x\x1d\x85r.\xd94\xa1\xcf9|\xb0\x9e\x1fBE\xc5\x87\x8e \xb1\xba ^\xb7>\x9d\xa1\xdc\xfe\x84\xf7\x8f\xbfd\x95\xdb \xf4\x0au8\xf8\x1cf\xd3\x95\x94\x09m4\xee\xf3\x0b!\xf4\xd2\x96[\x08|\x8f\xcb\xed ~a\x9b\xd6\x8b8\xc4+\xf0\x01\x95o\xca\xd6\xcbm_\xd8\x1b4\x08\xda\x07\x0aa\xb7\xfex\x8ej\x86\xf7\xf0\x05\xeb\xed\xb0\x22H\x97\xff\xeeb\x00\x99\xa5\xf7j'~\xe1\xe9\xb5\x10\xe6c\xa8\x0f\x7f\xef_,\xb7\xa3\xac\xdez\xc4\xaf\x1b\xc7\xf7\x0c\x81\x0f\xb8\xf7M\xb9\xea\xfc\x9aP\x86\xe5PK\xad\xc48\xab\xd7\x1bt\xaeTN'\xd4=F\xc3\xae\x87\xf7\x86\xe7\x9d&\xdb\xaci\x88\xe9x\xf8;\x8b\xbd~\xe19\x1f4\x0c~\x1bY\xc3\x91\x18\x81\x0f\xd6KU\x98\x1bi\xa2\x95|`\x867\xee\xdaC\xb9Z\xac\xf5\xc73\xf4v\xd5\x19\xda<\xe8\xc9\xb9\xb2u\x86\xaeOj\x86\x98\xb5\xab\x0c\x10zv\xe3\x97\xb8\x10\xfc\x8e\x1a\xfc\xea\x93&\x01Y\xe0\x83\xf5\xfa\x10J\x85\x8a\x13\xc3\x84+9.\xa1\xd7\xb5n\xe9\x86\x03\x93i:\xa1\xce\xf9UW}\xe8\xa9\xady\xdei\x189\xd8\xab\x19f\xb6\xe6Uw\xae\xa3\xc1/|\xa1{g\xce\xaf5\x81\x0f\xd6(TT\xad\x97\x1b(\xc3\xb2\x1a\xe3\x9a\xfb\x19\xca\xed\x8e\xe1\x1c\x8f{\xdb\xbf\xac\xd4yM\x8e\xee\xbc\xc7\xd4\x19\xb6|\xb6\xe8ue[\x1e\xfc\xc6\x0dB\x9f\xc0\x07|*\xcc\xa5\x86\x98\x0e\xfaX\x12\xa1\x03\x1f\x98\xe1\xc3\xb2N\xa1\xdc\xabl\xf6\xc5
\xe9Y\xde\x97\xab:\xc3\xb9\x93\x05\xdc\xfd\xce\x92\x9fn\x08&\x95C\xb9\xb7#\x07qRJ\xd1\xe0\xb6\xd7V\x0c}u\x86\xc1k\x07c\x81\x0f\xfa\xff\x014\xc8\xd2\xbdw\xb7\xb5\xbfX\xeeqi2\x94[\x08\xe4\x9dQ\xf7\x03\xf8b\x01\xaf\xa7e\xbe~\xeb\xac\xeaq\xf5\xfa{O\x9c\x14Vg\xad\xd9\xadx\x1f\xeb\xacN8\xae]\xd0[\xe0\x83\xf5x\xd3H\xbd)\xec+\xc3\xb2\xf4\xb0\xd7\xa4@\xed\xa9\x99\xd3\xfd\xb3\x80s1\x07K|\xfd\x0e\xcb\x8b\xa73|Q\x19\xd5\xbc\xab\xa7\xf1\xbe\xbcF\xe6@\xe0\x83~\x07\x8b\xf0f\xf9\xa4\x22L\x8c\xb5\xd4JBx\x9da\xbf\xab\xcc\xac\xdc\xbe\xfem\xce;\xa0\xed-\xe9q\xd7-\xc1r\xfe\xd0\x17\x95\x18d\x9e\xd7\xbc\xcbq\xbc\xcfuu9\xaf\x1b\x12\xf8\xa0\xdf\xaaz\x86\x0aM\xb4\x92\x10\xfe\xb4\xe6\xee\xfb\x86r{k0\xc7\xd7\xd4 \x9b\xad\xe8q\x13\xe3\x9a_V\xaa\xbe\xa8\x145\xc3\xcc\xe6\x9a\xbfO\xcd-\xec\x0a|\xd0\xdf`\x11\xdepS\xe7\xd8\x1c)\xc3\xb2\xf4c\xd2\xa4@\xed\x89\xde\xd7^\x9bg\x8f\xdcxI\xaf\xdf\xf0\x98wk\xecZY>(\x9eF2\xaay\xd7k9\xb4\x1b\xdf/6\xe6u{\x02\x1f\xf4\xf7\x8d\x22\xd5\xbbg\xbd\xdc\xd5\xa8\xbb\xd6\xa8\xa1\xdc\xee\xaa{\xde\xd5\xde\x9c\xfe\xd6\xc3\xdf\xf1N\xc3\xf7\x86i\xeegP3X^f5'\x81\xc5/\x9cu\x87v\x8f\xd7ph\xb7\xcek\xe4T\xe0\x83\xf5\xb6_\xf1\xcd\xf0\xd0P\xe1\xd2Cx\x93\xb5FG&\xd2tS<n\xb5\x86*g];6\xf6\xe2?k\xf8k\xd3\xce\xe6\x1dg\xf5z\x9b\x9a\xbev\x8b\x9a\xed\xb5\x91\xb5\xb4TK\x08\xa2\xf3\xee\x81\x8c\xe1\xb6\xce\x97\xf2\x89\xc0\x07\xeb\x1b,\x06\x15\x1f\x02\x97\x0a\xf8.\xff\x03!k6\x94{\xac\xd5:\xad\xee\xf1+\xa6-\xa7\x12{\xf6^,\xe9\xf5[g5\x8d\xe0y\xd3\xd3D\x1a\x0e\xed\xee\xc6/Nm\x13\x1e\xd3\x87\xe5c\x9b\xcc#\xf8\xc5\xf7\x8b\xd0\x8euF\x03j\x87\xe07\xfc]B\xefT\xbd\x01XQc5\xc7\xa4\xee\xb98\x83\xf0\xc1\xd1\xd2\xe7\xb1oi\xb7Z\xc2\x90f\x9d\x899\xe15\x11B\xc2\xa8n\xc8\x8f\x81\xa2N\x0d\xbcy\x85\xbd\x10H\xdf\xab\xb1\xeb\xd4\xa7\x89\x84\x90X\xde\xcf\xf3\x9am\x16f\xed\x0eZ\xd6\x03~\x1bBwb\xf0;\x8d\x7f\xf3\xc7M\x1fg\xc3\xe3{\xdad\xa4F\xe0\x83\x1e\x89o\x16;\x15o\x10z\x8f\x96{L\xea\x9e\xe8~k\xab\xc5O\xe7\x91#Z+\xc0\x5c\xc4\x0f\xfd:\xbdb!\xf4}\x90\x0a\x091t\x85\xbf\xedQ\xc5\xeb\xe3*\x9b\xe3I\xfe\x0d{\xa6g=\x0d\xa1\x88
\xc1i\xb3F{\x1dg\xf5\x96\xaf[\xc6\xdf\xf7\xa3{\xfe\xbew\xe2\xf6\x22\x1e\xd7I\xdc\xce\xeek\xa3\x06\xc7w\xa6/\xef\x02\x1f\xf4K\xd5\x9b\xb3\xde\xbd\xf6\x1d\x13\xfa)|x\xbfl\xb0\xff\xdd\x900Mx\xbb\x9d\xe8\xf3\xc1\x1c\x9fCQ3\x80\xcc|\x1aB\x08B\xf1\x9c\xc4\x0f\xeb\xb4U\x18fnIA\xf2\xbd\x9a\xc7\xf5Y\x0cw\xb7?\xbf\xcc\xea\x0d\xd9>\xe4\xa0io\xbbs\xf8\xa0'\xe2y6\xa97\x90\xe7\x86\xe3VbC\x13\xac\x9f8\xd4\xf6\xee\x92^7!\xec\x0dk\x86\xaeZ\xe7\x0c6\xa8\x179\xb7\x19\xe5\x0dg\xed\x16\x0b(^\xbd\x88\xc0\xf7\x90Y\xc2\xde\xd14\xe7a\x0b|\xd0\x8f\xb0W5\xa3K\x19\x16X~\xe8\x0b=PG\x0b\xbe\x9b\xdb\xb0W\xf7\xcb\x5c\xe5\xb0|\xc3\xa5\xff\xe6=\xa3\xbc\x88\xcf\xa9N \x1e\xb7\xe0}ww\xc9w\x1b\xc2\xdeT\x01[\xe0\x83~8\xac\xe8\x11(\x94\xf9\x80\x95\x84\xbe\xf0\xe1\xfc|A7\x1f\xce\x0f\x1b\xbc\x16\xf6\xaa\xea\xb2\xd59\x0fs\x9c\xd5\xeba\x9c\xfb9\xc1\x0dg\xed\xee\xc4\xd9\xca\xab2X\xf2\xfd\xbd;m\xd8\x13\xf8\xa0\x07\xe2\x09\xbf\xa9\xfan\x97-9\xd7\x05\xd65\xf4\x85\xd3-\xbe\x9c\xd5\xeb\xb9\xaa#\x9c\xff\xf5Ny\xbb\xc3)\xbe\xc8mW\xbc\x9f\x84@Q\xa7\xd7ja\xc5\xc1c\x88<\xa9\xb9\xfb\xb3iK\xdb\xcc\xe1q\x86\xa0\xfd8[|/n\xb8\xfd\xc7\xb3\xbe\x8f\x0b|\xd0}Uo\x02#M\x04+\x0f}!\xc4\x0c\xca\xed`\x86\xe0w\x1e\x83\xde \xb1\xec^\xd5\xd0\xee 
\x11\xf6\x06Y\xcdU2\xb2\x9bQ\x83\x8b\x056\xd9\xa8A;\x8dWx\x5c/b\xaf\xdbg\xb3\x9bs6O\xe7t\xd3\xe1\xb9?\x8fAo4\x8f\xb6\xce\xcb\x1b\xf1\x97\x08\xd1\xc6W\xde\x9ad\x0d\x96)\x8aN\xaf\xde\xffh\xb8\x8a\xc7\x1bK~\xa4f\xe5\x85!\x97\xa1#\x0b\xed\x12\xffv\xc3\xdf\xe6v\xe2=\xe72\x06\xb8\xf0\xbetlu\x9c\xce\x1c\xdbG\xf1\xb8\xde\x1e\xdfG5>WBP|\x15\x8f\xf5d\x11\x13\xec\x94e\x81n\xd3\xbb\x07\x1d\x14{\xfc\xd4\xc4\xec\xe7\xb1\xfd^pk\xd3\xe32\xa4\x0b\xdd\xfd\x16Yd\xe9\xa9\xfd\x07z\x04\x00\x10\xf8\xa0\xbbao\x90\xa5\x8b(\x87\xf3?L\xd4\x00@\xe0\x83\x0e+\xb2t\xd9\x84}eX\x00\x10\xf8\xa0\xa3b\x05\xfcT\x19\x96\xf3\xc4\x0c>\x00\x04>\xa0\x03\x8a\x8a\xeb\xad\x97\x0b\x80\xc0\x07]\x15\x8b\xa2\xa6\xa6\xf7\x1f\xc5\xf5(\x01@\xe0\x83\x0e\x86\xbdP\xcb)5\x11\xc3z\xb9\x00\x08|\xd0qa\xa865Q\xe3P\x19\x16\x00\x04>\xe8\xa8X\x86\xe5Yb\x97P\x91_\x19\x16\x00\x04>\xe8\xb0\xaa0\xa7\x0c\x0b\x00\x02\x1ftU,\xc3\xb2\x9b\xd8\xe54.\xd3\x04\x00\x02\x1ft\xd4\xb8\xe2zeX\x00\x10\xf8\xa0\xab\xf2<\x0fa.\xb5^n(\xc3r\xa6\xa5\x00\x10\xf8\xa0\x9ba/\x94a)\x12\xbb\x842,z\xf7\x00\x10\xf8\xa0\xc3B\xd8K\x95a)L\xd4\x00@\xe0\x83\x8e\xca\xf3|\xbb\xbcx\x9a\xd8\xe5\xb2\x0c{\xca\xb0\x00 \xf0A\x87U\x85\xb9\x91&\x02@\xe0\x83\x8e\xca\xf3|/K\xaf\x97{j\xbd\x5c\x00\x04>\xe86\xbd{\x00\x08|\xd0Wy\x9e\x17Y\xba\x0c\xcb\x81\xf5r\x01\x10\xf8\xa0\xbbao\x90\xa5\xcb\xac\x842,&j\x00 \xf0A\x87\x15Y\xba\x0c\x8b\xf5r\x01\x10\xf8\xa0\xab\xe2z\xb9O\x12\xbb\x9c\x97ao\xac\xa5\x00\x10\xf8\xa0\xbb\x8a\x8a\xeb\xad\xa8\x01\x80\xc0\x07]\x95\xe7\xf9(K\x97a9R\x86\x05\x00\x81\x0f\xba\x1b\xf6\xea\xac\x97[h)\x00\x04>\xe8\xae0T\x9b*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT,\xc3\xf2,\xb1\xcbe\xa6\x0c\x0b\x00\x02\x1ftZU\x98+\x94a\x01@\xe0\x83\x8e\x8aeXv\x13\xbb\x9c*\xc3\x02\x80\xc0\x07\xddV\xd5\xbb\xa7\x0c\x0b\x00\x02\x1ftU\x9e\xe7!\xccm%v\x09eX\xce\xb4\x14\x00\x02\x1ft3\xec\xd5)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xd4z\xb9\x87&j\x00 
\xf0AG\xc52,O\x13\xbb\x5c\x96a\xaf\xd0R\x00\x08|\xd0]\xe3\x8a\xebG\x9a\x08\x00\x81\x0f:*\xcf\xf3\xbd,\xbd^\xee\xa9\xf5r\x01\x10\xf8\xa0\xdb\xaa\xca\xb0\x8c4\x11\x00\x02\x1ftT\x9e\xe7E\x96^/\xf7\xb9\xf5r\x01\x10\xf8\xa0\xbba/\x94aI\x95Y\x09eX\x0a-\x05\x80\xc0\x07\xdd\x15\x86rSeX\xf6\x95a\x01@\xe0\x83\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c\x00\x04>\xe8\xb6\xa2\xe2z+j\x00 \xf0AW\xe5y>\xca\xd2eXN\x94a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850\x97*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT\x5c/\xf7Yb\x97\xcb\xac\xba\x083\x00\x08|\xd0bUa\xaeP\x86\x05\x00\x81\x0f:*\x96a\xd9M\xecr\xaa\x0c\x0b\x00\x02\x1ft[U\xef\x9e\x89\x1a\x00\xac\xc4\x1b\x9a\x00f\x17\xcb\xb0l%v9\xba\xbe\xbe>\xd3R\x00\xdd\xf3\xf5\xc7_\x18\xc7\x7f\x16_z\xf9\xad\x8b.>\x07=|0\xa3?\xf9\xc7?\x10\xbe8\xa5z\xf7\x94a\x01\xe8\xb6\xe3\xecf\xe5\xa4\x97e\xf8\x9b\x94\xdbH\xe0\x835\xf3\xf8\xdb?6\xc8\xd2\xeb\xe5\x1e\x9a\xa8\x01\xd0]_z\xf9\xad\x10\xf8.\xe3\x7fCQ\xfd\x17e\xe8\xbb(\xb7\xa2\xdc\x1e\x09|\xd0s\x7f\xee\x0f\xdf\xc8~\xf5\x83_\xf9|b\x97\xcb2\xec\x15Z\x0a\xa0\xf3^\x1f\xc9\x09\xf5VC\x19\xae\xef\x86!\xdfr\xdb\x16\xf8\xa0\xa7>\xf7\xf2\x07\xabv1\x94\x0b\xd0\x0f\xe3\xec\xe6\x14\x9d\xfb\x84\xe1\xdeo\x94\xa1\xef\xac\xad\xc3\xbd\x02\x1fL\xe9\xa7\xfe\xe0G\xb3\xd3_\x9e\xa4v\x09eX\x8e\xb5\x14@\xf7}\xe9\xe5\xb7\xc2\xa99U\xef\xe9a\xf2^\x18\xee}\x15\x87{\x07\x02\x1ft\xdc\xef\xfe\xfb\xefT\xed2\xd2J\x00\xbdRw\xa5\xa4p^w\x18\xee\x0d\x93<\x8e\xcbm(\xf0A\x07\xfd\xb5\xef<\xca\xbey\xf6\xeb\xa9]\x9e[/\x17\xa0_\xbe\xf4\xf2[\xa1\xbc\xd6i\xc3_\x0b\x05\xf9?\x8c\x93<F\xab\x9a\xe4!\xf0AC?\xfe\xc7\x9f\xc9\xbe9I\x86\xbdp\x8eG\xa1\xa5\x00zi<\xe5\xef\x85I\x1e/\xca\xed\x22N\xf2\x18\x08|\xd0bo\xfe\xce\x8fg\xdf\xfe\xf6\xb7S\xbbX/\x17\xa0\xa7\xbe\xf4\xf2[!\xf0]\xcep\x13a\xb8\xf7nM\xbf=\x81\x0fZ\xe6/\xfc\xdf\x1f\xca~\xf5\x97\xfeEj\x97\xf32\xec\x1dj)\x80^\x1b\xcf\xe9vBM\xbf\x0f\xe2p\xef\xfe\x22\x87{\x05>h\xe0\xc7.+\xffd\x94a\x01\xe8\xbfy\x7f\xb1\x0f\xc3\xbd\xefe\x0b\xac\xe9'\xf0AM\x7f\xf5\xf7~\xa2\xaa\x0c\xcb\xc9\xf5\xf5\xf5DK\x01\xf4[,\xd1r\xb4\xa0\x9b\xbf\xad\xe
97\xd7%\xdc\xdep\xd8\xa0Z\x98\xa8\xf1\xea\x9b\xff3\xb9\xcf?\xfa\xb9\xbf\xf9\x8bm\x98z\x0f\xc0R\x9c\xc5p\xb6(a\xb8w\xa7\xfc\x5c\x09\xbd\x89a\x1b\x97A\xf3b\xda\x1b\xcb\xaf\xaf\xaf\x1d2\x886\xbe\xf2\xd6$\xfe\x91}\x9fP\x86\xe5W\xfe\xd9/?\xf8{\xc5\xde\xcfd?s\xf6\xdf5 \x00\x8bt\x14\x83\xdf\xa4\xe9/\x1a\xd2\x85{\xfc\xe8\xef}\xee{\xff\x0e\xeb\xe5\xa6\xca\xb0\xec\xbc\xf5\xd3\xd9\xdf\xf8\xadok4\x00\x16-\xf4(~x\xbb\x84[\x93I\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f\xbe\xf1S\xd9\x0f}\xe7'?\xfe\xff\x9f\xf9\xed\x1fI\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xf1\x12n\xd9MM\xbf\xc3:5\xfd\x0c\xe9\xc2\x1d\x9f\xff\xd9_\xf8\x9d\x9f\xf8\xf6\xc6\x9f\xbe\xfd\xff\xe7\xfe\xe2\xcb\xec_\xfd\xe2?\x7fp\xff\x9f\xff\xd9\xb7\xb3_\xf8O\xbf\xad\xe1\x00X\xb5\x93\xecf\xb8\xf7\xde\xf5~M\xda\x80\xe8\xcd\xad\xaf\x8e\x7f\x22\xfb$\xec\x05\x9f\xf9\xed\xab\xe4\xef\xfcl\xf6\xbf4\x1c\x00m\x10\x96p\xdb\xfd\xfa\xe3/\x84\xa2\xd0\xb7\x93<\xbe\xb7\x08\x80\x1e>\x88a/\xbbg\xb6\xd5\xc6\x8f\xfcQ\xf6'~\xf47\xb3\x7f\xf9\xe1\xc9\xa7~\xe7\xef\xff\xdc\xdf\xca\xfe\xee\xaf\xff7\x8d\x07@\x1b\x85\x1e\x8b\xd0\xdbw\x18\xd6\x00\x16\xf8\x10\xf6\x1e\x08{w\xfd\xd9\xcf\xfeA\xf6\x7f~\xff\xdfd\xff\xfa\xdf\xfd\xdb\x8f\xff\xbf\xb99\xc8\xfe\xe9\xc6\x8f8w\x0f\x80.\x04\xbf}\x81\x0fa\xafA\x1d\xa5\xbf\xf3\x93\xdf\xca~\xe9?\x9ee\xef|\xf1\xcf+\xc3\x02@\x9b}\xdf\xd0\xae\xc0\x87\xb0\xd7\xd0;?\xf8\xcd\xec\xaf\x7f\xe775 
\x00mt\xef\xe4\x0d\x81\x0fao\x0a\x7f\xef\x0f\x7f=\xfb\xe9\xdf\xff\xaf\x1a\x12\x806\x08\xc3\xb6\xe1s\xed\xf0\xa1\xd58\x04>\x84=\xa1\x0f\x80n:\xcfn\x86m\x8f\xef\xce\xc8\xbd\x8f\xb2,\x08{S\xfa/?\xf0\xb9\xec\xa73\x81\x0f\x80\xa5k\xbc\xc4\x9a\xc0\x87\xb07\x85\xb7\xaf\xffG\xf6\xf3\xbf\xf7k\x1a\x15\x80e\x09\xc3\xb6\xb7\x930.\x9a\xfe\xb2\xc0\x87\xb0\xd7\xd0\xe7\xf3\xff\xfd\xcb?\xff\xbb\xbf\xf65\xad\x0a\xb0\xd6\x86\xe5\xf6l\x09\xf7s\x1aC\xdex\x96\x1b\x11\xf8\x10\xf6\x9a9\xfa\xd5\xb3\xe7#\xad\x0a\xb0\xde\xbe\xfe\xf8\x0b\xfb\x0b\xbe\x8b0l\xfbq\xd1\xe4y\xdc\x98I\x1b\x08{\x0d\xfe\xf8~\xe3\xfck\xc2\x1e\x80\xb07(/^.\xe0\xa6\xef]\x16m\x1e\xf4\xf0!\xec\x09{\x0043\xef\xde\xbd0l{\xf8z\xed\xbcy\xd2\xc3\x87\xb0'\xec\x01P\xd3\xd7\x1f\x7f\xe1QyqQn\x1b3\xde\xd4\xedZ\xb7\xc54\x930\x9a\xd2\xc3\x87\xb0'\xec\x01P\xdf\xde\x8ca/\x0c\xdb\x16Y\x8d\xday\x02\x1f\x08{\x00\xacF1\xe5\xef\x85%\xcf\x0e\x9b\xd4\xce\x13\xf8@\xd8\x03`\xc9\xbe\xfe\xf8\x0b\xc3\xf2b\xb3\xc1\xaf\xccT;O\xe0\x03a\x0f\x80\xe5\xab\xfb\xd9\xf0\xf1\x92g\xb3\xd6\xce\x9b'\x936\x10\xf6\x84=\x00*\xd4,\xc52\xd7\xday\xf3\xa4\x87\x0faO\xd8\x03\xa0\xdaC\x9f\x0fa\x12\xc68\x06\xbdWm}\xf0\x02\x1f\xc2\x9e\xb0\x07@\xb5\xd7k\xef\xcde\xc93\x81\x0f\x84=\x00Z\xe0\xeb\x8f\xbf\x10>#B)\x96\xa5\xd6\xce\x13\xf8\x10\xf6\x84=\x00\x96gXn\xeff\x0bX\xf2lYL\xda@\xd8\x03\x80\x9e\xfb\x8c&@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x0
0\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\xd3\xaa\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 
\xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\xb0\xb6\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x16\x05\xbe2\x9c=\x12\xf6\x00\x00z\x1c\xf8J\x87eH\x1b\x08{\x00\x00=\x0c|\xb1w/\x04\xb4\xd1\x1cnK\xd8\x03\x00h[\xe0+\xed\xc7\xcb\x99\xc2\x95\xb0\x07\x00\xd0\xde\xc0w\x1b\xac6\xcb\xd0\xb6'\xec\x01\x00\xf4(\xf0\xc5\x80\xb7yO\xf8\x13\xf6\x00\x00\xfa\x10\xf8\xee\x09x\xbbM&o\x08{\x00\x00-\x0e|1\xd8\xed\xd6\x08\x81\xc2\x1e\x00@\x17\x03_\x22\xd8U\x06/a\x0f\x00\xa0\xdb\x81/9yC\xd8\x03\x00\xe8@\xe0+C[\x08Y\x9b\x89]\xf6\x85=\x00\x80\x0e\x07\xbe\xacz\xd8v\xe7\xf5\xc9\x1b\xc2\x1e\x00@G\x02_\x0cr;5v\xdd\x17\xf6\x00\x00:\x18\xf8\xb2\x07\x86k\xef1\x12\xf6\x00\x00V'\xbf\xbe\xbe\x9e\xea\x17\xcb\x00\xf7\xaa\xbc\xd8\xa8\xb9\xfby\xb9m\x09{\x00\x00\xcb7U\x0f_\x9c\xac\xb1\xd1\xe0W\x84=\x00\x80.\x05\xbel\x8a\xa5\xd3\x84=\x00\x80\xd5h<\xa4\x1b'k\xbc\x14\xf6\x00\x00\xbaa\x9a\x1e\xbeB\xd8\x03\x00\xe8\x8eF=|on}\xf5Qyq\x915;\x7fO\xd8\x03\x00X\xa1\xa6=|{\xc2\x1e\x00@\xbf\x03\xdf\xfe\x92\x1e\x97\xb0\x07\x00\xb0\xec\xc0\xf7\xe6\xd6W\xb7\xb3\xf9\x95W\x11\xf6\x00\x00\xda\x16\xf8\xb2\xe5\xf4\xee\x09{\x00\x00\xab\x08|q\xb2\xc6\xde2\x1ePy_C\x87\x05\x00`~j\xcd\xd2\x8d+k\xbcX\xe2\xe3\xba,\xb7\xc3r\x1b\xff\xc6\xf9\xd7^9L\x00\x00\x8b\x0f|\x17\xe5\xc5\xe6\x0a\x1e\xdfU\xb9\x1d\x87\xf0W\x06\xbf3\x87\x0b\x00`\x01\x81/\x0e\xb1~\xd8\x82\xc7z\x9e\xdd\xf4\xfa\x1d\xeb\xf5\x03\x00\xa8\xaf\xce9|\xa3\x96<\xd60C8\x0c+_\x94!t\x1cg\x0d\x03\x00P!\xd9\xc3\x17'k|\xb7\xc5\x8f?\xf4\xfa\x15\xbfq\xfe\xb5c\x87\x12\x00\xe0~U=|\xa3\x96\x87\xbdCa\x0f\x00 
\xed\x8d\x8a\xeb\xf7[\xf6xM\xe2\x00\x00\x98W\xe0\x8b\x9356[\xf28M\xd8\x00\x00\x98w\xe0\xcb\xda1\x9c{\x94\xe9\xcd\x03\x00\x98\xc9\xbd\x936\xde\xdc\xfa\xea\xa0\xbcx\xb9\xa2\xc7\xa4\xe82\x00\xc0\x1c=\xd4\xc37Z\xc1c9\x8a!o\xe2\xb0\x00\x00\xf4'\xf0\xe9\xcd\x03\x00Xv\xe0{s\xeb\xab{\xd9\xe2'k\x9c\xc4\x90\xa7\xa4\x0a\x00\xc0\xb2\x03_\xb6\xb8\xde\xbd\xd0\x9b7\x8eA\xefB\xd3\x03\x00,\xc7\xf7M\xdaX\xd0d\x8d\xd3L\x81d\x00\x80\x95y\xbd\x87o4\xa7\xdb\x0d\x05\x92\xc71\xe8]hf\x00\x80\xfe\x04\xbe\xd0\x9b\x17\x86l\xc7\x9a\x16\x00\xa0e\x81\xef\xcd\xad\xaf\x86\xb07\xcdd\x0d\xbdy\x00\x00]\x08|Y\xf3\xde=\xcb\x9d\x01\x00t\xc0\xc7\x936\x1aL\xd6\x08\xbdya\xf2\x85\xe5\xce\x00\x00:\xe2\xb6\x87o\xbfb?\xbdy\x00\x00\x1d\x0f|\xa3\x07\xae\xb7\xdc\x19\x00@\xd7\x03_\x9c\xac\xb1q\xe7g\x96;\x03\x00\xe8S\xe0\xcb>\xe9\xdd\xd3\x9b\x07\x00\xd0\xd3\xc0\x17\x02\xde\x9e\xde<\x00\x80~\xfa\xff\x02\x0c\x00\x1eE:\x8bH\x8b\x05S\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0f\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x12t\x00\x00\x12t\x01\xdef\x1fx\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x0f3IDATx\x9c\xe5\x9by|TU\x96\xc7\xbf\xaf*KU\xaa\x0a\xb2\x90\xa4*\x8e\xa3\x01\x82c\x98\x86@l\xb7@\xc0fQ\x16\xa1\xb5\x05\x5c\x80\x06\x84\xb0((\xc8\x08\xd2\x0dQTF\xa4\x05Y\x14\x08K\x90\xc8\x9anA\x09\x81\x04ADtD\x0d\x04\xdbFIB@gHU\xc0\x90\xa4\xeaU\xaa*\xa9zw\xfe\xa8\xa4$\x92\x04*\x9b\xd3\xce\xef\xf3y\x9fT\xee;\xf7\x9c\xdf9\xef\xdc\xe5\xdd{\x9fD\x03HMM\x0du:\x9d\xcf\x02#%IJhH\xe6\x9f\x05B\x88|\xe0}`\xcb\xd2\xa5K/\xfc\xfc\xbe\xf4\xf3\x82y\xf3\xe6\xbd\x08<\x13\x1e\x1e\x1e\x1a\x1f\x1fO||<\x00\x9d;wn[\xa6\xad\x8c\xe2\xe2b\x00\xce\x9c9\xc3\x993g\xb8r\xe5J\x05\xb0r\xe9\xd2\xa5/^-\xe7\x0b@jjj\xa8\xcb\xe5Z\x01L\x188p 
\x03\x06\x0chO\xbem\x8e\xc3\x87\x0f\xf3\xe1\x87\x1f\x02l\x09\x0e\x0e\x9e\xfd\xd2K/U\x00\x04\xd4\x09\xb8\x5c\xae\x15\x1a\x8dfBJJ\x0a&\x93\xe9\x97\xe2\xd9f\x180`\x00\xf1\xf1\xf1\xa4\xa5\xa5Mp:\x9d\x00\x13\x01\xd4\x00\xf3\xe7\xcf\x7f\x16\x98?c\xc6\x8c_\xa5\xf3u0\x18\x0c\xdcv\xdbm\x9c8q\x22\xa1O\x9f>\x95\xc7\x8f\x1f\xff\x5c\x9d\x9a\x9a\x1a\xeav\xbb\xf7\x0c\x1a4H\xd3\xa3G\x8f_\x9ac\x9b\xc3`0 I\x12\xe7\xce\x9d\xbb{\xe0\xc0\x81\xebU.\x97k\x82V\xab\x0dMJJjW\x22\xa2\xa4\x18w\xe6rDIq\xbb\xda\x05HJJB\xab\xd5\x86\xba\x5c\xae\x09\x01\xc0\xc8\xee\xdd\xbb\xa3\xd1h\xda\xc5\xb8p\xc8\xb8?X\x8f\xe7\xabC\x00\xb8\xbf\xc8E}\xc7 \x02FLE\xd2\xea\xdb\x85\x83F\xa3\xa1{\xf7\xee\xe4\xe5\xe5\x8d\x0c\x00\xfa\xd7\x0dum\x8d\x9a\x83[q\x1f\xdb\x03N{\xbdr\xf7\x89\x1c\xdc\xa7\x8f\x13\x90\xfc\x10\x81\x0f\x8co\x17.\xf1\xf1\xf1\xe4\xe5\xe5\xf5W\x01m\xfe\xf4=\x85\xa7\xa9z\xf1\x09\xaa\xb3\xb7\xa2\xc86\x14\xb7B\xc0\xa0\xb1\xf5\xfe*\xb2\x8d\xea\xec\xadT\xbd\xf8\x04\x9e\xc2\xd3m\xca\x07~\xf29\x00 &&\xa6M\x8c(e\x16\x5c\x19K\xf1\x14\xe6\xfb\xca\x02z\xf6!\xe8\x0fO\xa1\x8a0\xe2\xcaJ'p\xc8x\xd4w\x0e\xa6\xfaoo\xe1>}\x1cq\xb9\x04\xc7\x9b\xcf\xa2\x8eK x\xdc<T\x11\xc66\xe1V\xe7s\x00\xb4~\x06\x88*\x19W\xd6\x16\xaa\x0fg\xfa\xca\xd47wE3z&\xean?\xcd\xac\x15\xb7\x02\x80*\xc2\x88&\xe5e<\x05\xf98w\xaf\xc6\xf3\xdfE(\xdf\x9e\xa4f\xc1\x18\x82\x06\x8c\x22x\xf8\x04\xa4\x90\xd6\xed\x1f\xeae@k\xc2uh7\xae\x0f\xd2\x11\x0e\x19\x00)D\x8f\xe6\xd1Y\x04\xdd;\xe4\x1aY\xe1Q\xea\xfd\xaf\xee\x96\x80\xee\xcf\x9b\xa8\xfe\xec\x00\xce\x9d\xab\xbc\x81\xcc\xddE\xf5'\xfb\x09\x1e1\x91\xe0A\xa3[\x9bn\xeb\x05\xc0\xfd\xddI\xec\x1b\x97 ~4\xfb\xca\x82\xef\x1f\x83f\xe4\xa4F\x9f\x9ep+\x0d\x96\x07\xdd;\x84\xc0\x84\xbe8\xdf\xdf\x8c+g\x17\xc2f\xc5\xb1m%\xce\x9c\xdd\xe8&/ 
\xe0\xdfz\xb7\x16\xed\x96\x07@\xb9l\xa6\xea\xdd7\xa9>y\xccW\x16x{ot)\x7fF\xd5\xa9\xe9Y\xa5\xe2i8\x00\xe0\xcd\x1c\xedc\xb3\x08\x1e4\x0a{\xda+\xd4|{\x12J/b}\xf5)\x82z'\x132\xf6YT\x91-\x9f\xb56;\x00\xc2n\xc3q`'\x8e\xbfm\xf4\x95\xa9\x22M\xe8\xa7-\x220\xfe\xc6\x9ePc\x19p5T\x9dL\x18\x16\xbcE\xcd\x99\x93\xc8\xeb\x16\xa3\x5c6\xe3\xfa\xe2(\xae/\x8e\xa2\xfd\xc3d\xb4C\x1eE\xd2\x19\x9a\xebF\xf3\x02\xe0<\xba\x0f\xf9\x9d\xe5\x08{m;\xd7\x19\xd0=2\x05\xed\xb0\xc7\xea\xc9\xd5\xfc#\xcf\xf7;\xb0{\xe25z\x9a\xca\x80\x9f#0\xbe7a\xab\xf6\xe2\xd8\xbf\x03\xfb_7 \xec6\xec\xbb\xd3\xa8\xda\xbf\x1d\xfd\x1f\xe7\xa0\xe9\xff`s\x5c\xf1/\x00\xd5\xff\xc8CN_\x8e\xfbB\x81\xaf,d\xf8c\xe8FM\xa9\xf7\x14\xaa\xb2v\xe0\xc8\xde\x81\xe7\x92\xb9^}\xcd}\xc3\xd1\x8dJA\x1d\xe5M]q\xe3\xfe\xfb\xa0\x1d\xf6\x18\x9a\xfe\xc3\xb1gn\xa0*k\x07\xc2&c]\xb3\x98\xaa\xac\x9d\xe8'\xce!\xa8\x81@7\x05\x95?\xc2\xe5\x8b\xa6Ss\xbe\x10!$\x02\xe3\x13\x09_\xb6\x0d\xfd\x849\xf5\x9c\xb7\xaeY\x8c-}\x05\xeeR\x0bBH\xf5.\xc7\x91\xfd\x94\xcd\x1d\x8b\xe3\xa3,\x00\x84\xb8f=\xe6\x86 
\xe9\x0c\xe8'\xcc!|\xd96\x02\xe3\x13\x11B\xa2\xe6|!\xe5\x8b\xa6\xfb\xad\xcb\xaf\x0c\xa8#\x1c:\xefu4w\xf5\xbb\xe6\xbe\xbcs\x03UG\xf6S\xb7\xce\xa2\x8e4\xa1\x1f3\x99\xaa\xac]\xd4\x5c(@\xa5\xd3\xa3\xc82\x95\xab_F\x1d\x19\xd3\xec\x00\xf8\xc8\xc7v#l\xf1:\x9c'>\xa6b\xe9\xf3\xcd\xd2\xe1W\x06\x08\xe1\xbd\x1ar^\xb1\xdb\xb0\xef\xdb\x85\x10\x10|g?:>\xbd\x10Ig\xa0b\xf5\xcb\xb8/\x99\x89x#\x83\x887\xdeE\x15iB\x08\xa8X\xb5\x18!\x9a\xc5\xf9\x1ah\xee\xea\xe7\xe3\xe6/\xfc\x0b\x80\x22!\x94\x86\x9f\x9a\xe3\xf0~<\xb2\x8cP$\x02n\xe9F\xf0]\xfdp[,\x04\xdcr\x1bB\x91\xa8:\x92\x8d:\xcaD\xc7\x99\x8b\x10\x8a\xe4m\x22\x8d\xe8j\x0e\x9a\xe2\xd6\x14\x9a\xd5\x04\x1a\x82\xe3\xc4'\xbe\xfb\xf6\xc3\xd98N\x1cC\x15e\x22\xe2\xd5\xb7q~\xfe1\xe5+_A\x0a\xd1\xe3<q\xac\xc5\xa9\xef/\xb7\xa6\xe0g\x13\x90\x1a5Tw/\xe4w\xc3q\x97Zp[,\x84\xcdZ\x88J\xa7G\x1deB\x08\x09\xeb\xf6\xcd\xa8#cjS\xb6q]\xcdAs\xf5\xb5Z\x06\xb8K\xcd\x08!\xe1*\xf6\x8e\x12\x1e\xd9N\xd5\xe7\xc7\x08(.\xe4\xca\x9b\xaf\xa2\xbd;\x19\xd7\xdfO\x12\x10\x1b\xe7\xd5\xf5_\x9f\x5c\xa3\xa3\xba\xb8\x10\xc7\xe7\xde\x19e\xf0oz\xa3\xf9M\xafV\xe1\xd6\x14Z-\x00\xea\xc8\x18j,\xa5T\x9f+B{O2!w\xf7\xa5l\xc5\x12\x00t\x03\x87\x121\xfbOTn\xdb\x84uo&\xa1OL\xf4\xe9\x92?\xcc& 
\xcaD\xe5\xf6\xcd\xb8\xfe~\xea*\x8d\xe9\xb5\xf5\x16\xb4\x98[S\xf03\x00\x8d\xdf\xd3\xdc\xdd\x17\xc7\xd7^\x07\xb4w\xf5E{O2\xaaw\xd3\x91$\x90\x0f\x1d@\x0a1\x10:v\x12\xd6\xbd\x99\x5cY\xbf\xdaW\xafl\xf9\x92\xfa\x84\xa2M\xdeE\x13\xbb\x8c|(\x9b\xc0\xd8\xaet\xf8\xfd\xf5\xdf\x02\x9b;\xa2\xb4Z\x1f\xa0\x1f4\xd4\xd7\xd6\xcb\xd6\xaf\xa6l\xdd*\xdc\xa5\x16Lk\xd2\x09K\x99\x85|\xe8\x00\xa5/-\xc0#\xdb\x11B\xa2\xc3\xe3\x93\x08\x8c\xed\xe6\xd3\xa9\x8e2\x111\xfbO\xdc\x94\x9eI`\xe7\x9f\xca\xcb\xdfMG\x91\xe5\x16qk\x0a\xad6\x0c\xaatz\xa2\x16.A\x0a1\xa0\xd8\xec\xc8\xb9\x07\x09\x88\xed\x06\x02:\xfc~4\xa1OL\xc2y:\x1f\xdd\xc0\xa1\xe8\x06\x0e\xa5\x22c\x0b\x8a\xf0\xea\x0b\x8c\xedF\xcc\x9at\xf4\x83\x86\xa0\xc82\x8e\xfc|\xc2Sf!\x14\x09\xc5f\xc7v\xe8@\x8b\xb85\x05\xbf\x02\xa0\x08\x09\xa5\x89(\x07u\x89\xc3\xf4\xfaJTQ&\x14!\xe1**\xe2\xfb\xf1c\xa8|/\x13k\xeeA\x14!\x11\xf9\xdc\x0bD>\xf7\x02\xc1=z\xe1**\x22\xb8G/\x9c\xb5r\xf6\xcf>\xa1bO&\xe8\x0ctxh\x14\xda{\x92Q\x84\x84\xfd\xb3\xe3-\xe6\xd6\x18Z\xad\x13\xacCP\x978n^\xbb\x89+\x19[\xa8x\xef\xafxlv.\xaf}\xab\xf6\xae\x845\xf7 \xb6\xdc\x83\xb8\xce\x15\xf1/k7\x11\xdc\xa5+\x8a,\xf3\xe3\xda5\x98S\x17\xa2\xd2\xeb\x09}\xe8\x11\x00B\xeeMB\xfe\xf48U\xf9\xd7_$m\xa7y\xc0\x8du6*\xbd\x9eN\xd3\x9f\xe6\x96\x8c\x9d\x04u\xee\x8a\x10\x10\x10mD\xd3#\x81\xd2\xd7_\xa3*?\x9f\xa8\xb9/\x10\xdc\xa5+\x005\xa5\x16$\xbd\x1e!\xc0c\x93\x09\x1f?\x11\x80\xe0.q>\x9b\xaesE\xad\xc2\xed\xe7h\xf5\x0c\xb8\x1a\x81F#\xd1\xff1\x9f\xef\xa7N\xa1\xda\x5c\x8a\xdbf' 
\xda\x84G\x96)Y\xb4\x10}R\x1f\xaaN\xe7\xa3\xc82\x81F#\x86\xc1C\xb0\xe6\x1c\xa4\xeat>\x9a.]){\xe7\x1d\x9fM\xcfu:\xc2_|&\xd8\x18<\xb2L@\xb4wt\xf0\xd8\xecDN\x7f\x0a\x95\xce\x80\x10\x12\xb6\xe3\x9f\xd2a\xf0\x03\xc4n\xdbA\xec\xb6\x1dt\x18|\xbfw2Ux\x8e\x0b))\xd8\xf3O\xffd\xf3:O\xb7}F\x01?\x8c\xd4X,\x5c\x5c\xb8\x88\x1ff?G@\xb4\xd1W\xd7\xbct\x19*\x9d\x81[\xd3\xd2\x10BB\x9f\xd4\x87@\xa3\x11\x8f,S\x99\x93\x8b\x10\x12\xa5o\xbd\x8d\xb6g\x02]\xb6o\xbf\xcaf\xd3v\xdbi*|}\x99\x1a\x8b\x85\xcb[\xb6Ry0\x87@\xa3\x91\x7f]\xb1\x9c\x90\x84\x9e\x94\xbc\xf6:\x95\x07sp\xdbdnzy\x06\xa2V\x9f\xb3\xb0\x88\x8a\x839T\x1e\xcc\x01\xc0\xd0'\x89\xe8\xa7g\xf8\x82Rg\xf3\xf2\x96w\x88\x99\xff<\x81\xc6\x867J\xdae\x22\x84\x90\xbcW\x03\xf0\xc82\x97\xd3\xb7R4f,U\xa7\xbe&f\xde\xf3t\xdd\xb9\x8d\x90\x84\x9e\x00\x18\x92\x92|\xf5\xed\xa7NS\xf2\x9f\xcb@H\x94\xaeY\x8b\x22\xdb1\xf4\xe9\x03B\xf29\x0fP\xbaf\xad\xaf\x8eb\xb3S4f,%\xaf-\xa3\xc6b\xf1\x8b[Shq'\xe8\x91e\xcav\xbf\xc7\x95\xcc=H\x12D?=\x83\xf0Q\x0f_#g\xe8\x9b\xe4\xab_q \x97 \x93\xb7Y\xdc\x96\xbd\x07\xb5^\xcf\x0f\x0b^D\xdf\xc7\xdb\x1cj,\xa5\x98W\xadE>\xfe) \x11>\xeaa\x8c3\xa7Sq \x97\xcb\xe9[\xb1~\xfc\x19\xe1\xa3\x1e\x22b\xf4\xc3\xa8\xf5\xfaF\xb9\xdd\x08Z\x14\x80\xf2\xec\x5c,\xab\xd7!!\xae!\xd4\xa0\xb1h\xafs\x1d\x1f\xb8\x1fM\x5c\x17\xe4\x93_\xfb\xe4\xab-\xa5\x04\x19\xa3\xb9\xb8\xe4/T\x1c\xc8\xad\xad!a\xe8{/\xc6\x99\xde\xb5\xbe\xd0!\x83\x09\x1d2\x98K\x9b\xb7r%s\x0fe\x99{\x89\x18\xf5\x10Q\x93\xc6\xb5\xef\xcbPyv.\x977o\xa5\xdar\x89\x88\xd1\x0f\x135il\x93\x8e\x03\x5c\xda\xb4\x95js)\x005f\x0b5f\x0b\x81\xd1Q\x98W\xad\xc5Yp\xcewi\xe2\xba\xa0\xd2\xe9\xf1\xc82A\xa6hnZ0\xf7\x1a]Q\x93\xc6\x131\xfaa\xcav\xef\xa1l\xf7\x1e*\xb2s\x9a\xdd\x07\xf8\xb7/\xa0x\xd7\xb1/\xbe\xf2:a\xc3\xee\xe7\xd6I\xe3\x082\xdd\xd8\xee\xad\xfd\xd4i_\xfd\xf2\xfd\xde\x0eO\xad\xd7\xe3,(B\xd7\xab'\xf6\x93\xf9\xc4\xaey\x03\xb5AG\xd1\x1f\xa7\x01`\x9c9\xad\xd1\xc0\xaa\xf5z\xa2&\x8d#t\xc8 
~\x98\x9f\xea\xd3\xed/\xfc\x1c\x06\x95\xab.\xffB.\x84@\x08\x85\xc8I\xe3\xf8\xf7\xcf>$r\xd28\x82\xe3:\x13\xbb\xe6\x0dB\x87\x0eF\x08\x05\xb5AG\xe5\xc7\x9f\x22\x84B\xa01\x8a\x0e\xc9M\x1f\xdb\xa96[\xb8\xb4i+\x8e\x82B\x1f/\x7f\xe1W\x00nym1A\xc6(\x10\x0a\xe5Y\xd9\x14\x8d\x9fL\xe9\xc6-xl\xd7\x7f]U\xebC@(T\xec?P+/|;#5f\x0b\x08\x05M\x5cW\x9c\x85\x85 \x14t\xbd\x1a?\xb0\xe5\xb1\xc9\x94n\xdcB\xd1\xf8\xc9\x94ge\x83P\x082Fq\xcbk\x8b\xfdq\x07\xf0\xb3\x09t\xe8\xd7\x07]\xef\x04~\xdc\x99I\xd9\xaeL<V+\x976l\xa6b_6\xa693\xe9\xd0\xafo\xa3u;\x8dy\x04\xeb\xd1cT\x97\x94\xf0\xfd\xf3\x7f\x02\xa0\xa6\xc4L\xe9\x86t_\x00\x8a\xa7?\x83\xfd\xa4wQ%\xd0\x18\xdd\xa0\x1e\xeb\xc7\x9f`^\xbe\x9a\xea\xda\xa1Pm\xd0\x131f\x14\x9d\x1e\x1d\x85\xda\xe0\xff\x19\x02\xff\xe6\x01\xb5\x06\xa3\xa7L\xa4k\xc6fB\x87>\x80P\x14\x5c%%\x5c\x98\xfb\x02\xc5\xd3f\xe2((l\xb0\x9e\xaew\x82O\xdeq\xf6,\x8e\xb3g\x11B`\xcf;\x89\xe3l\x01BQ\xd0\xf5\xee\x89P\x94\xda\xdf\xf5\x8f(;\x0a\x0a)\x9e6\x93\x0bs_\xc0UR\x82P\x14B\x87>@\xd7\x8c\xcdDO\x99\xd8,\xe7\xa1\x05\xbb\xc3A\xa7. 
l\xf8\x10.\xa5mB\xce;\x85\xfc\xd5I\x0a\x1f\x9f@\xd8\x83C\x89\x99\xf3\xcc5\xa4\x82L\xd1 \x04\xda8\xef[\xa0.\xb1\x17\xd1)O\x22\xe7\x9d\xa2x\xea\xd3\xe8z\xf7\xf2\x0d5\xdan\xde\xc5S\x8fM\xa6d\xf9J\xca\xf7e\xfb\xf4\xe8\x13{\x11\x95\xf2$\xfa\xc4\x1b_4m\x0c->\x1f\xa0O\xec\x85~\xfd\x1a\xca\xf7\xed\xa74m\x13\xd5%f\xca?\xc8\xc2z\xe4(\x9d\x1e\x1fC\xf4\xd4\xc9>Y\xc7\xd9\x02\x10\x0a\x81%\xe6\xab\xe6\xaf\xde\xfe\xc0y\xf6\xac\xaf_P\x1b\xf4\x94\xae\xdf\xc8\x8f\xdbw\xf9\xde\x04\x83bLD\xa7<I\xd8\x83\xc3ZJ\xdb\x87V;!\x12\xf6\xe00:\xf4O\xe6\xf2\xf6\x9d\x94\xae\xdb\x88\xdbj\xc5\xb2n\x03W>\xd8G\xcc\xdc9t\xbc\xaf\x1f\x1e\xab\x15\xa1(\xc8_~\x89\xc7&\xe3\xba\xf8?\xc8_\xe5\xe1\xb1\xd9\x10\x8a\xc2\xc5e+\x00\x08\x8a1\xf2\xed\xb0\x91T\x97\xfc4\xe5\x8d\x9e6\x99\xc8\xc7\x1fEmh\xfeY\x80\x86\xe0w\x1f\xd0\x14\xd4\x06\x03\xc6\xa9S\xb8}\xff^:\xf6O\x06\xa1P}\xb1\x84\x0b\xb3\xe7rn\xf24<V\xab\xb7\xc76\x99\xf0X\xad\xe8\x13{\xa3K\xec\xe5Mw\xa1\xd4\xabS}\xb1\xc4Wv\xfb\xfe\xbd\x18\xa7Niu\xe7\xa1\x95\x03P\x87\xa0\x18\x13\xb7\xaeXF\x97\x8d\xeb|\xce\xc9_~\x89\xe3\xbb\xef@(DO\x9b\x02B!l\xc4p\x8c\xd3R\x08\x1b1\x1c\x84B\xe5\x91#\xde& \x14\xb4\xdd\xe2\xe8\xb2q\x1d\xb7\xaeXFPL\xdb\x1d\xe0n\xf5SbWC\x7fG\x22\xddvo\xe7\xca\xfb\x1fpq\xe9_\xf0\xd8l\x00\x9c\x9f5\x1b\xa1( 
\x04\x973\xb6aY\xbb\xde\xfb?\xde,\xbai\xde\x5c\xc2G\x8ehKj>\xb4i\x00\xea\x10>r\x04\x1d\x7fw\x9f\xd7\xd9\xb7\xd7\xe2\xb1V\x02P4\xf1\xc9zr\xc6\x19\xd3\x89\x1c\xf7D\x9b\xa4zch\x93&\xd0\x10\xd4\x06\x03\xc6\x19\xd3\x88\xcf9\x80\xfe\x8eD_\xaa#\x14\xf4w$\x12\x9fs\x00\xe3\x8ci\xed\xea<\xb4S\x06\x5c\x8d\xa0\x9bb\xe8\xbae3\xf2\x17_b~\xebmLO\xcd@\x7f\xe7o\xdb\x9b\x86\x0f\x01\x00f\xb3\xb9\xdd\xbf\x14\xd1\xdf\xf9[\xe2\xeeLoW\x9bW\xa3\xee\xa3*\x15\x80\xc3\xe1\xf8\xc5\x88\xfc\xd2Pi\xb5\xda\x0a\xb3\xd9|}\xc9_\x19j}\xbe\xa0R\xab\xd5G\xf3\xf2\xf2\xae'\xff\xabC\xad\xcfGU\xd1\xd1\xd1+\xcdf\xb3\xafM\xfc\x7f@qq1f\xb3\x19\x95J\xb5R5e\xca\x94\xa3Z\xad\xf6hFF\x06\xb5\xdf\xd3\xfd\xaa\xe1t:\xc9\xc8\xc8@\x08\xb1w\xc9\x92%\xf9*\x80\xe4\xe4\xe4\x89\x1a\x8d\xa6\x22--\xedW\x1d\x04\xa7\xd3IZZ\x1a\x0e\x87#_\xa3\xd1L\x84\xab\xf6\x9b\xbe\xf9\xe6\x9b\x84\xac\xac\xac\x8f\x9cNg\xe8\xf0\xe1\xc3IL\xf4\xef\xcc\xed\xffu\xe4\xe5\xe5\x91\x95\x95U\xe7\xfc}u\x9f\xce\xd6[L\xff\xe8\xa3\x8fn-((H?\x7f\xfe|\xff\xb0\xb00\x12\x13\x13\x89\x8d\x8d\x05\xfe\xb9>\x9ev:\x9d\x94\x94\x94\x00p\xfe\xfcy\xf2\xf2\xf2(//G\x08\xf1\xa6F\xa3y\xa9\xceyhd\xc7\xf1\xd0\xa1C\xfd\xcf\x9c9\xf3LEEE\x7f\x87\xc3\x11\xdaN\xbc\xdb\x0a\x17\x84\x10{\xf1~9~\xe1\xe77\xff\x17\xd7q\x00\x14\xc6\xb0\x7f\x82\x00\x00\x00\x00IEND\xaeB`\x82\x00\x008k\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x007\xf8IDATx\xda\xec\xddOllY~\x17\xf0[\x93\x8e\xf2\x8f\xc4o\xa4\xe1\x9fF\xc4~,\x185\x04\xd9#\x88h\x10#WGH\x84M\x9eGb\x93\x95\xab\x17,\xb2\x18\x9e{\xc5\xec\xde\xb5\xc4bv\xe3\xc7\x08\x09\x89\xc5+\xaf\x88X\xa4\xedeHP\xdb\x1a\x04\x1d\xfe\xa8\xed0R\x18\x02y6\x22L$4\x9a\xe7D\x84\x00I\xcc=\xed\xe3i\xf7\xeb\xaa{\xcf\xad\xbaUu\xef\xad\xcfG\xba\xaa\xf7\x5c\xd7\xf5\xe7\xdcr\xd5\xb7\xce\xb9\xe7w\x06\xb7\xb7\xb7\x19Pn\xe3+o\xed\x15\x17[7\xdf\xf8\xe0Hk\x00\xd05\x9f\xd1\x04\x90\xe4 n\x00 
\xf0A\xdfl|\xe5\xad\xad\xe2b\xb7\xd86cO\x1f\x00\x08|\xd03\xf9\x83\x7f\xeb\xe5\x03\xa0s\x06\xce\xe1\x83\xe96\xbe\xf2\xd6\xa3\xe2\xe2*\xfc\xf3\xc1\x8f\x1f\xdf|\xe3\x83+\xad\x03@W\xe8\xe1\x83r{\xaf\x85\xbd \xd7,\x00\x08|\xd0\x1f\x93\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x08u\xc3\xe2bs\xd2U\xd9]\xcf\x1f\x00\x08|\xd0qe\x134r\xcd\x03@W\x98\xb4\x01\x13\xc4R,/+v{\xfb\xe6\x1b\x1f\x9ci-\x00\xdaN\x0f\x1fL6J\xd8G\x89\x16\x00\x04>\xe8\xb0\x940\xf7$\xf6\x04\x02\x80\xc0\x07]R\x84\xb8Q\xf6\xe9R,\xd3\x8c\xb4\x18\x00\x02\x1ft\xcf\xc1\x82\xf6\x05\x00\x81\x0fV-\x96b\xd9\xae\xf3+\xb1G\x10\x00\x04>\xe8\x88Y\xc2\x9b^>\x00ZMY\x16\x88\xe2\xea\x19\xdf\x9b\xf1\xd7\x95h\x01\xa0\xb5\xf4\xf0\xc1\xc7\xe6\xe9\xa9\x1bi>\x00\x04>h\xbfyB\xdb\xbe\xf5u\x01\x10\xf8\xa0\xc5\xe2\xc4\x8b\xcd9o\xc6\xb9|\x00\x08|\xd0b\xa3\x96\xdc\x06\x00\x08|\xd0\xb4\xb8Z\xc6n\x037\xb5\xa9D\x0b\x00\x02\x1f\xb4S\xde\xe0m\x09|\x00\xb4\x8e\xb2,\xac\xb58\xd1\xe2*K_J-\xc5\xe3\x9bo|p\xa5u\x01h\x0b=|\xac\xbbQ\xc3a/\xc85+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x84\xb2\xbdl\xfeR,\x13o:s.\x1f\x00\x02\x1f\xb4\xc2\x22C\x99\x9a|\x00\x08|\xb0J\xb1\x14\xcb\x93\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96i\x94h\x01`\xe5\xf4\xf0\xb1\x8e\xf6\x96\x14\xf6\x02\xe7\xf2\x01 \xf0\xc1\x0a,3\x84\x8d\x94h\x01@\xe0\x83%*\xc2\xd7\xb0\xb8\xd8^\xe6]fw=\x8a\x00 \xf0\xc1\x92\x8cVp\x9f\x86u\x01X)\x936X\x1b\xb1\x14\xcb\xcb\x15\xdd\xfd\xdb7\xdf\xf8\xe0\xccQ\x00`\x15\xf4\xf0\xb1NFkz\xdf\x00\x08| \xf0-\xc1~\xeca\x04\x00\x81\x0f\x16\xa1\x08[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6Nq\xb1\xdb\x82\x87\xb2\x19{\x1a\x01@\xe0\x83\x86\xb5\xa9,\x8a\xc0\x07\xc0\xd2)\xcbB\xaf\xc5U.\xbe\x97\xb8\xfb\xcd\xfd\xaf\xd5\xbc\x9b\xf0{\xaf\xb2\xf4s\x04\xbfx\xf3\x8d\x0f.\x1c\x1d\x00\x96E\x0f\x1f}7J\xd8\xe7\xba\xd8\xde)\xb6\xadb\x9b%\x88]\x14\x01n+\xde\xc6y\xc2\xfe\x0a1\x03 
\xf0A\x83\xca\xc2\xd5ivW\x10y\xab\xd8\xc6\xc5\xf6j\x9e;\x8a\xb71,\xfe\xf9\xc5b;.\xd9u\xdf\xfa\xba\x00\x08|\xd0\x80\x22T\x855l_\x1ff\x0d\xc3\xaf\xcf\x8b\xedq\x11\xce\xf6\x16\xb1\xfaE\x18\xae-\xb6Q\xf1\xcf\xcf\x16\xdbav\xd7\x83\xf8\xba\x91#\x04\xc0\xb2\xbc\xa1\x09\xe8\xb1\x87\xbd{\x97\xc5v\x14z\xe1\x96u\xe7\xb1\xc70\x0f[\x9c\x9d\x1b\xb6\xdd\x07\x8f\xed\xc8!\x02`\x19\xf4\xf0\xd1KqU\x8b\x10\xae\xc2\xd0j\x18\xb6\xddYf\xd8\x9b\x10\xfe\xee\x87{\x1f\xc7\xc7\xf4(\xf6@\x02\xc0\xc2\xe9\xe1\xa3\xcf\xc2\xb0\xedU\x9b\x1eP|<\xa3x\x0e\x9f\xf3\xf8\x00\x10\xf8`\xce`\xd5\xe6\xc7\x17\x86{_9R\x00,\x83!]\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00@\xe0\x03\x80%\x19\x0c\x06\x8f\x8am\xa8%@\xe0\x03\xa0\x9fao\xa7\xb8\xb8\xd2\x12 \xf0\x01\xd0\xcf\xb0wP\x5c|Xl\x1bZ\x03\x16\xcbZ\xba\x00,;\xe8=*.\xc6\xc5\xf6\xe4\xfeg\xb7\xb7\xb7gZ\x06\x04>\x00\xfa\x11\xf6\xc2\x10\xeeI\xb1mj\x0dX\x1eC\xba\x00,+\xec\xdd\x0f\xe1\xbe\x1e\xf6\xce\xb5\x0e,\x96\x1e>\x00\x16\x1d\xf4>5\x84\x0b\x08|\x00\xf4'\xec\xa5\x0c\xe1\x9ei)X,C\xba\x00,*\xecM\x1b\xc2\x05\x96L\x0f\x1f\x00M\x07\xbd\xbaC\xb8gZ\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b@Sao\x94\xdd\xf5\xd6\xd5\x0a{j\xf0\xc1\xe2\xe9\xe1\x03`\xde\xa0\x17\x86p\x8f\x8am_k\x80\xc0\x07@\xff\xc2^\x18\xc2\x1d\x17\xdb\xf6\x8c7\xa1\x06\x1f,\x81!]\x00f\x0d{\xa3\xecn\x08w[k@\xbb\xe9\xe1\x03\xa0n\xd0kr\x08\xf7L\x8b\x82\xc0\x07@\xbb\xc2\xde\xbcC\xb8\xc0\x0a\x18\xd2\x05 5\xec\x8d\xb2\xe6\x87p\xcf\xb4,,\x9e\x1e>\x00\xaa\x82\x9eY\xb8 \xf0\x01\xd0\xe3\xb0\xb7\x95\xdd\x15R^\xc8\x10\xae\x1a|\xb0\x1c\x86t\x01\x98\x16\xf6\xf6\x8a\x8b\x8b\xcc\xf9z 
\xf0\x01\xd0\xcb\xb0\x17\x86p\xdf+\xb6\x8d\x05\xde\x8d\x1a|\xb0$\x86t\x01x\x18\xf4\xb6\xb2\x05\x0e\xe1\x02\xab\xa1\x87\x0f\x80\xfb\xb0\xb7\xec!\xdc3\xad\x0e\x02\x1f\x00\xcb\x0b{\xcb\x18\xc2\x05V\xc4\x90.\xc0z\x07\xbd\xadluC\xb8g\x8e\x00,\x87\x1e>\x80\xf5\x0d{f\xe1\x82\xc0\x07@\x8f\xc3\xde\xca\x87p\xd5\xe0\x83\xe51\xa4\x0b\xb0^A/\xac\x9a\x11\x82\x96^=X#z\xf8\x00\xd6'\xec\x0d\x8b\x8b\xab\x96\x84=5\xf8@\xe0\x03\xa0\xe1\xb0\x97\x17\x17\xefgf\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\xdd\x96=\xb43G\x07\x04>\x00\x9a\xb1\x15\xc3U\x98\x8d\xbb\x13\x7f\xb6\xabY@\xe0\x03\xa0'noo/b\xd8\xfb\x94xN_\xf0\xfa\xe52\x02\xe1\x99\xa3\x03\x02\x1f\x00\x8b\x0f\x83ge\xe1\xeb\xb5@8\xcc\xf4\x0c\x82\xc0\x07@\x7f\x03a\x11\xfe\xae\x16t\xdb\xc0\x12\x98\xa5\x0b@\xa98\xc3wSK\x80\xc0\x07@?\xc3^\x98\xe5{\xd0\xf0\xcd\xaa\xc1\x07\x02\x1f\x00-\x12\x96`+\xab\xddwXl7\x9a\x09\x04>\x00:(N\xda\xd8/\xd9\xe5\xf2\xf6\xf66/.G5o\xfaL\xeb\x82\xc0\x07@;\xe4\x15\xd7\x7f4\xd4[\x84\xbeP\xd8\xf9\xb9\xe6\x02\x81\x0f\x80\x0e\x19\x0c\x06\xa3\xac\xbc\x0c\xcb\xe9\xc3\x99\xb6\xc5\xbfC\xf8\xbbL\xbc\xf93-\x0c\xcb\xa5,\x0b\xc0]\xc0\xd9\xca\xeeV\xa5\x18\x16[\x98\xa8P\xba*E\x11p\x06=n\x8b\xf0\xfc\xf3\x8a\xdd&M\xe4\xd8\xcb\xee\x8a<[\xaf\x17\x04>\x80V\x84\x9a\x9d\xec\xe3\x82\xc2\xc36\x86\x94x\x0e\xdd\xfb\xf1\xbf\xe71L]\x15\xdbY\x5cAcQB\x98++\xc3rX\xdc\xff\xd5\x84\x10|\x15{\x06\xdf+\xbbq5\xf8@\xe0\x03Xt\xc8\x0b\x81d/\xebF]\xb9\xad\x07\xff\xde\xcd\x1e\xf46\x16\xcf\xe5>\x04\x9e4\x19\x00cOgY\x19\x96\xeb\xecn\xe6\xee\xb40wR\xdcF8\x9f\xef\xa9W\x1c\x08|\x00\xcb\x0ay\x8fb\xc0\x0b!f\xbbc\x0f\x7f\xab\xe2\xfa\xef\x87\xc0\xe2y\x86 v\x16\x02`\x9cD1\xab\xaa2,yq\xfb\xaf\xcan \x9c\xcf\x17\xc3\xf5\xa4\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11W\xc5\xf6\xa2\x83a/\x18\xd6\xd87\xf4X\x86\x12*\xef\x15\xcf\xfbU\xb1\x8d\x8bm\xaff\x9b\x85\xfb{R\xb2\xcby\x11\xe6\xc6\x897\x17\xee{R}\xbeW^\x9d 
\xf0\x014\x19\xf4\x9ee\xdd\x9e@\xb05\xe3\xefm<\x08\x7f\xe1\xbc\xba<\x0e\xd5V9\xaa\xb8>O}\x00\xb1\x17pR\xe0\xbc\xf0*\x85\xe53\xa4\x0b\xf4\xd1\xc1\x1cA\xef&\xfbxrD\xd8^\xad0\xa4l6t\x1b!\xf8>+B\xdfi\x08u\x93&M\xc4\xc9\x16e\xbd\xa0\xc7u'[\x84\xfd\x8b\xdb=\x8c\xf7\x7fO\x0f\x1f\x08|\x00\xf3\x09=KE\xc8\x08\xe7\xb0\xed'\xfeJ8\xa7\xec,n\x17U\xe7\xa7-\xd9\xdb\xd9\xc7%b\x86\xf1r\x9e\x1e\xcb0\x5c\xfb$\x9e\xef\x97\xdf\x0f\xcf\xc6\xf3\x1c\x8f*B\xf0\xc1\x8c\xc7#\x8fC\xc5\xf7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x11G%\x81/\x84\x9d\xfb\x99\xad'm~\x12\x0fz\xd4\xbe\xff8\xe3\xd0\xec\xf0\xc16K/`\xf8\x9d\x17q\xe8;O\x08\x92Gs\x06\xe10\xb4{\x15\xefC\x0f\x1f\x08|\x00\x8d\x04\xa5\x8b\x22\xcc\x84U\x1f\x1e\x0eQ\x1e\x17\xdb\xb8\xeb5\xe0b\xfd\xbbq\xdc\x1e\xd6\x13\x1ce\xf5'\xa6\x84\xe0\xf7\x0f\x8b\xed\xf3%\xfb\x5c\xc7\xf5r\xe7y\xcc\xaf\xe2\x04\x92\xf7\x17\x5c?\x10\x98\xc2\xa4\x0d\xa0\xafB/_\x18\x8a\x0c\xe7\x90}\xb6\x08\x1a\xa3>\x16\xfc\x0d\x01\xaa\xd8B\x0f\x5c\x08~\x8f\x8b\xed\xdd,}\x89\xb3\xa0\xaa\xc7\xed\xa0\xa1\xc7y\x16\x1f\x1b \xf0\x014\x16\x84Bo\xde\xa3\xd0;\xd5\xb2\xf3\xf2\x16\xf9\x9c\xaf^\x0b\x7f\xa1\x00\xf2u\xc9\xaf\x84`\xf8\x97J\xae?or\xd8;<6\xafL\x10\xf8\x00h6\xfc\x1d\x14\xdbV\xf1\xdf/\x17\xdb\xe9\x84\xdd>Wq3#-\x09\x02\x1f\x00\xdd\x08\x7fa\xf5\x8dp\x0e\xdd}\xaf_\x18\xea\x0e\xb3\x93\xcb\xce\xdd{>i\xbd\x5c@\xe0\x03\xa0\xdd\xc1\xef\xa3^\xbf\xec\xae\xa0\xf3N\xc9\xae!\x10\xe6Z\x0c\x04>\x00\xbak\xee\xf5r\x01\x81\x0f\x80\x96\x8ae\x5c\xca\x8aR_\x9a\x5c\x01\x02\x1f\x00\xddV\x15\xe6\x0e4\x11\x08|\x00tT\x5c/w\xb7d\x97\xd3>\xd6*\x04\x04>\x80u\x09{a\xbd\xdc\xbcb7\xbd{ \xf0\x01\xd0a!\xcc\x95\xad\xbb{\xa8\x0c\x0b\x08|\x00t\xd4`0\xd8\xca\xca{\xef\xc2j\x1c&j\x80\xc0\x07\xb0\x90 rTl\x17q\xb8\x91\xc5Q\x86\x05\x04>\x80\x95\x84\xbdaq\xf1\xb4\xd8\xb6\x8b\xed*\x96\x0ba1\xed\xfc\xa4d\x97\xb0^\xeex\x01\xf7;\x16\xe6A\xe0\x03x8\x84\x18z\x9f>\x8c\xb3HY\x5c;O\x92/\x22\xecew\xb5\xfeB\x98?\x13\xe6A\xe0\x03\xd6P\x11\x00\x0eb\x18x\xdd\x8b\x18\x16h\xa6\x9dGS\xda\xf9\xdeq\xd3eX\x1e\x84\xbd{B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 
C\xad\xd4X;\x97\xf5\xee5\xbe^\xee\x84\xb0woC\xe8\x03\x81\x0fX/\x07Y\xf9\x04\x82\x91&ZJ;\x1f-\xa0\x0cKY\xa0\x13\xfa@\xe0\x03\xd6A\xecu*+\x0frl\xa5\x87F\xday\xab\xb8xV\xb2\xcbu\xd1\xce\xf9\x02\xeezXl\x97B\x1f\x08|\xc0z+\xebu\xba\xc9\xac\xf4\xd0\x94q\xc2qh\x5c,\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xc9uGj\xc1\xcd/\x96a)[/7\x94a9Y\xd4\xfd\xd7\x0c}J\xb6\x80\xc0\x07\xf4,\x88\x84\xb07mi\xaf\xd0\xbbg\xa5\x87f\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6J\xae\xd3\xbb\xd7L\xa8\xaeZ/\xf7y\xd1\xce\x17\xcbx,\xf1x\x8eb\x98\x9f&\x94l9q\xe4@\xe0\x03\xfa\x11D\xb6\xb2\xe9\xab=\xe8\xddk\xa6\x8d\xab\xca\xdd4^\x86%!\xf4\x85p9\xac\x08}\xbbj/\x82\xc0\x07\xf4CY\xef\xde\x89\xde\xbdF\xb4r\xbd\xdc\x18\xfa\xaa\x86\x91\xf7c\xef$ \xf0\x01=\x0d|z\xf7\xe6\x14g\xbc\xee\x97\xec\x12\xca\xb0\xac\xac\x9d\xe3Z\xbd\x87\x15\xbb}\xbdx\x1e{\x8e&\x08|@7\xc3H\x18j\x9c6k\xf4rY\xe7\x94\xf5\x5cU\x98\x1b\xad\xfa\x01\xc6\xba\x7f\xa7\x15\xbb\x8d\xe3\xf0? \xf0\x01\x1d3,\xfb\x80\xd7<s\x07\xea\xbd\xac\xbc\x0c\xcbi\x8b\x8aY\x87\xe0Y5s\xf7\xc4\xcc]\x10\xf8\x80\xee)=\x7fO\xf3\xcc\x15\xf6\xaa\xd6\xcb\x0dZsn\x5c\x8d\x99\xbb\x86\xf9A\xe0\x03:f8\xe5\xe7\x97\x0bX\xcbu\xddT\x95a9l[\x1b\xc7!\xfc\xbcb\xb7\xfdX\xb7\x11\x10\xf8\x80\xb6\x8b=P\xd3\x02\xc9\x99\x16\x9a\xabm\xb7\xb2\xf2\xde\xbb\xd6\x96\xbb\x89\x13H\xaa\xce\xe7;\xb2\xfc\x1a\x08|@7\x94}`\x0b|\xf3\xc9\xb3\xf22,\x07-/w3\xca\xca\x87v\xc3s\x1b;\xcc \xf0\x01\xed7\x14\xf8\x9a\x17\xd7\xcb-+\xc3r\x1eK\xa1\xb4\xd6\x83\xf3\xf9\xca8\xc7\x13\x04>\xa0\x03\xa6\xf5\xf0]*\xb6<\x97\xaa\xa1\xda\xbc\x0bO\xa2x\x0d\x84@7ih7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcbM+\xafq\xa5if\x13'3l\x97\xecr\xdc\xa22,)\xc2y\x88\x0f\x87v\xc3D\x93\x1d\xf5\x19A\xe0\x03\xbacZ}8\x1f\xe6\xb3\x85\xbd\xaa2,K_/w^q\x16qxNz\xf5@\xe0\x03:\x1aN2\x81\xafQ\xa17\xacl\xa2\xc6Q\x17K\xdd\x84\x90\xa7W\x0f\x16\xeb\x0dM\x00,\xd0\xdb\xc5\xb6\x15\xb7a\xbc\x0ceZ\x9c\xbfW?@\x87\xb6{V\xb2\xcb\xb5\xde1@\xe0\x03\x96*N\xca8\xd3\x12\x8d\x19W\x5c\x7f\xa0\x89\x80i\x0c\xe9\x02\xb4\x5c,\xc3R\xb6^\xeey\x9c\xf1\x0a 
\xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0U\x83\xc1\xa0j\xbd\xdcc\x93\x1d\x00\x81\x0f\xa0\xbba/\xcct\xceKv\x09eX\xf4\xee\x01\x02\x1f@\x87\x85\xb0WV\x86%\xb7b\x09 \xf0\x01t\xd4`0\x08\xcb\xd2=-\xd9%\x94a9\xd2R\x80\xc0\x07\xd0]Uan\xa4\x89\x00\x81\x0f\xa0\xa3\x06\x83\xc1^V]\x86\xe5LK\x01\x02\x1f@w\xe9\xdd\x03\x04>\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xb0\x8b\xeb\xe5\x02\x02\x1f\x00\xd9\xf7\xd7\xcb-+\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00\x1d\x96g\xe5eX\x0e\x94a\x01\x04>\x80\x8e\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x04>\x80\xee\xca+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eX\x8e\x95a\x01\x04>\x80\xee\x86\xbd\xb0^n\xd9D\x8c0Q#\xd7R\x80\xc0\x07\xd0]a\xa8\xb6l\xa2\xc6\x912,\x80\xc0\x07\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\xaa\xdap'\xf6\x92\x02%\xde\xd0\x04\x00+S\x15\xe6\x94a\xf9d\xb8\x1b\x16\x17;q\x0ba\xf9\xfe\xbc\xc7\xb7\x8b\xedL\x0b\x81\xc0\x07\xd0\xc6\xf0\xf2\xa4d\x97\xb0^\xee\x89\x96\xfa\x84\xf7\xa7\xfc|G\xe0\x83r\x86t\x01Vc\x5cq\xbd2,\x13B\xf0\x94\x9f\x1b\xd2\x05\x81\x0f\xa0]\x06\x83A\x08se\xeb\xe5\x862,\x17Z\xeaS\xae\xa6\xfc|\xa8i@\xe0\x03hS\xd8\x0b\xbdQy\xc9.\xa1\x0c\x8b\xde\xbdz\x81\x0f\xa8\xe0\x1c>`\x19!g\x98\xdd\x9dd\x1f\xb6\xfb\x7f\x87\x1e\xae/\xaf\xe1yj!\xec\x95\x95a\xc9M\xd4\xa8\x1d\xf8v5\x0d\x08|\xc0\xea\x9dL\x099\xc3x\xdd\xba\x04\xdf0\xb9\xe0i\xc9.\xd7E\xd8S\x86\xa5~\xe0\x03*\x18\xd2\x05\x96\x15\xf8&\xd9[\xb3v\xa8\x0as#/\x15@\xe0\x03\xfa\x16\xf86c\xafW\xef\x15\xcf3\x84\xdb\xb2\xa1\xc7s\xeb\xe5V\xba*i\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xcd\x94\xabGk\xd2\x0cz\xf7\xe6\x7f\x1d]i\x05\x10\xf8\x80v\x1b\xafk\xd0\x19\x0c\x06yV^\x86\xe5\xb90\x03\x08|@\x1fL\xeb\xe1\xda(\x02QoC_,\xc3RVf%\xf4|\xe6^\x1e\x80\xc0\x07t^\xec\xc1\x9a\xb6RB\x9f\x03O\x08\xbaeeX\xac\x97\x0b\x08|@\xef\xc2\xcf$\x9b}\xec\xe5\x8b\x13\x09\xf6Kv\xb9,\xc2\xde\xd8\xcb\xa2\xd6\x17\x87\xc1\x94\xedL\xeb\x80\xc0\x07\xb4\xe3\xc3:L\xde\xb8\x9eru\xde\xc3\xa7\x5c\xf5\x9c\xac\xa8\x01\x08|@/M\x0bA\xbd\xea\xe5\x8b\xcf\xa5\xac\x0c\xcb\xa9^)@\xe0\x03z)\x0eaN\xed\xe5\x8b\x93\x1c\xba\x1e\xf6\xac\x97\x0b\x08|\xc0\xda\x9
b\x16\x866{\x12\x84\x0e\xb2\xf22,G\xca\xb0\x00\x02\x1f\xd0k\x15\xbd|\xcf\x06\x83\xc1VW\x9f[|\xec\xcfJv\x09\xcf\xdbz\xb9\x80\xc0\x07\xac\x85\xb2\x9e\xbcq\x87\x9fWU\x98\xcb\x95a\x01\x04>`-\xc4\x19\xbb\xd3\xea\xf2\xed\x0e\x06\x83\xce\x0d\xed\xc62,OJv9W\x86\x05\x10\xf8\x80u3*\xb9.\xef\xe0\xd0nU\xef\x9e\x89\x1a\x80\xc0\x07\xac\x978q\xe1p\xca\xd5au\x8aqW\x9eK\xec\x91\xdc.\xd9\xe5\xb8x\xbe\x17\x8e: \xf0\x01\xeb\x18\xfa\xf2\xe2\xe2r\xca\xd5\x9d\x18\xdaU\x86\x05\x10\xf8\x00\xaa\xed\xc5P4\xc9\xd7\x8b@\xb5\xd3\xf2\xc7\x1f\xc2^\xd9z\xb9G&j\x00\x02\x1f\xb0\xd6\xe2\xd0\xee\xa8d\x97q[\x1f{<\xcf\xf0i\xc9.\xd7\xb1\x17\x13@\xe0\x03\xd6>\xf4\x85Y\xbb\xcf\xa7\x5c\xdd\xe6s\xdf\xaa\xc2\xe8\xc8\xd1\x05\x04>\x80\x8fC_8\xcf\xed\xf8\xc1\x8f\xc20\xef;\xc5\xcf[\x19\x9a\x06\x83A\x18\x8a.[/\xf7\xdcz\xb9@\x1b\xbc\xa1\x09\x80\x96\x85\xbeQ\x9c\x04\xb1Ul\xa3\x96\xcfl\xad*\xc32rD\x01\x81\x0f`r\xe8\xdbk\xfbc,Bi\x9e\x95\xaf\x97\xfb\xdcz\xb9@[\x18\xd2\x05\xa8\x1f\xf6B\x0fdY\x99\x950\x14\x9dk)@\xe0\x03\xe8\xae0\x94[V\x86\xe5@\x19\x16@\xe0\x03\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05\x04>\x80n\xcb+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eXN\x95a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\x95\x95a9T\x86\x05\x10\xf8\x00:*\xae\x97[\xd6{w\x9dU\x17a\x06\x10\xf8\x00Z\xac\xaa\x0cK\xae\x0c\x0b \xf0\x01tT,\xc3\xf2\xa4d\x97seX\x00\x81\x0f\xa0\xdb\xaa\x86jsM\x04\x08|\x00\x1d\x15\xcb\xb0l\x97\xecr\xac\x0c\x0b \xf0\x01\xb4/\xc4\x8d\x8b\xed\xa2\xd8v*\xf6\x0beX\xcaz\xf7\xc2z\xb9\xca\xb0\x00\x02\x1f@\xcb\xc2\xde(\xbb[\x16-\xf4\xda\x9d\x15\xff/\x0blyV>Q\xe3\xc8D\x0d@\xe0\x03hW\xd8\x0b=z/\x1e\xfc(\x84\xb9\xaf\x17??\x8beW\x1e\xee\x1b\xfe\xff\xb4\xe4\xe6\xae\x8b\xb0\x97kU\xa0+\xde\xd0\x04\xc0\x1a\x84\xbd0<{6\xe5\xea\xb0T\xda\xcbb\x9f\xc3\xec\xe3^\xbbq\xc5M\x1e\xcc\xf88\xf6\xe2m_\x15\xdb\xeb\xbd\x83\x17)?s\xce 
\xf0\x01L\x16B\xd2F\xc5>\xcfB\x90+B\xd9?\xc9\xca\xd7\xcb\x0deXNfy\x10\xe1\xf7b\x99\x97I\x8fg714N|L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb2:\x08\x08|\x00}\x11\x02\xdaVB\xe8\x0b\xd7\xff\x83\x8a}F\xf3<\x90\x22`\xddO\x18\x09\x8fi\xbb\xa1\xe77),>\x991@\x86UC^\x0f\x81I\xbd\x8f\xe1g\xcek\x04\x81\x0f`%\xc2\xf9vavnv7\xeb\xf6\xc9\x1c7\xf5\xbc\x89\x1e\xb1p\x1b\xb1\xa7\xef$K\xec\xd9[\xa2\xcd\xec\xd3k\x06'?\xc6\x09\x01\xf2|\xc2ng1\x1c\x9exu\x82\xc0\x07\xd0d\xe8\x0bAm/\x06\xad\xf1\x84PS\xe5\xff\x15\xdb\x7fn\xf0\xf1\x84\x9e\xb0a\x0c\xa2\xfb=n\xfaIa\xb1\xaa\xe4\x0d\xd00\xb3t\x81u\x0b~g\xc5\xb6U\xfc3L\xd2\xb8\xa9\xf1\xab?Xl\xff8\xd6\xf0{\xd4\xe0\xe3\x19\x15\x17\xef\xae\xd1!\x08=~CC\xbf \xf0\x01,#\xf8\xe5\xd9\xddy}\xc75\x7f\xf5\xbe\x86\xdfN\x83\x8f%\xf4v\xbdS3\x80vQX\x99D\xd8\x03\x81\x0f`\xa9\xa1\xefU\xeca\xfb\x0f3\x84\xbe\x0f\x8b\xd0\x977\xd5\xdbW<\x8eqq1\xecq\xe8{\x1e\xdb\x1a\x10\xf8\x00\x96+\xae\xbe\xf1Wf\xfc\xf5P\xca%L\xc0\xc8\x1b\x0a}a\xe6k\xe89\xbc\xecY3\xbfS<7\xcb\xd0\x81\xc0\x07\xb0\x92\xb0\x17z\xe7\xe6\x0dk\xa1\x94Kca&N.\x19f\x93g\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A\xadl\xb6\xeeivW\x97\xaeJ\xa3\xe5E\xe2Ps\x08}\xc7\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdc\xb2\x9e\xb9\x10\xf4FqF\xef;\x15\xc1o!%F:<\x83\xf72\x86\xbd\x0b\xaf4\x10\xf8\x00V)\x84\xb4\xb2\x957\xf2\xfb\xd9\xa4aH2\x06\xbf\xb7\xb3O\xf7\xba].2\xd8tp\x06\xaf\xb0\x07\x02\x1f\xc0\xea\xc5\xe2\xcbe+n\x9cO:\xef,\xd6\xf0\x1b\x15\xff|\x9c}\x5c\xc7o\xe1\x05\x84;4\x83\xf74Sc\x0f\x04>\x80\x96\xa8\x0aiyE\x00\xbb\x0au\xfc\x8a\xed\xd1\xb2&$t`\x06o\xa8\xb1\xb7'\xec\x81\xc0\x07\xb0r\xb1\x0c\xcbvEp9k\xe3co\xf1\x0c\xdew\xd5\xd8\x83v\xb3\x96.\xb0Na\xafj\x0d\xd70dz\xd0\xc2\xc7\x1d\x1e\xf3}\xcfY\xe8\xe9\xcb\x8b\xed\xab\xc5\xf6\xb7[\xf0\xf0\xdeQv\x05\x04>\x806\x09a\xael\xa2\xc6QK\x87$\xc3P\xeen\xcb\x1eS\x08\xc7{m\xed\x0d\x05\x04>`\x0d\xc52,\xcfJv\xb9\x8e\xeb\xeb\xb6\xd1N\xcb\x1e\xcf}\x8d=3q\xa1#\x9c\xc3\x07\xac\x8bq\xc5\xf5m^\xfak\xa3E\x8f%L\x1a\xd9\x11\xf6\xa0[\xf4\xf0\x01\xbd\x17\xcb\xb0\x94\x0d\x89\x9e\xb7uE\x88\xf8\xd8\xdb\
x14\xf6\x94]\x81\x0e\xd2\xc3\x07\xac\x83q\xc5\xf5m\xee\xdd{\xd4\x92\xc7q,\xecAw\xe9\xe1\x03zm0\x18T\xad\x97\xfb\xbc\xe5\xc3\x93!`\x9d\xc6\xe0\x17\xb6\xedU\x84=eW@\xe0\x03hk\xd8\x0b\x01)/\xd9\xe5\xa6\xe2\xfa\x95\x8b\xb3`\xcf*\x9e\xe3\xf7\x16\xf8\x10\x94]\x81\x1e0\xa4\x0b\xf4Y\xf2z\xb9\x1d\xb6\xc8\x19\xbc\xc2\x1e\xf4\x84\x1e>\xa0\x97\x06\x83A\x08B\xfb%\xbb\x842,G=x\xaa\x8b\x08|\xca\xae@\xcf\xe8\xe1\x03\xfa\xaa*\xcc\x8dz\xf2<\x9b\x0e|\xc2\x1e\x08|\x00\xed7\x18\x0c\xf6\xb2\xf22,\xa7=Z!\xa2\xc9\xc0\x17\xca\xael\x09{ \xf0\x01\xb4=\xecU\xad\x97\x1b\x1c\xf4\xe8)75kW\x8d=\x10\xf8\x00:\xa3\xaa\x0c\xcba\x11j\xaez\x12n\x87\x0d\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x85\x00\xb4\x95\x95\xf7\xde\x85\xf3\xd3\x8ez\xf4\x94\x9b\x18\xce}\xae\xc6\x1e\xf4\x9fY\xba@\x9f\xe4Yy\x19\x96\x83\x9e\xf5bm\xcd\xf9\xfb\xca\xae\xc0\x9a\xd0\xc3\x07\xf4B\x1c\xde,+\xc3r\xde\xc3p3k\x0f\xdf\x8d\xb0\x07\xebE\x0f\x1f\xd0\x17UC\xb5y\x0f\x9f\xf3\xee\x8caO\xd9\x15X3z\xf8\x80\xce\x1b\x0c\x06\xa3\xac|\xb6\xeaq\x8f\xca\xb0\xdc?\xe7Yz\xf7\xae\x85=XOz\xf8\x80\xae\x07\x9f\xaa2,\xad_/wFu\x03\x9f\xb2+\xb0\xc6\xf4\xf0\x01]\x17f\xe5\x96M\xd48\xeaK\x19\x96\xd7l\xd5\xd8\xf7T\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Y\xc9.a\xbd\xdc\xbc\xa7O\x7f\x98\xb8\xdf\xb1\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\x1f\xf4\xf8\xb9\xa7\x0c\xe9\x1e\x0a{@\xa0\x87\x0f\xe8\xa4X\x86\xa5l\x96j(\xc3r\xd2\xd3\xe7\xbe\x95\x95\x0fc\x07\xca\xae\x00\x02\x1f\xd0yUaf]{\xf7\xc2$\x95Q_\xc3. 
\xf0\x01kb0\x18T\xad\x97\xfb\xbc\xe7\xa5GvJ\xc2\x9e\xb2+\xc0\xa78\x87\x0f\xe8Z\xd8\x0beX\xf2\x92]\xfaZ\x86\xe5\xa1\xe1\x84\x9f]\x0a{\xc04z\xf8\x80\xae\x09a\xae\xec\xfc\xb5|\x0d\xca\x8flM\x09{\xca\xae\x00\x13\xe9\xe1\x03:#\xae.\xf1\xb4d\x97P\x86\xe5\xa8\xe7m\x10z8\x1f\x0eg\x1f\x0b{@\x15=|@\x97T\x85\xb9\xd1\x1a\xb4\xc1\xc3\xf3\xf7\xd4\xd8\x03\x92\xe8\xe1\x03:a0\x18\xece\xd5eX\xce\xd6\xa0)\x86\xf1\xf2]a\x0fH\xa5\x87\x0f\xe8\x0a\xbd{wB\x0f\x9f\x1a{\x80\xc0\x07\xf4\xcb`0\xc8\xb3\xf22,\x87=]/w\x92\x835z\xae@C\x0c\xe9\x02m\x0f{[Yy\x11\xe5P\x86\xe5h]\xdaC\xd8\x03\x04>\xa0\x8f\xf2\xac\xbc\x0c\xcb\x81\x19\xaa\x00\x02\x1f\xd0Qq\xbd\xdc\xfd\x92].\x9d\xcb\x06 \xf0\x01\xdd\x96W\x5c\x7f\xa0\x89\x00\x04>\xa0\xa3\x06\x83\xc1(+/\xc3r\xbc&eX\x00\x04>\xa0\x97a/\xac&Q6\x11c\x1d\xd6\xcb\x05\x10\xf8\x80^\x0bC\xb5e\x135\x8e\xccV\x05\x10\xf8\x80\x8e\x8aeX\x9e\x95\xecr\x9d\xadQ\x19\x16\x00\x81\x0f\xe8\xa3\xaa0\xa7\x0c\x0b\x80\xc0\x07tU,\xc3\xf2\xa4d\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xee\x1aW\x5c\xaf\x0c\x0b\x80\xc0\x07t\xd5`0\x08a\xael\xbd\xdcP\x86\xe5BK\x01\x08|@7\xc3^(\xc3\x92\x97\xec\x12\xca\xb0\xe8\xdd\x03\x10\xf8\x80\x0e\x0ba\xaf\xac\x0cKn\xa2\x06\x80\xc0\x07t\xd4`0\xd8).\x9e\x96\xecr]\x84=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\x0d\x06\x83\xbd\xac|\xbd\xdcs\xeb\xe5\x02\x08|@\xb7\xe9\xdd\x03\x10\xf8\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xd0z\xb9\x00\x02\x1f\xd0\xdd\xb0\xb7\x95\x95\x97Y\x09eXL\xd4\x00\x10\xf8\x80\x0e\xcb\xb3\xf22,\xd6\xcb\x05\x10\xf8\x80\xae\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x10\xf8\x80\xee\xca+\xae\xb7\xa2\x06\x80\xc0\x07t\xd5`0\x18e\xe5eXN\x95a\x01\x10\xf8\x80\xee\x86=\xeb\xe5\x02\x08|@\xcf\x850WV\x86\xe5H\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Y\xc9.\xd7\x992,\x00\x02\x1f\xd0iUa.W\x86\x05@\xe0\x03:*\x96ayR\xb2\xcb\xb92,\x00\x02\x1f\xd0mU\xbd{\xbd\x9e\xa8Q\x04\xde\xbdb\xbbM\xdc\x8eV\xf88\x0fj<\xce\x91\x975\x1d\xfd{\x1c\x15\xdb\xc5k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x985D\x14\x17\xdb%\xbb\x1c\xdf\xde\xde^\xf4\xb9\x0d\x8a\xe7wR\x5c\x9c'\xee\xfe4\x9e\xef\xb8\xec\x
e3T5\x83\xfa!=\xb2t\xf5\xfd(\xbcn_LxO\x0a#\x10\xef\xc7\xf5\xbd\x05>\x80\x86C\xc4:\x95a\x19\xd5\xd8w\x15a*\x1c\xa7\x8d\x05<\x17h\xcb\xfbQx\x8d\xefW\xec\xf6\xac\xcf=}\x02\x1f\xb0\xaa\x10q\xb4.\x135b\xb9\x99\xc3\xc4\xddw\x97\xf9\xa1S\xdc\xd7Nq\xf14q\xf7C\xa5s\xe8\xa8\xd4/\x97\xb9\xc0\x07\x90\x1e\x22\xb6*B\xc4u\x11\x1c\xf25k\x96p~\xdeu\x8d}\x97\xf9\xb8R\x5c\xae\xe11\xa3\x1f\xefG\xc3,\xbd\x07{W\xe0\x03H7\xae\xb8~\xb4n\x0d\x12{3S{\x19\xb6\x9711\x22L(\xa9\xf1\x01g\x15\x14\x10\xf8\x00\x92C\xc4\xf9\xba\xae\x97[s\x02\xc7Q<\x0frQ\xc7)\xdcvj\xef\xdesk\x1c\x83\xc0\x07\xf0\x89\xa0Rq\xfdh\xcd\xdb'\xf5\xf9\x87!\xa8E\xf6\xaaU-uw/L\xae\xc9\xbd\xac\xe9\xb0\xab\x1a\xfb\xde\x08|\x00\x15\xe2L\xb8\xb2\x10\xf1|\xddO\xfa\xaf9\x81\xe3\xd9\x22\xca\xb4\xc4\xdbL\x0d\x93#\xab\xa0\xd0\x83\xbf\xb9\xe4\x9eu\x81\x0f\xa0<D<\xaa\x08\x11z\x8a>\xf9\xa1\x92:\x81#_\xd0\xfd\xa7\x9c\xc4~\x1e\x87\xa1\xa1\xebFYu\xef\xdd\xa5\xc0\x070\x7f\x888\xd0St'\xb6Cj\x90\xdbo\xb2LK\xc2Rw\x0f\x03\xfa\xc8\xd1\xa2'\x7fsW\xc5Ex\xedO\xeb\xe9;\x0e\xd7\xf7\xf9=\xea\x0d/\x03\xa0\xa1\x10QV\xd4\xf4\xd2\xea\x0c\x9f\xfa\x00\x1a\xc7\x99\xb8)\xb3d\xf3\xf8a\xd5\x84\xd4\xe3p\xa4\xe6\x1e=\xfb\x9b\x0b\xab\xfa\x0cc\xed\xc9\xb0m\x15\xdbY\xb1]\xac\xc3\x97Q\x81\x0fhB^q\xbd\x92\x1e\xd3\xdb\xe5\xc3\x84\xfdB1\xe6\xd1\xbc\xa19.u\x972QC\xcd=\xfa\x1e\xfc.\xd6\xedy\x1b\xd2\x05\xe6\x92\xd0Ku\xaa\xa4G\xe9\x07\xcf\xf3\xd4P=O\x99\x96\x9a\xeb\xe5\x0a\xe8 \xf0\x01$\x87\x88uZ/wVy\x96V\x0abs\xce\xb6L\x9d\xa8\xa1\xe6\x1e\x08|\x00\x9fP5D\xe8<\xb0\x0a5W\xe08\x98\xa5\x97/\x9e\xb3\xb4\x9f\xb0\xab\x99\xd4 
\xf0\x01|\x22Dl\x15\x17\xcfJv\x09eG\x8e\xb4TR\xe8\x1bgiu\xc26fl\xd3\xd4\xdfQs\x0f\x04>\x80Z!\x22\x17\x1ejI\xed\xe5\xdb\x8f=v\xa9\xc1|\x94\xa5\xcd\x04Vs\x0f\x04>\x80O\x84\x88aV^\xcb\xed\x5c\x19\x96zjN\xe08J<N\xa9\x135\xd4\xdc\x83\xd5\xbd\x9fn\x85\xf7\xd4&\xebmN\xa2,\x0b0\x8b\xca\xde=M4\x93<\x06\xaf\xaa\xc9\x15\xa1L\xcb^B\x8f\x5cj\x19\x16\xe7Z\xae\xee\xc3>\x84\xf2\xf0A\x7f_\x1b.\xfc\x7f\xb7\x22\x9c_<\xd8\xce\x1c\xbbN\x1d\xeb\xbdx\xbc\xb7&\x1d\xe7b\x9fi\xc7\xf9d\xde\x11\x13\x81\x0f\xa8\xfb\xa6\x15\x02\xc9v\xc9.\xc7fy\xce&\xbc\xa1\xc7Zy/\x12C\xf7I\xc5\x87K\xca0\xb1\x9a{\xcb\xff\x1b\xda\x8a\xc1~\xaf\xe2oi\x92\x8d\x18\x14v\x1f\xdc^8\xffs\xdcD\xafz\xecez?q\xf7//\xea4\x80\xf8\xfa\xbd\xca\xd2f\x96W>\x8e\xd4\xe7U\xdc\xce`\x01\xcfe\x18\x8f\xf7\xfe\x1c\xc7\xf9Eq;\xf7K\xbf\xcd\x14\xfe\x0c\xe9\x02u\xdf\x84\x8f*z\x1f\x94a\x99/\xf4\x85\x0f\xed\x94\x09\x1c\x9b1\x1c\x96\x05\xc2\x94\x0fK\xc7k\x89_\x96\x8a-\xf4\xd6\xbc\xcc\xee&<m7t\xd3\xbb1\x10\x5c\xcd;,\x18\xbf\xac\xa5\x9eZ0\x9e\xa76d\xd5mg\xe9e\x84NZz\xbc\xc3P\xedY\x0c\x9a\xfb\x0d\xdc\xe4v\xfc2\x18\x8es\xed\xba\x9c\x02\x1fPG^\xf1&|d\xa2Fc\xed\x9c\xb4\xdf\xa47\xfd\xd8\x83\x94\xf2\x01\xa3\xe6\xde\xf2\x82\xdeU\xfc\xb0\xde^\xe0]\x85\xe1\xfb\xf7\x8b\xfb\x9akv|\xf1\x9a\x08_\x02.\x13v\x0d\xef\x05'\x0bh\xaf\xd0\xf3\x99\xb2\xde\xf3e\xd6\xd2\xd3G\xe2H\xc8E\x966a\xaa\xae\x8d\xf8\x85a\xa7\xce/\x09|@\xf2\xb7\xd5\xe2\xe2i\xc9.\xd7\x86\x06\x9b\x11C\xd8q\xe2\x1b\xff\xc1\x8c\x81Q\xcd\xbd\xe5\xfc\xdd\x1c\xc5\xa0\xb7\xb9\xc4\xbb}\x1az\x96\xe6\xec}\x1b%\xee\xb7[\xd1\xd3\x5c\xb7\xbd\xc2c\x1e\xa7>\xc66~\xc1|p\xcc7\x16x77u\xbf\xac\x09|@\xaa\xaa7aC\x83\xcd:\xc8\xd2V\xe08\x88a\xfc\xfe\xc3f\x98\xa5\xf5\xee\xa9\xb9\xb7\x1c\xabZ\xb3u\xb7Fp\x9a\xf4\xa5#<\xeew\x13w\xcf\x1f\xbe\x06\x1bx\x9fI\x09J\xef\xc6\xc7\xd8\xb6\xb0\x97W|1nJ\xed\x9eU\x81\x0fHy\x13\x1bf\xe5C\x13j\xb85,\x86\xb1<a\xd7\x8d\xd7\xf6K\xf9\x9dS\xc7ki\xe6i\xe7\xcb9\xef\xfbI\x0c 
\xb3\xbe\x06COUjA\xf0\xf1\xbc\x0dUc(\xf7<>\xb6\xb6\xbdO\x86\xc7\xff\xac\xad\xaf+\x81\x0fH\xfd\xd6]f\xa4\x89\x16\x12\xfa\x8e\x12?\xf4\xf7\xefkye\xd5\xe7\x0c\x99X\xb3\xfc\xe0~\x5cq<N\xb3\xbb\xde\xb4\xb7\x8b\xed\xb3a\xa6h\xdcv\xee\xff]\xfc\xfcq\xb1}9\xde\xd6M\x8d\x87\xf0\xacN\xa1\xee)\x7f\xdb)\xf77\xd7\xd0n\x8d\xa1\xdcV\xd6\x8cL\x98\xd0\xf6\xa9\xd0z\x7f\xcc\x1f\x1c\xef\xfb\xe3\xfcv\xbcn\xda\xb1\xbe\x99\xe5\x0b\x9b\xb2,@\xd5\x1bY\xe8!(;\xff\xe8\xb9:`\x0b\x15>DS\xcad\x84\xe3\x94r\xceV\xeex-]\xf8p\xde\x7f-\xb4\x84\x9f\x1d\xa5\x0eK\xc6c\x16\xb6\x93\x07%wR{\x93B\x10\x19\xce\x18X\xaf\xe2\x04\x84\xf7R^[\xc5\xbe'3\xbe\xbeB\xd8K\x19\xca\x1d\xb5\xf4\xf5\x9bZ\xf32|\x81;\x98v\xfe\xdd\x83\xe3|\xf6\xe0=8\xf4\x1c\xee=x\x0d\x8dgy\x80z\xf8\x80\xaao\xad\x07\x15\xdf\xb6s-\xb585&p\x84\x0f\x83\xaa\xe1\xb0\xcb6\x0e\x85\xad\xc11<\x89\x7f+a;,\xb6\xad\xe2g\xa3Y\xcfA\x0b\xbd\x86q\x82\xd4\xdbYz\xef\xdb\xce\x9c\x8f\xff4a\xd7\x99\x86vk\x0c\xe5\x1e\xb7\xf8T\x84Qb\xd8\x1b\xd6\x9dl\x11\x9esx\xbddw\xbd\x7f\xa7\x02\x1f\xb0\x08U\xb5\xdc\xac\x97\xbb\xbc\xde\x83\x9b\x06ng\xa4)W\x1a\x08B\xd0k\xeco&\x06\x87Q\x8d\xd7\xd0\xbc\x8f\xbf\xf1\xa1\xdd\x1aC\xb9\xd7YKOE\x88\xa7RT\xf5\xee\xdd\xc4\xb0\xf7j\x8e\xe3}Ul{\xb3~Q\x10\xf8\x80iob\xa1G\xa0l\xb6\xa7\xde\xa2%\xa91\x81\xa3\xcc\xf36\xcej\x5c\xa3cx\xb2\x88/G\xb1\xc7+\xa5\x07x\xaf\x81\xd7`\xeam\xd4\x99\xb5;\xce\xd2\x86r\xf7Z\xfc\xe5r\x98\xf2<W\xfd\xf8\x05>`\x9a\xaa0\xe7\xc4\xff\xe5\x06\x86\xd4\x09\x1c\x93\x5cg\x86\xde\xfb,\xe5\xd8n,q\x15\x8e\xa4\xa1\xdd\xf8xR\x86r\x0f[\xfee%\xa5]W>\x14-\xf0\x01\x93\xde\x88GY\xf9l\xcfS+4\xac\xc4\xac!\xfb\xc0\xd0{\xaf\xbf\x0c\x5cei\xe7\xd8\x0d\x1b\xb8\xbb<\xf1\x8bG\xe9\xd0n\x8d\xa1\xdc\xf3\x9e\x14t_y`\x15\xf8\x80Io\xc4Uo\xb0z\xf7V\xf3\xc1\x1eB\xf6q\xcd_Sso=\xa4|\x01\xdbi\xe05\x18\xbe8\x8cR\xc3a\xc9j\x1f)\xb3Z[Y\x82eF;\xab~\x00\x02\x1fP\xf7\x8d\xf8PY\x8f\x95\xcak\xee\xefX\xad\x87\x94\x1e\xa4GM\xdcQ\x1c^=L\xd8u\xe2\xd0n<?8\xa5\xa4\xcc\xa8G\xef5[\x02\x1f\xd0\x1a\xf1D\xeb\xb2\xde\xbbp.\x98\x89\x1a\xab\x0f\xe4u<\x9d\xf7\xdc-:!e\xc8~\xb7\xa9
;\x8b\xc3\xac)\xabp<\x99\xf0\xfaKy\x0f9\xeeY\xcft\xbe\xea\x07 \xf0\x01\xaf\xbf\x11+\xc3\xd2\xde@\x1ezF\x9e\xcex\x5c\xe9\xb1\x15Mj\x18ei\xa5Z\xc6\x0f^\xc3\xa3\x84\xe0\xd9\xda\x12,S\xa4\xb4\xfdf\xf1\xdc\xc7\x02\x1f\xd0\x860\x11\xbe\x85\x97\xcd\x98\x0b'O\x8f\xb5\xd4\xca\x03\xf9,\xb6\xe7Y\xf6\x0a\xa6\x84\xcc\xab\xc4`\x16\xc2N^c\xf9\xb1\xbd\x8e}\xb1<K\xdc/,\x818.9\xafq\xa1,\xad\x06\xa4\x86\x89\x5c\x13\xad4\x90\x8f\xb2\xf9\x86\xe4\xc2\x07\xee\xb8-\x1f\xa4\xc5c9[\xc2\xdd\x5c\x14\xcf\xf7\xa0\xa3\xc7;\x84\x82\x9d\xb8\x85\x7f\x0f\xe3U[Y\xda\x12^\xcb\x0a}\xe3\xc4\x952\x0e\xe2c\xaf\xaa\xb9w\xd8\xb5z\x91a\xe8\xb9h\x83\xeb\xc4\xe3\x12j\x9b\x0eC\x00^\xf6\x17h\x81\x0f\xb8\x0f\x13\xdb%\xbb\x1c+\xc3\xb2\xf2\x0f\xffy\x87e7\xe2m\x8cZ\xf2\xb4v\x1d\xd9O\x1d\xe3\xbd\x18\xec\x86m\x0au\x09\xc2k\xea\xaa\x22\xccmd\xe5\x85\xdc\x83.\x97`\x09\x8f\xfbE\xe2\xbe\xe1\xd8\xbe\x88\xeb\x94\x87\xedd\x19_\xc4\x0c\xe9\x82\x0f\x9a\xaa0q\x93)\xc3\xd2\x86\x0f\x93\x8d\x06ng\xdf\x04\x8e\xf6}\xd9*\xb609\xe1{10\xecw,\xec\xd5]\x85\xa3\xec}f\xd4\xd5\xe3\x18{\xeb\xcek\xfe\xdaf<\xe6Wq\xa8w\xa1\xa5[\x04>\xa0*L\x1c\x99\xa8\xb1\xd2@\x902Q#|X\xbe\x9bx\x93c\xad\xba\xfa/Y\xf1\x9c\xb6W\xf1\x03\xffI\xd7\x9fS\x8dU8\xa69\xe8A\x09\x96\x10zgY\x0d\xe7\xbe\xf7\xf3\xc3\xe25q\x11\xbf\x044~\x9e\x9f\xc0\x07\xeb\xfd\xc1\xb3U\x11&\xae{R\xe5\xbe\xcbR\x86r\x8fj,\xbd\xb6\x19\x87\x92X\xcd\xdf\x5c\xe8-\x0f\xc1\xe6Y\xd6L\xafm\xdb\xbe<\xce\x12xN\xfb0!,~1\x1ef\xb3/\x81\x18lg\x1f\xf7\xfa\xe5M\x06?\x81\x0f\xd6[\xd5\x9b\xac\xa1\xdc\xd5\x86\x83QV}\xae\xdb\xcd\x83P\x98z\xbc\x0ej,nO3\xc7\xf2Q\x9c\xa8\xf2\xf5\x1e\x06\xbd\x87\x81gT\xf3\xd7\xae\xb3\xfe\xac\xa6\xf10\xf4\x9d\xcfyS\x1b\xf1K\xc1US_\xd0\x04>X\xdf\x0f\xa0aE\x988\xb7$\xd7j\x03B\x96\xde\xbb\xf7*~\xd8\x84@\x91\xb2\xa6\xeaF\xa66\xdf2\x8fe\x18\x96\xbf\xca\xe6\x9b\xa8r\x19CD\x186=|\xb0\xbd\xfd`\xbb\x5c\xf5s\x8d3l\xeb,\xffw\xd1\xb7SF\xc2\xf3)\xb6a\x96\xb6\x1aIR\xf0\x8bC\xbds\x9d\xe3g\x96.\xac\xafq\xc5\xf5z\xf7V\xab\xaa\x08vp3!\xb8\x85\xe3\x96rN\xd8G+ \xacj\xf6uq\xbf\x835\x0a{gY\xfd^\xbd\x10\xeeNb 
:K\xbc\xafW-y\xbe\xfb5~%\xbc\x0e\xf7\xfa\xf8\xe52\x9c\x0e\x13\x8b-\x1fe\xf3\x9f\xa7\x19\x86z\xcf\xc2)\x01\xb3\x0e\x7f\xeb\xe1\x83\xf5\xecq\xa8Z/\xf7y\xd7ja\xf5\xec\xf8\x0c\x13?4?\xb5\xf2I<\xf1=\xb5ga\xac\xb5\x17z\x1c\x1f\xd5\x0c{7\xf1\xd8=\x0e=D\xe1\xbc\xcc.\x95C\x8a\xcfw\x96\xe06\xee\xeb)\x06\xe1\xef\xb1\xd8\xc2d\x8e\xd0\x03{<\xe7\xcd\x85\xd7\xd1\x8bx\xaa\x87\xc0\x07$\xbd)\xe7\x15\x1f:\xb9\x96Z\xa9\x94\xe1\xd6\xeb8Qc\xda\xef_'\xdc\x86\x09\x1c\x8buR#\xec\x85\xa1\xda\xad\xd0+\xd4\xe1\xd9\xaaG\xd9l%e6\xfa\xfe\xe5#\x04\xf7b\x0bA\xedq\x0c\xf5\xd7s\xdc\xdc\x8bX\xecZ\xe0\x03*\xdf\x94\xad\x97\xdb\xde@\x1ez_\xb7\x13v=(\xf9py\x95\xa5\x0f\xc9?3\x81c!\xc71|\xb8\xa7\x9c\xb3\x17\xbe`\xbd\x1dV\x04\xe9\xf2\xdf]\x0c \xfbs\xdc\xc4\xee:,\xff\x17{\xfc\xc2{l\xf8\x9b\xfbbv\xd7\xebw3\xc3M\xd5\xee\x15\x15\xf8`\xbd>\x84\xaa\xce\xaf)\xeb5b\xf1\xc7\xa7\xaa\xf7\xf5^\xe5\x84\x9ax}\xeaL\xc1\xb1\xd6o\x5c\x9e\xb8\xdf\xb0\xeb\xab\xd8\xc4\xd7m\x13\xaf\xa1|\x9d\xbe|\x84\xd3fb\xaf_x\xce\x875\x83\xdfFVs$F\xe0\x83\xf5R\x15\xe6F\x9ah\xe5\xc7'e\x080\xf5\x8d>\xf5x\xee\xce2D\xc4\xd4\x00\x14\xda2eh\xf3\xb0'\xe7\xca\xa6\x0c]\xa7\xce\x1e_\xbb\xca\x00qVo\x1e\x83_\x9d\xf3\xfc\xf6\xeb\x04d\x81\x0f\xd6\xebC\xa8l\x88\xe9\xd4z\xb9+=>\xc3,mH,y]\xe3\x9a\x138\x8e\x16Q\xdd\x7fM\xa5\x84\xe7\x9b>\x145\x8f\xc3\xb0UC\xd7\xd7q\xe2BJ\x98\xd9^\xd7\xf3Jc\xf0\x0b_\xd2\xdei\xf8\xb5&\xf0\xc1\x1a\x85\x89\x94\x9an\xca\xb0\xacV\xcaP\xfa,\xeb\x1a'O\xe0\xf0\x1ah\xcc0a\x9fq\x0f\xdeW\xc2)\x22)\xe1l\xf4\xe0=&e\xd8\xf2\xd9\xa2\xd7\x95my\xf0\x1b\xd7\x08}\x02\x1f\xf0\xa90W6\xc4t\xd8\x83u,\xbb\xfc\xc1\x19>4S&j\xd4^\xd7x\x86\x09\x1c;\x8e\xc8\xdc_\xaeR\x86s\xcf\x16p\xf7\xbbK~\xba!\x98T\x0e\xe5\xde\xf7H\xc7\xd7b^\xe3\xb6\xd7V\x0c})\xc3\xe0\xc9\x7f\xaf\x02\x1f\xf4\xff\x03h\xab\xe2\x03\x7fR\xf1^\xdas|\xee\xcd\xbc\xaeq\x9c\xc0q\x9a\xb8\xbb\xd7\xc2|R?\x80\xaf\x1a~\x1d\xed,\xf9u{\x94\xf0%\xe5S=\xd25\xd6|\xde\x8e\xf7\xb1\xceR\xfe\xde\x93\x0bz\x0b|\xb0\x1eo\x1aeo\x0a\x07\xca\xb0\xacT\xeaD\x8dy\x87[S\x7f\x7fw\xd6\xc2\xae\xd4\x0a\xe1MO\xd6\xd8Zb\xd8\x1b
\x16\x17OS\xde{\xa6\x8c\x1c\xa4\xbe\xbe\x9e\xc6\xfb\xf2\x1ai\x80\xc0\x07=\x960\x11\xe0|\xd6ezh\xec\xf8\xa4,\xb94\xf7\xba\xc6&p\xb4\xee\xd87\x1d\xd0\xf6\x96\xf4\xb8SK\xb0\x5cN+\xf1\x14\x83\xcc\xf3\xc4\xbb\x1c\xaf\xf9k\xf1\xba\xa9\x1b\x12\xf8\xa0\xdf\xaa\x86DrM\xb4R\xa9a;o\xf0\xf5\x90r\xd2\xfc\x86\xd7\xc6\xc25\x16\xf8bx\xdc_\xe2k6\xe5\x1c\xc5Q\xc2k:u2\xd1:\xbf\x16\x1b\x0b\xbb\x02\x1f\xf4\xb7\x07!\xbc\xe1\x96\x9dcs\xac\x0c\xcbJ\x8fO\x9e\xf8\xc1\xd9\xd8q\xaa9\x81\xe3\xa9\x09\x1c\x0b\xd5d\x8f\xdcxI\xaf\xd9\xf0\x98Sz\xa4+\xeb\x0b\xc6\xd7\xe2\xa8\xc6kq\xb8\x86\xef\x11\x8f\xb2\x1a\xe7\xe8\x09|\xb0\x9ea\xa2\xaa\x0c\x8b\xf5rW{|\xb6j\x04\xafF\x8fS\x1c\xc2\xb7\x02\xc7\xe2\xa4\x9ew\xb5\xd7\xd0k)\xbc>vk\xbe7\xcc\xfa\x9aMy=\x5cg\x89\x13\x7f\xe2\x17\x99\xd4\xa1\xdd\x935\x1c\xdaMy\x8d\xa4\xfe-\x0b|\xd0S\x07\x15\xdf\x0c\x8f\x94aY\xa9\xd4\x89\x1a\xcf\x17t\x9cRC\xe4\xf6:\xaco\xdap\xa0~\x95%\x0eU\xce\xdb\xb6\xb1\x17\xffY\xcd_\x9b\xb5\xd7v\x9c\xf8\x9a\x1d\xd5\x9c\x04\x96'\xb6\xd7F[\xbf\x80\x84 \xdat\x0fd\x8de\x16\xcf\x04>XS\xf1\x9bx\xd9\x87\xc0u\x1f*\xfcw\xf8\xf8\x84\x0f\x86\x94a\xb1\x85\xf5\xc2\xc6\x9e\x95\xd4%\x9cr\x138j;\xa9\xd1\xb6;3\xbe\x8e\xc2k\xe3\xc5\x92^\xb3)\xabi\xdc\x7fA9\xab\xf9Z\xac3\xb4\xfb\xa4\xa5K\x00\x86\xc7\xf4~\xf1\xd8\xce\x9a\x08~\xf1\xef-\xb4c\xca)\x1f\xc9!X\xe0\x83\xfe\xa9z\x03\xd0c\xb3\xc2\x9e\x80\x1ao\xd0G\x0b.\x97\x93g\xe9\x138\xd4\xe6\xab'\xb5\xbdB\xdb\x9e\xd5\x091!P\x14\xdbEV\xbfgo\xd6\xd7l\x08\xa4__\xe4\x17\x94\x9aC\xbbm\x9c\xb5{\x7f\xfcv\x1f\x04\xbf\xd1,\x8f3\x06\xc6\xd0\x1e)\x85\xd8\xcf\xeb\x8c\x00\x08|\xd0\xaf@1\xac\xf8&>wy\x0f\xe6R\xb5\xe2\xc9\xbd\xe4\xf3\xa0f\x15?(R\xefc\x7f\x9d\xeb\xa1\xcd\xd8\xb6\xa9\xe7V\x85\xd0\xf7^YH\x08\xa1+\xf4\xb2\xc5\xa0\xf7~I\x18\xb8i\xf8\xfd\xa4\xce\x17\x94\xd1\x9c_P\xf2,}h\xb75\xefa\xb1\x8d^\xef\xb1\x0f\xef\xc1\xa1\xf7\xf5{\xf1\xb8\xe61\xa8?\x9a\x16\xaa\x13\x8f\xef\x5c_\xde\x05>\xe8\x97\xaa7g\xbd{\xab\xfb`\xd8\xca\xd2{e\xf2e\x14\xc3\x8eC\xfb\xa9u\xbe\xf4\xf2\xd53\xaa\xb9\xff\xc3\x90p[l\xaf\xe2\xe5m\xf1\xb3\x0f\xb3\xbb^\xb6\xb2
 p3\xc3}\xa6\x84\xb0\x94\xf0q\xda@\x9d\xc8:C\xbb\xbb-:\xb7t/\xe1\xb8>\x8bA\xee\xfe\xd8\x86\xed\xaa\xe6\xf1}\xdda\xdd\xc2\xcc\x02\x1f\xf4'PT\xf5\x1e=_@u\x7f\x9a\x0b\xe3\xf7.\x97\x5c\x0c;\xf5\x83s;\x9e7FZ\x80\xb9*.\xde\x9d\xe3&\xea\x94\xe3\x08ao\x98\x18\xba\x92\xce\x19\xac\xb1\x9aFcA\xb3\xe6\xd0n\xbe\x80\xe2\xd5\x8b\x08|\xd3l\xceq\x9f\xc7\xb3\x9c\x87-\xf0A?\xc2^\xd5\x8c.eXV{|\xf6\xb2\xf4\xd2\x19K\xed\xb9\x88!!u\xf8\xf1\xa0%\x1f\xb2]\x09}\xa1W\xf4x\xc1ws\x1f\xf6R\xbf\xcc=Jx\xbd\x86}R{\xecF\x0d\xf7F\xe7Y\xfa\xb9\xa5\xe3\x15\xff]O\x1a\xce]\xb4\x10\xf6f\x0a\xd8\x02\x1f\xf4CU\x99\x8f\xdcz\xb9+\xfdPH\x1d\x0e=_Q1\xec\xd4\x90i\x02G\xfd\xd0\x17>\x9c\x9f/\xe8\xe6CP\xdfz-\xecU\x85\xf7\x94\x89\x04\xe3,\xad\x87\xb1\xf1s\x82g\x18\xda]\xe5\x17\xd9e\x7f\xf9yw\xd6\xb0'\xf0A?\x02E\x18\xa2)[V\xe9z\xda\x9a\x96,-Lm\xd6\xd8w\x15\xa1\xa4\xce\xda\xa6OL\xe0\xa8\xdd\xbe\xe1\xb8~9knRE8\xef\xf2\x9d\xe2v\x873|\x91\xdb\xa9x?\x09\x81\x22\xb5l\xd0hA\xed\x15B\xe4i\xe2\xee\xcfV\xb5\x22L\xfc\xbby\x9c-\xbe\x177\xdc\xfe\xe3y\xdf\xc7\x05>\xe8\xbe\xaa7\x81\x91&ZY\x18\x0f=\x00\xa9\x135\x8eW|\x8ee^#\x90\x8c\x1d\xdd\x99BLx=\x1c\xce\x11\xfc.c\xd0\xdb*9\xcf\xb3\xea5\xb4U\xf1zM\x0d\x15\xf9\x82\x8b\xb7\x8f\xba\xf0z\x0cm\x10{\xdd>\x9b\xdd\x9d\xb3y\xde\xd0M\xdf\xc4/a!\xe8\x8d\x9ah\xebAq#\xfe\x12!\xda\xf8\xca[gY\x8de\x8a\xa2\xf3\x9bo|0\x5cQ\xa0\x08\xe7\x86\xbdW\xf6\xd8B/\x80#\x0b\xad\xfb2\x10\xfev\xc3\xdf\xe6N\xc9{\xceu\x0cp\xe1}\xe9\xc4\xea8\x9d9\xb6\x8f\xe2q\xbd?\xbe\x8f\x12>WBP|\x15\x8f\xf5\xd9\x22\xbe\xfc\xbd\xe1\xd0@\xa7\xe9\xdd\x83\x0e\x8a=~jb\xf6\xf3\xd8~?\xb8\xb5\xe9q\x19\xd2\x85\xee~\x8b\xcc\xb3\xf2s\xc3\x0e\xf5\x08\x00 \xf0Aw\xc3\xdeVV~\x82\x7f8\xff\xc3D\x0d\x00\x04>\xe8\xb0<+/\x9bp\xa0\x0c\x0b\x00\x02\x1ftT,\x89QV\x86e\xd9+5\x00 \xf0\x01\x0d\xcb+\xae\xb7^.\x00\x02\x1ftU,\x8aZ6\xbd\xffxE+5\x00 
\xf0\x01\x0d\x84\xbd\xaa%\xba\xac\x97\x0b\x80\xc0\x07\x1d\x17\x86j\xcb&j\x1c)\xc3\x02\x80\xc0\x07\x1d\x95\xb0DW\xa8\xc8\xaf\x0c\x0b\x00\x02\x1ftXU\x98S\x86\x05\x00\x81\x0f\xba*\x96ayR\xb2\xcby\x5c\xa6\x09\x00\x04>\xe8\xa8q\xc5\xf5\xca\xb0\x00 \xf0AW\x0d\x06\x83\x10\xe6\xca\xd6\xcb\x0deX.\xb4\x14\x00\x02\x1ft3\xec\x852,y\xc9.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb0\x10\xf6\xca\xca\xb0\xe4&j\x00 \xf0AG\x0d\x06\x83\x9d\xe2\xe2i\xc9.\xd7E\xd8S\x86\x05\x00\x81\x0f:\xac*\xcc\x8d4\x11\x00\x02\x1ft\xd4`0\xd8\xcb\xca\xd7\xcb=\xb7^.\x00\x02\x1ft\x9b\xde=\x00\x04>\xe8\xab\xc1`\x90g\xe5eX\x0e\xad\x97\x0b\x80\xc0\x07\xdd\x0d{[Yy\x99\x95P\x86\xc5D\x0d\x00\x04>\xe8\xb0<+/\xc3b\xbd\x5c\x00\x04>\xe8\xaa\xb8^\xee~\xc9.\x97E\xd8\x1bk)\x00\x04>\xe8\xae\xbc\xe2z+j\x00 \xf0AW\x0d\x06\x83QV^\x86\xe5X\x19\x16\x00\x04>\xe8n\xd8KY/7\xd7R\x00\x08|\xd0]a\xa8\xb6\xac\x0c\xcb\x912,\x00\x08|\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\x00\x10\xf8\xa0\xd3\xaa\xc2\x5c\xae\x0c\x0b\x00\x02\x1ftT,\xc3\xf2\xa4d\x97seX\x00\x10\xf8\xa0\xdb\xaaz\xf7\x94a\x01@\xe0\x83\xae\x1a\x0c\x06!\xccm\x97\xec\x12\xca\xb0\x5ch)\x00\x04>\xe8f\xd8K)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xb2\xf5r\x8fL\xd4\x00@\xe0\x83\x8e\x8aeX\x9e\x96\xecr]\x84\xbd\x5cK\x01 \xf0Aw\x8d+\xae\x1fi\x22\x00\x04>\xe8\xa8\xc1`\xb0\x97\x95\xaf\x97{n\xbd\x5c\x00\x04>\xe8\xb6\xaa2,#M\x04\x80\xc0\x07\x1d5\x18\x0c\xf2\xac|\xbd\xdc\xe7\xd6\xcb\x05@\xe0\x83\xee\x86\xbdP\x86\xa5\xac\xccJ(\xc3\x92k)\x00\x04>\xe8\xae0\x94[V\x86\xe5@\x19\x16\x00\x04>\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05@\xe0\x83n\xcb+\xae\xb7\xa2\x06\x00\x02\x1ft\xd5`0\x18e\xe5eXN\x95a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850WV\x86\xe5H\x19\x16\x00\x04>\xe8\xa8\xb8^\xee\xb3\x92]\xae\xb3\xea\x22\xcc\x00 \xf0A\x8bU\x85\xb9\x5c\x19\x16\x00\x04>\xe8\xa8X\x86\xe5I\xc9.\xe7\xca\xb0\x00 
\xf0A\xb7U\xf5\xee\x99\xa8\x01\xc0J\xbc\xa1\x09`~\xb1\x0c\xcbv\xc9.\xc7\xb7\xb7\xb7\x17Z\x0a\xa0{\xbe\xf9\xf8\x0b\xe3\xf8\xcf\xfcK/\xbf}\xd5\xc5\xe7\xa0\x87\x0f\xe6\xf4'\xff\xf8\x07\xc2\x17\xa7\xb2\xde=eX\x00\xba\xed$\xbb[9\xe9e\x11\xfe\xce\x8am$\xf0\xc1\x9ay\xfc\x9d\x1f\xdb\xca\xca\xd7\xcb=2Q\x03\xa0\xbb\xbe\xf4\xf2\xdb!\xf0]\xc7\xff\x86\xa2\xfa/\x8a\xd0wUly\xb1=\x12\xf8\xa0\xe7\xfe\xdc\x1f\xbe\x91\xfd\xea{\xbf\xf2\xf9\x92]\xae\x8b\xb0\x97k)\x80\xce{}$'\xd4[\x0de\xb8\xbe\x17\x86|\x8bmG\xe0\x83\x9e\xfa\xdc\xcb\x1f\xac\xda\xc5P.@?\x8c\xb3\xbbSt&\x09\xc3\xbd\x1f\x16\xa1\xef\xa2\xad\xc3\xbd\x02\x1f\xcc\xe8\xa7\xfe\xe0G\xb3\xf3_>+\xdb%\x94a9\xd1R\x00\xdd\xf7\xa5\x97\xdf\x0e\xa7\xe6T\xbd\xa7\x87\xc9{a\xb8\xf7U\x1c\xee\xdd\x12\xf8\xa0\xe3~\xf7\xdf\x7f\xb7j\x97\x91V\x02\xe8\x95\xd4\x95\x92\xc2y\xdda\xb87L\xf28)\xb6\xa1\xc0\x07\x1d\xf4\xd7\xbe\xfb(\xfb\xd6\xc5\xaf\x97\xed\xf2\xdcz\xb9\x00\xfd\xf2\xa5\x97\xdf\x0e\xe5\xb5\xcek\xfeZ(\xc8\xff~\x9c\xe41Z\xd5$\x0f\x81\x0fj\xfa\xf1?\xfeL\xf6\xad\xb3\xd2\xb0\x17\xce\xf1\xc8\xb5\x14@/\x8dg\xfc\xbd0\xc9\xe3E\xb1]\xc5I\x1e[\x02\x1f\xb4\xd8\x9b\xbf\xf3\xe3\xd9w\xbe\xf3\x9d\xb2]\xac\x97\x0b\xd0S_z\xf9\xed\x10\xf8\xae\xe7\xb8\x890\xdc\xfb\xb0\xa6\xdf\x9e\xc0\x07-\xf3\x17\xfe\xef\x0fe\xbf\xfaK\xff\xa2l\x97\xcb\x22\xec\x1di)\x80^\x1b7t;\xa1\xa6\xdf{q\xb8\xf7`\x91\xc3\xbd\x02\x1f\xd4\xf0c\xd7\x95\x7f2\xca\xb0\x00\xf4_\xd3_\xec\xc3p\xef\xd7\xb3\x05\xd6\xf4\x13\xf8 
\xd1_\xfd\xbd\x9f\xa8*\xc3rz{{{\xa6\xa5\x00\xfa-\x96h9^\xd0\xcd\xdf\xd7\xf4kt\x09\xb77\x1c6\xa8\x16&j\xbc\xfa\xd6\xff,\xdd\xe7\x1f\xfd\xdc\xdf\xfc\xc56L\xbd\x07`).b8[\x940\xdc\xbb[|\xae\x84\xde\xc4\xb0\x8d\x8b\xa0y5\xeb\x8d\x0dnoo\x1d2\x886\xbe\xf2\xd6Y\xfc#\xfb\x84P\x86\xe5W\xfe\xd9/O\xfd\xbd|\xefg\xb2\x9f\xb9\xf8\xef\x1a\x10\x80E:\x8e\xc1\xef\xac\xee/\x1a\xd2\x85\x09~\xf4\xf7>\xf7\xfd\x7f\x87\xf5r\xcb\xca\xb0\xec\xbe\xf5\xd3\xd9\xdf\xf8\xad\xefh4\x00\x16-\xf4(\xbe\x7f\xbf\x84[\x9dI\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f>\xfc\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf\xfc\xf6\x8f\x94\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xd1\x12n\xd9]M\xbf\xa3\x94\x9a~\x86t\xe1\x81\xcf\xff\xec/\xfc\xceO|g\xe3O\xdf\xff\xffs\x7f\xf1e\xf6\xaf~\xf1\x9fO\xdd\xff\xe7\x7f\xf6\xed\xec\x17\xfe\xd3ok8\x00V\xed4\xbb\x1b\xee\x9d\xb8\xde\xafI\x1b\x10\xbd\xb9\xfd\xd5\xf1Od\x1f\x87\xbd\xe03\xbf}S\xfa;?\x9b\xfd/\x0d\x07@\x1b\x84%\xdc\x9e|\xf3\xf1\x17BQ\xe8\xfbI\x1e\xdf_\x04@\x0f\x1f\xc4\xb0\x97M\x98m\xb5\xf1#\x7f\x94\xfd\x89\x1f\xfd\xcd\xec_\xbe\x7f\xfa\xa9\xdf\xf9\xfb?\xf7\xb7\xb2\xbf\xfb\xeb\xffM\xe3\x01\xd0F\xa1\xc7\x22\xf4\xf6\x1d\x855\x80\x05>\x84\xbd)a\xef\xa1?\xfb\xd9?\xc8\xfe\xcf\xef\xff\x9b\xec_\xff\xbb\x7f\xfb\xd1\xff77\xb7\xb2\x7f\xba\xf1#\xce\xdd\x03\xa0\x0b\xc1\xef@\xe0C\xd8\xabQG\xe9\xef\xfc\xe4\xb7\xb3_\xfa\x8f\x17\xd9;_\xfc\xf3\xca\xb0\x00\xd0f\x9f\x18\xda\x15\xf8\x10\xf6jz\xe7\x07\xbf\x95\xfd\xf5\xef\xfe\xa6\x06\x04\xa0\x8d&N\xde\x10\xf8\x10\xf6f\xf0\xf7\xfe\xf0\xd7\xb3\x9f\xfe\xfd\xff\xaa!\x01h\x830l\x1b>\xd7\x8e\xa6\xad\xc6!\xf0!\xec\x09}\x00t\xd3ev7l{\xf2pF\xee$\xca\xb2 \xec\xcd\xe8\xbf\xfc\xc0\xe7\xb2\x9f\xce\x04>\x00\x96\xae\xf6\x12k\x02\x1f\xc2\xde\x0c\xde\xbe\xfd\x1f\xd9\xcf\xff\xde\xafiT\x00\x96%\x0c\xdb\xdeO\xc2\xb8\xaa\xfb\xcb\x02\x1f\xc2^M\x9f\x1f\xfc\xef_\xfe\xf9\xdf\xfd\xb5\xafiU\x80\xb56,\xb6gK\xb8\x9f\xf3\x18\xf2\xc6\xf3\xdc\x88\xc0\x87\xb0W\xcf\xf1\xaf^<\x1fiU\x80\xf5\xf6\xcd\xc7_8X\xf0]\x84a\xdb\x8f\x8a&7qc&m 
\xec\xd5\xf8\xe3\xfb\x8d\xcb\xaf\x09{\x00\xc2\xdeVq\xf1r\x017=qY\xb4&\xe8\xe1C\xd8\x13\xf6\x00\xa8\xa7\xe9\xde\xbd0l{\xf4z\xed\xbc&\xe9\xe1C\xd8\x13\xf6\x00H\xf4\xcd\xc7_xT\x5c\x5c\x15\xdb\xc6\x9c7u\xbf\xd6m>\xcb$\x8c\xba\xf4\xf0!\xec\x09{\x00\xa4\xdb\x9b3\xec\x85a\xdb<K\xa8\x9d'\xf0\x81\xb0\x07\xc0j\xe43\xfe^X\xf2\xec\xa8N\xed<\x81\x0f\x84=\x00\x96\xec\x9b\x8f\xbf0,.6k\xfc\xca\x5c\xb5\xf3\x04>\x10\xf6\x00X\xbe\xd4\xcf\x86\x8f\x96<\x9b\xb7v^\x93L\xda@\xd8\x13\xf6\x00\xa8\x90X\x8a\xa5\xd1\xdayM\xd2\xc3\x87\xb0'\xec\x01Pm\xda\xe7C\x98\x841\x8eA\xefU[\x1f\xbc\xc0\x87\xb0'\xec\x01P\xed\xf5\xda{\x8d,y&\xf0\x81\xb0\x07@\x0b|\xf3\xf1\x17\xc2gD(\xc5\xb2\xd4\xday\x02\x1f\xc2\x9e\xb0\x07\xc0\xf2\x0c\x8b\xed\xddl\x01K\x9e-\x8bI\x1b\x08{\x00\xd0s\x9f\xd1\x04\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{Z\x15\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\xd66\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xa2\xc0W\x84\xb3G\xc2\x1e\x00@\x8f\x03_\xe1\xa8\x08i[\xc2\x1e\x00@\x0f\x03_\xec\xdd\x0b\x01m\xd4\xc0m\x09{\x00\x00m\x0b|\x85\x83x9W\xb8\x12\xf6\x00\x00\xda\x1b\xf8\xee\x83\xd5f\x11\xda\xf6\x84=\x00\x80\x1e\x05\xbe\x18\xf06'\x84?a\x0f\x00\xa0\x0f\x81oB\xc0{Rg\xf2\x86\xb0\x07\x00\xd0\xe2\xc0\x17\x83\xdd\x93\x84\x10(\xec\x01\x00t1\xf0\x95\x04\xbb\xca\xe0%\xec\x01\x00t;\xf0\x95N\xde\x10\xf6\x00\x00:\x10\xf8\x8a\xd0\x16B\xd6f\xc9.\x07\xc2\x1e\x00@\x87\x03_V=l\xbb\xfb\xfa\xe4\x0da\x0f\x00\xa0#\x81/\x06\xb9\xdd\x84]\x0f\x84=\x00\x80\x0e\x06\xbel\xcap\xed\x04#a\x0f\x00`u\x06\xb7\xb7\xb73\xfdb\x11\xe0^\x15\x17\x1b\x89\xbb_\x16
\xdb\xb6\xb0\x07\x00\xb0|3\xf5\xf0\xc5\xc9\x1a\x1b5~E\xd8\x03\x00\xe8R\xe0\xcbfX:M\xd8\x03\x00X\x8d\xdaC\xbaq\xb2\xc6Ka\x0f\x00\xa0\x1bf\xe9\xe1\xcb\x85=\x00\x80\xee\xa8\xd5\xc3\xf7\xe6\xf6W\x1f\x15\x17WY\xbd\xf3\xf7\x84=\x00\x80\x15\xaa\xdb\xc3\xb7'\xec\x01\x00\xf4;\xf0\x1d,\xe9q\x09{\x00\x00\xcb\x0e|on\x7fu'k\xae\xbc\x8a\xb0\x07\x00\xd0\xb6\xc0\x97-\xa7wO\xd8\x03\x00XE\xe0\x8b\x935\xf6\x96\xf1\x80\x8a\xfb\x1a:,\x00\x00\xcdI\x9a\xa5\x1bW\xd6x\xb1\xc4\xc7u]lG\xc56\xfe\x8d\xcb\xaf\xbdr\x98\x00\x00\x16\x1f\xf8\xae\x8a\x8b\xcd\x15<\xbe\x9bb;\x09\xe1\xaf\x08~\x17\x0e\x17\x00\xc0\x02\x02_\x1cb}\xbf\x05\x8f\xf52\xbb\xeb\xf5;\xd1\xeb\x07\x00\x90.\xe5\x1c\xbeQK\x1ek\x98!\x1c\x86\x95\xaf\x8a\x10:\x8e\xb3\x86\x01\x00\xa8P\xda\xc3\x17'k|\xaf\xc5\x8f?\xf4\xfa\xe5\xbfq\xf9\xb5\x13\x87\x12\x00`\xb2\xaa\x1e\xbeQ\xcb\xc3\xde\x91\xb0\x07\x00P\xee\x8d\x8a\xeb\x0fZ\xf6xM\xe2\x00\x00h*\xf0\xc5\xc9\x1a\x9b-y\x9c&l\x00\x004\x1d\xf8\xb2v\x0c\xe7\x1egz\xf3\x00\x00\xe62q\xd2\xc6\x9b\xdb_\xdd*.^\xae\xe81)\xba\x0c\x00\xd0\xa0i=|\xa3\x15<\x96\xe3\x18\xf2\xce\x1c\x16\x00\x80\xfe\x04>\xbdy\x00\x00\xcb\x0e|on\x7fu/[\xfcd\x8d\xd3\x18\xf2\x94T\x01\x00Xv\xe0\xcb\x16\xd7\xbb\x17z\xf3\xc61\xe8]iz\x00\x80\xe5\xf8\xc4\xa4\x8d\x05M\xd68\xcf\x14H\x06\x00X\x99\xd7{\xf8F\x0d\xddn(\x90<\x8eA\xefJ3\x03\x00\xf4'\xf0\x85\xde\xbc0d;\xd6\xb4\x00\x00-\x0b|on\x7f5\x84\xbdY&k\xe8\xcd\x03\x00\xe8B\xe0\xcb\xea\xf7\xeeY\xee\x0c\x00\xa0\x03>\x9a\xb4Qc\xb2F\xe8\xcd\x0b\x93/,w\x06\x00\xd0\x11\xf7=|\x07\x15\xfb\xe9\xcd\x03\x00\xe8x\xe0\x1bM\xb9\xderg\x00\x00]\x0f|q\xb2\xc6\xc6\x83\x9fY\xee\x0c\x00\xa0O\x81/\xfb\xb8wOo\x1e\x00@O\x03_\x08x{z\xf3\x00\x00\xfa\xe9\xff\x0b0\x00\xb2\x10\xef\xec0\x8f}\x9d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x006\xc9\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe 
ImageReadyq\xc9e<\x00\x006VIDATx\xda\xec\xddO\x8c$Y~\x17\xf0\x88\xf5X\xfe\x87]\xbd\xd2\xf2O+\xdc\xd9\x1cX\x0d\x18U\xad\xc0bA\xac\xaa\xc6B\xc2\x5c\xdc5\x12\x17\x9f*\xe7\xc0\xc1\x87\xa5kN\xec\xad\xa3%\x0e\xbem5#$$\x0e\x93u\xc2\xe2\xe0\xa9>\x1a\x1bu\x95\x16\xc1\x98?\xda*\xb3\x92Y\x0c\xee*\xc4\xb2\x96\xd0j\xbbla\x0c\xd8n\xe2M\xbd\xda\xa9\xe9\xc9|\x11\x91\x19\x99\x19\x11\xf9\xf9H\xa1\xec\xae\x8c\xca\x8c|\x99\x95\xf9\xcd\xf7\xe2\xfd^\xfe\xea\xd5\xab\x0cH\xdb\xfa\xca\x97\xf6\xcb\x8b\xd1\xf5{\x1f\x1ei\x0d\x00\xfa\xe63\x9a\x00j9\x8c\x1b\x00\xf4N\xae\x87\x0f\xd2\xb6\xbe\xf2\xa5Qy\xf1\x22\xfe\xf7\xed\xeb\xf7><\xd1*\x00\xf4\x89\x1e>\xa8V\xdc\xf9\xb7^>\x00zG\x0f\x1f$l}\xe5K\xf7\xca\x8b\xcb\xf0\xcf;?~p\xfd\xde\x87\x97Z\x07\x80\xbe\xd0\xc3\x07i\xfb\xaf\x85\xbd\xa0\xd0,\x00\x08|0\x1c\xd3\xc2\xdd~\xec\xf9\x03\x00\x81\x0f\xfa\xac\x0cu{\xe5\xc5\xfdiWe7=\x7f\x00 \xf0A\xcf\xa5&h\x14\x9a\x07\x80\xbe0i\x03\xa6x\xad\x14\xcb,o]\xbf\xf7\xe1\xa9\xd6\x02\xa0\xeb\xf4\xf0\xc1t\xe3\x1a\xfb(\xd1\x02\x80\xc0\x07=V'\xcc=\x8c=\x81\x00 
\xf0A\x9f\x94!n\x9c}\xba\x14\xcb,c-\x06\x80\xc0\x07\xfds\xb8\xa4}\x01@\xe0\x83u\x8b\xa5X\xb6\x9b\xfcJ\xec\x11\x04\x00\x81\x0fzb\x9e\xf0\xa6\x97\x0f\x80NS\x96\x05\xa2\xb8z\xc6\xf7\xe6\xfcu%Z\x00\xe8,=|\xf0\xb1Ez\xea\xc6\x9a\x0f\x00\x81\x0f\xbao\x91\xd0v`}]\x00\x04>\xe8\xb08\xf1\xe2\xfe\x827\xe3\x5c>\x00\x04>\xe8\xb0qGn\x03\x00\x04>h[\x5c-c\xb7\x85\x9b\xba\xafD\x0b\x00\x02\x1ftS\xd1\xe2m\x09|\x00t\x8e\xb2,l\xb48\xd1\xe22\xab\xbf\x94Z\x1d\x0f\xae\xdf\xfb\xf0R\xeb\x02\xd0\x15z\xf8\xd8t\xe3\x96\xc3^PhV\x00\x04>\xe8\x8ee\xcc\xac\xddW\xa2\x05\x00\x81\x0f:\xa0\x0ce\xfb\xd9\xe2\xa5X\xa6\xdet\xe6\x5c>\x00\x04>\xe8\x84e\x8625\xf9\x00\xe8\x0c\x936\xd8H\xb1\x14\xcb\x8b%\xdf\xcd\xdb\xd7\xef}x\xa2\xb5\x01X7=|l\xaaU\xf4\xc0\x8d53\x00]\xa0\x87\x8f\x8d\xb3\xa4R,\xb3(\xd1\x02\xc0\xda\xe9\xe1c\x13\xed\xaf(\xec\x05\xce\xe5\x03@\xe0\x835Xe\x08\x1b+\xd1\x02\x80\xc0\x07+T\x86\xaf\xbd\xf2b{\x95w\x99\xdd\xf4(\x02\x80\xc0\x07+2^\xc3}\x1a\xd6\x05`\xadL\xda`c\xac\xa8\x14\xcb,o]\xbf\xf7\xe1\xa9g\x01\x80u\xd0\xc3\xc7&\x19o\xe8}\x03 \xf0\x81\xc0\xb7\x02\x07\xb1\x87\x11\x00\x04>X\x862l\x85\xb0w\x7f\xcd\x871\xf6L\x00 \xf0\xc1\xb0\xc3\x96\xc0\x07\x80\xc0\x07\xcb\xb0\xf5\x95/\xed\x94\x17\xbb\x1d8\x94\xfb\xb1\xa7\x11\x00\x04>hY\x97\xca\xa2\x08|\x00\xac\x9c\xb2,\x0cZ\x5c\xe5\xe2{5w\xbf\xbe\xfd\xb5\x86w\x13~\xefeV\xff\x1c\xc1/^\xbf\xf7\xe1\xb9g\x07\x80U\xd1\xc3\xc7\xd0\x8dk\xecsUn\xef\x94\xdb\xa8\xdc\xe6\x09b\xe7e\x80\x1b\xc5\xdb8\xab\xb1\xbfB\xcc\x00\x08|\xd0\xa2T\xb8z\x96\xdd\x14D\x1e\x95\xdb\xa4\xdc^.rG\xf16\xf6\xca\x7f~\xb1\xdc\x8e\x13\xbb\x1eX_\x17\x00\x81\x0fZP\x86\xaa\xb0\x86\xed\xeb\xc3\xaca\xf8\xf5i\xb9=(\xc3\xd9\xfe2V\xbf\x08\xc3\xb5\xe56.\xff\xf9\xd9r{\x92\xdd\xf4 
\xben\xec\x19\x02`U\xde\xd0\x04\x0c\xd8\xdd\xde\xbd\x8br;\x0a\xbdp\xab\xba\xf3\xd8cX\x84-\xce\xce\x0d\xdb\xee\x9dc;\xf2\x14\x01\xb0\x0az\xf8\x18\xa4\xb8\xaaE\x08Wah5\x0c\xdb\xee\xac2\xecM\x09\x7f\xb7\xc3\xbd\x0f\xe21\xdd\x8b=\x90\x00\xb0tz\xf8\x18\xb20l{\xd9\xa5\x03\x8a\xc73\x8e\xe7\xf09\x8f\x0f\x00\x81\x0f\x16\x0cV]>\xbe0\xdc\xfb\xd23\x05\xc0*\x18\xd2\x05\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x04>\x00X\x91<\xcf\xef\x95\xdb\x9e\x96\x00\x81\x0f\x80a\x86\xbd\x9d\xf2\xe2RK\x80\xc0\x07\xc00\xc3\xdeay\xf1\x8dr\xdb\xd2\x1a\xb0\x5c\xd6\xd2\x05`\xd5A\xef^y1)\xb7\x87\xb7?{\xf5\xea\xd5\xa9\x96\x01\x81\x0f\x80a\x84\xbd0\x84{Rn\xf7\xb5\x06\xac\x8e!]\x00V\x15\xf6n\x87p_\x0f{gZ\x07\x96K\x0f\x1f\x00\xcb\x0ez\x9f\x1a\xc2\x05\x04>\x00\x86\x13\xf6\xea\x0c\xe1\x9ej)X.C\xba\x00,+\xec\xcd\x1a\xc2\x05VL\x0f\x1f\x00m\x07\xbd\xa6C\xb8\xa7Z\x0d\x04>\x00\xfa\x13\xf6\xcc\xc2\x85\x0e2\xa4\x0b@[ao\x9c\xdd\xf4\xd65\x0a{j\xf0\xc1\xf2\xe9\xe1\x03`\xd1\xa0\x17\x86p\x8f\xca\xed@k\x80\xc0\x07\xc0\xf0\xc2^\x18\xc2\x9d\x94\xdb\xf6\x9c7\xa1\x06\x1f\xac\x80!]\x00\xe6\x0d{\xe3\xecf\x08w[k@\xb7\xe9\xe1\x03\xa0i\xd0ks\x08\xf7T\x8b\x82\xc0\x07@\xb7\xc2\xde\xa2C\xb8\xc0\x1a\x18\xd2\x05\xa0n\xd8\x1bg\xed\x0f\xe1\x9ejYX>=|\x00T\x05=\xb3pA\xe0\x03`\xc0ao\x94\xdd\x14R^\xca\x10\xae\x1a|\xb0\x1a\x86t\x01\x98\x15\xf6\xf6\xcb\x8b\xf3\xcc\xf9z 
\xf0\x010\xc8\xb0\x17\x86p?(\xb7\xad%\xde\x8d\x1a|\xb0\x22\x86t\x01\xb8\x1b\xf4F\xd9\x12\x87p\x81\xf5\xd0\xc3\x07\xc0m\xd8[\xf5\x10\xee\xa9V\x07\x81\x0f\x80\xd5\x85\xbdU\x0c\xe1\x02kbH\x17`\xb3\x83\xde([\xdf\x10\xee\xa9g\x00VC\x0f\x1f\xc0\xe6\x86=\xb3pA\xe0\x03`\xc0ao\xedC\xb8j\xf0\xc1\xea\x18\xd2\x05\xd8\xac\xa0\x17V\xcd\x08AK\xaf\x1el\x10=|\x00\x9b\x13\xf6\xf6\xca\x8b\xcb\x8e\x84=5\xf8@\xe0\x03\xa0\xe5\xb0W\x94\x17\xcf3\xb3pa#\x19\xd2\x05\x18v\xd0\x0bC\xb8a\x16\xeen\xc7\x0e\xed\xd4\xb3\x03\x02\x1f\x00\xed\x18\xc5p\x15f\xe3\xee\xc4\x9f\xedj\x16\x10\xf8\x00\x18\x88W\xaf^\x9d\xc7\xb0\xf7)\xf1\x9c\xbe\xe0\xf5\xcbU\x04\xc2S\xcf\x0e\x08|\x00,?\x0c\x9e\xa6\xc2\xd7k\x81p/\xd33\x08\x02\x1f\x00\xc3\x0d\x84e\xf8\xbb\x5c\xd2m\x03+`\x96.\x00Iq\x86\xef}-\x01\x02\x1f\x00\xc3\x0c{a\x96\xefa\xcb7\xab\x06\x1f\x08|\x00tHX\x82-U\xbb\xefI\xb9]k&\x10\xf8\x00\xe8\xa18i\xe3 \xb1\xcb\xc5\xabW\xaf\x8a\xf2r\xdc\xf0\xa6O\xb5.\x08|\x00tCQq\xfdGC\xbde\xe8\x0b\x85\x9d\x9fj.\x10\xf8\x00\xe8\x91<\xcf\xc7Y\xba\x0c\xcb\xb3\xbb3m\xcb\x7f\x87\xf0wQ\xf3\xe6O\xb50\xac\x96\xb2,\x007\x01g\x94\xdd\xacJ\xb1Wna\xa2BrU\x8a2\xe0\xe4\x03n\x8b\xf0\xf8\x8b\x8a\xdd\xa6M\xe4\xd8\xcfn\x8a<[\xaf\x17\x04>\x80N\x84\x9a\x9d\xec\xe3\x82\xc2{]\x0c)\xf1\x1c\xba\xe7\xf1\xbfg1L]\x96\xdbi\x5cAcYB\x98K\x95ayR\xde\xff\xe5\x94\x10|\x19{\x06?H\xdd\xb8\x1a| 
\xf0\x01,;\xe4\x85@\xb2\x9f\xf5\xa3\xae\xdc\xe8\xce\xbfw\xb3;\xbd\x8d\xe5c\xb9\x0d\x81'm\x06\xc0\xd8\xd3\x99*\xc3r\x95\xdd\xcc\xdc\x9d\x15\xe6N\xca\xdb\x08\xe7\xf3=\xf2\x8a\x03\x81\x0f`U!\xef^\x0cx!\xc4l\xf7\xec\xf0G\x15\xd7\x7f?\x04\x96\x8f3\x04\xb1\xd3\x10\x00\xe3$\x8ayU\x95a)\xca\xdb\x7f\x99\xba\x81p>_\x0c\xd7\xd3\x86\xc3\xd5\xe0\x8350i\x03\x18l\xd0\x8b+D\x5c\x96\xdb\xfb=\x0c{\xc1^\x83}C\x8fe(\xa1\xf2A\xf9\xb8_\x96\xdb\xa4\xdc\xf6\x1b\xb6Y\xb8\xbf\x87\x89]\xce\xca07\xa9ys\xe1\xbe\xa7\xd5\xe7{\xe9\xd5\x09\x02\x1f@\x9bA\xefq\xd6\xef\x09\x04\xa39\x7fo\xebN\xf8\x0b\xe7\xd5\x15q\xa8\xb6\xcaQ\xc5\xf5E\xdd\x03\x88\xbd\x80\xd3\x02\xe7\xb9W)\xac\x9e!]`\x88\x0e\x17\x08z\xd7\xd9\xc7\x93#\xc2\xf6r\x8d!\xe5~K\xb7\x11\x82\xef\xe32\xf4=\x0b\xa1n\xda\xa4\x898\xd9\x22\xd5\x0bz\xdct\xb2E\xd8\xbf\xbc\xdd'\xf1\xfeo\xe9\xe1\x03\x81\x0f`1\xa1g\xa9\x0c\x19\xe1\x1c\xb6\x83\x9a\xbf\x12\xce);\x8d\xdby\xd5\xf9i+\xf6V\xf6q\x89\x98\xbdx\xb9H\x8fe\x18\xae}\x18\xcf\xf7+n\x87g\xe3y\x8eG\x15!\xf8p\xce\xe7\xa3\x88C\xc5\xb7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x15G\x89\xc0\x17\xc2\xce\xed\xcc\xd6\x93.?\x88;=j\xdf?\xce84\xbbwg\x9b\xa7\x170\xfc\xce\xfbq\xe8\xbb\xa8\x11$\x8f\x16\x0c\xc2ah\xf72\xde\x87\x1e>\x10\xf8\x00Z\x09J\xe7e\x98\x09\xab>\xdc\x1d\xa2<.\xb7I\xdfk\xc0\xc5\xfaw\x93\xb8\xdd\xad'8\xce\x9aOL\x09\xc1\xef\x1f\x96\xdb\xe7\x13\xfb\x5c\xc5\xf5r\x179\xe6\x97q\x02\xc9\xf3%\xd7\x0f\x04f0i\x03\x18\xaa\xd0\xcb\x17\x86\x22\xc39d\x9f-\x83\xc6x\x88\x05\x7fC\x80*\xb7\xd0\x03\x17\x82\xdf\x83r{7\xab\xbf\xc4YP\xd5\xe3v\xd8\xd2q\x9e\xc6c\x03\x04>\x80\xd6\x82P\xe8\xcd\xbb\x17z\xa7:v^\xde2\x1f\xf3\xe5k\xe1/\x14@\xbeJ\xfcJ\x08\x86\x7f)q\xfdY\x9b\xc3\xde\xe1\xd8\xbc2A\xe0\x03\xa0\xdd\xf0wXn\xa3\xf2\xbfo\x97\xdb\xb3)\xbb}\xae\xe2f\xc6Z\x12\x04>\x00\xfa\x11\xfe\xc2\xea\x1b\xe1\x1c\xba\xdb^\xbf0\xd4\x1df'\xa7\xce\xdd{:m\xbd\x5c@\xe0\x03\xa0\xdb\xc1\xef\xa3^\xbf\xec\xa6\xa0\xf3Nb\xd7\x10\x08\x0b-\x06\x02\x1f\x00\xfd\xb5\xf0z\xb9\x80\xc0\x07@G\xc52.\xa9\xa2\xd4\x17&W\x80\xc0\x07@\xbfU\x85\xb9CM\x04\x02\x1f\x00=\x15\xd
7\xcb\xddM\xec\xf2l\x88\xb5\x0a\x01\x81\x0f`S\xc2^X/\xb7\xa8\xd8M\xef\x1e\x08|\x00\xf4X\x08s\xa9uw\x9f(\xc3\x02\x02\x1f\x00=\x95\xe7\xf9(K\xf7\xde\x85\xd58L\xd4\x00\x81\x0f`)A\xe4\xa8\xdc\xce\xe3p#\xcb\xa3\x0c\x0b\x08|\x00k\x09{{\xe5\xc5\xa3r\xdb.\xb7\xcbX.\x84\xe5\xb4\xf3\xc3\xc4.a\xbd\xdc\xc9\x12\xeew\x22\xcc\x83\xc0\x07pw\x081\xf4>}#\xce\x22ey\xed<M\xb1\x8c\xb0\x97\xdd\xd4\xfa\x0ba\xfeT\x98\x07\x81\x0f\xd8@e\x008\x8ca\xe0u\xef\xc7\xb0@;\xed<\x9e\xd1\xce\xb7\x8e\xdb.\xc3r'\xec\xdd\x12\xfa@\xe0\x0360\x84T\x95\x07\xd9\xd3J\xad\xb5s\xaaw\xaf\xf5\xf5r\xa7\x84\xbd[[B\x1f\x08|\xc0f9\xcc\xd2\x13\x08\xc6\x9ah%\xed|\xb4\x842,\xa9@'\xf4\x81\xc0\x07l\x82\xd8\xeb\x94*\x0frl\xa5\x87V\xdayT^<N\xecrU\xb6s\xb1\x84\xbb\xde+\xb7\x0b\xa1\x0f\x04>`\xb3\xa5z\x9d\xae3+=\xb4eR\xe3yh],\xed\x22\xf4\x81\xc0\x07l\xb8q\xe2\xba#\xb5\xe0\x16\x17\xcb\xb0\xa4\xd6\xcb\x0deXN\x96u\xff\x0dC\x9f\x92- \xf0\x01\x03\x0b\x22!\xec\xcdZ\xda+\xf4\xeeY\xe9\xa1\x1d\x93\x8a\xeb\x97\xde\x8b*\xf4\x81\xc0\x07l\xae\xfd\xc4uz\xf7\xda\x09\xd5U\xeb\xe5>-\xdb\xf9|\x15\xc7\x12\x9f\xcfq\x0c\xf3\xb3\x84\x92-'\x9e9\x10\xf8\x80a\x04\x91Q6{\xb5\x07\xbd{\xed\xb4qU\xb9\x9b\xd6\xcb\xb0\xd4\x08}!\x5c\xeeU\x84\xbe]\xb5\x17A\xe0\x03\x86!\xd5\xbbw\xa2w\xaf\x15\x9d\x5c/7\x86\xbe\xaaa\xe4\x83\xd8;\x09\x08|\xc0@\x03\x9f\xde\xbd\x05\xc5\x19\xaf\x07\x89]B\x19\x96\xb5\xb5s\x5c\xab\xf7I\xc5n_+\x1f\xc7\xbeg\x13\x04>\xa0\x9fa$\x0c5\xce\x9a5z\xb1\xaas\xca\x06\xae*\xcc\x8d\xd7}\x80\xb1\xee\xdf\xb3\x8a\xdd&q\xf8\x1f\x10\xf8\x80\x9e\xd9K}\xc0k\x9e\x85\x03\xf5~\x96.\xc3\xf2\xacC\xc5\xacC\xf0\xac\x9a\xb9{b\xe6.\x08|@\xff$\xcf\xdf\xd3<\x0b\x85\xbd\xaa\xf5r\x83\xce\x9c\x1b\xd7`\xe6\xaea~\x10\xf8\x80\x9e\xd9\x9b\xf1\xf3\x8b%\xac\xe5\xbai\xaa\xca\xb0<\xe9Z\x1b\xc7!\xfc\xa2b\xb7\x83X\xb7\x11\x10\xf8\x80\xae\x8b=P\xb3\x02\xc9\xa9\x16Z\xa8mGY\xba\xf7\xae\xb3\xe5n\xe2\x04\x92\xaa\xf3\xf9\x8e,\xbf\x06\x02\x1f\xd0\x0f\xa9\x0fl\x81o1E\x96.\xc3r\xd8\xf1r7\xe3,=\xb4\x1b\x1e\xdb\xc4\xd3\x0c\x02\x1f\xd0}{\x02_\xfb\xe2z\xb9\xa92,g\xb1\x14Jg\xdd9\x9f/\xc59\x9e 
\xf0\x01=0\xab\x87\xefB\xb1\xe5\x85T\x0d\xd5\x16}x\x10\xe5k \x04\xbaiC\xbba&\xef\x17c)\x17@\xe0\x03:nVy\x8dKM3\x9f8\x99a;\xb1\xcbq\x87\xca\xb0\xd4\x11\xceC\xbc;\xb4\x1b&\x9a\xec\xa8\xcf\x08\x02\x1f\xd0\x1f\xb3\xea\xc3\xf90\x9f/\xecU\x95aY\xf9z\xb9\x8b\x8a\xb3\x88\xc3c\xd2\xab\x07\x02\x1f\xd0\xd3p\x92\x09|\xad\x0a\xbda\xa9\x89\x1aG},u\x13B\x9e^=X\xae74\x01\xb0Do\x95\xdb(n{\xf12\x94iq\xfe^\xf3\x00\x1d\xda\xeeqb\x97+\xbdc\x80\xc0\x07\xacT\x9c\x94q\xaa%Z3\xa9\xb8\xfeP\x13\x01\xb3\x18\xd2\x05\xe8\xb8X\x86%\xb5^\xeeY\x9c\xf1\x0a \xf0\x01\xf4\xd4\xa4\xe2z\xbd{\x80\xc0\x07\xd0Wy\x9eW\xad\x97{l\xb2\x03 \xf0\x01\xf47\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01\xf4X\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa7\xf2<\x0f\xcb\xd2=J\xec\x12\xca\xb0\x1ci)@\xe0\x03\xe8\xaf\xaa07\xd6D\x80\xc0\x07\xd0Sy\x9e\xefg\xd5eXN\xb5\x14 \xf0\x01\xf4\x97\xde=@\xe0\x03\x18\xaa<\xcf\x8b,]\x86\xe5I\x1f\xd7\xcb\x05\x04>\x00\xb2\xef\xaf\x97\x9b*\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00=Vd\xe92,\x87\xca\xb0\x00\x02\x1f@O\xc5\xf5r\x0f\x12\xbb\x5c\x94ao\xa2\xa5\x00\x81\x0f\xa0\xbf\x8a\x8a\xeb\xad\xa8\x01\x08|\x00}\x95\xe7\xf98K\x97a9V\x86\x05\x10\xf8\x00\xfa\x1b\xf6\xc2z\xb9\xa9\x89\x18a\xa2F\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pmj\xa2\xc6\x912,\x80\xc0\x07\xd0S\xb1\x0c\xcb\xe3\xc4.W\x992,Um\xb8\x13{I\x81\x8474\x01\xc0\xdaT\x859eX>\x19\xee\xf6\xca\x8b\x9d\xb8\x85\xb0|{\xde\xe3[\xe5v\xaa\x85@\xe0\x03\xe8bxy\x98\xd8%\xac\x97{\xa2\xa5>\xe1\xf9\x8c\x9f\xef\x08|\x90fH\x17`=&\x15\xd7+\xc32%\x04\xcf\xf8\xb9!]\x10\xf8\x00\xba%\xcf\xf3\x10\xe6R\xeb\xe5\x862,\xe7Z\xeaS.g\xfc|O\xd3\x80\xc0\x07\xd0\xa5\xb0\x17z\xa3\x8a\xc4.\xa1\x0c\x8b\xde\xbdf\x81\x0f\xa8\xe0\x1c>`\x15!g/\xbb9\xc9>l\xb7\xff\x0e=\x5coo\xe0yj!\xec\xa5\xca\xb0\x14&j4\x0e|\xbb\x9a\x06\x04>`\xfdNf\x84\x9c\xbdx\xdd\xa6\x04\xdf0\xb9\xe0Qb\x97\xab2\xec)\xc3\xd2<\xf0\x01\x15\x0c\xe9\x02\xab\x0a|\xd3\xecoX;T\x85\xb9\xb1\x97\x0a 
\xf0\x01C\x0b|\xf7c\xaf\xd7\xe0\x95\x8f3\x84\xdb\xd4\xd0\xe3\x99\xf5r+]&\xdawO\xf3\x80\xc0\x07\xacQ<O\xefz\xc6\xd5\xe3\x0di\x06\xbd{\x8b\xbf\x8e.\xb5\x02\x08|@\xb7M65\xe8\xe4y^d\xe92,O\x85\x19@\xe0\x03\x86`V\x0f\xd7V\x19\x88\x06\x1b\xfab\x19\x96T\x99\x95\xd0\xf3Yxy\x00\x02\x1f\xd0{\xb1\x07k\xd6J\x09C\x0e<!\xe8\xa6\xca\xb0X/\x17\x10\xf8\x80\xc1\x85\x9fi\xee\x0f\xb1\x97/N$8H\xecrQ\x86\xbd\x89\x97E\xa3/\x0e\xf9\x8c\xedT\xeb\x80\xc0\x07t\xe3\xc3:L\xde\xb8\x9aqu1\xc0\x87\x5c\xf5\x98\xac\xa8\x01\x08|\xc0 \xcd\x0aA\x83\xea\xe5\x8b\x8f%U\x86\xe5\x99^)@\xe0\x03\x06)\x0ea\xce\xec\xe5\x8b\x93\x1c\xfa\x1e\xf6\xac\x97\x0b\x08|\xc0\xc6\x9b\x15\x86\xee\x0f$\x08\x1df\xe92,G\xca\xb0\x00\x02\x1f0h\x15\xbd|\x8f\xf3<\x1f\xf5\xf5\xb1\xc5c\x7f\x9c\xd8%<n\xeb\xe5\x02\x02\x1f\xb0\x11R=y\x93\x1e?\xae\xaa0W(\xc3\x02\x08|\xc0F\x883vg\xd5\xe5\xdb\xcd\xf3\xbcwC\xbb\xb1\x0c\xcb\xc3\xc4.g\xca\xb0\x00\x02\x1f\xb0i\xc6\x89\xeb\x8a\x1e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0f\x89\x13\x17\x9e\xcc\xb8:\xacN1\xe9\xcbc\x89=\x92\xdb\x89]\x8e\xcb\xc7{\xeeY\x07\x04>`\x13C_Q^\x5c\xcc\xb8\xba\x17C\xbb\xca\xb0\x00\x02\x1f@\xb5\xfd\x18\x8a\xa6\xf9Z\x19\xa8v:~\xfc!\xec\xa5\xd6\xcb=2Q\x03\x10\xf8\x80\x8d\x16\x87v\xc7\x89]&]=\xf6x\x9e\xe1\xa3\xc4.W\xb1\x17\x13@\xe0\x036>\xf4\x85Y\xbbOg\x5c\xdd\xe5s\xdf\xaa\xc2\xe8\xd8\xb3\x0b\x08|\x00\x1f\x87\xbep\x9e\xdb\xf1\x9d\x1f\x85a\xdew\xca\x9fw24\xe5y\x1e\x86\xa2S\xeb\xe5\x9eY/\x17\xe8\x8274\x01\xd0\xb1\xd07\x8e\x93 F\xe56\xee\xf8\xcc\xd6\xaa2,c\xcf( \xf0\x01L\x0f}\xfb]?\xc62\x94\x16Yz\xbd\xdc\xa7\xd6\xcb\x05\xba\xc2\x90.@\xf3\xb0\x17z SeV\xc2Pt\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pn\xaa\x0c\xcb\xa12,\x80\xc0\x07\xd0Sq\xbd\xdc\x83\xc4.\x17\xd6\xcb\x05\x04>\x80~+*\xae\xb7\xa2\x06 
\xf0\x01\xf4U\x9e\xe7\xe3,]\x86\xe5\x992,\x80\xc0\x07\xd0\xdf\xb0W\xb5^n\xa0w\x0f\x10\xf8\x00z,\x84\xb9T\x19\x96'\xca\xb0\x00\x02\x1f@O\xc5\xf5rS\xbdwWYu\x11f\x00\x81\x0f\xa0\xc3\xaa\xca\xb0\x14\xca\xb0\x00\x02\x1f@O\xc52,\x0f\x13\xbb\x9c)\xc3\x02\x08|\x00\xfdV5T[h\x22@\xe0\x03\xe8\xa9X\x86e;\xb1\xcb\xb12,\x80\xc0\x07\xd0\xbd\x107)\xb7\xf3r\xdb\xa9\xd8/\x94aI\xf5\xee\x85\xf5r\x95a\x01\x04>\x80\x8e\x85\xbdqv\xb3,Z\xe8\xb5;-\xff\x9f\x0alE\x96\x9e\xa8qd\xa2\x06 \xf0\x01t+\xec\x85\x1e\xbd\xf7\xef\xfc(\x84\xb9\xaf\x95??\x8deW\xee\xee\x1b\xfe\xff(qsWe\xd8+\xb4*\xd0\x17oh\x02`\x03\xc2^\x18\x9e=\x9dquX*\xedE\xb9\xcf\x93\xec\xe3^\xbbI\xc5M\x1e\xcey\x1c\xfb\xf1\xb6/\xcb\xed\xf5\xde\xc1\xf3:?s\xce \xf0\x01L\x17B\xd2V\xc5>\x8fC\x90+C\xd9?\xc9\xd2\xeb\xe5\x862,'\xf3\x1cD\xf8\xbdX\xe6e\xda\xf1\xec\xd6\x0c\x8dS\x8f\xe9\xb5\xff\xbf\x8ca\xb1\xeag\x97V\x07\x01\x81\x0f`(B@\x1b\xd5\x08}\xe1\xfa\x7fP\xb1\xcfx\x91\x03)\x03\xd6\xed\x84\x91pL\xdb-=\xbeia\xf1\xe1\x9c\x012\xac\x1a\xf2z\x08\xac\xd5\xfb\x18~\xe6\xbcF\x10\xf8\x00\xd6\x22\x9co\x17f\xe7f7\xb3n\x1f.pSO\xdb\xe8\x11\x0b\xb7\x11{\xfaN\xb2\x9a={+t?\xfb\xf4\x9a\xc1\xb5\x8fqJ\x80<\x9b\xb2\xdbi\x0c\x87'^\x9d \xf0\x01\xb4\x19\xfaBP\xdb\x8fAk2%\xd4T\xf9\x7f\xe5\xf6\x9f[<\x9e\xd0\x13\xb6\x17\x83\xe8\xc1\x80\x9b~ZX\xac*y\x03\xb4\xcc,]`\xd3\x82\xdfi\xb9\x8d\xca\x7f\x86I\x1a\xd7\x0d~\xf5\x07\xcb\xed\x1f\xc7\x1a~\xf7Z<\x9eqy\xf1\xee\x06=\x05\xa1\xc7o\xcf\xd0/\x08|\x00\xab\x08~Evs^\xdfq\xc3_\xbd\xad\xe1\xb7\xd3\xe2\xb1\x84\xde\xaew\x1a\x06\xd0>\x0a+\x93\x08{ \xf0\x01\xac4\xf4\xbd\x8c=l\xffa\x8e\xd0\xf7\x8d2\xf4\x15m\xf5\xf6\x95\xc71)/\xf6\x06\x1c\xfa\x9e\xc6\xb6\x06\x04>\x80\xd5\x8a\xabo\xfc\x959\x7f=\x94r\x09\x130\x8a\x96B_\x98\xf9\x1az\x0e/\x06\xd6\xcc\xef\x94\x8f\xcd2t 
\xf0\x01\xac%\xec\x85\xde\xb9E\xc3Z(\xe5\xd2Z\x98\x89\x93K\xf6\xb2\xe9\xb3[\xfb&\xf4V\xbe\x1d{/\x01\x81\x0f`-BPK\xcd\xd6}\x96\xdd\xd4\xa5\xab\xd2jy\x918\xd4\x1cB\xdfq\x8f\xdb6\x84\xbd=\xa5W@\xe0\x03X\x9b\xb8^n\xaag.\x04\xbdq\x9c\xd1\xfbNE\xf0[J\x89\x91\x1e\xcf\xe0\xbd\x88a\xef\xdc+\x0d\x04>\x80u\x0a!-\xb5\xf2Fq;\x9b4\x0cI\xc6\xe0\xf7V\xf6\xe9^\xb7\x8be\x06\x9b\x1e\xce\xe0\x15\xf6@\xe0\x03X\xbfX|9\xb5\xe2\xc6\xd9\xb4\xf3\xceb\x0d\xbfq\xf9\xcf\x07\xd9\xc7u\xfc\x96^@\xb8G3x\x9fej\xec\x81\xc0\x07\xd0\x11U!\xad\xa8\x08`\x97\xa1\x8e_\xb9\xdd[\xd5\x84\x84\x1e\xcc\xe0\x0d5\xf6\xf6\x85=\x10\xf8\x00\xd6.\x96a\xd9\xae\x08.\xa7]<\xf6\x0e\xcf\xe0}W\x8d=\xe86k\xe9\x02\x9b\x14\xf6\xaa\xd6p\x0dC\xa6\x87\x1d<\xeep\xcc\xb7=g\xa1\xa7\xaf(\xb7\xaf\x96\xdb\xdf\xee\xc0\xe1\xbd\xa3\xec\x0a\x08|\x00]\x12\xc2\x5cj\xa2\xc6QG\x87$\xc3P\xeen\xc7\x8e)\x84\xe3\xfd\xae\xf6\x86\x02\x02\x1f\xb0\x81b\x19\x96\xc7\x89]\xae\xe2\xfa\xba]\xb4\xd3\xb1\xe3\xb9\xad\xb1g&.\xf4\x84s\xf8\x80M1\xa9\xb8\xbe\xcbK\x7fmu\xe8X\xc2\xa4\x91\x1da\x0f\xfaE\x0f\x1f0x\xb1\x0cKjH\xf4\xac\xab+B\xc4c\xefR\xd8Sv\x05zH\x0f\x1f\xb0\x09&\x15\xd7w\xb9w\xef^G\x8e\xe3X\xd8\x83\xfe\xd2\xc3\x07\x0cZ\x9e\xe7U\xeb\xe5>\xed\xf8\xf0d\x08X\xcfb\xf0\x0b\xdb\xf6:\xc2\x9e\xb2+ \xf0\x01t5\xec\x85\x80T$v\xb9\xae\xb8~\xed\xe2,\xd8\xd3\x8a\xc7\xf8\xbd%\x1e\x82\xb2+0\x00\x86t\x81!\xab\xbd^n\x8f-s\x06\xaf\xb0\x07\x03\xa1\x87\x0f\x18\xa4<\xcfC\x10:H\xec\x12\xca\xb0\x1c\x0d\xe0\xa1.#\xf0)\xbb\x02\x03\xa3\x87\x0f\x18\xaa\xaa07\x1e\xc8\xe3l;\xf0\x09{ \xf0\x01t_\x9e\xe7\xfbY\xba\x0c\xcb\xb3\x01\xad\x10\xd1f\xe0\x0beWF\xc2\x1e\x08|\x00]\x0f{U\xeb\xe5\x06\x87\x03z\xc8m\xcd\xdaUc\x0f\x04>\x80\xde\xa8*\xc3\xf2\xa4\x0c5\x97\x03\x09\xb7{-\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x87\x004\xca\xd2\xbdw\xe1\xfc\xb4\xa3\x01=\xe46\x86s\x9f\xaa\xb1\x07\xc3g\x96.0$E\x96.\xc3r8\xb0^\xac\xd1\x82\xbf\xaf\xec\x0al\x08=|\xc0 
\xc4\xe1\xcdT\x19\x96\xb3\x01\x86\x9by{\xf8\xae\x85=\xd8,z\xf8\x80\xa1\xa8\x1a\xaa-\x06\xf8\x98w\xe7\x0c{\xca\xae\xc0\x86\xd1\xc3\x07\xf4^\x9e\xe7\xe3,=[\xf5x@eXn\x1f\xf3<\xbd{W\xc2\x1el&=|@\xdf\x83OU\x19\x96\xce\xaf\x97;\xa7\xa6\x81O\xd9\x15\xd8`z\xf8\x80\xbe\x0b\xb3rS\x135\x8e\x86R\x86\xe55\xa3\x06\xfb>\x13\xf6`\xb3\xe9\xe1\x03z+\x96ay\x9c\xd8%\xac\x97[\x0c\xf4\xe1\xef\xd5\xdc\xefX\xd9\x15@\x0f\x1f\xd0g\x93\x8a\xeb\x0f\x07\xfc\xd8\xeb\x0c\xe9>\x11\xf6\x80@\x0f\x1f\xd0K\xb1\x0cKj\x96j(\xc3r2\xd0\xc7>\xca\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07\xf4^U\x98\xd9\xd4\xde\xbd0Ie<\xd4\xb0\x0b\x08|\xc0\x86\xc8\xf3\xbcj\xbd\xdc\xa7\x03/=\xb2\x93\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0oa/\x94a)\x12\xbb\x0c\xb5\x0c\xcb]{S~v!\xec\x01\xb3\xe8\xe1\x03\xfa&\x84\xb9\xd4\xf9k\xc5\x06\x94\x1f\x19\xcd\x08{\xca\xae\x00S\xe9\xe1\x03z#\xae.\xf1(\xb1K(\xc3r4\xf06\x08=\x9cw\x87\xb3\x8f\x85=\xa0\x8a\x1e>\xa0O\xaa\xc2\xdcx\x03\xda\xe0\xee\xf9{j\xec\x01\xb5\xe8\xe1\x03z!\xcf\xf3\xfd\xac\xba\x0c\xcb\xe9\x064\xc5^\xbc|W\xd8\x03\xea\xd2\xc3\x07\xf4\x85\xde\xbd\x1b\xa1\x87O\x8d=@\xe0\x03\x86%\xcf\xf3\x22K\x97ay2\xd0\xf5r\xa79\xdc\xa0\xc7\x0a\xb4\xc4\x90.\xd0\xf5\xb07\xca\xd2E\x94C\x19\x96\xa3Mi\x0fa\x0f\x10\xf8\x80!*\xb2t\x19\x96C3T\x01\x04>\xa0\xa7\xe2z\xb9\x07\x89].\x9c\xcb\x06 \xf0\x01\xfdVT\x5c\x7f\xa8\x89\x00\x04>\xa0\xa7\xf2<\x1fg\xe92,\xc7\x1bR\x86\x05@\xe0\x03\x06\x19\xf6\xc2j\x12\xa9\x89\x18\x9b\xb0^.\x80\xc0\x07\x0cZ\x18\xaaMM\xd482[\x15@\xe0\x03z*\x96ay\x9c\xd8\xe5*\xdb\xa02,\x00\x02\x1f0DUaN\x19\x16\x00\x81\x0f\xe8\xabX\x86\xe5ab\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xfe\x9aT\x5c\xaf\x0c\x0b\x80\xc0\x07\xf4U\x9e\xe7!\xcc\xa5\xd6\xcb\x0deX\xce\xb5\x14\x80\xc0\x07\xf43\xec\x852,Eb\x97P\x86E\xef\x1e\x80\xc0\x07\xf4X\x08{\xa92,\x85\x89\x1a\x00\x02\x1f\xd0Sy\x9e\xef\x94\x17\x8f\x12\xbb\x5c\x95aO\x19\x16\x00\x81\x0f\xe8\xb1\xaa07\xd6D\x00\x02\x1f\xd0Sy\x9e\xefg\xe9\xf5r\xcf\xac\x97\x0b 
\xf0\x01\xfd\xa6w\x0f@\xe0\x03\x86*\xcf\xf3\x22K\x97ayb\xbd\x5c\x00\x81\x0f\xe8o\xd8\x1be\xe92+\xa1\x0c\x8b\x89\x1a\x00\x02\x1f\xd0cE\x96.\xc3b\xbd\x5c\x00\x81\x0f\xe8\xab\xb8^\xeeAb\x97\x8b2\xecM\xb4\x14\x80\xc0\x07\xf4WQq\xbd\x155\x00\x04>\xa0\xaf\xf2<\x1fg\xe92,\xcf\x94a\x01\x10\xf8\x80\xfe\x86=\xeb\xe5\x02\x08|\xc0\xc0\x850\x97*\xc3r\xa4\x0c\x0b\x80\xc0\x07\xf4T,\xc3\xf28\xb1\xcbU\xa6\x0c\x0b\x80\xc0\x07\xf4ZU\x98+\x94a\x01\x10\xf8\x80\x9e\x8aeX\x1e&v9S\x86\x05@\xe0\x03\xfa\xad\xaawo\x90\x135\xca\xa0\xbb_n\xafjn\xe3.\x04\xf3\x06\xc7kr\x0d}\xfd\xbb\x1c\x97\xdb\xf9k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x98\xf3\xcd5\x04\x83\xed\xc4.\xc7\xaf^\xbd:\x1f\xe2c/\x1f\xd7IyqVs\xf7\xa2\x03\x87\x5c\xf7\x18Bal\xe7[\xd2\xc7\xf7\xa3Iy\xf1\xfe\x94\xf7\xa40\x02\xf1<\xae\xef-\xf0\x014|sU\x86\xa5~\x88\xba\xbf\xce^\xbe\xd8\xbb\xb1[sw\xbd{\xf4\xf1\xfd(\xfc-\x1eT\xec\xf6x\xc8=}\x02\x1f\xb0\xcc\xb0\x93Z/\xf7h\xe8\x135b\x11\xe9g-\x87\xc3e\x98\xd4\xdcOal\xfa\xea\xb0\x07\x7f\x87\x02\x1f\xd0\xbbo\xd3\xa3\xf2\xe2Qb\x97\xab28\x14\x1b\xd2\x1cu?h\xd6\xd2\xcb\x17\xef\xf3~\x8d]\x15\xc6\xa6\xaf\xefG{\x15_>\xef\xda\x1dj;\x08|\xc02L*\xae\x1foJC\xc4b\xd2Ok\xee\xbe\x8e\x10\x5c\xf7>\x15\xc6\x06\x81\x0f\xe0\xfb\xdf\xa6\xf7+\xbe%\x9fm\xe0\xb0`\x08U\xd75\xf6[i/_\x83\xde\xbdM\xea\x91\x05\x81\x0f\xa0\x86\xaa\x19\x9c\xe3Mk\x90x\xaeb\xdd\x99\xadE\x9c\xf0\xb2\xaa 
Z\x87\xa1\x5c\xfa\xec\xb2\xc1\xbe\xd7Cm\x04\x81\x0fhM\x9c\x09\x97\xea1z\xba\xc1\xc3\x82!\xf0]\xd5\xd8\xef\xfe*\x02V,\x99S\xa7w\xef,\x96\x98\x81\xbe~\xe1\x0a\xef9g\x0d\xfeN\x05>\x80D\x80\xb8W\x11T\xc27\xe7b\x83?t^6x\xfc\x87\xcb\xec\xe5\xabQ2\xe7\xae\xb1W7\x030\xce\xaa{\xef.\x04>\x80z\xdf\x8cS3\xe1\x0e7}\xbd\xdc\xb8\x84\xdcE\x8d]\xb7\xb2\xe5\xf6\xf2\x1df\xf5f->5Q\x83\x81\xfc\xed\x85\xd7\xf1^6\xbb\xa7\xef8\x5c?\xe4\xf7\xa87\xbc\x0c\x80E\xc5\xb2\x07\xa9\xa2\xa6\x17\xd6\xcb\xfdD\xd8z^g\xbf\xb2][\xafUX\xa3'\xf6\xd6F\xf7\xc82\xc8\xd0\x17V\xf5\x09K\x08\xee\x94\x97a\x1b\x95\xdbi\xb9\x9do\xc2\x97Q\x81\x0fhCU0p\xd2\xff\xc7\x1f:\xa7\xe5\x07N\xe8e\xa8\xaa\xf7u\xdb\xcb\xd7v\xe8\xaa\xdb\xbb\xb7\xf1=\xb2\x0c:\xf8\x9do\xda\xe36\xa4\x0b,$\x96\xf6H\x85\x17\xab3|\xda\xb8n8k\xf3\x5c\xbe\x06\xbd{gzdA\xe0\x03\xb8\x1b \x8a\xc4.Vg\x98\x22\x9eOt\x5cc\xd7\xb6\xcf\xe5\xab:\xcf\xf2V\xe1Y\x02\x81\x0f\xe0VUi\x0f\xab3\xa4CU\x9d\x9a_\xad\xf4\xf2\xc5\xe5\xee\x0ej\xecz\xacG\x16\x04>\x80\xbb\x01\xe2qb\x97Ps\xeeHKM\x17\x83p\x9d\xf6i\xab\x97\xaf\xa8\xb1\x8f\x1eY\x10\xf8\x00>\xa1*\xac\x14N\xfa\xaf\xd5\x86u{\xf9F\x0b\x86\xf3:\xbd{G\x9e3\x10\xf8\x00n\x03\xc4^y\xf10\xb1\x8b\x93\xfek\x88\xe1\xaaN\x8fZ\xe8\xe5+\x16\xb8\xab:\xbfk\xbd\x5cX\xcf\xfb\xe9(\xbc\xa7\xc6\xf7\xd5\xa5Q\x96\x05\x98Ge\xef\x9e&\xaa\x1d\xfa&5\x96\xa4\x0b\x0e\xc2~M\xcf\x89\xacQ#\xf1\xd6\xd8\xb3\xb1\x96\x0f\xfbp~fx\x8enk\xc3\x85\xff\xa7f\xbd\x87\x1e\xe1\xf3;\xdb\xa9\xf3d{\xf5\x5c\xef\xc7\xe7{4\xedy.\xf7\x99\xf5<\x9f,\xda\xfb.\xf0\x01M\xdf\xb4B0\xd8N\xec\xe2\xa4\xff\xe6B/\xdf\x075\xf6+\xe6\x08fu\xc2\xb7\xd29\xab\xfd\x1b\x1a\xc5\xe7q\xbf\xe2oi\x9a\xad\x18\x14v\xef\xdc^\xa8\xeb8i\xa3W=~Ax^s\xf7\xb7\x97\xb5\xcer\x0cG\x97Y\xbdY\xe5\x95\xc7Q\xf7q\x95\xb7\x93/\xe1\xb1\xec\xc5\xe7\xfb`\x81\xe7\xf9\xfd\xf2vn\x97~\x9b+\xfc\x19\xd2\x05\x9a\xbe\x09\x1fU\xf4>8\xe9\xbf\xa1\xf8aUgq\xf7\x83&\xe7\xf2\xc5\x0f\x9a\xdd\x1a\xbbz\xceV\xf4e\xa9\xdcBo\xcd\x8b\xecf\xc2\xd3vK7\xbd\x1b\x03\xc1\xe5\xa2\xc3\x821\xf8?\xad\xb9\xfbd\x89k>O\xb2\xfa\xcb\xff\x9dt\xf4\xf9\x0eC\
xb5\xa71h\x1e\xb4p\x93\xe1\xf5\xf2~\x08\xc2\xa1\xb7\xbfi\xdb\x0b|@\x13E\xc5\x9b\xb0\x93\xfe\x17k\xdb6\xf7\xab\xbb\xef\x13C\x82+\x09z\x97\xf1\xc3z{\x89w\x15N\x0bx\x1e\x96\xe4[0\xf4\x85/\x00u\xd7|>YB{\x85\x9e\xcf\x875v\xbd\xc8:z\xfaH\x1c\x099\xaf\xf9\x85\xab\xa9\xad\xf8\x85aG\xe0\x03\x96\xf2m\xb5\xbcx\x94\xd8\xc5I\xff\x8b}\xc8\x86\x9e\x80:\xc5\x98k\xf5\xf2\xd5\xec\xddS:g\xf9\x7f7G1\xe8\xdd_\xe1\xdd>\x0a=K\x0b\xf6\xbe\x8dk\xee\xb7[\xde\xcfa\x8b\xed\x15\x8eyR\xf7\x18\xbb\xf8\x05\xf3\xces\xbe\xb5\xc4\xbb\xb9nz\x1a\x86\xc0\x07\xd4U\xf5&lXpqE\x8b\xfb\xd5\xdaG\x8f\xec\xd2\xadk\xcd\xd6\xdd\x06\xc1i\xda\x17\x90p\xdc\xef\xd6}=.R6h\xca\xfbL\x9d\xa0\xf4n<\xc6\xae\x85\xbd\xa2\xe2\x8bq[\x1a\xf7\xac\x0a|@\x9d7\xb1\xbd,\xdd[t\xd6\xd5\xf3h\xfa$\x0e\xad>\xa9\xb1k\xb2\x97\xaff\xef\x9e\xd29\xab\xb1\xc8\xdf\xc5\xc5\x82\xf7\xfd0\x06\x90y_\x8f\xa1\xa7\xaa\xce\xb9\xa5[\x8b\x84\xcb;\xaf\xdb\xbaC\xb9g\xf1\xd8\xba\xf6>\x19\x8e\xffqW_W\x02\x1fP\xf7[w\xcaX\x13\xb5\xa6n1\xe6b\xce\xebn\xe9\x91]M\x88\x0f=\xa8\xa9\xa1\xfa\xf0\x5c?\xcbnz\xd3\xde*\xb7\xcf\x86\x99\xa2q\xdb\xb9\xfdw\xf9\xf3\x07\xe5\xf6v\xbc\xad\xeb\x06\x87\xf0\xb8\x0c\x22;\x0b<\x84q\xcd\xfb[hh\xb7\xc1P\xeeu\x17\xdfojLh\xfbTh\xbd}\xce\xef<\xdf\xb7\xcf\xf3[\xf1\xbaY\xcf\xf5\xf5<_\xb0\x05>\xa0\xea\x8d,\x84\x87\xd4\xf9GO\x9d\xf4\xdfz@\xa8\xf3\xc11\xb5\x97\xaff\xef\xdeq\x17\x87\xc3\x06\xecdJh\x09\x1f\xe6_,\x9f\x87{\xe5\xb6\x1fz\xac\xc29Y\xb3\x86\xd8\xc3\xdfX\xf8\x90/\xb7\x10v\xc2\xf3\xfe\xa4\xc1\xfd\x1f-\xf0z\xbcl\x10\xb0\x16\x19\xda\x0da\xaf\xceP\xee\xb8\xa3\xef7U\xeb\x8a\xdf\xba\x88!o\xef\xf69\x9f\xf2<\x9f\xc6\xeb\xc2c\xbdw'\xe8\xd7\xfd\x02.\xf0\x01s}k=\xac\xf8\xb6]h\xa9\xd6C_h\xd3\xab:\x1f~\xd3>t+~G\xe9\x9c\xd5?\x9f'\xb1\xdd\xafcP\x1b\xc5\x0f\xf3\xf39o\xefe|\x8d\xbc\x95\xd5\xef}\xdbY\xf0\xf8\x9f\xd5\xd8u\xae\xa1\xdd\x06C\xb9\xc7\x1d>u\xa4N(\x0eao\xaf\xe9d\x8b;A\xffA|\x1e\x04>\xa0uG\x15\xdf\xba\x9d\xf4\xbf<u\x82\xf4\xe1\xdd\x99\x985{\xf7<g\xeb\x0b\x04!\xe8\xb5\xd6\xfe18\x8ck\xee~\xd8\xc2\xf1\xb7>\xb4\xdb`(\xf7\xaa\xab_T\xe2\xdf]U\xef\xdeu\
x0c{/\x17x\xbe/co\xf0\x5c_\x14\x04>`\xd6\x9bX\xe8\x11H\x15\x0b\xbd\xe8\xe2\x89\xd3C\x11'TT\x9d0\xbf\xf5\xda\x87`UH\xf4\x9c\xad\xef\xf9<YF\xd0\x8e=^u\xca\xf9\xec/x?/\x1b\xdcF\x93\xa1\xddIVo(w\xbf\xc3_T\xf6\xea<\xceu\x1f\xbf\xc0\x07\xccR\x15\x0c\x0c\x0b._Qc\x9f\xc3\x18\xd0\xc3\x07\xec\xae\xe7\xcc\xebd\xd6\x97\x83\x15\xae\xc2Qkh7\x1eO\x9d\xa1\xdc'\x1d?\xe7\xb4N\xbb\xae}(Z\xe0\x03\xa6\xbd\x11\x8f+\xc2\x83\xb5WW \xb6qe/_|\xbe\xaa>\xf4=g\xc3}\x9d\x5cf\xf5\xce\xb1\xdbk\xe1\xee\xc2\xeb\xacN\xb9\x98\xe4\xd0n\x83\xa1\xdc\xb3\x81\x14t_{`\x15\xf8\x80io\xc4Uo\xb0z\x8aVg\x5c\xf3C85\xfcn\xa2\xc6\xf0\xd5\x09\xf3;\x8b\xdeI\x1c\x96\x1c\xd7\xdc=\xb5\xdek\x9dY\xad\x9d,\xc12\xa7\x9du\x1f\x80\xc0\x074}#\xb6\xf6\xea\x0a\xc5\xb6\xae:G\xab\xea\x83\xf3\xc8s6xuz\x90\xee\xb5\xf4\x9a\x0c\xf7U\xa7,\xcc\xd4\xa1\xddx~p\x9d\x02\xc5\xe3\x01\xbdnG\x02\x1f\xd0\x19\xf1<\xb0TO\x90\xb5W\xd7\x17\xc2\xaf\xe7\xfc]\xcf\xd9f\xa83!`\xb7\xad;\x8b\xc3\xacuV\xe1x8\xe5\xdc\xc1:\xaf\xc7\xe3\x81\xad\xdeS\x08|@\x97(\xc3\xd2A\x0d\x8a1O\x0d\x8b\x9e\xb3\x8dx\x8d\xac\xe3\x1c\xb1q\xcd/\x22\x93;_*\xc75\x82ggK\xb0\xccP\xa7\xed\xef\x97\x8f}\x22\xf0\x01kWc\xc6\x9c\xb5W\xd7\x1f\xc6\x9b\xf6\xf2Y\xe3\x98e\x86\xcc\xcb\x9a\xc1,\x84\x9d\xa2\xc1\xf2c\xfb=\xfb\x92rZs\xbf\xb0:\xce$q^\xe3R\xbd\xe1%\x0b\xdc\x09\x14)\x85&Z\xeb\x87\xeb\xcb8\xeb\xf1\xfd\x06\xbf6\xee\xf0\x17\x8c\xd3\x15\xdc\xcdy\xd9n\xbd\x9c\xac\x12C\xc1N\xdc\xc2\xbf\xf7\xe2U\xa3\xac\xde\x12^\xabz]Nj\xae\x94q\x18\x8f\xbd\xaa\xe6\xde\x93\xbe-\xfb\x17\xbeT\x95mpU\xf3y\x09\x93\xab\xf6B\x00^\xf5\x17h\x81\x0f\xb8\x1df\xd9N\xecr\xac\xa4Gg>\x5c\x8b\x9a\x1f,]_\xe3x\xd73\xfa\xa9\x80\xb7\x1f\x83\xdd^\x97B]\xcd/\x16\x97\x15an+K\xcf$\x0f\xfa\x5c\x82\xa5h\xf0e,<\xb7\xef\xc7\xbf\xe5\xb0\x9d\xac\xa2G\xd3\x90.\xf8\xa0\xa9\x1afQ\xd2\xa3[\xea>\x17\x85\xa6\xea\xc7\x97\xadr\x0b\xc3\xee\xdf\x8b\x81\xe1\xa0ga\xaf\xe9*\x1c\xa9\xf7\x99q\x9f\xbf\x8ce\xf5&\xb1|*\xf8\x85\xb0\x1c\x87z\x97Z\xbaE\xe0\x03\x8a\x8ao\xe6GN\xfa\xef\x94Z\xcf\x85\xe7\xac\xdb_\xb2\xe29m/\xe3\x07\xfe\xc3\xbe?\xa6\x06\xabp\xcc\xf
c\x223\x80\x12,!\xf4^\xcc\xf1{\xb7\xbd\x9f\xdf(_\x13\xe7\xf1K@\xeb\xe7\xf9\x09|\xb0\xd9\x1f<\xa3\xf2\xe2Qb\x97\xab\x81T\xb9\x87\xae\xfc\xcd\x85\x1e\xda\x10l\x1eg\xf5\xd6\x90\xed\xdb\x97\xc7y\x02\xcf\xb3!L\x08\x8b_\xb2\xf6\xe6l\x83[\xdb\xd9\xc7\xbd~E\x9b\xc1O\xe0\x83\xcdV\xf5&k(\x17\xda\x09z\xf7\xe2D\x95\xaf\x0d0\xe8\xdd\x0d<\xe3\x86\xbfv\x95\x0dg5\x8d\xbb\xa1\xefl\xc1\x9b\xda\x8a_\x0a.\xe3\xb9~\x0b3i\x036\xf7\x03(\xbc)\xa5N\x9cW\xd2\x03\xda\xf9[\x0b\xe7f\x9d.\x18\xf4B\xafQ\x08\x13\xe7\xd9'\x87\xf5O\xef\xfc;\x9c\x8b\xbb\xbd\xe6\xc0\x13\x86$\xc3\xca0\x075\x7f\xe5|h\xa7\x1f\xdc\x86\xbe\x18\xd4\x1e/xs\x1f\x05\xbf8\x13z\xbc\xc8\x0cf\x81\x0f6\xd7\xa4\xe2z\xbd{,\xf3C1\x17\xf6\x92B\x0f\xd1I\x0cD\xa75\xef\xebeG\x1e\xefA\x83_\x09+q\xec\x0f\xf1\xcbe8\x1d&\x16[\x0eA|\xd1\xf34C\x90?\x0d\xa7\x04\xcc;\xfc-\xf0\xc1f\xf68T\xad\x97\xfb\xb4o\xb5\xb0\xa0\x83\x7fg\xf7\x1a\x86\xbd\xeb\x18\x0e&}\x9c\xc0\x10\x1f\xef<\xc1\xed\xa3\x19\xaaC\x5c\xef9>\xa6\xfd8\xa22n\x18\x86_\x17^G\xa1\x9cK6O\xe8s\x0e\x1fl\xe6\x87PQ\xf1\xa1Sh)X\xd8I\x83\xb0\x17f\xb8\x8eB\xafP\x8f\x83\xcfQ6_I\x99\xd0F\x93!\xbf\x10B/m\xb9\x85\xc0\xf7\xa0\xdc\x9ed7\xe7.\xce\xeb\xfd8\xc4+\xf0\x01\x95o\xca\xd6\xcb\x85\xe5~\xb1\x0a\x1f\xeeu\x8aK\x87/Xo\x85\x15A\xfa\xfcw\x17\x03\xc8\x22\xbdW\xbbq\xe4a\xd0B\x98\x8f\xa1~T\xfe\xf7\x8b\xe5v\x9c5_21\x98\xc4*\x0b\x02\x1f0\xf5M\xb9\xea\xfc\x9aP\x86\xe5HK\xc1\xc2\x8a\x9a\xfb\xed\xf5}\x15\x9b8j0i\xa3\xcd\x9a\x86\x98\x9e\x87\xbf\xf3\xd8\xeb\x17\x1e\xf3\x93\x86\xc1o+k8\x12#\xf0\xc1f\xa9\x0ascM\x04\x0b\x07\xa0\xd0\xdbUgh\xf3\xc9@\xce\x95\xad3t\xfd\xacf\x88\xd9\xb8\xca\x00\xa1g7\xd6;\x0d\xc1\xef\xb8\xc1\xaf\x1e4\x09\xc8\x02\x1fl\xd6\x87Pj\x88\xe9\x99\xf5r\xa1\x15u\xce\xaf\xba\x1eBQ\xf38\x0c[5t\x1dF\x0e\xf6k\x86\x99\xed\xb6\xea\xce\xf54\xf8\x85/\xdd\xef\xb4\xfcZ\x13\xf8`\x83\xc2^\xd5z\xb9\x812,\xd0\x8e\xbd\x1a\xfbL\x06\xf0\xbe\x12N\x11\xa9\x13\xce\xc6w\xdec\xea\x0c[>^\xf6\xba\xb2\x1d\x0f~\x93\x06\xa1O\xe0\x03>\x15\xe6RCLO\x86X\x12\x01\xd6\xf4\xe5\xaa\xcep\xee\xe9\x12\xee~w\xc5\x0f7\x04\x93\xca\xa1\
xdc\xdb\x91\x838)\xa5hp\xdb\x1b+\x86\xbe:\xc3\xe0\xb5\x83\xb1\xc0\x07\xc3\xff\x00\x1ae\xe9\xde\xbb\xdb\xda_\xc0\xe2\xea~\x00_\xb6\xfcw\xbe\xd2\x1e\xb1\xf2\xfe\xea\xac\xeaq\xfd\xfa{O\x9c\x14Vg\xad\xd9\xedx\x1f\x9b\xacN8\xae]\xd0[\xe0\x83\xcdx\xd3H\xbd)\x1c*\xc3\x02\xab\xb5\x84\xc9\x1a\xa3\x15\x86\xbd\xbd\xf2\xe2Q\x9d\xf7\x9e\x19#\x07\xe3\x9aw\xf5(\xde\x97\xd7H\x0b\x04>\x18\xb0\xf8f\x99*\xc3r6\xef2=\xc0B\x7f\x9bm\x07\xb4\xfd\x15\x1dw\xdd\x12,\x17\xb3J<\xc5 \xf3\xb4\xe6]N\xe2}n\xaa\xab\xb6nH\xe0\x83a\xab\x1a\x12)4\x11\xacEk\x81/\x86\xc7\x83\x15\x1dw\x08{u\xceQ\x1c\xd7x\xef\xa9\x13f\xeeo\xf8\xfbTkaW\xe0\x83\xe1\xf6 \x847\xdc\xd496\xc7\xca\xb0\xc0\xda\xb4\xd9#7Y\xd1{J8\xe6\x875v\xad\xac/\x18O#\x19\xd7\xbc\xeb\x8d\x1c\xda\x8d=\x9b[m\xdd\x9e\xc0\x07\xc3}\xa3H\xf5\xeeY/\x17\x96\xa3\xeeyW\xfb-\xfd\xad\x87\xbf\xe3\xdd\x86\xef\x0d\xf3\xdc\xcf\xa8f\xb0\xbc\xcajN\x02\x8b_8\xeb\x0e\xed\x9el\xe0\xd0n\x9d\xd7\xc8\x99\xc0\x07\x9b\xed\xb0\xe2\x9b\xe1\x912,\xd0\xbe\xd8sUk\xa8r\xd1\xb5cc/\xfe\xe3\x86\xbf6\xefl\xdeIV\xaf\xb7i\xdcp\x12XQ\xb3\xbd\xb6\xb2\x8e\x96j\x09A\xb4\xed\x1e\xc8\x18n\xeb|)?\x15\xf8`C\xc5o\xe2\xa9\x0f\x81\xab!T\xf8\x87\x0e\xab\xbb<X1o9\x95\xd8\xb3\xf7\xfe\x8a\xdeS\xea\xac\xa6\x11<mz\x9aH\xc3\xa1\xdd\x87qX\xb9k\xc21=/\x8f\xed\xb4\x8d\xe0\x17\xc3^h\xc7:\xe7J\xd6\x0e\xc1\x02\x1f\x0cO\xd5\x1b\x80\x155`\xb9\xea\xd6\x8f\x0b\xbdV\xa7MBL\x08\x14\xe5v\x9e5\xef\xd9\x9b7|\x84@\xfa\xb5\x1a\xbb\xce}\x9aH\xc3\xa1\xdd.\xce\xda\xbd}\xfev\xef\x04\xbf\xf1<\xc7\x19\x03ch\x8f\xed\x1a\xbb\x9f5\x19\xa9\x11\xf8`@\xe2\x9b\xc5n\xc5\x1b\xc4\x89\x96\x82\xe5\x89\x1f\xc2u\xcf\xad\x0a\xa1\xef\x83TH\x08\xa1+\xf4\xb2\xc5\xa0\xf7<\x11\x06\xae\xdb|\x1c\x0dJ\xb0\x04\xe3\x05\xeby\x86\xb0Xwh\xb73\xefa\xb1\x8d^\x9f\xc8\x12\xde\x83C\xef\xeb\xf7\xe2\xf3Z\xc4\xa0~oV\xa8\xae\xf9\xfc.\xf4\xe5\xfd\x0d\x7f\x9a0(Uo\xcez\xf7`5\xc6\xe5\xf6\xa2\xc1\xfe\xbb\xb7A\xa1\xfc\xe0\xbf\x0doMfh^\xc7\xfb\xfc\xa0\xc5\xc7P\xd4\x0c\x1f\xcf\x16\xfd\x22\x19\xc2b<'\xf1y\x9d\xb6\x0a\x01iV\x9d\xbf\x15\xdb\xaf\xf9\xbc>\x8e\xe1\xee\xf6\
xe7!\xdc\xde_\xe0~\x9f4-\xcc\xac\x87\x0f\x06\x22\x9eg\x93z\x03y\xba\x84\xea\xfe\xc0\xf4\x00sY^\xbc\xbb\xc0M4\x0d{{5CW\xads\x06\x1b\xac\xa6q\x1b4\xdbh\xb3\xd3\xac\xfe\xd0n\xb1\x84\xe2\xd5\xcb\x08|\xb3,\x12\xf6\x8e\xe79\x0f[\xe0\x83a\x84\xbd\xaa\x19]\xca\xb0\xc0\xeaC_\xe8\x81:^\xf2\xdd\xdc\x86\xbd\xba_\xe6*\xcf+\x8b\xef'u{\xec\xc6-/\xcdXd\xf5\x86\xa6\xd7>kw\xc6p\xee\xb2\x85\xb07W\xc0\x16\xf8`\x18\x8e*z\x04\x0a\xeb\xe5\xc2ZB_\xf8p~\xba\xa4\x9b\x0f\xe7\x09\x8e^\x0b{U\xe7\x0e\xd6\x99H0\xc9\xea\xf50\xb6~Np\xc3Y\xbb\xbbq\xb6\xf2\xba\x8cV|\x7f\xef\xce\x1b\xf6\x04>\x18\x808\x8b.\xb5\xac\xd2UG\xceu\x81M\x0d}\xe1t\x8b\xb7\xb3\xf6&U\x84\xf3\xbf\xde)owo\x8e/r;\x15\xef'!P\xd4\xe9\xb5jm(wJ{\x85\x10\xf9\xac\xe6\xee\x8f\xe7-m\xd3\xc2q\x86\xa0\xfd [~/n\xb8\xfd\x07\x8b\xbe\x8f\x0b|\xd0\x7fUo\x02cM\x04k\x0f}!\xc4\x8c\xca\xed\xc9\x02\xc1\xef\x22\x06\xbd\xd0\xab7\x99\xb1O\xd5\xd0\xee(\x11\xf6FY\xfd\x922\xc5\x92\x8b\xb7\x8f\x1b\xb4\xd3d\x8d\xcf\xebe\xecu\xfblvs\xce\xe6YK7\x1d\x1e\xfb\xd3\x18\xf4\xc6m\xb4u^\xde\x88\xbfD\x88\xb6\xbe\xf2\xa5\xd3\xac\xc12E\xd1\xd9\xf5{\x1f\xee\xad\xe3xc\xfd\xae\xd4\xac\xbc0\xe4\xb2\xe7\x99\x85n\x89\x7f\xbb\xe1os'\xf1\x9es\x15\x03\x5cx_:\xb1:No\x9e\xdb{\xf1y\xbd}~\xef\xd5\xf8\x5c\x09A\xf1e|\xaeO\x971\xc1NY\x16\xe87\xbd{\xd0C\xb1\xc7OM\xcca>\xb7\xdf\x0fn]:.C\xba\xd0\xdfo\x91E\x96\x9e\xda\xffD\x8f\x00\x00\x02\x1f\xf47\xec\x8d\xb2t\x11\xe5p\xfe\x87\x89\x1a\x00\x08|\xd0cE\x96.\x9bp\xa8\x0c\x0b\x00\x02\x1f\xf4T\xac\x80\x9f*\xc3r\x91\x98\xc1\x07\x80\xc0\x07\xf4@Qq\xbd\xf5r\x01\x10\xf8\xa0\xafbQ\xd4\xd4\xf4\xfe\xe3\xb8\x1e%\x00\x08|\xd0\xc3\xb0\x17j9\xa5&bX/\x17\x00\x81\x0fz.\x0c\xd5\xa6&j\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xec\x12*\xf2+\xc3\x02\x80\xc0\x07=V\x15\xe6\x94a\x01@\xe0\x83\xbe\x8aeX\x1e&v9\x8b\xcb4\x01\x80\xc0\x07=5\xa9\xb8^\x19\x16\x00\x04>\xe8\xab<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ck)\x00\x04>\xe8g\xd8\x0beX\x8a\xc4.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb1\x10\xf6ReX\x0a\x135\x00\x10\xf8\xa0\xa7\xf2<\xdf)/\x1e%v\xb9*\xc3\x9e2,\x00\x08|\xd0cUan\xac\x89\x00
\x10\xf8\xa0\xa7\xf2<\xdf\xcf\xd2\xeb\xe5\x9eY/\x17\x00\x81\x0f\xfaM\xef\x1e\x00\x02\x1f\x0cU\x9e\xe7E\x96.\xc3\xf2\xc4z\xb9\x00\x08|\xd0\xdf\xb07\xca\xd2eVB\x19\x16\x135\x00\x10\xf8\xa0\xc7\x8a,]\x86\xc5z\xb9\x00\x08|\xd0Wq\xbd\xdc\x83\xc4.\x17e\xd8\x9bh)\x00\x04>\xe8\xaf\xa2\xe2z+j\x00 \xf0A_\xe5y>\xce\xd2eX\x8e\x95a\x01@\xe0\x83\xfe\x86\xbd:\xeb\xe5\x16Z\x0a\x00\x81\x0f\xfa+\x0c\xd5\xa6\xca\xb0\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xecr\x95)\xc3\x02\x80\xc0\x07\xbdV\x15\xe6\x0aeX\x00\x10\xf8\xa0\xa7b\x19\x96\x87\x89]\xce\x94a\x01@\xe0\x83~\xab\xea\xddS\x86\x05\x00\x81\x0f\xfa*\xcf\xf3\x10\xe6\xb6\x13\xbb\x842,\xe7Z\x0a\x00\x81\x0f\xfa\x19\xf6\xea\x94a\xd1\xbb\x07\x80\xc0\x07=\x16\xc2^j\xbd\xdc#\x135\x00\x10\xf8\xa0\xa7b\x19\x96G\x89]\xae\xca\xb0Wh)\x00\x04>\xe8\xafI\xc5\xf5cM\x04\x80\xc0\x07=\x95\xe7\xf9~\x96^/\xf7\xccz\xb9\x00\x08|\xd0oUeX\xc6\x9a\x08\x00\x81\x0fz*\xcf\xf3\x22K\xaf\x97\xfb\xd4z\xb9\x00\x08|\xd0\xdf\xb0\x17\xca\xb0\xa4\xca\xac\x842,\x85\x96\x02@\xe0\x83\xfe\x0aC\xb9\xa92,\x87\xca\xb0\x00 \xf0AO\xc5\xf5r\x0f\x12\xbb\x5cX/\x17\x00\x81\x0f\xfa\xad\xa8\xb8\xde\x8a\x1a\x00\x08|\xd0Wy\x9e\x8f\xb3t\x19\x96g\xca\xb0\x00 
\xf0A\x7f\xc3\x9e\xf5r\x01\x10\xf8`\xe0B\x98K\x95a9R\x86\x05\x00\x81\x0fz*\xae\x97\xfb8\xb1\xcbUV]\x84\x19\x00\x04>\xe8\xb0\xaa0W(\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<L\xecr\xa6\x0c\x0b\x00\x02\x1f\xf4[U\xef\x9e\x89\x1a\x00\xac\xc5\x1b\x9a\x00\x16\x17\xcb\xb0l'v9~\xf5\xea\xd5\xb9\x96\x02\xe8\x9f\xaf?\xf8\xc2$\xfe\xb3\xf8\xf2\x8bo]\xf6\xf11\xe8\xe1\x83\x05\xfd\xc9?\xfe\x81\xf0\xc5)\xd5\xbb\xa7\x0c\x0b@\xbf\x9dd7+'\xbd(\xc3\xdfi\xb9\x8d\x05>\xd80\x0f\xbe\xf3c\xa3,\xbd^\xee\x91\x89\x1a\x00\xfd\xf5\xe5\x17\xdf\x0a\x81\xef*\xfe7\x14\xd5\x7f\xbf\x0c}\x97\xe5V\x94\xdb=\x81\x0f\x06\xee\xcf\xfd\xe1\x1b\xd9\xaf}\xf0\xab\x9fO\xecrU\x86\xbdBK\x01\xf4\xde\xeb#9\xa1\xdej(\xc3\xf5\xbd0\xe4[n;\x02\x1f\x0c\xd4\xe7^\xfc`\xd5.\x86r\x01\x86a\x92\xdd\x9c\xa23M\x18\xee\xfdF\x19\xfa\xce\xbb:\xdc+\xf0\xc1\x9c~\xea\x0f~4;\xfb\x95\xd3\xd4.\xa1\x0c\xcb\x89\x96\x02\xe8\xbf/\xbf\xf8V85\xa7\xea==L\xde\x0b\xc3\xbd/\xe3p\xefH\xe0\x83\x9e\xfb\xdd\x7f\xff\xdd\xaa]\xc6Z\x09`P\xea\xae\x94\x14\xce\xeb\x0e\xc3\xbda\x92\xc7I\xb9\xed\x09|\xd0C\x7f\xed\xbb\xf7\xb2o\x9e\xffFj\x97\xa7\xd6\xcb\x05\x18\x96/\xbf\xf8V(\xafu\xd6\xf0\xd7BA\xfe\xe7q\x92\xc7x]\x93<\x04>h\xe8\xc7\xff\xf83\xd97O\x93a/\x9c\xe3Qh)\x80A\x9a\xcc\xf9{a\x92\xc7\xfb\xe5v\x19'y\x8c\x04>\xe8\xb07\x7f\xe7\xc7\xb3\xef|\xe7;\xa9]\xac\x97\x0b0P_~\xf1\xad\x10\xf8\xae\x16\xb8\x890\xdc{\xb7\xa6\xdf\xbe\xc0\x07\x1d\xf3\x17\xfe\xef\x0fe\xbf\xf6\xcb\xff\x22\xb5\xcbE\x19\xf6\x8e\xb4\x14\xc0\xa0MZ\xba\x9dP\xd3\xef\x838\xdc{\xb8\xcc\xe1^\x81\x0f\x1a\xf8\xb1\xab\xca?\x19eX\x00\x86\xaf\xed/\xf6a\xb8\xf7k\xd9\x12k\xfa\x09|P\xd3_\xfd\xbd\x9f\xa8*\xc3\xf2\xec\xd5\xabW\xa7Z\x0a`\xd8b\x89\x96\xe3%\xdd\xfcmM\xbfV\x97p{\xc3\xd3\x06\xd5\xc2D\x8d\x97\xdf\xfc\x9f\xc9}\xfe\xd1\xcf\xfd\xcd_\xea\xc2\xd4{\x00V\xe2<\x86\xb3e\x09\xc3\xbd\xbb\xe5\xe7J\xe8M\x0c\xdb\xa4\x0c\x9a\x97\xf3\xdeX\xfe\xea\xd5+O\x19D[_\xf9\xd2i\xfc#\xfb\x84P\x86\xe5W\xff\xd9\xaf\xcc\xfc\xbdb\xffg\xb2\x9f9\xff\xef\x1a\x10\x80e:\x8e\xc1\xef\xb4\xe9/\x1a\xd2\x85)~\xf4\xf7>\xf7\xfd\x7f\x87\xf5rSeXv\xbf\xf4\x
d3\xd9\xdf\xf8\xed\xefh4\x00\x96-\xf4(>\xbf]\xc2\xad\xc9$\x0f\x81\x0f^\xf3c\xbf\xfd\x97\xb3{\xdf\xf8\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf|\xfbG\x92eX\xde\xfeS?\x9c\xfd\xf0\xef\xfe\x91\x86\x03`U>Z\xc2-\xbb\xa9\xe9wT\xa7\xa6\x9f!]\xb8\xe3\xf3?\xfb\x0b\xbf\xf3\x13\xdf\xd9\xfa\xd3\xb7\xff\xff\xdc_|\x91\xfd\xab_\xfa\xe73\xf7\xff\xf9\x9f}+\xfb\x85\xff\xf4m\x0d\x07\xc0\xba=\xcbn\x86{\xa7\xae\xf7k\xd2\x06Don\x7fu\xf2\x13\xd9\xc7a/\xf8\xcc\xb7\xaf\x93\xbf\xf3\xb3\xd9\xff\xd2p\x00tAX\xc2\xed\xe1\xd7\x1f|!\x14\x85\xbe\x9d\xe4\xf1\xfdE\x00\xf4\xf0A\x0c{\xd9\x94\xd9V[?\xf2G\xd9\x9f\xf8\xd1\xdf\xca\xfe\xe5\xf3g\x9f\xfa\x9d\xbf\xffs\x7f+\xfb\xbb\xbf\xf1\xdf4\x1e\x00]\x14z,Bo\xdfQX\x03X\xe0C\xd8\x9b\x11\xf6\xee\xfa\xb3\x9f\xfd\x83\xec\xff\xfc\xfe\xbf\xc9\xfe\xf5\xbf\xfb\xb7\x1f\xfd\xff\xfe\xfdQ\xf6O\xb7~\xc4\xb9{\x00\xf4!\xf8\x1d\x0a|\x08{\x0d\xea(\xfd\x9d\x9f\xfcV\xf6\xcb\xff\xf1<{\xe7\x8b\x7f^\x19\x16\x00\xba\xec\x13C\xbb\x02\x1f\xc2^C\xef\xfc\xe07\xb3\xbf\xfe\xdd\xdf\xd2\x80\x00t\xd1\xd4\xc9\x1b\x02\x1f\xc2\xde\x1c\xfe\xde\x1f\xfeF\xf6\xd3\xbf\xff_5$\x00]\x10\x86m\xc3\xe7\xda\xd1\xac\xd58\x04>\x84=\xa1\x0f\x80~\xba\xc8n\x86mO\xee\xce\xc8\x9dFY\x16\x84\xbd9\xfd\x97\x1f\xf8\x5c\xf6\xd3\x99\xc0\x07\xc0\xca5^bM\x0f\x1f\xc2\xde\x1c\xdez\xf5?\xb2\x9f\xff\xbd_\xd7\xa8\x00\xacJ\x18\xb6\xbd\x9d\x84q\xd9\xf4\x97\xf5\xf0!\xec5\xf4\xf9\xfc\x7f\xff\xca\xcf\xff\xee\xaf\xff\xa2V\x05\xd8h{\xe5\xf6x\x05\xf7s\x16C\xded\x91\x1b\xd1\xc3\x87\xb0\xd7\xcc\xf1o^\xfc\xe2X\xab\x02l\xb6\xaf?\xf8B\x98\x05\xfbp\x89w\x11\x86m?*\x9a\xdc\xc6\x8d\x09|\x08{\xc2\x1e\x00\xcd\xc2\xde\xa8\xbcx\xb1\x84\x9b\x9e\xba,Z\x1b\x0c\xe9\x22\xec\x09{\x004s\xd8\xf2\xed\x85a\xdb\xa3\xd7k\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@M_\x7f\xf0\x85{\xe5\xc5e\xb9m-xS\xb7k\xdd\x16\xf3L\xc2hJ\x0f\x1f\xc2\x9e\xb0\x07@}\xfb\x0b\x86\xbd0l[d5j\xe7\x09| 
\xec\x01\xb0\x1e\xc5\x9c\xbf\x17\x96<;jR;O\xe0\x03a\x0f\x80\x15\xfb\xfa\x83/\xec\x95\x17\xf7\x1b\xfc\xcaB\xb5\xf3\x04>\x10\xf6\x00X\xbd\xba\x9f\x0d\x1f-y\xb6h\xed\xbc6\x99\xb4\x81\xb0'\xec\x01P\xa1f)\x96Vk\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@\xb5Y\x9f\x0fa\x12\xc6$\x06\xbd\x97]=x\x81\x0faO\xd8\x03\xa0\xda\xeb\xb5\xf7ZY\xf2L\xe0\x03a\x0f\x80\x0e\xf8\xfa\x83/\x84\xcf\x88P\x8ae\xa5\xb5\xf3\x04>\x84=a\x0f\x80\xd5\xd9+\xb7w\xb3%,y\xb6*&m \xec\x01\xc0\xc0\x09|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0\x09|\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\xc0\xe6\x06>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x16\xf3\x99\x96\xc3\xd9=a\x0f\x00`\xc0\x81\xaftT\x86\xb4\x91\xb0\x07\x000\xc0\xc0\x17{\xf7B@\x1b\xb7p[\xc2\x1e\x00@\xd7\x02_\xe90^.\x14\xae\x84=\x00\x80\xee\x06\xbe\xdb`u\xbf\x0cm\xfb\xc2\x1e\x00\xc0\x80\x02_\x0cx\xf7\xa7\x84?a\x0f\x00`\x08\x81oJ\xc0{\xd8d\xf2\x86\xb0\x07\x00\xd0\xe1\xc0\x17\x83\xdd\xc3\x1a!P\xd8\x03\x00\xe8c\xe0K\x04\xbb\xca\xe0%\xec\x01\x00\xf4;\xf0%'o\x08{\x00\x00=\x08|eh\x0b!\xeb~b\x97Ca\x0f\x00\xa0\xc7\x81/\xab\x1e\xb6\xdd}}\xf2\x86\xb0\x07\x00\xd0\x93\xc0\x17\x83\xdcn\x8d]\x0f\x85=\x00\x80\x1e\x06\xbel\xc6p\xed\x14ca\x0f\x00`}\xf2W\xaf^\xcd\xf5\x8be\x80{Y^l\xd5\xdc\xfd\xa2\xdc\xb6\x85=\x00\x80\xd5\x9b\xab\x87/N\xd6\xd8j\xf0+\xc2\x1e\x00@\x9f\x02_6\xc7\xd2i\xc2\x1e\x00\xc0z4\x1e\xd2\x8d\x935^\x08{\x00\x00\xfd0O\x0f_!\xec\x01\x00\xf4G\xa3\x1e\xbe7\xb7\xbfz\xaf\xbc\xb8\xcc\x9a\x9d\xbf'\xec\x01\x00\xacQ\xd3\x1e\xbe}a\x0f\x00`\xd8\x
81\xefpE\xc7%\xec\x01\x00\xb4\xa4\xf6\x90\xee\x9b\xdb_\xdd)/\xbe!\xec\x01\x00\xf4K\x93\x1e\xbeU\xf4\xee\x09{\x00\x00\xeb\x08|q\xb2\xc6\xfe*\x0e\xa8\xbc\xaf=O\x0b\x00@{j\x0d\xe9\xc6\x955\xde_\xe1q]\x95\xdbQ\xb9M~\xf3\xe2\x17_z\x9a\x00\x00\x96\x1f\xf8.\xcb\x8b\xfbk8\xbe\xebr;\x09\xe1\xaf\x0c~\xe7\x9e.\x00\x80%\x04\xbe8\xc4\xfa\xbc\x03\xc7z\x91\xdd\xf4\xfa\x9d\xe8\xf5\x03\x00h7\xf0M\xca\x8b\x83\x0e\x1d\xb3^?\x00\x80\xb6\x02_\x9c\xac\xf1\xbd\x0e\x1f\x7f\xe8\xf5+\xca\xe0w\xe2\xa9\x04\x00\x98\xaej\x96\xee\xb8\xe3a\xefH\xd8\x03\x00H{\xa3\xe2\xfa\xc3\x8e\x1d\xaf\xe1\x5c\x00\x80\xb6\x02_\x9c\xacq\xbf#\xc7i\xc2\x06\x00@\xdb\x81/\xeb\xc6p\xeeq\xa67\x0f\x00`!S'm\xbc\xb9\xfd\xd5Qy\xf1bM\xc7\xa4\xe82\x00@\x8bf\xf5\xf0\x8d\xd7p,\xc71\xe4\x9dzZ\x00\x00\x86\x13\xf8\xf4\xe6\x01\x00\xac:\xf0\xbd\xb9\xfd\xd5\xfdl\xf9\x935\x9e\xc5\x90\xa7\xa4\x0a\x00\xc0\xaa\x03_\xb6\xbc\xde\xbd\xd0\x9b7\x89A\xefR\xd3\x03\x00\xac\xc6'&m,i\xb2\xc6Y\xa6@2\x00\xc0\xda\xbc\xde\xc37n\xe9vC\x81\xe4I\x0cz\x97\x9a\x19\x00`8\x81/\xf4\xe6\x85!\xdb\x89\xa6\x05\x00\xe8X\xe0{s\xfb\xab!\xec\xcd3YCo\x1e\x00@\x1f\x02_\xd6\xbcw\xcfrg\x00\x00=\xf0\xd1\xa4\x8d\x06\x935Bo^\x98|a\xb93\x00\x80\x9e\xb8\xed\xe1;\xac\xd8Oo\x1e\x00@\xcf\x03\xdfx\xc6\xf5\x96;\x03\x00\xe8{\xe0\x8b\x935\xb6\xee\xfc\xccrg\x00\x00C\x0a|\xd9\xc7\xbd{z\xf3\x00\x00\x06\x1a\xf8B\xc0\xdb\xd7\x9b\x07\x000L\xff_\x80\x01\x00e|\xfb\xc4\xd4o\x058\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x11\x0bF\x95g\x00p\x00a\x00r\x00a\x00m\x00e\x00t\x00r\x00i\x00c\x00f\x00i\x00t\x00t\x00i\x00n\x00g\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x1c\x053\xe8'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00z\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x10\x0a1\xdeg\x00m\x00o\x00d\x00e\x00l\x00-\x00v\x00i\x00e\x00w\x00e\x00r\x00.\x00p\x00n\x00g\x00\x1c\x053\xf0'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00x\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x1c\x053\xf4'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00y\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00(\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00F6\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00~\xa5\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x006|"
def qInitResources():
    """Register the embedded Qt resource data (icons/images) with QtCore."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded Qt resource data from QtCore's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| mapclientplugins/parametricfittingstep/resources_rc.py | 141,061 | -*- coding: utf-8 -*- Resource object code Created: Mon Oct 15 12:53:43 2018 by: The Resource Compiler for PySide (Qt v4.8.7) WARNING! All changes made in this file will be lost! | 183 | en | 0.751992 |
import unittest,os
from src.tasks.scrape_reddit.tiktok import dwn_tiktok
from src.tasks.generate_video.task import generate_tiktok
from src.tasks.upload_video.task import upload_video
class TestTiktok(unittest.TestCase):
    """Integration test for the TikTok scrape -> generate -> upload pipeline."""

    def setUp(self):
        pass

    def test_tiktok(self):
        """Build a page/video context and push it through the upload step."""
        context = {
            'page': {
                'Nombre': "Pagina que hace compilaciones perronas de tiktok",
                "thumbnail": False,
                'description': ['Y a ti te ha pasado eso? \nIngresa mi codigo para que ganes dinero!!\nKwai 848290921'],
                'tags': ['Amor', 'Meme', 'Chistes', 'Divertido', 'Reddit'],
                "playlist": "Compilaciones TikTok"
            },
            # os.path.join builds portable paths instead of hard-coding the
            # Windows "\\" separator (original broke on POSIX systems).
            'video_path': os.path.join(os.getcwd(), "test", "test_videos", "caption.mp4"),
            'thumbnail_path': os.path.join(
                os.getcwd(), "data", "thumbnails",
                "8ccd23f7-7292-41d7-a743-b2c9f2b7fd36.png"),
        }
        # Earlier pipeline stages, kept disabled for manual debugging:
        # dwn_tiktok(context)
        # generate_tiktok(context)
        upload_video(context)
import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
# Define the templates folder: the "templates" directory next to this module.
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')

# Create the App.
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
return render_template("index.html")
@app.route("/login")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
# Build the authorization URL.
auth_tuple = app.config['auth_client'].authorization_url()
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
@app.route("/login/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
# Grab the Refresh Token.
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
    """Ask the Werkzeug development server to stop serving.

    NOTE(review): relies on the 'werkzeug.server.shutdown' environ hook,
    which newer Werkzeug releases may no longer provide — confirm version.
    """
    stop = request.environ.get('werkzeug.server.shutdown')
    if stop is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    stop()
@app.route('/shutdown', methods=['POST'])
def shutdown():
    """Stop the local OAuth helper server (used once tokens are stored)."""
    shutdown_server()
    return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
    """Launch the local HTTPS server that drives the TD OAuth flow.

    Args:
        flask_client: The FlaskTDAuth client that performs the token exchange.
        close_after: When True, the callback route redirects to /shutdown so
            the server stops itself after the tokens are stored.
    """
    certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
    certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')

    app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
    app.config['auth_client'] = flask_client
    app.config['call_close'] = close_after

    # NOTE(review): debug=True enables the reloader/debugger; presumably fine
    # for this local helper, but confirm it never runs in production.
    app.run(
        ssl_context=(certs_pem, certs_key),
        host='localhost',
        port=5000,
        debug=True
    )
if __name__ == "__main__":
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
# Define the Secret Key.
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Define the App Configurations.
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
# Run the App.
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
# flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials)
# flask_td_app.run()
# This allows us to use a plain HTTP callback
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# # app.run(ssl_context="adhoc")
| td/oauth.py | 3,946 | Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
Define the templates folder. Create the App. Build the authorization URL. State is used to prevent CSRF, keep this for later. Grab the Refresh and Access Token. Store it in the Session. Grab the Refresh Token. Grab configuration values. Define the Secret Key. Define the App Configurations. Run the App. flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials) flask_td_app.run() This allows us to use a plain HTTP callback os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1" app.run(ssl_context="adhoc") | 1,084 | en | 0.746309 |
# Copyright 2015 Ciara Kamahele-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Planner is a generic interface used by Simulators to choose the next action to take
class Planner:
    """Generic planner interface used by Simulators to pick the next action.

    Concrete planners override next_action; the base class is a no-op stub.
    """

    def __init__(self):
        """Concrete planners may set up state here; the base does nothing."""
        return

    def next_action(self, initial_state, goal_state, prev_obs):
        """Choose the next action given the states and the previous
        observation; the base implementation returns None."""
        return None
| simulator/Planners/Planner.py | 808 | Copyright 2015 Ciara Kamahele-Sanfratello Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Planner is a generic interface used by Simulators to choose the next action to take | 648 | en | 0.862503 |
"""Tests for queues.py"""
import sys
import unittest
from unittest import mock
import asyncio
from .. import utils as test_utils
class _QueueTestBase(test_utils.TestCase):
    """Shared fixture: every test method gets a fresh deterministic test loop."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
class QueueBasicTests(_QueueTestBase):
    """Construction, repr/str, emptiness, ordering and maxsize behaviour."""

    def _test_repr_or_str(self, fn, expect_id):
        """Test Queue's repr or str.

        fn is repr or str. expect_id is True if we expect the Queue's id to
        appear in fn(Queue()).
        """
        # The generator drives the mock loop: each yielded value is the delay
        # the loop is expected to schedule next.
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
            self.assertAlmostEqual(0.2, when)
            yield 0.1

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(loop=loop)
        self.assertTrue(fn(q).startswith('<Queue'), fn(q))
        id_is_present = hex(id(q)) in fn(q)
        self.assertEqual(expect_id, id_is_present)

        @asyncio.coroutine
        def add_getter():
            q = asyncio.Queue(loop=loop)
            # Start a task that waits to get.
            asyncio.Task(q.get(), loop=loop)
            # Let it start waiting.
            yield from asyncio.sleep(0.1, loop=loop)
            self.assertTrue('_getters[1]' in fn(q))
            # resume q.get coroutine to finish generator
            q.put_nowait(0)

        loop.run_until_complete(add_getter())

        @asyncio.coroutine
        def add_putter():
            q = asyncio.Queue(maxsize=1, loop=loop)
            q.put_nowait(1)
            # Start a task that waits to put.
            asyncio.Task(q.put(2), loop=loop)
            # Let it start waiting.
            yield from asyncio.sleep(0.1, loop=loop)
            self.assertTrue('_putters[1]' in fn(q))
            # resume q.put coroutine to finish generator
            q.get_nowait()

        loop.run_until_complete(add_putter())
        q = asyncio.Queue(loop=loop)
        q.put_nowait(1)
        self.assertTrue('_queue=[1]' in fn(q))

    def test_ctor_loop(self):
        # An explicitly supplied loop must be stored on the queue.
        loop = mock.Mock()
        q = asyncio.Queue(loop=loop)
        self.assertIs(q._loop, loop)

        q = asyncio.Queue(loop=self.loop)
        self.assertIs(q._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop the queue picks up the current event loop.
        asyncio.set_event_loop(self.loop)
        q = asyncio.Queue()
        self.assertIs(q._loop, self.loop)

    def test_repr(self):
        self._test_repr_or_str(repr, True)

    def test_str(self):
        self._test_repr_or_str(str, False)

    def test_empty(self):
        q = asyncio.Queue(loop=self.loop)
        self.assertTrue(q.empty())
        q.put_nowait(1)
        self.assertFalse(q.empty())
        self.assertEqual(1, q.get_nowait())
        self.assertTrue(q.empty())

    def test_full(self):
        # maxsize=0 (the default) means "never full".
        q = asyncio.Queue(loop=self.loop)
        self.assertFalse(q.full())

        q = asyncio.Queue(maxsize=1, loop=self.loop)
        q.put_nowait(1)
        self.assertTrue(q.full())

    def test_order(self):
        # Default queue is FIFO: items come out in insertion order.
        q = asyncio.Queue(loop=self.loop)
        for i in [1, 3, 2]:
            q.put_nowait(i)

        items = [q.get_nowait() for _ in range(3)]
        self.assertEqual([1, 3, 2], items)

    def test_maxsize(self):
        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            when = yield 0.01
            self.assertAlmostEqual(0.02, when)
            yield 0.01

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(maxsize=2, loop=loop)
        self.assertEqual(2, q.maxsize)
        have_been_put = []

        @asyncio.coroutine
        def putter():
            for i in range(3):
                yield from q.put(i)
                have_been_put.append(i)
            return True

        @asyncio.coroutine
        def test():
            t = asyncio.Task(putter(), loop=loop)
            yield from asyncio.sleep(0.01, loop=loop)

            # The putter is blocked after putting two items.
            self.assertEqual([0, 1], have_been_put)
            self.assertEqual(0, q.get_nowait())

            # Let the putter resume and put last item.
            yield from asyncio.sleep(0.01, loop=loop)
            self.assertEqual([0, 1, 2], have_been_put)
            self.assertEqual(1, q.get_nowait())
            self.assertEqual(2, q.get_nowait())

            self.assertTrue(t.done())
            self.assertTrue(t.result())

        loop.run_until_complete(test())
        self.assertAlmostEqual(0.02, loop.time())
class QueueGetTests(_QueueTestBase):
    """Blocking, non-blocking and cancellation behaviour of Queue.get()."""

    def test_blocking_get(self):
        q = asyncio.Queue(loop=self.loop)
        q.put_nowait(1)

        @asyncio.coroutine
        def queue_get():
            return (yield from q.get())

        res = self.loop.run_until_complete(queue_get())
        self.assertEqual(1, res)

    def test_get_with_putters(self):
        q = asyncio.Queue(1, loop=self.loop)
        q.put_nowait(1)

        # Simulate a pending putter; get() must wake it with a None result.
        waiter = asyncio.Future(loop=self.loop)
        q._putters.append(waiter)

        res = self.loop.run_until_complete(q.get())
        self.assertEqual(1, res)
        self.assertTrue(waiter.done())
        self.assertIsNone(waiter.result())

    def test_blocking_get_wait(self):
        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            yield 0.01

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(loop=loop)
        started = asyncio.Event(loop=loop)
        finished = False

        @asyncio.coroutine
        def queue_get():
            nonlocal finished
            started.set()
            res = yield from q.get()
            finished = True
            return res

        @asyncio.coroutine
        def queue_put():
            loop.call_later(0.01, q.put_nowait, 1)
            queue_get_task = asyncio.Task(queue_get(), loop=loop)
            yield from started.wait()
            self.assertFalse(finished)
            res = yield from queue_get_task
            self.assertTrue(finished)
            return res

        res = loop.run_until_complete(queue_put())
        self.assertEqual(1, res)
        self.assertAlmostEqual(0.01, loop.time())

    def test_nonblocking_get(self):
        q = asyncio.Queue(loop=self.loop)
        q.put_nowait(1)
        self.assertEqual(1, q.get_nowait())

    def test_nonblocking_get_exception(self):
        q = asyncio.Queue(loop=self.loop)
        self.assertRaises(asyncio.QueueEmpty, q.get_nowait)

    def test_get_cancelled(self):
        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            when = yield 0.01
            self.assertAlmostEqual(0.061, when)
            yield 0.05

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(loop=loop)

        @asyncio.coroutine
        def queue_get():
            return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))

        @asyncio.coroutine
        def test():
            get_task = asyncio.Task(queue_get(), loop=loop)
            yield from asyncio.sleep(0.01, loop=loop)  # let the task start
            q.put_nowait(1)
            return (yield from get_task)

        self.assertEqual(1, loop.run_until_complete(test()))
        self.assertAlmostEqual(0.06, loop.time())

    def test_get_cancelled_race(self):
        # A cancelled getter must not swallow the item; the surviving
        # getter should receive it.
        q = asyncio.Queue(loop=self.loop)

        t1 = asyncio.Task(q.get(), loop=self.loop)
        t2 = asyncio.Task(q.get(), loop=self.loop)

        test_utils.run_briefly(self.loop)
        t1.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(t1.done())
        q.put_nowait('a')
        test_utils.run_briefly(self.loop)
        self.assertEqual(t2.result(), 'a')

    def test_get_with_waiting_putters(self):
        q = asyncio.Queue(loop=self.loop, maxsize=1)
        asyncio.Task(q.put('a'), loop=self.loop)
        asyncio.Task(q.put('b'), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(self.loop.run_until_complete(q.get()), 'a')
        self.assertEqual(self.loop.run_until_complete(q.get()), 'b')

    def test_why_are_getters_waiting(self):
        # From issue #268.

        @asyncio.coroutine
        def consumer(queue, num_expected):
            for _ in range(num_expected):
                yield from queue.get()

        @asyncio.coroutine
        def producer(queue, num_items):
            for i in range(num_items):
                yield from queue.put(i)

        queue_size = 1
        producer_num_items = 5

        q = asyncio.Queue(queue_size, loop=self.loop)
        self.loop.run_until_complete(
            asyncio.gather(
                producer(q, producer_num_items), consumer(q, producer_num_items), loop=self.loop
            ),
        )

    @unittest.skipIf(sys.version_info < (3, 6, 4), "Changed in 3.6.4")
    def test_cancelled_getters_not_being_held_in_self_getters(self):
        def a_generator():
            yield 0.1
            yield 0.2

        self.loop = self.new_test_loop(a_generator)

        @asyncio.coroutine
        def consumer(queue):
            try:
                yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
            except asyncio.TimeoutError:
                pass

        queue = asyncio.Queue(loop=self.loop, maxsize=5)
        self.loop.run_until_complete(self.loop.create_task(consumer(queue)))
        # The timed-out getter must have been removed from the wait list.
        self.assertEqual(len(queue._getters), 0)
class QueuePutTests(_QueueTestBase):
    """Blocking, non-blocking and cancellation behaviour of Queue.put()."""

    def test_blocking_put(self):
        q = asyncio.Queue(loop=self.loop)

        @asyncio.coroutine
        def queue_put():
            # No maxsize, won't block.
            yield from q.put(1)

        self.loop.run_until_complete(queue_put())

    def test_blocking_put_wait(self):
        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            yield 0.01

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(maxsize=1, loop=loop)
        started = asyncio.Event(loop=loop)
        finished = False

        @asyncio.coroutine
        def queue_put():
            nonlocal finished
            started.set()
            yield from q.put(1)
            yield from q.put(2)
            finished = True

        @asyncio.coroutine
        def queue_get():
            loop.call_later(0.01, q.get_nowait)
            queue_put_task = asyncio.Task(queue_put(), loop=loop)
            yield from started.wait()
            self.assertFalse(finished)
            yield from queue_put_task
            self.assertTrue(finished)

        loop.run_until_complete(queue_get())
        self.assertAlmostEqual(0.01, loop.time())

    def test_nonblocking_put(self):
        q = asyncio.Queue(loop=self.loop)
        q.put_nowait(1)
        self.assertEqual(1, q.get_nowait())

    def test_get_cancel_drop_one_pending_reader(self):
        def gen():
            yield 0.01
            yield 0.1

        loop = self.new_test_loop(gen)

        q = asyncio.Queue(loop=loop)

        reader = loop.create_task(q.get())

        loop.run_until_complete(asyncio.sleep(0.01, loop=loop))

        q.put_nowait(1)
        q.put_nowait(2)
        reader.cancel()

        try:
            loop.run_until_complete(reader)
        except asyncio.CancelledError:
            # try again
            reader = loop.create_task(q.get())
            loop.run_until_complete(reader)

        result = reader.result()
        # if we get 2, it means 1 got dropped!
        self.assertEqual(1, result)

    def test_get_cancel_drop_many_pending_readers(self):
        def gen():
            yield 0.01
            yield 0.1

        loop = self.new_test_loop(gen)
        loop.set_debug(True)

        q = asyncio.Queue(loop=loop)

        reader1 = loop.create_task(q.get())
        reader2 = loop.create_task(q.get())
        reader3 = loop.create_task(q.get())

        loop.run_until_complete(asyncio.sleep(0.01, loop=loop))

        q.put_nowait(1)
        q.put_nowait(2)
        reader1.cancel()

        try:
            loop.run_until_complete(reader1)
        except asyncio.CancelledError:
            pass

        loop.run_until_complete(reader3)

        # It is undefined in which order concurrent readers receive results.
        self.assertEqual({reader2.result(), reader3.result()}, {1, 2})

    def test_put_cancel_drop(self):
        def gen():
            yield 0.01
            yield 0.1

        loop = self.new_test_loop(gen)
        q = asyncio.Queue(1, loop=loop)

        q.put_nowait(1)

        # putting a second item in the queue has to block (qsize=1)
        writer = loop.create_task(q.put(2))
        loop.run_until_complete(asyncio.sleep(0.01, loop=loop))

        value1 = q.get_nowait()
        self.assertEqual(value1, 1)

        writer.cancel()
        try:
            loop.run_until_complete(writer)
        except asyncio.CancelledError:
            # try again
            writer = loop.create_task(q.put(2))
            loop.run_until_complete(writer)

        value2 = q.get_nowait()
        self.assertEqual(value2, 2)
        self.assertEqual(q.qsize(), 0)

    def test_nonblocking_put_exception(self):
        q = asyncio.Queue(maxsize=1, loop=self.loop)
        q.put_nowait(1)
        self.assertRaises(asyncio.QueueFull, q.put_nowait, 2)

    def test_float_maxsize(self):
        # A float maxsize acts as a threshold: full once qsize >= maxsize.
        q = asyncio.Queue(maxsize=1.3, loop=self.loop)
        q.put_nowait(1)
        q.put_nowait(2)
        self.assertTrue(q.full())
        self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)

        q = asyncio.Queue(maxsize=1.3, loop=self.loop)

        @asyncio.coroutine
        def queue_put():
            yield from q.put(1)
            yield from q.put(2)
            self.assertTrue(q.full())

        self.loop.run_until_complete(queue_put())

    def test_put_cancelled(self):
        q = asyncio.Queue(loop=self.loop)

        @asyncio.coroutine
        def queue_put():
            yield from q.put(1)
            return True

        @asyncio.coroutine
        def test():
            return (yield from q.get())

        t = asyncio.Task(queue_put(), loop=self.loop)
        self.assertEqual(1, self.loop.run_until_complete(test()))
        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_put_cancelled_race(self):
        # A cancelled pending putter must not block the remaining putters.
        q = asyncio.Queue(loop=self.loop, maxsize=1)

        put_a = asyncio.Task(q.put('a'), loop=self.loop)
        put_b = asyncio.Task(q.put('b'), loop=self.loop)
        put_c = asyncio.Task(q.put('X'), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertTrue(put_a.done())
        self.assertFalse(put_b.done())

        put_c.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(put_c.done())
        self.assertEqual(q.get_nowait(), 'a')
        test_utils.run_briefly(self.loop)
        self.assertEqual(q.get_nowait(), 'b')

        self.loop.run_until_complete(put_b)

    def test_put_with_waiting_getters(self):
        q = asyncio.Queue(loop=self.loop)
        t = asyncio.Task(q.get(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.loop.run_until_complete(q.put('a'))
        self.assertEqual(self.loop.run_until_complete(t), 'a')

    def test_why_are_putters_waiting(self):
        # From issue #265.

        queue = asyncio.Queue(2, loop=self.loop)

        @asyncio.coroutine
        def putter(item):
            yield from queue.put(item)

        @asyncio.coroutine
        def getter():
            yield
            num = queue.qsize()
            for _ in range(num):
                queue.get_nowait()

        t0 = putter(0)
        t1 = putter(1)
        t2 = putter(2)
        t3 = putter(3)
        self.loop.run_until_complete(asyncio.gather(getter(), t0, t1, t2, t3, loop=self.loop))
class LifoQueueTests(_QueueTestBase):
    """Ordering behaviour specific to asyncio.LifoQueue."""

    def test_order(self):
        """A LIFO queue returns items most-recently-inserted first."""
        q = asyncio.LifoQueue(loop=self.loop)
        for value in (1, 3, 2):
            q.put_nowait(value)

        retrieved = [q.get_nowait() for _ in range(3)]
        self.assertEqual([2, 3, 1], retrieved)
class PriorityQueueTests(_QueueTestBase):
    """Ordering behaviour specific to asyncio.PriorityQueue."""

    def test_order(self):
        """A priority queue returns items in ascending sort order."""
        q = asyncio.PriorityQueue(loop=self.loop)
        for value in (1, 3, 2):
            q.put_nowait(value)

        retrieved = [q.get_nowait() for _ in range(3)]
        self.assertEqual([1, 2, 3], retrieved)
class _QueueJoinTestMixin:
    """join()/task_done() tests shared by all queue flavours via q_class."""

    # Concrete subclasses set this to the queue class under test.
    q_class = None

    def test_task_done_underflow(self):
        # task_done() without a preceding get() must raise ValueError.
        q = self.q_class(loop=self.loop)
        self.assertRaises(ValueError, q.task_done)

    def test_task_done(self):
        q = self.q_class(loop=self.loop)
        for i in range(100):
            q.put_nowait(i)

        accumulator = 0

        # Two workers get items from the queue and call task_done after each.
        # Join the queue and assert all items have been processed.
        running = True

        @asyncio.coroutine
        def worker():
            nonlocal accumulator

            while running:
                item = yield from q.get()
                accumulator += item
                q.task_done()

        @asyncio.coroutine
        def test():
            tasks = [asyncio.Task(worker(), loop=self.loop) for index in range(2)]

            yield from q.join()
            return tasks

        tasks = self.loop.run_until_complete(test())
        self.assertEqual(sum(range(100)), accumulator)

        # close running generators
        running = False
        for i in range(len(tasks)):
            q.put_nowait(0)
        self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))

    def test_join_empty_queue(self):
        q = self.q_class(loop=self.loop)

        # Test that a queue join()s successfully, and before anything else
        # (done twice for insurance).

        @asyncio.coroutine
        def join():
            yield from q.join()
            yield from q.join()

        self.loop.run_until_complete(join())

    def test_format(self):
        q = self.q_class(loop=self.loop)
        self.assertEqual(q._format(), 'maxsize=0')

        q._unfinished_tasks = 2
        self.assertEqual(q._format(), 'maxsize=0 tasks=2')
class QueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
    # Run the shared join/task_done tests against the FIFO Queue.
    q_class = asyncio.Queue
class LifoQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
    # Run the shared join/task_done tests against the LIFO queue.
    q_class = asyncio.LifoQueue
class PriorityQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
    # Run the shared join/task_done tests against the priority queue.
    q_class = asyncio.PriorityQueue
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| tests/python/test_queues.py | 18,385 | Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()).
Tests for queues.py
Start a task that waits to get. Let it start waiting. resume q.get coroutine to finish generator Start a task that waits to put. Let it start waiting. resume q.put coroutine to finish generator The putter is blocked after putting two items. Let the putter resume and put last item. let the task start From issue 268. No maxsize, won't block. try again if we get 2, it means 1 got dropped! It is undefined in which order concurrent readers receive results. putting a second item in the queue has to block (qsize=1) try again From issue 265. Two workers get items from the queue and call task_done after each. Join the queue and assert all items have been processed. close running generators Test that a queue join()s successfully, and before anything else (done twice for insurance). | 922 | en | 0.909458 |
from flask import jsonify, request
from flask_restx import Resource, reqparse, fields, marshal_with
import requests
import redis
import os
import logging
import time
import datetime
import json
from app import api, db
from models import User
# Module-level logger for this service.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# flask-restx marshalling template: controls which User columns are serialized.
user_fields = {
    "id": fields.Integer,
    "uuid": fields.Integer,
    "status": fields.String
}
@api.route("/users")
class Users(Resource):
users_post_reqparser = reqparse.RequestParser()
users_post_reqparser.add_argument(
"uuid",
type=int,
location="json",
required=True,
help="Please provide the UUID -",
)
@api.expect(users_post_reqparser)
@marshal_with(user_fields)
def post(self):
args = self.users_post_reqparser.parse_args()
new_user = User(uuid=args["uuid"])
db.session.add(new_user)
db.session.flush()
db.session.commit()
return new_user, 201
@marshal_with(user_fields)
def get(self):
# TODO: some authorization would be nice
return User.query.all(), 200
@api.route("/usersByUUID/<int:uuid>")
class UserByUUID(Resource):
    @marshal_with(user_fields)
    def get(self, uuid):
        """Fetch the user with the given UUID, creating it on the fly when unknown."""
        # we should really return 404 here and don't do POST magic
        # in a GET request but this will make some thing much easier...
        found = User.query.filter_by(uuid=uuid).first()
        if found is None:
            found = User(uuid=uuid)
            db.session.add(found)
            db.session.flush()
            db.session.commit()
        return found, 200
@api.route("/users/<int:id>")
class SingleUser(Resource):
    """CRUD operations on a single user, plus contact-tracing job submission.

    NOTE(review): the <int:id> path parameter is used as the user's *uuid*
    (every query below filters by uuid=id), not the primary-key id — confirm
    this is intentional.
    """
    # Parser for PUT payloads: the new status string is mandatory.
    user_put_reqparser = reqparse.RequestParser()
    user_put_reqparser.add_argument(
        "status",
        type=str,
        location="json",
        required=True,
        help="Please provide the status value (healty, covid_positive, covid_negative) -",
    )
    @marshal_with(user_fields)
    def get(self, id):
        """Return the user with the given uuid, or abort with 404."""
        found_user = User.query.filter_by(uuid=id).first()
        if found_user is None:
            api.abort(404, "User does not exist.")
        return found_user, 200
    @marshal_with(user_fields)
    def put(self, id):
        """Update a user's status; covid_positive triggers filtering jobs."""
        user = User.query.filter_by(uuid=id).first()
        if user is None:
            api.abort(404, "User does not exist.")
        args = self.user_put_reqparser.parse_args()
        user.status = args["status"]
        db.session.commit()
        if args["status"] == "covid_positive":
            self._submit_filtering_jobs(user.uuid)
        return user, 200
    def delete(self, id):
        """Delete the user with the given uuid, or abort with 404."""
        user = User.query.filter_by(uuid=id).first()
        if user is None:
            api.abort(404, "User does not exist.")
        db.session.delete(user)
        db.session.commit()
        return {"msg": "ok"}, 200
    @staticmethod
    def _chunks(l, n):
        """Yield successive chunks of *l* of size max(1, n)."""
        n = max(1, n)
        return (l[i : i + n] for i in range(0, len(l), n))
    def _submit_filtering_jobs(self, uuid):
        """
        Here we create the task and put it on the job queue.
        """
        # Some optimization: we make a request to the Location API
        # to get all the geohash prefixes for all locations the diagnosed patient
        # has visited in the last two weeks
        two_weeks_ago = datetime.date.today() - datetime.timedelta(14)
        # NOTE(review): strftime("%s") is a non-portable glibc extension — confirm
        # the deployment platform supports it.
        params = {
            "from": int(two_weeks_ago.strftime("%s")),
            "to": int(time.time()),
            "unit": "seconds",
        }
        # TODO: Do not hardcode URIs or ports, use env vars instead
        # TODO: Do not assume that the period is always 2 weeks long, make it parametrized
        location_api_resp = requests.get(
            f"http://location-api:5000/geohashRegionsForUser/{uuid}", params=params
        )
        if location_api_resp.status_code != 200:
            logger.warning(location_api_resp)
            api.abort(
                500, "There was a problem when requesting data from the Location API"
            )
        visited_regions_geohash_prefixes = location_api_resp.json()
        logger.info(f"Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}")
        location_api_resp_users = requests.get("http://location-api:5000/users")
        if location_api_resp_users.status_code != 200:
            logger.warning(location_api_resp_users)
            api.abort(
                500, "There was a problem when requesting data from the Location API"
            )
        # All users known to the Location API, minus the diagnosed patient.
        all_influx_users = list(set(location_api_resp_users.json()) - {str(uuid)})
        logger.info(f"All Influx users without diagnozed patient: {str(all_influx_users)}")
        # So, we should split the whole job into roughly N*k jobs, where N is the
        # number of workers listening on the queue, so that each worker will get roughly
        # k tasks to execute (so we can achieve nice load balancing).
        # Let's assume for simplicity now that we have always 3 workers and k = 1.
        n_workers = 3
        task_size = len(all_influx_users) // n_workers
        all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
        # Create the tasks and put them onto the Redis queue
        redis_instance = redis.Redis(
            host=os.getenv("REDIS_HOST", "queue"),
            port=os.getenv("REDIS_PORT", 6379),
            db=os.getenv("REDIS_DB_ID", 0),
        )
        redis_namespace = os.getenv("REDIS_NAMESPACE", "worker")
        redis_collection = os.getenv("REDIS_COLLECTION", "jobs")
        logger.info(f"Connected with Redis ({redis_namespace}:{redis_collection})")
        for idx, users_batch in enumerate(all_influx_users_partitioned):
            job = {
                "type": "scan_users_locations",
                "args": {
                    "user_id_range": users_batch,
                    "diagnozed_uuid": uuid,
                    "diagnozed_visited_regions": visited_regions_geohash_prefixes,
                },
            }
            redis_instance.rpush(
                f"{redis_namespace}:{redis_collection}", json.dumps(job)
            )
            logger.info(
                f"Successfully pushed job #{idx} to the Job Queue:\n{json.dumps(job)}"
            )
        logger.info("Finished pushing jobs to the Queue.")
| users-api/routes.py | 6,346 | Here we create the task and put it on the job queue.
TODO: some authorization would be nice we should really return 404 here and don't do POST magic in a GET request but this will make some thing much easier... Some optimization: we make a request to the Location API to get all the geohash prefixes for all locations the diagonzed patient has visited in the last two weeks TODO: Do not hardcode URIs or ports, use env vars instead TODO: Do not assume that the period is always 2 weeks long, make it parametrized So, we should split the whole job into rougly N*k jobs, where N is the number of workers listening on the queue, so that each worker will get roughly k tasks to execute (so we can achieve nice load balancing). Let's assume for simplicity now that we have always 3 workers and k = 1. Create the tasks and put the onto the Redis queue | 848 | en | 0.905476 |
import time, calendar
from datetime import datetime
#
# Decodes UNIX timestamp (UTC secs since epoch) to python datetime and vice versa.
#
class Time(datetime):
    """datetime subclass that (de)serializes as a UNIX timestamp (UTC seconds since epoch)."""

    def __new__(cls, *x):
        return datetime.__new__(cls, *x)

    @staticmethod
    def decode(json):
        """Build a Time from an integer UNIX timestamp."""
        assert isinstance(json, int)
        return Time.utcfromtimestamp(json)

    def encode(self):
        """Return this instant as an integer UNIX timestamp."""
        return calendar.timegm(self.utctimetuple())

    def __str__(self):
        return "%s (UTC)" % self.isoformat(" ")
| raritan/rpc/Time.py | 525 | Decodes UNIX timestamp (UTC secs since epoch) to python datetime and vice versa. | 80 | en | 0.647313 |
"""Module with git related utilities."""
import git
class GitRepoVersionInfo:
    """
    Provides application versions information based on the tags and commits in the repo
    """

    def __init__(self, path: str):
        """
        Create an instance of GitRepoVersionInfo
        :param path: The path to search for git information. It searches for '.git' in this folder or any parent
        folder.
        """
        try:
            self._repo = git.Repo(path, search_parent_directories=True)
        except git.exc.InvalidGitRepositoryError:
            self._repo = None
            self._is_repo = False
        else:
            self._is_repo = True

    @property
    def is_git_repo(self) -> bool:
        """
        Checks if the path given in constructor is a sub-path of a valid git repo.
        :return: Boolean true, if repo was found.
        """
        return self._is_repo

    def get_git_version(self, strip_v_in_version: bool = True) -> str:
        """
        Gets application version in the format [last-tag]-[last-commit-sha].
        :param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3),
        this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
        If there's a "-", "." or "_" separator after "v", it is removed as well.
        :return: The version string
        """
        if not self._is_repo:
            raise git.exc.InvalidGitRepositoryError()
        tags_by_date = sorted(self._repo.tags, key=lambda tag: tag.commit.committed_date)
        newest_tag = tags_by_date[-1] if tags_by_date else None
        version = "0.0.0" if newest_tag is None else newest_tag.name
        if strip_v_in_version and version.startswith("v"):
            # Drop the leading "v" plus any "-", "." or "_" separator after it.
            version = version.lstrip("v").lstrip("-_.")
        head_sha = self._repo.head.commit.hexsha
        if newest_tag is not None and head_sha == newest_tag.commit.hexsha:
            return version
        return f"{version}-{head_sha}"
| step_exec_lib/utils/git.py | 1,985 | Provides application versions information based on the tags and commits in the repo
Create an instance of GitRepoVersionInfo
:param path: The path to search for git information. It searches for '.git' in this folder or any parent
folder.
Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string
Checks if the path given in constructor is a sub-path of a valid git repo.
:return: Boolean true, if repo was found.
Module with git related utilities. | 714 | en | 0.757372 |
# A very very minimal BeautifulSoup imitation.
#
# BS uses SGMLlib to parse, which converts everything to lower case.
# This uses real xml parsing to mimic the parts of BS we use.
import xml.dom.minidom
def _getText(node):
nodelist = node.childNodes
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(str(node.data))
return rc
def _getNodesAsTags(root):
    """Wrap each direct element child of *root* in a Tag."""
    return [Tag(child)
            for child in root.childNodes
            if child.nodeType == child.ELEMENT_NODE]
class Tag(object):
    """Thin wrapper around a DOM element node, mimicking a BeautifulSoup tag."""

    def __init__(self, node):
        self.node = node
        self.name = node.nodeName
        # contents holds child element Tags first, then the text fragments.
        self.contents = _getNodesAsTags(self.node)
        text = _getText(self.node)
        self.contents += text
        self.text = ''.join(text)

    def child_elements(self):
        """Return only the child element Tags (no text fragments)."""
        return [item for item in self.contents if isinstance(item, Tag)]

    def get(self, tagname):
        """Return the text of the first child tag named *tagname*, or None."""
        match = self.first(tagname)
        if match:
            return match.text

    def first(self, tagname):
        """Return the first child Tag named *tagname*, or None."""
        for item in self.contents:
            if isinstance(item, Tag) and item.name == tagname:
                return item
        return None
class BeautifulSupe(object):
    """Minimal BeautifulSoup-like facade over xml.dom.minidom."""

    def __init__(self, data):
        # please don't give us your null terminators
        data = data.strip(chr(0))
        self.dom = xml.dom.minidom.parseString(data)

    def first(self, tagname, root=None):
        """Return the first Tag named *tagname*: anywhere in the document when
        *root* is None, otherwise among the direct children of *root*."""
        found = None
        if root is None:  # fixed: identity comparison instead of `== None`
            e = self.dom.getElementsByTagName(tagname)
            if len(e) > 0:
                found = e[0]
        else:
            for node in root.childNodes:
                if node.nodeName == tagname:
                    found = node
                    break
        if not found:
            return None
        return Tag(found)

    def fetch(self, tagname, restraints={}):
        """Return all Tags named *tagname* whose child *restraint* tags have
        content starting with the given text values."""
        matches = []
        for node in self.dom.getElementsByTagName(tagname):
            match = 1
            for restraint in restraints:
                f = self.first(restraint, node)
                if not f:
                    match = 0
                    break
                text = restraints[restraint]
                # NOTE(review): contents[0] may be a Tag (element child), which
                # has no startswith(); presumably callers only restrain tags
                # containing plain text — confirm.
                if not f.contents[0].startswith(text):
                    match = 0
                    break
            if match:
                matches.append(Tag(node))
        return matches

    def scour(self, prefix, suffix=None, node=None):
        """Recursively collect Tags whose names start with *prefix* (and end
        with *suffix*, when given). Recursion only descends into matches,
        as in the original implementation."""
        if node is None:
            root = self.dom.getElementsByTagName(self.dom.documentElement.tagName)[0]
            node = root
        matches = []
        # fixed: loop variable renamed so it no longer shadows `node`
        for child in node.childNodes:
            name = child.nodeName
            if name.startswith(prefix) and (not suffix or name.endswith(suffix)):
                matches.append(Tag(child))
                matches += self.scour(prefix, suffix, child)
        return matches
| Sketches/RJL/bittorrent/BitTorrent/BitTorrent/BeautifulSupe.py | 3,419 | A very very minimal BeautifulSoup immitation. BS uses SGMLlib to parse, which converts everything to lower case. This uses real xml parsing to mimic the parts of BS we use.please don't give us your null terminators | 214 | en | 0.901699 |
from pdb import set_trace as TT
import numpy as np
import scipy
from scipy.spatial import ConvexHull
import skimage
from skimage.morphology import disk
import skbio
# Module-level cache for the rendered target text image (set by get_trg_image()).
# NOTE(review): a `global` statement at module scope is a no-op.
global trg_image
trg_image = None
def diversity_calc(config):
    """Resolve the diversity function named by config.FITNESS_METRIC."""
    return get_div_calc(config.FITNESS_METRIC)
def get_div_calc(div_calc_name):
    """Map a fitness-metric name to its diversity-calculation function.

    Args:
        div_calc_name: metric name (usually config.FITNESS_METRIC).
    Returns:
        The corresponding calc_* function.
    Raises:
        Exception: if the name is not recognized.
    """
    if div_calc_name == 'L2':
        calc_diversity = calc_diversity_l2
    elif div_calc_name == 'InvL2':
        calc_diversity = calc_homogeneity_l2
    elif div_calc_name == 'Differential':
        calc_diversity = calc_differential_entropy
    elif div_calc_name == 'Discrete':
        calc_diversity = calc_discrete_entropy_2
    elif div_calc_name == 'Hull':
        calc_diversity = calc_convex_hull
    elif div_calc_name == 'Sum':
        calc_diversity = sum_experience
    elif div_calc_name == 'Lifespans':
        calc_diversity = sum_lifespans
    elif div_calc_name == 'Lifetimes':
        calc_diversity = calc_mean_lifetime
    elif div_calc_name == 'Actions':
        calc_diversity = calc_mean_actions_matched
    elif div_calc_name == 'MapTest':
        calc_diversity = calc_local_map_entropy
    elif div_calc_name == 'MapTestText':
        calc_diversity = ham_text
        # Render the target image eagerly so ham_text has it available.
        get_trg_image()
    elif div_calc_name == 'y_deltas':
        calc_diversity = calc_y_deltas
    elif div_calc_name in ('Scores', 'ALP'):
        # Fixed: this branch and the raise below referenced an undefined
        # `config`, so any unrecognized name raised NameError instead of
        # the intended message.
        calc_diversity = calc_scores
    else:
        raise Exception('Unsupported fitness function: {}'.format(div_calc_name))
    return calc_diversity
def get_trg_image():
    """Render the target text once, save it as trg_img.png, and cache it as a
    uint8 tile-index array in the module global `trg_image`."""
    from PIL import Image, ImageDraw, ImageFont
    font_size = 15
    candidates = (
        ("arial.ttf", font_size),
        ("LiberationMono-Regular.ttf", font_size),
        ("SFNSMono.ttf", 32),
    )
    font = None
    for i, (face, size) in enumerate(candidates):
        try:
            font = ImageFont.truetype(face, size)
            break
        except OSError:
            # Re-raise when the last candidate also fails (as the original did).
            if i == len(candidates) - 1:
                raise
    global trg_image
    trg_image = Image.new(mode="RGB", size=(50, 50))
    draw = ImageDraw.Draw(trg_image)
    for xy, line in (((1, 1), "Evo"), ((1, 15), "NMMO"), ((1, 32), "¯\_(ツ)_/¯")):
        draw.text(xy, line, font=font, fill=(255, 0, 0))
    trg_image.save("trg_img.png")
    # Red channel scaled into tile indices [0, 8].
    trg_image = (np.array(trg_image)[:, :, 0] / 255 * 8).astype(np.uint8)
def ham_text(individual, config):
    """Negative Hamming distance between the map interior and the target text image."""
    if trg_image is None:
        get_trg_image()
    interior = individual.chromosome.map_arr[10:-10, 10:-10]
    return -(trg_image != interior).sum()
def calc_map_entropies(individual, config, verbose=False):
    """Return [global_entropy, local_entropy] for an individual's map."""
    glob = calc_global_map_entropy(individual, config)
    loc = calc_local_map_entropy(individual, config)
    if verbose:
        print('global entropy: {}\nlocal entropy: {}'.format(glob, loc))
    return [glob[0], loc]
def calc_global_map_entropy(individual, config):
    """Shannon entropy of tile frequencies over the map interior, scaled to [0, 100]."""
    # FIXME: hack to ignore lava borders
    border = config.TERRAIN_BORDER
    interior = individual.chromosome.map_arr[border:-border, border:-border]
    counts = np.bincount(interior.reshape(-1), minlength=individual.n_tiles)
    ent = scipy.stats.entropy(counts) * 100 / np.log(individual.n_tiles)
    return [ent]
def calc_local_map_entropy(individual, config):
    """Mean windowed (disk radius 3) entropy over the map interior, scaled to [0, 100]."""
    # FIXME: hack to ignore lava borders
    border = config.TERRAIN_BORDER
    interior = individual.chromosome.map_arr[border:-border, border:-border]
    windowed = skimage.filters.rank.entropy(interior, disk(3))
    scaled = windowed.mean() * 100 / np.log2(individual.n_tiles)
    return scaled.item()
def get_pop_stats(agent_stats, pop=None):
    """Gather per-episode stats for population *pop* (all populations when None)
    into a single numpy array."""
    # Which populations do we need stats for?
    pops = [pop] if pop is not None else agent_stats[0].keys()
    # Flatten: for each population, every episode's entry.
    gathered = [episode[p] for p in pops for episode in agent_stats]
    ndim = len(gathered[0].shape)
    if ndim == 2:
        # Rows correspond to agents, so stack vertically.
        return np.vstack(gathered)
    if ndim == 1:
        # One scalar per agent, so concatenate.
        return np.hstack(gathered)
    raise Exception("Oy! Dafuk type o' agent data is this?")
def contract_by_lifespan(agent_stats, lifespans):
    '''Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death
    when rewarding diversity.'''
    weights = sigmoid_lifespan(lifespans)
    n_agents = lifespans.shape[0]
    centroid = agent_stats.mean(axis=0)
    centroids = np.repeat(centroid.reshape(1, centroid.shape[0]), n_agents, axis=0)
    deltas = centroids - agent_stats
    # Short-lived agents (small weight) keep most of their pull toward the mean.
    return agent_stats + (weights * deltas.T).T
def expand_by_lifespan(agent_stats, lifespans):
    '''Push agents further from their mean according to how short-lived they were. For punishing abundance of premature
    death when rewarding homogeneity.'''
    weights = sigmoid_lifespan(lifespans)
    n_agents = lifespans.shape[0]
    centroid = agent_stats.mean(axis=0)
    centroids = np.repeat(centroid.reshape(1, centroid.shape[0]), n_agents, axis=0)
    deltas = centroids - agent_stats
    # Displace agents by at most 100 units (otherwise we will not punish agents at all
    # if they are already perfectly homogenous, for example).
    deltas = deltas / np.linalg.norm(deltas) * 100
    return agent_stats - (weights * deltas.T).T
def calc_scores(agent_stats, skill_headers=None, verbose=False):
    """Mean of all recorded episode scores."""
    flat_scores = np.hstack(agent_stats['scores'])
    if verbose:
        print('scores: {}'.format(flat_scores))
    return flat_scores.mean()
def calc_mean_actions_matched(agent_stats, skill_headers=None, verbose=False):
    """Mean count of actions matched across all agents."""
    matched = np.hstack(agent_stats['actions_matched'])
    if verbose:
        print(matched)
    return matched.mean()
def calc_y_deltas(agent_stats, skill_headers=None, verbose=False):
    """Mean vertical displacement over all agents."""
    deltas = np.hstack(agent_stats['y_deltas'])
    if verbose:
        print('y_deltas: {}'.format(deltas))
    return deltas.mean()
def calc_mean_lifetime(agent_stats, skill_headers=None, verbose=False, pop=None):
    """Mean agent lifetime over population *pop* (all populations when None).

    Returns 0 when no lifetimes were recorded.
    """
    lifetimes = get_pop_stats(agent_stats['lifespans'], pop)
    if len(lifetimes) == 0:
        # Fixed: the empty-case fallback used to be the plain list [0], whose
        # .mean() call below raised AttributeError.
        return 0
    return np.hstack(lifetimes).mean()
def sum_lifespans(agent_stats, skill_headers=None, n_policies=1, verbose=False, pop=None):
    """Mean lifespan of population *pop* (despite the historical "sum" name)."""
    lifespans = get_pop_stats(agent_stats['lifespans'], pop=pop)
    mean_lifespan = lifespans.mean()
    if verbose:
        print('Mean lifespan, pop {}: {}'.format(pop, mean_lifespan))
    return mean_lifespan
def sum_experience(agent_stats, skill_headers=None, verbose=False, pop=None):
    '''Simply take the sum of XP over skills and agents.'''
    # No need to weight by lifespan, since high lifespan is a prerequisite for high XP.
    skills = np.vstack(get_pop_stats(agent_stats['skills'], pop))
    lifespans = np.hstack(get_pop_stats(agent_stats['lifespans'], pop))
    n_agents, n_skills = skills.shape
    mean_xp = skills.sum() / (n_agents * n_skills)
    if verbose:
        print('skills')
        print(skills.T)
        print('lifespans')
        print(lifespans)
        print('mean xp:', mean_xp)
        print()
    return mean_xp
def sigmoid_lifespan(x):
    """Smooth [0, 1] weight that is 0.5 at lifespan 50 and saturates above ~100."""
    # This basically assumes max lifespan is at least 100. Larger max lifespans
    # won't really be a problem since this function converges to 1.
    return 1 / (1 + np.exp(0.1 * (-x + 50)))
def calc_differential_entropy(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
    """Differential entropy of a multivariate Gaussian fitted to the population's
    skill vectors; the fitted distribution is stored in infos['gaussian']."""
    skills = get_pop_stats(agent_stats['skills'], pop)
    lifespans = get_pop_stats(agent_stats['lifespans'], pop)
    assert skills.shape[0] == lifespans.shape[0]
    if verbose:
        print(skill_headers)
        print(skills.transpose())
        print(len(skills), 'populations')
        print('lifespans')
        print(lifespans)
    if punish_youth:
        # Contract short-lived agents toward the population mean so premature
        # death is not rewarded as diversity.
        skills = contract_by_lifespan(skills, lifespans)
    gaussian = scipy.stats.multivariate_normal(
        mean=np.average(skills, axis=0),
        cov=np.cov(skills, rowvar=0),
        allow_singular=True)
    infos['gaussian'] = gaussian
    score = gaussian.entropy()
    if verbose:
        print('score:', score)
    return score
def calc_convex_hull(agent_stats, skill_headers=None, verbose=False, infos={}, pop=None, punish_youth=True):
    '''Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of
    the agents when treated as points in this space.'''
    skills = np.vstack(get_pop_stats(agent_stats['skills'], pop))
    lifespans = np.hstack(get_pop_stats(agent_stats['lifespans'], pop))
    n_skills = skills.shape[1]
    if verbose:
        print('skills:')
        print(skills.transpose())
        print('lifespans:')
        print(lifespans)
        print(len(agent_stats['lifespans']), 'populations')
    if punish_youth:
        skills = contract_by_lifespan(skills, lifespans)
    if n_skills == 1:
        # Max distance, i.e. a 1D hull
        score = skills.max() - skills.mean()
    else:
        try:
            hull = ConvexHull(skills, qhull_options='QJ')
            infos['hull'] = hull
            # Normalize the volume to a length scale by taking the n-th root.
            score = hull.volume ** (1 / n_skills)
        except Exception as e:
            print(e)
            score = 0
    if verbose:
        print('score:', score)
    return score
def calc_discrete_entropy_2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
    """Negated product of mean Shannon diversity across agents and across skills.

    Lower (more negative) scores mean a more diverse population.
    """
    agent_skills = np.vstack(get_pop_stats(agent_stats['skills'], pop))
    lifespans = np.hstack(get_pop_stats(agent_stats['lifespans'], pop))
    n_agents = lifespans.shape[0]
    if n_agents == 1:
        # Fixed: np.float was removed from modern NumPy (>=1.24); use the builtin.
        return -float('inf')
    if verbose:
        print('skills')
        print(agent_skills.transpose())
        print('lifespans')
        print(lifespans)
    # Avoid zeros so the Shannon computation never hits log(0).
    agent_skills = np.where(agent_skills == 0, 0.0000001, agent_skills)
    if punish_youth:
        # Contract short-lived agents toward the mean so premature death is
        # not rewarded as diversity.
        # Fixed: the contracted array used to be assigned only inside this
        # branch but read unconditionally below, raising NameError when
        # punish_youth=False.
        agent_skills = contract_by_lifespan(agent_skills, lifespans)
    div_agents = skbio.diversity.alpha_diversity('shannon', agent_skills).mean()
    div_skills = skbio.diversity.alpha_diversity('shannon', agent_skills.transpose()).mean()
    score = -(div_agents * div_skills)
    if verbose:
        print('Score:', score)
    return score
def calc_discrete_entropy(agent_stats, skill_headers=None, pop=None):
    """Hand-rolled discrete-entropy diversity score.

    Skill XP is weighted by a lifespan sigmoid, then per-skill and per-agent
    entropies are computed over normalized XP 'probabilities'; the score is
    -(mean skill entropy * mean agent entropy) * 100. Always prints a summary.
    """
    agent_skills = get_pop_stats(agent_stats['skills'], pop)
    lifespans = get_pop_stats(agent_stats['lifespans'], pop)
    agent_skills_0 = np.vstack(agent_skills)
    agent_lifespans = np.hstack(lifespans)
    # Down-weight short-lived agents' XP before computing entropies.
    weights = sigmoid_lifespan(agent_lifespans)
    agent_skills = agent_skills_0.transpose() * weights
    agent_skills = agent_skills.transpose()
    # Small floor value standing in for zero XP so probabilities are nonzero.
    BASE_VAL = 0.0001
    # split between skill and agent entropy
    n_skills = len(agent_skills[0])
    n_pop = len(agent_skills)
    # Total XP per agent (rows).
    agent_sums = [sum(skills) for skills in agent_skills]
    i = 0
    # ensure that we will not be dividing by zero when computing probabilities
    for a in agent_sums:
        if a == 0:
            agent_sums[i] = BASE_VAL * n_skills
        i += 1
    # Total XP per skill (columns), with the same zero guard.
    skill_sums = [0 for i in range(n_skills)]
    for i in range(n_skills):
        for a_skills in agent_skills:
            skill_sums[i] += a_skills[i]
        if skill_sums[i] == 0:
            skill_sums[i] = BASE_VAL * n_pop
    # Per-skill entropy over the distribution of that skill across agents.
    skill_ents = []
    for i in range(n_skills):
        skill_ent = 0
        for j in range(n_pop):
            a_skill = agent_skills[j][i]
            if a_skill == 0:
                a_skill = BASE_VAL
            p = a_skill / skill_sums[i]
            if p == 0:
                skill_ent += 0
            else:
                skill_ent += p * np.log(p)
        skill_ent = skill_ent / (n_pop)
        skill_ents.append(skill_ent)
    # Per-agent entropy over the distribution of that agent's XP across skills.
    agent_ents = []
    for j in range(n_pop):
        agent_ent = 0
        for i in range(n_skills):
            a_skill = agent_skills[j][i]
            if a_skill == 0:
                a_skill = BASE_VAL
            p = a_skill / agent_sums[j]
            if p == 0:
                agent_ent += 0
            else:
                agent_ent += p * np.log(p)
        agent_ent = agent_ent / (n_skills)
        agent_ents.append(agent_ent)
    agent_score = np.mean(agent_ents)
    skill_score = np.mean(skill_ents)
    # score = (alpha * skill_score + (1 - alpha) * agent_score)
    score = -(skill_score * agent_score)
    score = score * 100#/ n_pop**2
    print('agent skills:\n{}\n{}'.format(skill_headers, np.array(agent_skills_0.transpose())))
    print('lifespans:\n{}'.format(lifespans))
    # print('skill_ents:\n{}\nskill_mean:\n{}\nagent_ents:\n{}\nagent_mean:{}\nscore:\n{}\n'.format(
    # np.array(skill_ents), skill_score, np.array(agent_ents), agent_score, score))
    print('score:\n{}'.format(score))
    return score
def calc_homogeneity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=True):
    '''Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same
    point in skill-space, with maximal lifespans.'''
    if 'skills' not in agent_stats:
        raise Exception('We should be including dead agents in this calculation, so we should get at least some skill '
                        'stats back here')
    skills = get_pop_stats(agent_stats['skills'], pop)
    lifespans = get_pop_stats(agent_stats['lifespans'], pop)
    assert len(skills) == len(lifespans)
    if punish_youth:
        skills = expand_by_lifespan(skills, lifespans)
    n_agents = skills.shape[0]
    a = skills
    b = a.reshape(n_agents, 1, a.shape[1])
    # Pairwise Euclidean distances via einsum:
    # https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array
    distances = np.sqrt(np.einsum('ijk, ijk->ij', a - b, a - b))
    score = distances.sum() / n_agents ** 2
    if verbose:
        print('agent skills:\n{}'.format(a.transpose()))
        print('lifespans:\n{}'.format(lifespans))
        print('score:\n{}\n'.format(
            score))
    return -score
def calc_diversity_l2(agent_stats, skill_headers=None, verbose=False, pop=None, punish_youth=False):
    """Mean pairwise L2 distance between agents in skill-space (higher = more diverse)."""
    if 'skills' not in agent_stats:
        return 0
    skills = get_pop_stats(agent_stats['skills'], pop)
    lifespans = get_pop_stats(agent_stats['lifespans'], pop)
    assert len(skills) == len(lifespans)
    if punish_youth:
        skills = contract_by_lifespan(skills, lifespans)
    n_agents = skills.shape[0]
    a = skills
    b = a.reshape(n_agents, 1, a.shape[1])
    # Pairwise Euclidean distances via einsum:
    # https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array
    distances = np.sqrt(np.einsum('ijk, ijk->ij', a - b, a - b))
    score = distances.sum() / n_agents ** 2
    if verbose:
        print('agent skills:\n{}'.format(a.transpose()))
        print('lifespans:\n{}'.format(lifespans))
        print('score:\n{}\n'.format(
            score))
    return score
# (function, human-readable label) pairs for every diversity metric we report.
DIV_CALCS = [(calc_diversity_l2, 'mean pairwise L2'), (calc_differential_entropy, 'differential entropy'), (calc_discrete_entropy_2, 'discrete entropy'), (calc_convex_hull, 'convex hull volume'), (sum_lifespans, 'lifespans')]
| evolution/diversity.py | 17,145 | Calculate the diversity of a population of agents in skill-space by computing the volume inside the convex hull of
the agents when treated as points in this space.
Use L2 distance to punish agents for having high mean pairwise distance. Optimal state is all agents at the same
point in skill-space, with maximal lifespans.
Pull agents close to their mean according to how short-lived they were. For punishing abundance of premature death
when rewarding diversity.
Push agents further from their mean according to how short-lived they were. For punishing abundance of premature
death when rewarding homogeneity.
Simply take the sum of XP over skills and agents.
or config.FITNESS_METRIC == 'ALP': FIXME: hack to ignore lava borders FIXME: hack to ignore lava borders Get list of all populations for which we need stats Get 1D array of agent stats then rows correspond to agents so we stack them vertically (concatenate along axis 1) then each agent has a scalar value so we concatenate along axis 0 Displace agents by at most 100 units (otherwise we will not punish agents at all if they are already perfectly homogenous, for example. print(agent_stats['lifespans']) No need to weight by lifespan, since high lifespan is a prerequisite for high XP. This basically assumes max lifespan is at least 100. Larger max lifespans won't really be a problem since this function converges to 1. Below is an alternative way of weighting by lifespan weights = sigmoid_lifespan(a_lifespans) mean = np.average(a_skills, axis=0, weights=weights) cov = np.cov(a_skills,rowvar=0, aweights=weights) Instead, we'll just contract as usual Max distance, i.e. a 1D hull Below is a v funky way of punishing by lifespan weights = sigmoid_lifespan(lifespans) contract population toward mean according to lifespan mean experience level for each agent mean_skill = agent_skills.mean(axis=1) mean skill vector of an agent mean_agent = agent_skills.mean(axis=0) assert mean_skill.shape[0] == n_agents assert mean_agent.shape[0] == n_skills mean_skills = np.repeat(mean_skill.reshape(mean_skill.shape[0], 1), n_skills, axis=1) mean_agents = np.repeat(mean_agent.reshape(1, mean_agent.shape[0]), n_agents, axis=0) agent_deltas = agent_skills - mean_agents skill_deltas = agent_skills - mean_skills a_skills_skills = mean_agents + (weights * agent_deltas.transpose()).transpose() a_skills_agents = mean_skills + (weights * skill_deltas.transpose()).transpose() div_agents = skbio.diversity.alpha_diversity('shannon', a_skills_agents).mean() div_skills = skbio.diversity.alpha_diversity('shannon', a_skills_skills.transpose()).mean() We'll just do the usual div_lifespans = 
skbio.diversity.alpha_diversity('shannon', lifespans)/ div_lifespans/ len(agent_skills)**2* 100 / (n_agents * n_skills) split between skill and agent entropy ensure that we will not be dividing by zero when computing probabilities score = (alpha * skill_score + (1 - alpha) * agent_score)/ n_pop**2 print('skill_ents:\n{}\nskill_mean:\n{}\nagent_ents:\n{}\nagent_mean:{}\nscore:\n{}\n'.format( np.array(skill_ents), skill_score, np.array(agent_ents), agent_score, score)) https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array print(skill_headers) https://stackoverflow.com/questions/43367001/how-to-calculate-euclidean-distance-between-pair-of-rows-of-a-numpy-array print(skill_headers) | 3,412 | en | 0.812153 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
    def __init__(self, env, **hyperparameters):
        """
        Initializes the PPO model, including hyperparameters.
        Parameters:
            policy_class - the policy class to use for our actor/critic networks.
            env - the environment to train on.
            hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
        Returns:
            None
        """
        # Make sure the environment is compatible with our code
        assert(type(env.observation_space) == gym.spaces.Box)
        # NOTE(review): the matching action-space assert was disabled
        # (presumably to allow Discrete action spaces) — confirm.
        # Initialize hyperparameters for training with PPO
        self._init_hyperparameters(hyperparameters)
        # Extract environment information
        self.env = env
        self.obs_dim = env.observation_space.shape[0]
        if self.discrete:
            self.act_dim = env.action_space.n
        else:
            self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
        # Initialize actor and critic networks
        self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
        # NOTE(review): the three lines below load a pretrained policy into a
        # local variable that is never used afterwards (self.actor is not
        # replaced) — looks like dead/debug code, and it will crash when the
        # .pth file is absent. Confirm whether it can be removed.
        actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
        policy = FeedForwardActorNN(5, 2,False)
        policy.load_state_dict(torch.load(actor_model))
        actor_model = policy
        #print(f'model =========== {self.actor}') # ALG STEP 1
        self.critic = FeedForwardCriticNN(self.obs_dim, 1)
        #print(f'critic =========== {self.critic}')
        # Initialize optimizers for actor and critic
        self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
        self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
        # Initialize the covariance matrix used to query the actor for actions
        # (fixed diagonal stddev^2 of 0.05 for the continuous-action Gaussian).
        self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
        self.cov_mat = torch.diag(self.cov_var)
        self.obs_count = 0
        self.index_count = 0
        # This logger will help us with printing out summaries of each iteration
        self.logger = {
            't_so_far': 0, # timesteps so far
            'i_so_far': 0, # iterations so far
            'batch_lens': [], # episodic lengths in batch
            'batch_rews': [], # episodic returns in batch
            'batch_infractions': [], # Episodic returns in a neural network
            'actor_losses': [], # losses of actor network in current iteration
            'actor_network' : 0, # Actor network
        }
def learn(self, env_name,failure_observations,subpolicy):
    """
    Train the actor and critic networks. Here is where the main PPO algorithm resides.

    Parameters:
        env_name - suffix appended to the checkpoint file names when saving
        failure_observations - forwarded to rollout() untouched
        subpolicy - when True, checkpoints are written under the '_subpolicy' file names

    Return:
        None
    """
    print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
    print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
    t_so_far = 0 # Timesteps simulated so far
    i_so_far = 0 # Iterations ran so far
    while i_so_far < self.training_step: # ALG STEP 2
        # Collect a fresh on-policy batch of simulations.
        batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
        # Calculate how many timesteps we collected this batch
        t_so_far += np.sum(batch_lens)
        # Increment the number of iterations
        i_so_far += 1
        # Logging timesteps so far and iterations so far
        self.logger['t_so_far'] = t_so_far
        self.logger['i_so_far'] = i_so_far
        # Calculate advantage at k-th iteration
        V, _ = self.evaluate(batch_obs, batch_acts)
        A_k = batch_rtgs - V.detach() # ALG STEP 5
        # Advantage normalization is not in the PPO pseudocode, but in practice it
        # decreases the variance of the advantages and makes convergence much more
        # stable and faster. The 1e-10 guards against division by a zero std.
        A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
        # This is the loop where we update our network for some n epochs
        for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
            # Calculate V_phi and pi_theta(a_t | s_t)
            V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
            # Ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t).
            # NOTE: we just subtract the logs, which is the same as dividing the
            # probabilities and then cancelling the log with e^log. For why log
            # probabilities are used instead of raw probabilities, see:
            # https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
            ratios = torch.exp(curr_log_probs - batch_log_probs)
            # Calculate surrogate losses (clipped PPO objective).
            surr1 = ratios * A_k
            surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
            # NOTE: we take the negative min of the surrogate losses because we're
            # trying to maximize the performance function, but Adam minimizes the
            # loss. Minimizing the negative performance function maximizes it.
            actor_loss = (-torch.min(surr1, surr2)).mean()
            critic_loss = nn.MSELoss()(V, batch_rtgs)
            # Backward pass for the actor network. retain_graph=True because the
            # critic backward pass below reuses the graph from the same evaluate()
            # call that produced V.
            self.actor_optim.zero_grad()
            actor_loss.backward(retain_graph=True)
            self.actor_optim.step()
            # Calculate gradients and perform backward propagation for critic network
            self.critic_optim.zero_grad()
            critic_loss.backward()
            self.critic_optim.step()
            # Log actor loss
            self.logger['actor_losses'].append(actor_loss.detach())
            self.logger['actor_network'] = self.actor
        # Print a summary of our training so far
        self._log_summary()
        # Save our model if it's time
        if i_so_far % self.save_freq == 0:
            if subpolicy:
                torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
                torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
            else:
                torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
                torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
    """
    Collect a batch of data from simulation. Since PPO is an on-policy algorithm,
    a fresh batch must be collected each time the actor/critic networks are updated.

    Parameters:
        subpolicy - accepted for interface compatibility; not read in this method
        failure_observations - accepted for interface compatibility; not read in this method

    Return:
        batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
        batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
        batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
        batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
        batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
    """
    # Batch data. For more details, check function header.
    batch_obs = []
    batch_acts = []
    batch_log_probs = []
    batch_rews = []
    batch_rtgs = []
    batch_lens = []
    batch_infractions = []  # per-episode count of controller infractions (see below)
    # Episodic data. Keeps track of rewards per episode, will get cleared
    # upon each new episode
    ep_rews = []
    t = 0 # Keeps track of how many timesteps we've run so far this batch
    # Keep simulating until we've run more than or equal to specified timesteps per batch
    while t < self.timesteps_per_batch:
        act_list = []
        ep_rews = [] # rewards collected per episode
        # Reset the environment. Note that obs is short for observation.
        obs = self.env.reset()
        done = False
        count_infractions = 0
        count_infractions_acc = 0
        count_infractions_steer = 0
        # Run an episode for a maximum of max_timesteps_per_episode timesteps
        for ep_t in range(self.max_timesteps_per_episode):
            # Reference controllers: CLF for acceleration, Stanley for steering.
            # NOTE(review): clf_control and the env.* attributes are defined
            # elsewhere in this project — assumed to return reference commands.
            a_predicted_clf = clf_control(self.env.v_ego)
            delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
            # If render is specified, render the environment
            if self.render:
                self.env.render()
            t += 1 # Increment timesteps ran this batch so far
            # Track observations in this batch
            batch_obs.append(obs)
            # Calculate action and make a step in the env.
            # Note that rew is short for reward.
            if self.discrete:
                action, log_prob = self.get_action_discrete(obs)
            else:
                action, log_prob = self.get_action(obs)
            # An infraction is counted when the policy's command is smaller in
            # (rounded) magnitude than the reference controller's; steering
            # comparisons get a 0.2 tolerance margin.
            if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
                count_infractions_acc = count_infractions_acc+1
            if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
                count_infractions_steer = count_infractions_steer+1
            obs, rew, done, info = self.env.step(action)
            count_infractions = count_infractions_acc+count_infractions_steer
            # Track recent reward, action, and action log probability
            ep_rews.append(rew)
            batch_acts.append(action)
            batch_log_probs.append(log_prob)
            act_list.append(info)
            # If the environment tells us the episode is terminated, break
            if done:
                break
        # Track episodic lengths and rewards
        batch_lens.append(ep_t + 1)
        batch_rews.append(ep_rews)
        batch_infractions.append(count_infractions)
    # Reshape data as tensors in the shape specified in function description, before returning
    batch_obs = torch.tensor(batch_obs, dtype=torch.float)
    # Discrete actions become a flat long tensor of indices; continuous actions
    # keep their per-timestep vector shape as floats.
    if self.discrete:
        batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
    else:
        batch_acts = torch.tensor(batch_acts, dtype=torch.float)
    batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
    batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
    # Log the episodic returns and episodic lengths in this batch.
    self.logger['batch_rews'] = batch_rews
    self.logger['batch_lens'] = batch_lens
    self.logger['batch_infractions'] = batch_infractions
    return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
    """
    Compute the Reward-To-Go (discounted return) of each timestep in a batch.

    Parameters:
        batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)

    Return:
        batch_rtgs - the rewards to go as a float tensor, Shape: (number of timesteps in batch)
    """
    all_rtgs = []
    # Episodes stay in batch order; within an episode the return is accumulated
    # backwards (G_t = r_t + gamma * G_{t+1}) and then flipped to chronological order.
    for ep_rews in batch_rews:
        running_return = 0
        ep_rtgs = []
        for rew in reversed(ep_rews):
            running_return = rew + running_return * self.gamma
            ep_rtgs.append(running_return)
        ep_rtgs.reverse()
        all_rtgs.extend(ep_rtgs)
    # Convert the rewards-to-go into a tensor
    return torch.tensor(all_rtgs, dtype=torch.float)
def get_action_discrete(self, obs):
    """
    Sample a discrete action from the actor's categorical output, should be
    called from rollout.

    Parameters:
        obs - the observation at the current timestep

    Return:
        action - the sampled action index as a Python int
        log_prob - the log probability of that action as a Python float
    """
    action_probs = self.actor(obs)
    categorical = Categorical(action_probs)
    sampled = categorical.sample()
    logp = categorical.log_prob(sampled)
    return sampled.detach().numpy().item(), logp.detach().item()
def get_action(self, obs):
    """
    Queries an action from the actor network, should be called from rollout.

    Parameters:
        obs - the observation at the current timestep

    Return:
        action - the action to take, as a numpy array
        log_prob - the log probability of the selected action in the distribution
    """
    # The actor output is treated as the mean of a multivariate Gaussian whose
    # (fixed) covariance was built in __init__; sampling from it gives
    # exploration around the mean action.
    mu = self.actor(obs)
    gaussian = MultivariateNormal(mu, self.cov_mat)
    sampled = gaussian.sample()
    logp = gaussian.log_prob(sampled)
    # Detach both so no graph is kept alive by the rollout buffers.
    return sampled.detach().numpy(), logp.detach()
def evaluate(self, batch_obs, batch_acts):
    """
    Estimate the values of each observation, and the log probs of each action in
    the most recent batch with the most recent iteration of the actor network.
    Should be called from learn.

    Parameters:
        batch_obs - the observations from the most recently collected batch as a tensor.
                    Shape: (number of timesteps in batch, dimension of observation)
        batch_acts - the actions from the most recently collected batch as a tensor.
                    Shape: (number of timesteps in batch, dimension of action)

    Return:
        V - the predicted values of batch_obs
        log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
    """
    # Critic value per observation; squeeze drops the trailing singleton
    # dimension so V matches the shape of batch_rtgs.
    values = self.critic(batch_obs).squeeze()
    # Build the same distribution family used at rollout time, then score the
    # recorded actions under the current policy.
    policy_out = self.actor(batch_obs)
    if self.discrete:
        action_dist = Categorical(policy_out)
    else:
        action_dist = MultivariateNormal(policy_out, self.cov_mat)
    return values, action_dist.log_prob(batch_acts)
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of trainig step
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
exec('self.' + param + ' = ' + str(val))
# Sets the seed if specified
if self.seed != None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
    """
    Print to stdout what we've logged so far in the most recent batch, and push
    scalar/histogram summaries to the module-level tensorboard writer.

    Parameters:
        None

    Return:
        None
    """
    # Gather the values accumulated by learn()/rollout() in self.logger.
    t_so_far = self.logger['t_so_far']
    i_so_far = self.logger['i_so_far']
    avg_ep_lens = np.mean(self.logger['batch_lens'])
    avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
    avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
    avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
    actor_model = self.logger['actor_network']
    # Round decimal places for more aesthetic logging messages
    avg_ep_lens = str(round(avg_ep_lens, 2))
    avg_ep_rews = str(round(avg_ep_rews, 2))
    avg_ep_infractions = str(round(avg_ep_infractions, 2))
    avg_actor_loss = str(round(avg_actor_loss, 5))
    # NOTE(review): `writer` is assumed to be a module-level tensorboard
    # SummaryWriter created elsewhere in this file — confirm before reuse.
    # Scalar values are truncated to int for the plots.
    writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
    writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
    writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
    # Track the weight tensors of the actor network as histograms.
    for name, param in actor_model.named_parameters():
        if 'weight' in name:
            writer.add_histogram(name, param.detach().numpy(), t_so_far)
    # Print logging statements
    print(flush=True)
    print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
    print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
    print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
    print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
    print(f"Average Loss: {avg_actor_loss}", flush=True)
    print(f"Timesteps So Far: {t_so_far}", flush=True)
    print(f"------------------------------------------------------", flush=True)
    print(flush=True)
    # Reset batch-specific logging data
    self.logger['batch_lens'] = []
    self.logger['batch_rews'] = []
    self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
    """
    Tests a trained policy.

    Parameters:
        env - the environment to test the policy on
        actor_model - path to the saved actor state_dict (.pth file) to load
        is_discrete - True for a discrete action space (uses env.action_space.n),
                      False for continuous (uses env.action_space.shape[0])

    Return:
        None
    """
    print(f"Testing {actor_model}", flush=True)
    # If the actor model is not specified, then exit
    if actor_model == '':
        print(f"Didn't specify model file. Exiting.", flush=True)
        sys.exit(0)
    # Extract out dimensions of observation and action spaces
    obs_dim = env.observation_space.shape[0]
    if is_discrete:
        act_dim = env.action_space.n
    else:
        act_dim = env.action_space.shape[0]
    # Build our policy the same way we build our actor model in PPO
    policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
    # Load in the actor model saved by the PPO algorithm
    policy.load_state_dict(torch.load(actor_model))
    # Evaluate our policy with a separate module, eval_policy, to demonstrate
    # that once we are done training the model/policy with ppo.py, we no longer need
    # ppo.py since it only contains the training algorithm. The model/policy itself exists
    # independently as a binary file that can be loaded in with torch.
    eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
| ppoPolicyTraining.py | 21,384 | This is the PPO class we will use as our model in main.py
Initializes the PPO model, including hyperparameters.
Parameters:
policy_class - the policy class to use for our actor/critic networks.
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
None
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
Return:
None
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
For continuous actionsFor discrete action_spaceIntegrating tensorboard Make sure the environment is compatible with our code Makeassert(type(env.action_space) == gym.spaces.Box) Initialize hyperparameters for training with PPO Extract environment informationenv.action_space.n env.action_space.shape[0] Initialize actor and critic networksprint(f'model =========== {self.actor}') ALG STEP 1print(f'critic =========== {self.critic}') Initialize optimizers for actor and critic Initialize the covariance matrix used to query the actor for actions This logger will help us with printing out summaries of each iteration timesteps so far iterations so far episodic lengths in batch episodic returns in batch Episodic returns in a neural network losses of actor network in current iteration Actor network Timesteps simulated so far Iterations ran so far ALG STEP 2 Autobots, roll out (just kidding, we're collecting our batch simulations here) ALG STEP 3 Calculate how many timesteps we collected this batch Increment the number of iterations Logging timesteps so far and iterations so far Calculate advantage at k-th iteration ALG STEP 5 One of the only tricks I use that isn't in the pseudocode. Normalizing advantages isn't theoretically necessary, but in practice it decreases the variance of our advantages and makes convergence much more stable and faster. I added this because solving some environments was too unstable without it. This is the loop where we update our network for some n epochs ALG STEP 6 & 7 Calculate V_phi and pi_theta(a_t | s_t) Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t) NOTE: we just subtract the logs, which is the same as dividing the values and then canceling the log with e^log. For why we use log probabilities instead of actual probabilities, here's a great explanation: https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms TL;DR makes gradient ascent easier behind the scenes. 
Calculate surrogate losses.print(f'A_k======================={A_k}')print(f'surr1======================={surr1}')print(f'surr2======================={surr2}') Calculate actor and critic losses. NOTE: we take the negative min of the surrogate losses because we're trying to maximize the performance function, but Adam minimizes the loss. So minimizing the negative performance function maximizes it.print(f'actor_loss======================={actor_loss}') Calculate gradients and perform backward propagation for actor network Calculate gradients and perform backward propagation for critic network Log actor loss Print a summary of our training so far Save our model if it's time Batch data. For more details, check function header. Episodic data. Keeps track of rewards per episode, will get cleared upon each new episode Keeps track of how many timesteps we've run so far this batch Keep simulating until we've run more than or equal to specified timesteps per batch rewards collected per episode Reset the environment. sNote that obs is short for observation. print(f'obs reset ============= {obs}') Run an episode for a maximum of max_timesteps_per_episode timesteps If render is specified, render the environment Increment timesteps ran this batch so far Track observations in this batch Calculate action and make a step in the env. 
Note that rew is short for reward.self.get_action_discrete(obs)print(f'action chosen =============== {action}')print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}') Track recent reward, action, and action log probability If the environment tells us the episode is terminated, break Track episodic lengths and rewardsself.env.render(act_list) Reshape data as tensors in the shape specified in function description, before returningprint(f'batch_acts =============== {batch_acts}')For discrete state spacetorch.tensor(batch_acts, dtype=torch.long).view(-1,)print(f'batch_acts =============== {batch_acts}') ALG STEP 4 Log the episodic returns and episodic lengths in this batch. The rewards-to-go (rtg) per episode per batch to return. The shape will be (num timesteps per episode) Iterate through each episode The discounted reward so far Iterate through all rewards in the episode. We go backwards for smoother calculation of each discounted return (think about why it would be harder starting from the beginning) Convert the rewards-to-go into a tensor Probability sampling for discrete actionsprint(f'obs ================== {obs}')print(f'mean ================== {mean}')print(f'dist ================== {dist}')print(f'action ====== {action} ========= {log_prob}') Query the actor network for a mean action Create a distribution with the mean action and std from the covariance matrix above. For more information on how this distribution works, check out Andrew Ng's lecture on it: https://www.youtube.com/watch?v=JjB58InuTqM Sample an action from the distribution Calculate the log probability for that action Return the sampled action and the log probability of that action in our distribution Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs Calculate the log probabilities of batch actions using most recent actor network. 
This segment of code is similar to that in get_action()For discrete actionsdist = Categorical(mean) Return the value vector V of each observation in the batch and log probabilities log_probs of each action in the batch Initialize default values for hyperparameters Algorithm hyperparameters Number of timesteps to run per batch Max number of timesteps per episode Number of times to update actor/critic per iteration Learning rate of actor optimizer Discount factor to be applied when calculating Rewards-To-Go Recommended 0.2, helps define the threshold to clip the ratio during SGA Miscellaneous parameters If we should render during rollout How often we save in number of iterations Sets the seed of our program, used for reproducibility of results Sets the type of environment to discrete or continuous Sets the number of trainig step Change any default values to custom values for specified hyperparameters Sets the seed if specified Check if our seed is valid first Set the seed Calculate logging values. I use a few python shortcuts to calculate each value without explaining since it's not too important to PPO; feel free to look it over, and if you have any questions you can email me (look at bottom of README) Round decimal places for more aesthetic logging messages Tracking the weight of the network Print logging statements Reset batch-specific logging data If the actor model is not specified, then exit Extract out dimensions of observation and action spacesenv.action_space.n env.action_space.shape[0] Build our policy the same way we build our actor model in PPO Load in the actor model saved by the PPO algorithm Evaluate our policy with a separate module, eval_policy, to demonstrate that once we are done training the model/policy with ppo.py, we no longer need ppo.py since it only contains the training algorithm. The model/policy itself exists independently as a binary file that can be loaded in with torch. | 10,592 | en | 0.825914 |
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
# Google Drive download URL for the compressed dataset archive.
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"

# Expected MD5 checksum of the downloaded archive, used to validate the on-disk cache.
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"

# Number of samples in each split of the dataset.
NUM_LINES = {
    "train": 3600000,
    "test": 400000,
}

# File name of the compressed archive as stored under `root`.
_PATH = "amazon_review_polarity_csv.tar.gz"

# Relative paths of the per-split CSV files inside the extracted archive.
_EXTRACTED_FILES = {
    "train": os.path.join("amazon_review_polarity_csv", "train.csv"),
    "test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}

# Dataset name consumed by the @_create_dataset_directory decorator below.
DATASET_NAME = "AmazonReviewPolarity"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root: str, split: Union[Tuple[str], str]):
    """AmazonReviewPolarity Dataset

    For additional details refer to https://arxiv.org/abs/1509.01626

    Number of lines per split:
        - train: 3600000
        - test: 400000

    Args:
        root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
        split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)

    :returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text
    :rtype: (int, str)
    """
    # TODO Remove this after removing conditional dependency
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
        )
    url_dp = IterableWrapper([URL])
    # First cache layer: the compressed archive on disk, validated via MD5.
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=lambda x: os.path.join(root, _PATH),
        hash_dict={os.path.join(root, _PATH): MD5},
        hash_type="md5",
    )
    # GDriveReader handles Google Drive's download-confirmation flow.
    cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
    # Second cache layer: the CSV for the requested split, extracted from the tar.
    cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
        filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
    )
    cache_decompressed_dp = (
        FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
    )
    cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
    data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
    # Each CSV row is (label, title, body, ...): emit (int label, joined text).
    return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| torchtext/datasets/amazonreviewpolarity.py | 2,621 | AmazonReviewPolarity Dataset
For additional details refer to https://arxiv.org/abs/1509.01626
Number of lines per split:
- train: 3600000
- test: 400000
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
:returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text
:rtype: (int, str)
TODO Remove this after removing conditional dependency | 554 | en | 0.7235 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import catkin_pkg.package
# Locate the catkin package root (one directory above this doc/ folder) and
# parse its package.xml so the docs always report the package's own version.
catkin_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
catkin_package = catkin_pkg.package.parse_package(os.path.join(catkin_dir, catkin_pkg.package.PACKAGE_MANIFEST_FILENAME))
# -- Project information -----------------------------------------------------
project = 'SMACHA ROS'
copyright = '2019, ReconCell'
author = 'Barry Ridge'
# The short X.Y version
# version = ''
# Both version and release are taken verbatim from package.xml above, so the
# documentation version tracks the catkin package version automatically.
version = catkin_package.version
# The full version, including alpha/beta/rc tags
# release = '0.0.1'
release = catkin_package.version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# NOTE(review): source_parsers was deprecated in Sphinx 1.8; newer
# recommonmark versions register the Markdown parser via the extensions
# list instead — confirm against the Sphinx version actually in use.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_logo = "_static/logo.png"
html_favicon = "_static/favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
    'canonical_url': 'https://reconcell.gitlab.io/smacha/smacha_ros/'
    # 'analytics_id': 'UA-XXXXXXX-1',  #  Provided by Google in your dashboard
    # 'logo_only': False,
    # 'display_version': True,
    # 'prev_next_buttons_location': 'bottom',
    # 'style_external_links': False,
    # 'vcs_pageview_mode': '',
    # 'style_nav_header_background': 'white',
    # # Toc options
    # 'collapse_navigation': True,
    # 'sticky_navigation': True,
    # 'navigation_depth': 4,
    # 'includehidden': True,
    # 'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'smacha_rosdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'smacha_ros.tex', 'smacha\\_ros package API',
     'Barry Ridge', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'smacha_ros', 'smacha_ros package API',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'smacha_ros', 'smacha_ros package API',
     author, 'smacha_ros', 'SMACHA is a meta-scripting, templating, and code generation engine for rapid prototyping of ROS SMACH state machines.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy intersphinx_mapping format; Sphinx >= 1.0
# also accepts named entries like {'python': ('https://docs.python.org/', None)}.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| smacha_ros/doc/conf.py | 6,981 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The short X.Y version version = '' The full version, including alpha/beta/rc tags release = '0.0.1' -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = '.rst' The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. 
html_theme = 'alabaster' Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} 'analytics_id': 'UA-XXXXXXX-1', Provided by Google in your dashboard 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'bottom', 'style_external_links': False, 'vcs_pageview_mode': '', 'style_nav_header_background': 'white', Toc options 'collapse_navigation': True, 'sticky_navigation': True, 'navigation_depth': 4, 'includehidden': True, 'titles_only': False Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). 
-- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ------------------------------------------------- Bibliographic Dublin Core info. The unique identifier of the text. This can be a ISBN number or the project homepage. epub_identifier = '' A unique identification for the text. epub_uid = '' A list of files that should not be packed into the epub file. -- Extension configuration ------------------------------------------------- -- Options for intersphinx extension --------------------------------------- Example configuration for intersphinx: refer to the Python standard library. -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. | 4,846 | en | 0.541851 |
# qubit number=4
# total number=32
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Bitwise XOR of two equal-length bit strings.

    NOTE: the joined result is reversed relative to the inputs (the
    bits are emitted back-to-front).  The benchmark only applies this
    to single-character strings, where the reversal is a no-op, so the
    quirk is preserved here for behavioral compatibility.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product (mod 2) of two equal-length bit strings, as '0'/'1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f on n control qubits plus one target.

    For every n-bit string ``rep`` with ``f(rep) == "1"``, a
    multi-controlled Toffoli flips the target; X gates conjugate the
    zero-bit controls so the Toffoli fires exactly on |rep>.
    NOTE: uses multi_control_toffoli_gate in 'noancilla' mode, see
    https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    """
    ctrl = QuantumRegister(n, "ofc")
    tgt = QuantumRegister(1, "oft")
    circ = QuantumCircuit(ctrl, tgt, name="Of")
    for idx in range(2 ** n):
        pattern = np.binary_repr(idx, n)
        if f(pattern) != "1":
            continue
        # Positions where the pattern bit is 0 must be conjugated with X
        # so the multi-controlled Toffoli triggers on this exact pattern.
        zero_positions = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_positions:
            circ.x(ctrl[j])
        circ.mct(ctrl, tgt[0], None, mode='noancilla')
        for j in zero_positions:
            circ.x(ctrl[j])
    return circ
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit and measure every qubit.

    The gate sequence is a fixed, generator-produced program built
    around the oracle for f; the trailing '# number=...' comments are
    markers emitted by the circuit generator identifying each gate
    insertion, so the exact statement order is the artifact itself.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[0],input_qubit[3]) # number=17
    prog.h(input_qubit[3]) # number=18
    prog.x(input_qubit[3]) # number=14
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.cx(input_qubit[2],input_qubit[3]) # number=22
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on the first n-1 qubits as controls, last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=24
    prog.cz(input_qubit[3],input_qubit[2]) # number=25
    prog.h(input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.x(input_qubit[2]) # number=23
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[1]) # number=20
    prog.cx(input_qubit[0],input_qubit[1]) # number=29
    prog.x(input_qubit[1]) # number=30
    prog.cx(input_qubit[0],input_qubit[1]) # number=31
    prog.x(input_qubit[3]) # number=27
    prog.x(input_qubit[3]) # number=28
    # circuit end
    # Measure every qubit into the matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit strings defining the oracle function
    # f(rep) = (a . rep) xor b over 3-bit inputs.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # FakeVigo mocks the 5-qubit IBM Vigo device (noisy simulation).
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump measurement counts, transpiled circuit size, and the circuit
    # drawing to the benchmark's CSV log file.
    writefile = open("../data/startQiskit_noisy1996.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| benchmark/startQiskit_noisy1996.py | 3,963 | qubit number=4 total number=32 implement the oracle O_f NOTE: use multi_control_toffoli_gate ('noancilla' mode) https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate oracle.barrier() circuit begin number=16 number=17 number=18 number=14 number=15 number=2 number=3 number=4 number=12 number=22 number=5 number=6 number=24 number=25 number=26 number=7 number=8 number=23 number=9 number=10 number=11 number=20 number=29 number=30 number=31 number=27 number=28 circuit end | 752 | en | 0.393626 |
# Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
from __future__ import absolute_import
# Re-export DataIngestion as this plugin package's sole public name.
from .data import DataIngestion
__all__ = ['DataIngestion']
| plugins/data/bAbI/digitsDataPluginBAbI/__init__.py | 165 | Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | 61 | en | 0.883921 |
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
    """
    Compute and save the relative errors of every point found on every network in a testing set.
    Relative error is defined in (Katz and Reggia 2017).
    test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
    """
    sizes, samples, _ = fe.load_test_data('%s.npz' % test_data_id)
    for alg in ['traverse', 'baseline']:
        for N, S in zip(sizes, samples):
            for samp in range(S):
                print('%s, alg %s, N %d,samp %d' % (test_data_id, alg, N, samp))
                data = np.load('results/%s_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp))
                W, fxV = data['W'], data['fxV']
                fxV, converged = rfx.refine_fxpts_capped(W, fxV)
                margin = rfx.estimate_forward_error(W, fxV)
                # Forward residual of the fixed-point equation at each point.
                residual = np.tanh(W.dot(fxV)) - fxV
                rel_err = np.fabs(residual / margin)
                # Split the per-point maxima into converged vs unconverged
                # points, dropping exact zeros from the converged sets.
                re_fx = rel_err[:, converged].max(axis=0)
                re_un = rel_err[:, ~converged].max(axis=0)
                re_fx = re_fx[re_fx > 0]
                f_fx = np.fabs(residual[:, converged]).max(axis=0)
                f_un = np.fabs(residual[:, ~converged]).max(axis=0)
                f_fx = f_fx[f_fx > 0]
                out = {'f_fx': f_fx, 'f_un': f_un, 're_fx': re_fx, 're_un': re_un}
                fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz' % (alg, test_data_id, N, samp), **out)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
    """
    Plot relative errors from points found by fiber traversal.
    test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
    Similarly the second column draws from Ns[1], test_data_ids[1].
    Each network sample within samp_range is shown on a separate row.
    """
    log = True  # log-scaled histogram y-axis
    mpl.rcParams['mathtext.default'] = 'regular'
    sp = 1  # running subplot index (row-major across the grid)
    for samp in samp_range:
        for (test_data_id,N) in zip(test_data_ids, Ns):
            print('samp %d, N %d'%(samp,N))
            npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
            # Max relative error per point: converged (fx) vs unconverged (un).
            m_fx, m_un = npz['re_fx'], npz['re_un']
            ax = plt.subplot(len(samp_range),len(Ns),sp)
            sp += 1
            # Unconverged points drawn in black, converged in white.
            if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
            plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
            # NOTE(review): lo and hi are computed but never used below.
            lo = 10*(int(np.log2(m_fx).min()/10)-1)
            if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
            else: hi = 0
            plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
            # Label only the left column, top row, and bottom row.
            if N == Ns[0]:
                plt.ylabel('# of points')
            if samp == samp_range[0]:
                ax.set_title('N = %d'%N)
            if samp == samp_range[-1]:
                plt.xlabel('Fiber Relative Error')
    plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
    """
    Analyze edge cases of relative errors on a single network.
    Uses the samp^{th} sample network of size N in test data test_data_id.
    Relative errors in the range (0, 2^{cap}) are considered edge cases.
    Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
    T and B are as defined in (Katz and Reggia 2017).
    """
    errs = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
    tvb = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
    # Count unconverged points whose relative error falls under the cap.
    num_edge = (errs['re_un'] < 2**cap).sum()
    percent = 100.*num_edge/np.array(tvb['T-B']-tvb['B-T'])
    print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(
        N, samp, tvb['B-T'], tvb['T-B'], num_edge, percent, cap))
    return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
    """
    Runs baseline_re_single_analysis on all networks in test_data_id of size N.
    cap is as in baseline_re_single_analysis.
    returns numpy.array percents, where
    percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
    """
    percents = []
    network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
    for (N, S) in zip(network_sizes, num_samples):
        if N not in Ns: continue
        for samp in range(S):
            percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
    percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    # Bug fix: the docstring promises the percents array, but the original
    # implementation fell off the end and implicitly returned None.
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
    """
    Plot relative errors from points found by the baseline solver.
    test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
    Similarly the second column draws from Ns[1], test_data_ids[1].
    Each network sample within samp_range is shown on a separate row.
    """
    log = True  # log-scaled histogram y-axis
    mpl.rcParams['mathtext.default'] = 'regular'
    sp = 1  # running subplot index (row-major across the grid)
    for samp in samp_range:
        for (test_data_id,N) in zip(test_data_ids, Ns):
            print('samp %d, N %d'%(samp,N))
            npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
            # Max relative error per point: converged (fx) vs unconverged (un).
            m_fx, m_un = npz['re_fx'], npz['re_un']
            ax = plt.subplot(len(samp_range),len(Ns),sp)
            sp += 1
            # Unconverged points drawn in black, converged in white.
            if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
            plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
            lo, hi = -20,50
            plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
            # Label only the left column, top row, and bottom row.
            if N == Ns[0]:
                plt.ylabel('# of points')
            if samp == samp_range[0]:
                ax.set_title('N = %d'%N)
            if samp == samp_range[-1]:
                plt.xlabel('Baseline Relative Error')
            # Also print the edge-case summary for this network.
            baseline_re_single_analysis(test_data_id, N, samp)
    plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
    """
    Compute and save relative distances between pairs of points found by the baseline solver.
    Relative distance is defined in (Katz and Reggia 2017).
    Computes for the samp^{th} sample network of size N in test_data_id.
    test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
    Only pairs within a random subset of points of size cap are inspected.
    logfilename is a file name at which progress updates are written.
    """
    # Fix: manage the log file with a with-block so the handle is closed
    # even if loading or the duplicate test raises (the original leaked it
    # on any exception before the explicit close()).
    with open(logfilename,'w') as logfile:
        logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
        npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
        fxV = npz['fxV_converged']
        fxV_unique = npz['fxV_unique']
        W = npz['W']
        # Restrict to a random subset when there are more than cap points.
        if cap is not None and fxV.shape[1] > cap:
            logfile.write('capping...\n')
            perm = np.random.permutation(fxV.shape[1])
            fxV = fxV[:,perm[:cap]]
        # For each unique point, collect relative distances to points judged
        # identical (in_RR) and distinct (out_RR).
        in_RR, out_RR = [],[]
        for j in range(fxV_unique.shape[1]):
            logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
            dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
            in_RR.append(RR[dups])
            out_RR.append(RR[~dups])
        in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
        npz["in_RR"], npz["out_RR"] = in_RR, out_RR
        fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
        logfile.write('Done.\n')
    print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
    """
    Wrapper function passed to multiprocessing.Pool
    """
    test_data_id, N, samp, cap, logfilename = args
    get_baseline_rd(test_data_id, N, samp, cap, logfilename)
def run_baseline_rd(test_data_id, Ns, num_procs):
    """
    Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
    Multiprocessing is used to run on multiple networks in parallel.
    num_procs is the number of processors to use.
    """
    cpu_count = mp.cpu_count()
    print('%d cpus, using %d'%(cpu_count, num_procs))
    # Build one job tuple per sample network of a requested size.
    jobs = []
    network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
    for N, S in zip(network_sizes, num_samples):
        if N not in Ns:
            continue
        cap = 20000
        for s in range(S):
            log_name = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
            jobs.append((test_data_id, N, s, cap, log_name))
    start_time = time.time()
    if num_procs < 1:
        # Run serially in this process (no multiprocessing).
        for job in jobs:
            pool_get_baseline_rd(job)
    else:
        pool = mp.Pool(processes=num_procs)
        pool.map(pool_get_baseline_rd, jobs)
        pool.close()
        pool.join()
    print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
    """
    Compute and save relative distances between pairs of points found by fiber traversal.
    Relative distance is defined in (Katz and Reggia 2017).
    Computes for the samp^{th} sample network of size N in test_data_id.
    test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
    Only pairs within a random subset of points of size cap are inspected.
    logfilename is a file name at which progress updates are written.
    """
    # Fix: manage the log file with a with-block so the handle is closed
    # even if loading or the duplicate test raises (the original leaked it
    # on any exception before the explicit close()).
    with open(logfilename,'w') as logfile:
        logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
        npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
        fxV = npz['fxV_converged']
        fxV_unique = npz['fxV_unique']
        W = npz['W']
        # Restrict to a random subset when there are more than cap points.
        if cap is not None and fxV.shape[1] > cap:
            logfile.write('capping...\n')
            perm = np.random.permutation(fxV.shape[1])
            fxV = fxV[:,perm[:cap]]
        # For each unique point, collect relative distances to points judged
        # identical (in_RR) and distinct (out_RR).
        in_RR, out_RR = [],[]
        for j in range(fxV_unique.shape[1]):
            logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
            dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
            in_RR.append(RR[dups])
            out_RR.append(RR[~dups])
        in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
        npz["in_RR"], npz["out_RR"] = in_RR, out_RR
        fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
        logfile.write('Done.\n')
    print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
    """
    Wrapper function passed to multiprocessing.Pool
    """
    test_data_id, N, samp, cap, logfilename = args
    get_traverse_rd(test_data_id, N, samp, cap, logfilename)
def run_traverse_rd(test_data_id, Ns, num_procs):
    """
    Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
    Multiprocessing is used to run on multiple networks in parallel.
    num_procs is the number of processors to use.
    """
    cpu_count = mp.cpu_count()
    print('%d cpus, using %d'%(cpu_count, num_procs))
    # Build one job tuple per sample network of a requested size.
    jobs = []
    network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
    for N, S in zip(network_sizes, num_samples):
        if N not in Ns:
            continue
        cap = 20000
        for s in range(S):
            log_name = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
            jobs.append((test_data_id, N, s, cap, log_name))
    start_time = time.time()
    if num_procs < 1:
        # Run serially in this process (no multiprocessing).
        for job in jobs:
            pool_get_traverse_rd(job)
    else:
        pool = mp.Pool(processes=num_procs)
        pool.map(pool_get_traverse_rd, jobs)
        pool.close()
        pool.join()
    print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
    """
    Use simple unique test: if max absolute coordinate-wise difference < 2**-32
    Compute and save distances between pairs of points found by both solvers.
    Computes for the samp^{th} sample network of size N in test_data_id.
    test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
    Only pairs within a random subset of points of size cap are inspected.
    Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
    logfilename is a file name at which progress updates are written.
    """
    # Fix: manage the log file with a with-block so the handle is closed
    # even if an intermediate load/computation raises (the original leaked
    # it on any exception before the explicit close()).
    with open(logfilename,'w') as logfile:
        rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
        buckets = {}
        # Histogram bucket edges: one per integer power of 2.
        bins = np.arange(-1025,3)
        for method_key in ['traverse','baseline']:
            npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
            fxV = npz['fxV_converged']
            buckets[method_key] = np.zeros(len(bins)-1)
            # Restrict to a random subset when there are more than cap points.
            if cap is not None and fxV.shape[1] > cap:
                rfx.hardwrite(logfile,'capping...\n')
                perm = np.random.permutation(fxV.shape[1])
                fxV = fxV[:,perm[:cap]]
            for j in range(fxV.shape[1]):
                rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
                # Max coordinate-wise distance from point j to every point,
                # clamped into the histogram's log2 range.
                dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
                dists[dists == 0] = 2.0**bins[0]
                logdists = np.log2(dists)
                logdists[logdists < bins[0]] = bins[0]
                logdists[logdists > bins[-1]] = bins[-1]
                hist,_ = np.histogram(logdists,bins=bins)
                buckets[method_key] += hist
        npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
        fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
        rfx.hardwrite(logfile,'Done.\n')
    print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
    """
    Wrapper function passed to multiprocessing.Pool
    """
    test_data_id, N, samp, cap, logfilename = args
    get_simple_rd(test_data_id, N, samp, cap, logfilename)
def run_simple_rd(test_data_id, Ns, num_procs):
    """
    Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
    Multiprocessing is used to run on multiple networks in parallel.
    num_procs is the number of processors to use.
    """
    cpu_count = mp.cpu_count()
    print('%d cpus, using %d'%(cpu_count, num_procs))
    # Build one job tuple per sample network of a requested size.
    jobs = []
    network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
    for N, S in zip(network_sizes, num_samples):
        if N not in Ns:
            continue
        cap = 1000
        for s in range(S):
            log_name = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
            jobs.append((test_data_id, N, s, cap, log_name))
    start_time = time.time()
    if num_procs < 1:
        # Run serially in this process (no multiprocessing).
        for job in jobs:
            pool_get_simple_rd(job)
    else:
        pool = mp.Pool(processes=num_procs)
        pool.map(pool_get_simple_rd, jobs)
        pool.close()
        pool.join()
    print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
    """
    Plot relative distances from points found by fiber traversal.
    test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
    """
    log = True  # log-scaled histogram y-axis
    mpl.rcParams['mathtext.default'] = 'regular'
    sp = 1  # running subplot index (row-major across the grid)
    for samp in samp_range:
        for (test_data_id,N) in zip(test_data_ids, Ns):
            print('samp %d, N %d'%(samp,N))
            npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
            # Distances within duplicate groups (in) vs across groups (out).
            in_rr, out_rr = npz['in_RR'], npz['out_RR']
            # Replace exact-zero distances so log2 below is finite.
            if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
            else: in_rr[in_rr == 0] = 2**(-30)
            ax = plt.subplot(len(samp_range),len(Ns),sp)
            sp += 1
            # Distinct pairs drawn in black, duplicate pairs in white.
            if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
            plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
            # Label only the left column, top row, and bottom row.
            if N == Ns[0]:
                plt.ylabel('# of pairs')
            if samp == samp_range[0]:
                ax.set_title('N = %d'%N)
            if samp == samp_range[-1]:
                plt.xlabel('Fiber Relative Distance')
            plt.xlim([-30,50])
            plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
    plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
    """
    Plot relative distances from points found by the baseline solver.
    test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
    """
    log = True  # log-scaled histogram y-axis
    mpl.rcParams['mathtext.default'] = 'regular'
    sp = 1  # running subplot index (row-major across the grid)
    for samp in samp_range:
        for (test_data_id,N) in zip(test_data_ids, Ns):
            print('samp %d, N %d'%(samp,N))
            npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
            # Distances within duplicate groups (in) vs across groups (out).
            in_rr, out_rr = npz['in_RR'], npz['out_RR']
            # Replace exact-zero distances so log2 below is finite.
            if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
            else: in_rr[in_rr == 0] = 2**(-30)
            ax = plt.subplot(len(samp_range),len(Ns),sp)
            sp += 1
            # Cap infinite distances at a finite multiple so they plot.
            if np.isinf(out_rr).any():
                if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
                else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
            # Diagnostic dump of the out-group distance distribution.
            print('out_rr:')
            print(out_rr.shape)
            print((out_rr==0).sum())
            print(np.isinf(in_rr).sum())
            print(np.isinf(out_rr).sum())
            print(np.isnan(out_rr).sum())
            # Distinct pairs drawn in black, duplicate pairs in white.
            if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
            # if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
            plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
            # plt.hist(in_rr,bins=10,facecolor='w')
            # Label only the left column, top row, and bottom row.
            if N == Ns[0]:
                plt.ylabel('# of pairs')
            if samp == samp_range[0]:
                ax.set_title('N = %d'%N)
            if samp == samp_range[-1]:
                plt.xlabel('Baseline Relative Distance')
            plt.xlim([-30,50])
            plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
    plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
    """
    Plot relative distances from points found by fiber traversal or baseline.
    test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
    Histogram buckets (one per integer power of 2) are pooled across all
    test ids, network sizes, and samples, then drawn as a single bar chart.
    """
    log = True
    mpl.rcParams['mathtext.default'] = 'regular'
    # Embed fonts as TrueType (type 42) so PDF/PS output remains editable.
    mpl.rcParams['pdf.fonttype'] = 42
    mpl.rcParams['ps.fonttype'] = 42
    buckets = None
    bins = None
    for samp in samp_range:
        for (test_data_id,N) in zip(test_data_ids, Ns):
            print('samp %d, N %d'%(samp,N))
            npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
            if buckets is None:
                # All result files share the same binning; take it from the first.
                buckets = np.zeros(npz['traverse_buckets'].shape)
                bins = npz['bins']
            buckets += npz['traverse_buckets']
            buckets += npz['baseline_buckets']
    plt.figure(figsize=(8,2.4))
    if log:
        # Show counts on a log2 scale (empty buckets stay at 0).
        buckets[buckets > 0] = np.log2(buckets[buckets > 0])
    # Pass bar positions positionally: the keyword was `left` in matplotlib < 2.2
    # but was renamed to `x` in 2.2 and `left=` was removed in 3.0, so a
    # positional first argument works across versions.
    plt.bar(bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
    plt.ylabel('# of pairs')
    plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')
    # Left-most bin that is both finite (> -1000 sentinel) and non-empty.
    xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
    xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
    plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
    plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
    if log:
        # Tick the y axis in powers of 2 to match the log2-transformed counts.
        ymax = np.ceil(buckets.max())+1
        ystep = np.ceil(ymax/5)
        plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
        plt.ylim([0,ymax+1])
    plt.tight_layout()
    plt.show()
| roundoff.py | 19,928 | Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Use simple unique test: if max absolute coordinate-wise difference < 2**-32
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
Wrapper function passed to multiprocessing.Pool
Wrapper function passed to multiprocessing.Pool
Wrapper function passed to multiprocessing.Pool
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
Methods for assessing treatment of finite-precision issues
don't multiprocess don't multiprocess don't multiprocess if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k') plt.hist(in_rr,bins=10,facecolor='w') plt.hist(buckets,bins=bins,log=log)'Max Coordinate-wise Distance') | 4,300 | en | 0.806865 |
import pytest
from selenium import webdriver
from model.application import Application
def pytest_addoption(parser):
    """Register the php4dvd command-line options with pytest's option parser."""
    option_specs = (
        ("--browser", "firefox", "browser type"),
        ("--base_url", "http://localhost:9080/php4dvd/", "base URL"),
    )
    for flag, default, help_text in option_specs:
        parser.addoption(flag, action="store", default=default, help=help_text)
@pytest.fixture(scope="session")
def browser_type(request):
    # Browser name chosen via the --browser command-line option (default: firefox).
    return request.config.getoption("--browser")
@pytest.fixture(scope="session")
def base_url(request):
    # Application base URL chosen via the --base_url command-line option.
    return request.config.getoption("--base_url")
@pytest.fixture(scope="session")
def app(request, browser_type, base_url):
    """Session-wide Application fixture backed by the requested WebDriver.

    The driver is quit automatically when the test session ends.
    """
    if browser_type == "firefox":
        driver = webdriver.Firefox()
    elif browser_type == "chrome":
        driver = webdriver.Chrome()
    elif browser_type == "ie":
        driver = webdriver.Ie()
    else:
        # Previously an unknown value left `driver` unbound -> NameError below.
        raise ValueError("Unsupported browser type: %r" % browser_type)
    #driver.implicitly_wait(30)
    request.addfinalizer(driver.quit)  # close browser at session teardown
    return Application(driver, base_url)
| php4dvd/conftest.py | 947 | driver.implicitly_wait(30)close brawser | 39 | en | 0.314578 |
# name : Shoby Gnanasekaran
# net id: shoby
from dungeonchar import DungeonCharacter
from healable import Healable
from hero import Hero
class Priestess(Hero, Healable):
    """A hero that automatically heals itself after surviving damage.

    Otherwise behaves exactly like a standard Hero.
    """
    def __init__(self, name, model, **kwargs):
        super().__init__(name = name, model = model, **kwargs)
        # Continue the MRO lookup past DungeonCharacter so the remaining base
        # (presumably Healable) is also initialized with kwargs — TODO confirm
        # against the Hero/DungeonCharacter/Healable MRO.
        super(DungeonCharacter, self).__init__(**kwargs)
    def take_damage(self, dmg, source):
        """Take damage as a Hero does, then self-heal if still alive."""
        hp_before_attack = self.hp
        super().take_damage(dmg, source)
        # Heal only when still alive, the damage actually landed (hp dropped),
        # and the source was not a pit.
        if self._is_alive and hp_before_attack > self.hp and source != "pit":
            heal_message = self.heal_itself()
            self.model.announce(f"{self.name}: {heal_message}")
| priestess.py | 888 | Priestess is a hero with it own statistics. The basic behaviour is same as the hero.
Special ability is to heal everytime after taking damage
after taking damage, if the priestess is not dead, it heals itself
name : Shoby Gnanasekaran net id: shoby | 252 | en | 0.926904 |
from django.contrib import admin
from claims import models
# Register your models here.
for _model in (
    models.AddressCountry,
    models.AddressRegion,
    models.AddressCity,
    models.ProjectStatus,
):
    admin.site.register(_model)
| claims/admin.py | 256 | Register your models here. | 26 | en | 0.957485 |
#!/usr/bin/env python
# Copyright (c) 2021 IBM Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import rosbag
def main():
    """Print the serialized size of every topic in a rosbag, smallest first."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i","--input_bag", required=True)
    args = parser.parse_args()
    input_bag = args.input_bag

    topic_size_dict = {}
    # Open via context manager so the bag file is closed even on error
    # (previously the Bag was never closed). raw=True skips message
    # deserialization; msg[1] is the raw serialized byte buffer.
    with rosbag.Bag(input_bag, 'r') as bag:
        for topic, msg, time in bag.read_messages(raw=True):
            topic_size_dict[topic] = topic_size_dict.get(topic, 0) + len(msg[1])

    # Sort topics by total size, ascending.
    topic_size = sorted(topic_size_dict.items(), key=lambda x: x[1])
    print("topic", "size [GB]")
    for topic, size in topic_size:
        size_gb = size/(1024.0**3)
        print(topic, size_gb)
if __name__ == "__main__":
main()
| mf_localization_mapping/script/check_topic_size.py | 1,741 | !/usr/bin/env python Copyright (c) 2021 IBM Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,077 | en | 0.869003 |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a ludiriumd node can maintain list of wallets loading on startup
"""
from test_framework.test_framework import LudiriumTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(LudiriumTestFramework):
    """Functional test for the wallets' load_on_startup flag."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_nodes(self):
        # Start without -wallet args so no wallet is loaded implicitly.
        self.add_nodes(self.num_nodes)
        self.start_nodes()
    def run_test(self):
        self.log.info('Should start without any wallets')
        assert_equal(self.nodes[0].listwallets(), [])
        assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
        self.log.info('New default wallet should load by default when there are no other wallets')
        self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
        self.restart_node(0)
        assert_equal(self.nodes[0].listwallets(), [''])
        self.log.info('Test load on startup behavior')
        # createwallet also loads the wallet immediately; load_on_startup only
        # controls whether it is re-loaded after a restart.
        self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
        # unload/load can flip the startup flag at the same time.
        self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
        assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
        # After restart only wallets flagged load_on_startup remain loaded.
        self.restart_node(0)
        assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
        self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
        # Loading '' without load_on_startup leaves its startup flag False.
        self.nodes[0].loadwallet(filename='')
        self.restart_node(0)
        assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
WalletStartupTest().main()
| test/functional/wallet_startup.py | 2,543 | Test wallet load on startup.
Verify that a ludiriumd node can maintain list of wallets loading on startup
!/usr/bin/env python3 Copyright (c) 2017-2020 The Ludirium Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. | 315 | en | 0.545345 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \
read_binary_float_token
from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix
from openvino.tools.mo.ops.tdnncomponent import TdnnComponent
class TdnnComponentFrontExtractor(FrontExtractorOp):
    """Front extractor for Kaldi's <tdnncomponent> binary descriptor."""
    op = 'tdnncomponent'
    enabled = True
    @classmethod
    def extract(cls, node):
        """Parse TdnnComponent parameters from the node's binary stream.

        Tokens must be consumed in their on-disk order, so training-only
        fields are still read (to advance the stream) even though only
        weights, biases, and time offsets end up in the node attributes.
        """
        pb = node.parameters
        collect_until_token(pb, b'<MaxChange>')
        max_change = read_binary_float_token(pb)
        collect_until_token(pb, b'<L2Regularize>')
        collect_until_token(pb, b'<LearningRate>')
        collect_until_token(pb, b'<TimeOffsets>')
        time_offsets = read_binary_vector(pb, False, np.int32)
        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        bias_params = read_binary_vector(pb)
        collect_until_token(pb, b'<OrthonormalConstraint>')
        orthonormal_constraint = read_binary_float_token(pb)  # used only on training
        collect_until_token(pb, b'<UseNaturalGradient>')
        use_natural_grad = read_binary_bool_token(pb)  # used only on training
        collect_until_token(pb, b'<NumSamplesHistory>')
        num_samples_hist = read_binary_float_token(pb)
        collect_until_token(pb, b'<AlphaInOut>')
        alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb)  # for training, usually (4, 4)
        # according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details
        # it looks like it's used only during training (but not 100% sure)
        collect_until_token(pb, b'<RankInOut>')
        rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)
        # An empty bias vector means the component has no bias term.
        biases = mo_array(bias_params) if len(bias_params) != 0 else None
        attrs = {
                 'weights': np.reshape(weights, weights_shape),
                 'biases': biases,
                 'time_offsets': time_offsets,
        }
        TdnnComponent.update_node_stat(node, attrs)
        return cls.enabled
| tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py | 2,436 | Copyright (C) 2018-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 used only on training used only on training for training, usually (4, 4) according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.htmldetails it looks like it's used only during training (but not 100% sure) | 322 | en | 0.763044 |
from fastcore.foundation import L
# 0~11 숫자를 포함한 L을 생성합니다 (range 사용)
t = ____________
print(t)
# L의 내용을 두 배 불립니다
t __ 2
print(t)
# 0이 담긴 위치 (0, 12) 를 튜플 방식으로 찾아서 반환합니다
t_1 = t[_, __]
print(t_1)
# 0이 담긴 위치 (0, 12) 를 마스킹 방식으로 찾아서 반환합니다
# - 마스크를 만듭니다 0과 12번째 위치에만 True를 넣습니다
mask = L([True])
mask += L([False] * 11)
mask += L([True])
mask += L([False] * 11)
t_2 = t______
print(t_2) | exercises/chapter01/exc_01_07.py | 555 | 0~11 숫자를 포함한 L을 생성합니다 (range 사용) L의 내용을 두 배 불립니다 0이 담긴 위치 (0, 12) 를 튜플 방식으로 찾아서 반환합니다 0이 담긴 위치 (0, 12) 를 마스킹 방식으로 찾아서 반환합니다 - 마스크를 만듭니다 0과 12번째 위치에만 True를 넣습니다 | 160 | ko | 1.000045 |
'''
Advent of Code - 2019
--- Day 2: 1202 Program Alarm ---
'''
from utils import *
from intcode import IntcodeRunner, HaltExecution
def parse_input(day):
    """Return the day's puzzle input as a single list of intcode integers.

    day_input yields one parsed row per input line; the intcode program is
    the first (and only) row.
    """
    return day_input(day, integers)[0]
def part1(program, noun=12, verb=2):
    """Patch memory addresses 1 and 2 with noun/verb, run the intcode
    program until it halts, and return the value left at address 0."""
    machine = IntcodeRunner(program)
    machine.set_mem(1, noun)
    machine.set_mem(2, verb)
    try:
        while True:
            next(machine.run())
    except HaltExecution:
        pass
    return machine.get_mem(0)
def part2(program, target=19690720):
    """Search noun/verb inputs until the program leaves `target` at address 0.

    Returns 100*noun + verb for the first match (nouns tried from 100 down
    to 0, verbs from 0 to 99), resetting the machine between attempts.
    """
    machine = IntcodeRunner(program)
    for noun in range(100, -1, -1):
        for verb in range(100):
            machine.set_mem(1, noun)
            machine.set_mem(2, verb)
            try:
                while True:
                    next(machine.run())
            except HaltExecution:
                pass
            if machine.get_mem(0) == target:
                return 100*noun+verb
            machine.reset()
if __name__ == '__main__':
data = parse_input('02')
print(f'Part One: {part1(data)}')
print(f'Part Two: {part2(data)}')
| challenges/2019/python/d02.py | 1,091 | Advent of Code - 2019
--- Day 2: 1202 Program Alarm --- | 60 | en | 0.777143 |
"""Workout schema module"""
import graphene
from exercises.schema import ExerciseType
from exercises.models import Exercise
class Query(graphene.ObjectType):
    """Workout query class"""
    workout = graphene.List(ExerciseType,
                            body_part=graphene.String(),
                            exercise_name=graphene.String(),
                            equipment=graphene.String(),
                            level=graphene.String())

    def resolve_workout(self, info, **kwargs):
        """Resolve the workout field, narrowing the exercise queryset by
        each filter argument that was provided (all matching is
        case-insensitive via lower-casing)."""
        exercises = Exercise.objects.all()

        body_part = kwargs.get('body_part')
        if body_part:
            exercises = exercises.select_related('body_part').filter(
                body_part__name=body_part.lower())

        level = kwargs.get('level')
        if level:
            exercises = exercises.select_related('level').filter(
                level__difficulty=level.lower())

        exercise_name = kwargs.get('exercise_name')
        if exercise_name:
            exercises = exercises.filter(
                name__icontains=exercise_name.lower())

        equipment = kwargs.get('equipment')
        if equipment:
            exercises = exercises.select_related('equipment').filter(
                equipment__name=equipment.lower())

        return exercises
| quarantineworkout/workout/schema.py | 1,325 | Workout query class
query resolver for workout property
Workout schema module | 77 | en | 0.623682 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
  """Tests for the low-level Variable/Assign/TemporaryVariable state ops."""
  def _initFetch(self, x, tftype, use_gpu=None):
    # Create a variable with x's shape, assign x into it, and fetch it back.
    with self.test_session(use_gpu=use_gpu):
      p = state_ops.variable_op(x.shape, tftype)
      op = state_ops.assign(p, x)
      op.op.run()
      return p.eval()
  def _testTypes(self, vals):
    # Round-trip `vals` through a variable for every supported dtype,
    # on both CPU and GPU.
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
      self.setUp()
      x = vals.astype(dtype)
      tftype = _NP_TO_TF[dtype]
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether the
      # Variable op has an implementation for that type on GPU as we expect
      # that Variable and Assign have GPU implementations for matching tf.
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
  def testBasic(self):
    self._testTypes(np.arange(0, 20).reshape([4, 5]))
  def testset_shape(self):
    # set_shape=False leaves the variable with an unknown static shape.
    p = state_ops.variable_op([1, 2], dtypes.float32)
    self.assertEqual([1, 2], p.get_shape())
    p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
  def testAssign(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, dtypes.float32)
    self.assertShapeEqual(value, var)
    assigned = state_ops.assign(var, value)
    self.assertShapeEqual(value, assigned)
  def testAssignNoValidateShape(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, dtypes.float32)
    self.assertShapeEqual(value, var)
    assigned = state_ops.assign(var, value, validate_shape=False)
    self.assertShapeEqual(value, assigned)
  def testAssignNoVarShape(self):
    # Assigning a known-shape value to a shapeless variable: the assign
    # output still gets the value's shape.
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    assigned = state_ops.assign(var, value)
    self.assertShapeEqual(value, assigned)
  def testAssignNoVarShapeNoValidateShape(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    assigned = state_ops.assign(var, value, validate_shape=False)
    self.assertShapeEqual(value, assigned)
  def _NewShapelessTensor(self):
    # Placeholder with no static shape information at all.
    tensor = array_ops.placeholder(dtypes.float32)
    self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
    return tensor
  def testAssignNoValueShape(self):
    # A shapeless value assigned to a shaped variable keeps the var's shape.
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, dtypes.float32)
    assigned = state_ops.assign(var, value)
    self.assertEqual(shape, var.get_shape())
    self.assertEqual(shape, assigned.get_shape())
  def testAssignNoValueShapeNoValidateShape(self):
    # Without shape validation, the assign output's shape becomes unknown.
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, dtypes.float32)
    self.assertEqual(shape, var.get_shape())
    assigned = state_ops.assign(var, value, validate_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
  def testAssignNoShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       state_ops.assign(var, value).get_shape())
  def testAssignNoShapeNoValidateShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(
          tensor_shape.unknown_shape(),
          state_ops.assign(
              var, value, validate_shape=False).get_shape())
  def testAssignUpdate(self):
    # assign_add / assign_sub preserve the variable's static shape.
    var = state_ops.variable_op([1, 2], dtypes.float32)
    added = state_ops.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())
  def testAssignUpdateNoVarShape(self):
    # Shapeless variable: the update output takes the value's shape.
    var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
    added = state_ops.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())
  def testAssignUpdateNoValueShape(self):
    # Shapeless value: the update output takes the variable's shape.
    var = state_ops.variable_op([1, 2], dtypes.float32)
    added = state_ops.assign_add(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], added.get_shape())
    subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], subbed.get_shape())
  def testAssignUpdateNoShape(self):
    # Neither side has a shape: the update output shape stays unknown.
    var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
    added = state_ops.assign_add(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
    subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
  def testTemporaryVariable(self):
    # Temporary variables live only until explicitly destroyed by name.
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="foo")
      var = state_ops.assign(var, [[4.0, 5.0]])
      var = state_ops.assign_add(var, [[6.0, 7.0]])
      final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
      self.assertAllClose([[10.0, 12.0]], final.eval())
  def testDestroyNonexistentTemporaryVariable(self):
    # Destroying under a name that was never created must fail.
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
      final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
      with self.assertRaises(errors.NotFoundError):
        final.eval()
  def testDuplicateTemporaryVariable(self):
    # Two temporary variables with the same name must fail on creation.
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="dup")
      var1 = state_ops.assign(var1, [[1.0, 2.0]])
      var2 = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="dup")
      var2 = state_ops.assign(var2, [[3.0, 4.0]])
      final = var1 + var2
      with self.assertRaises(errors.AlreadyExistsError):
        final.eval()
  def testDestroyTemporaryVariableTwice(self):
    # The second destroy of the same name must fail.
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
      val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      final = val1 + val2
      with self.assertRaises(errors.NotFoundError):
        final.eval()
  def testTemporaryVariableNoLeak(self):
    # A temporary variable that is never destroyed must not leak.
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="bar")
      final = array_ops.identity(var)
      final.eval()
  def testTwoTemporaryVariablesNoLeaks(self):
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="var1")
      var2 = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="var2")
      final = var1 + var2
      final.eval()
  def testAssignDependencyAcrossDevices(self):
    with self.test_session(use_gpu=True):
      # The variable and an op to increment it are on the GPU.
      var = state_ops.variable_op([1], dtypes.float32)
      state_ops.assign(var, [1.0]).eval()
      increment = state_ops.assign_add(var, [1.0])
      with ops.control_dependencies([increment]):
        with ops.device("/cpu:0"):
          # This mul op is pinned to the CPU, but reads the variable from the
          # GPU. The test ensures that the dependency on 'increment' is still
          # honored, i.e., the Send and Recv from GPU to CPU should take place
          # only after the increment.
          result = math_ops.multiply(var, var)
      self.assertAllClose([4.0], result.eval())
  def testIsVariableInitialized(self):
    # A variable is reported initialized only after its first assign.
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu):
        v0 = state_ops.variable_op([1, 2], dtypes.float32)
        self.assertEqual(False, variables.is_variable_initialized(v0).eval())
        state_ops.assign(v0, [[2.0, 3.0]]).eval()
        self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
test.main()
| tensorflow/python/kernel_tests/variable_ops_test.py | 10,072 | Tests for tensorflow.ops.tf.variable_op.
Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== NOTE(touts): the GPU test should pass for all types, whether the Variable op has an implementation for that type on GPU as we expect that Variable and Assign have GPU implementations for matching tf. The variable and an op to increment it are on the GPU. This mul op is pinned to the CPU, but reads the variable from the GPU. The test ensures that the dependency on 'increment' is still honored, i.e., the Send and Recv from GPU to CPU should take place only after the increment. | 1,183 | en | 0.857427 |
import pylab as pl
from get_fish_info import get_fish_info
from fit_integrator_model import get_model_result, get_target_result
import numpy as np
from pathlib import Path
import gmm_model_fit
import pandas as pd
from pymoo.factory import get_problem, get_visualization, get_decomposition
# import random
#
# for dt in [0.001, 0.002, 0.005, 0.01, 0.1]:
#
# tau = 4
# Is = np.arange(0, 30, dt)
# xs = np.empty_like(Is)
# xs[0]
#
# for i in range(1, len(Is)):
# dx = random.gauss(0.2, 5) - xs[i - 1]
# xs[i] = xs[i - 1] + dx * dt / tau
# pl.plot(Is, xs)
# pl.show()
# sdf
root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/surrogate_fish1")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/scn1lab_NIBR")
#root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/disc1_hetinx")
df = pd.read_hdf(root_path / "all_data.h5", key="all_bouts")
#
# df_extracted_features, df_extracted_binned_features, \
# df_extracted_binned_features_same_direction, \
# df_extracted_binned_features_heading_angle_change_histograms, \
# df_extracted_binned_features_inter_bout_interval_histograms = get_mean_fish_info(df)
#
# print(df_extracted_features)
# pl.plot(df_extracted_features.loc["wt", :]["correctness"])
# pl.plot(df_extracted_features.loc["het", :]["correctness"])
# pl.plot(df_extracted_features.loc["hom", :]["correctness"])
#
# pl.figure()
# pl.plot(df_extracted_features.loc["wt", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["het", :]["inter_bout_interval"])
# pl.plot(df_extracted_features.loc["hom", :]["inter_bout_interval"])
#
# pl.figure()
# pl.plot(df_extracted_binned_features.loc["wt", 0])
# pl.plot(df_extracted_binned_features.loc["wt", 1])
# pl.plot(df_extracted_binned_features.loc["wt", 2])
# pl.plot(df_extracted_binned_features.loc["wt", 3])
#
# pl.figure()
# pl.plot(df_extracted_binned_features_same_direction.loc["wt"])
# pl.plot(df_extracted_binned_features_same_direction.loc["het"])
# pl.plot(df_extracted_binned_features_same_direction.loc["hom"])
#
#
# pl.figure()
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 0])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 1])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 2])
# pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 3])
#
# pl.show()
#
#
# pl.show()
#
#
# print(df_extracted_features)
# gg
# sdf
genotype = "hom"
target_df_correctness_as_function_of_coherence, \
target_df_inter_bout_interval_as_function_of_coherence, \
target_df_binned_correctness, \
target_df_binned_same_direction, \
target_df_binned_features_heading_angle_change_histograms, \
target_df_binned_features_inter_bout_interval_histograms, \
target_df_gmm_fitting_results = get_target_result(root_path, genotype)
# colors = ["#000000", "#330000", "#990000", "#CC3333"]
#
# for i in range(4):
# pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), label=f"Coherence {i*25}%", color=colors[i], linewidth=2)
#
# pl.xlabel("Heading angle change (deg)")
# pl.ylabel("Probability")
# pl.legend()
#
# fig = pl.figure()
# fig.suptitle("Target functions")
# pl.subplot(211)
# pl.plot(target_df_correctness_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot(target_df_inter_bout_interval_as_function_of_coherence, 'o-', color='black')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# Box-plot styling and human-readable labels for the five optimisation
# objectives used in the figures below.
medianprops = {'linestyle': '-.', 'linewidth': 2.5, 'color': 'firebrick'}
errornames = [
    "Error: 'Correctness as function of coherence'",
    "Error: 'Inter-bout interval as function of coherence'",
    "Error: 'Binned correctness at 25, 50, 100 %'",
    "Error: 'Binned same direction'",
    "Error: 'Histogram weights'",
]
repeat = 1
# Load the optimisation history for this genotype/repeat:
#   X = parameter vectors per generation, F = objective values per generation.
def _load_history(array_name):
    return np.load(root_path / f"leaky_integrator_model2_{array_name}_{genotype}_{repeat}.npy")

X = _load_history("X")
F = _load_history("F")
#
#
# for i in range(7):
# F[-1, :, i] = F[-1, :, i] / np.max(F[-1, :, i])
# print(F.shape)
#
# i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4] + F[-1, :, 5] + F[-1, :, 6])
# print(F[-1, i6, 0])
# dd
#get_decomposition("asf").do(F[-1], [1, 1, 1, 1, 1, 1, 1]).argmin()
#print(I)
#sdfsdf
#X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}_single_error.npy")
#F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}_single_error.npy")
# from pymoo.factory import get_decision_making, get_reference_directions
#
# ref_dirs = get_reference_directions("das-dennis", 4, n_partitions=12)
# F = get_problem("dtlz1").pareto_front(ref_dirs)
#
# weights = np.array([10.25, 10.25, 0.25, 0.25])
# a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F, return_pseudo_weights=True)
# pl.plot(F[:, 0], F[:,1], 'o')
# pl.plot(F[a, 0], F[a,1], 'o')
# pl.show()
#
# print(a, pseudo_weights, F.shape)
# ghj
from pymoo.factory import get_decision_making, get_reference_directions
#weights = [1000, 1000, 1000, 0, 0, 0, 0]
#a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F[-1], return_pseudo_weights=True)
#print(pseudo_weights[0])
#print(a, pseudo_weights)
#dfg
# Normalise each of the five error objectives in the final generation by its
# 75th percentile so that differently scaled objectives contribute comparably
# to the weighted compromise computed below.  Vectorised over the objective
# axis — numerically identical to the previous per-column loop.
F[-1, :, :5] = F[-1, :, :5] / np.percentile(F[-1, :, :5], 75, axis=0)
# print(F.shape)
#
#i6 = a
#i1 = np.argmin(F[-1, :, 0])
# i2 = np.argmin(F[-1, :, 1])
# i3 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500)
# i4 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3])
# i5 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25 + F[-1, :, 6]*5800)
# #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 6800)
# i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
# Best compromise across all five normalised objectives; objective 1 (the
# inter-bout-interval error) is weighted three times as heavily as the rest.
_weights = (1, 3, 1, 1, 1)
i6 = np.argmin(sum(w * F[-1, :, k] for k, w in enumerate(_weights)))
#from pymoo.factory import get_decision_making
#dm = get_decision_making("high-tradeoff")
#I = dm.do(pf)
# print(F.shape)
# np.set_printoptions(precision=4, suppress=True)
# print((X[-1, i]))
# #gdfgh
# for error_i in range(len(errornames)):
# pl.figure()
# pl.title(errornames[error_i])
# bp = pl.boxplot(F[:, :, error_i].T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen+1], [F[gen, :, error_i].min()], s=5, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# dd
#
# pl.figure()
# pl.title("Compromise between all error functions")
# #error = F[:, :, 0] + F[:, :, 1]*500 + F[:, :, 3] + F[:, :, 5]*0.25 + F[:, :, 6]*500
# error = F[:, :, 0] + F[:, :, 1]*2500 + F[:, :, 3]*5 + F[:, :, 5]*0.5 + F[:, :, 6]*1500
#
# bp = pl.boxplot(error.T, whis=[5, 95], showfliers=False, medianprops=medianprops)
# for gen in range(50):
# sc = pl.scatter([gen + 1], [error[gen].min()], s=10, marker='.', c='firebrick')
# pl.yscale("log")
# pl.xlabel("Generation")
# pl.ylabel("Log Error")
# pl.show()
# pl.figure()
# pl.scatter(F[-1, :, 0], F[-1, :, 1], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0], F[-1, i1, 1], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0], F[-1, i2, 1], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0], F[-1, i3, 1], s=15, marker='o', c='C3', label="Compromise")
# pl.legend()
# pl.xlabel(errornames[0])
# pl.ylabel(errornames[1])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500, F[-1, :, 3], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500, F[-1, i1, 3], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500, F[-1, i2, 3], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500, F[-1, i3, 3], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500, F[-1, i4, 3], s=15, marker='o', c='C4', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1 and 2")
# pl.ylabel(errornames[3])
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3], F[-1, :, 5], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3], F[-1, i1, 5], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3], F[-1, i2, 5], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3], F[-1, i3, 5], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3], F[-1, i4, 5], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3], F[-1, i5, 5], s=15, marker='o', c='C5', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, and 3")
# pl.ylabel(errornames[5])
#
#
# pl.figure()
# pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25, F[-1, :, 6], s=10, marker='.', c='C0', label='Individual')
# pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3] + F[-1, i1, 5]*0.25, F[-1, i1, 6], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'")
# pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3] + F[-1, i2, 5]*0.25, F[-1, i2, 6], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'")
# pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3] + F[-1, i3, 5]*0.25, F[-1, i3, 6], s=15, marker='o', c='C3', label="Compromise between 1 and 2")
# pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3] + F[-1, i4, 5]*0.25, F[-1, i4, 6], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3")
# pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3] + F[-1, i5, 5]*0.25, F[-1, i5, 6], s=15, marker='o', c='C5', label="Compromise between 1, 2, 3, and 4")
# pl.scatter(F[-1, i6, 0] + F[-1, i6, 1]*500 + F[-1, i6, 3] + F[-1, i6, 5]*0.25, F[-1, i6, 6], s=15, marker='o', c='C6', label="Compromise between all")
# pl.legend()
# pl.xlabel("Compromise between 1, 2, 3, and 4")
# pl.ylabel(errornames[6])
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i1])
# fig.suptitle("Best for 'Correctness as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C1')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i2])
# fig.suptitle("Best for 'Inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C2')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(211)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(212)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i3])
# fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C3')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i4])
# fig.suptitle("Compromise between all three error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C4')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C4')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
#
#
# fig = pl.figure()
# model_df_correctness_as_function_of_coherence, \
# model_df_inter_bout_interval_as_function_of_coherence, \
# model_df_binned_correctness, \
# model_df_binned_same_direction, \
# model_df_binned_features_heading_angle_change_histograms, \
# model_df_binned_features_inter_bout_interval_histograms, \
# model_df_gmm_fitting_results = get_model_result(X[-1, i5])
# fig.suptitle("Compromise between all four error functions")
# pl.subplot(221)
# pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Correctness (%)")
# pl.subplot(222)
# pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
# pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C5')
# pl.xlabel("Coherence (%)")
# pl.ylabel("Inter-bout interval (s)")
# pl.subplot(223)
# for i in range(4):
# pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black')
# pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C5')
# pl.xlabel("Correctness (%)")
# pl.ylabel("Time (s)")
# pl.subplot(224)
# pl.plot(target_df_binned_same_direction, 'o-', color='black')
# pl.plot(model_df_binned_same_direction, 'o--', color='C5')
# pl.xlabel("Time since last bout (s)")
# pl.ylabel("Correctness (%)")
# Visual comparison of the selected compromise individual (i6) against the
# behavioural target statistics, one panel per summary feature.
fig = pl.figure()
(model_df_correctness_as_function_of_coherence,
 model_df_inter_bout_interval_as_function_of_coherence,
 model_df_binned_correctness,
 model_df_binned_same_direction,
 model_df_binned_features_heading_angle_change_histograms,
 model_df_binned_features_inter_bout_interval_histograms,
 model_df_gmm_fitting_results) = get_model_result(X[-1, i6])
fig.suptitle("Compromise between all five error functions")

coherence_levels = [0, 25, 50, 100]
model_color = 'C6'

# Correctness as a function of coherence.
pl.subplot(231)
pl.plot(coherence_levels, target_df_correctness_as_function_of_coherence.values, 'o-', color='black')
pl.plot(coherence_levels, model_df_correctness_as_function_of_coherence.values, 'o--', color=model_color)
pl.xlabel("Coherence (%)")
pl.ylabel("Correctness (%)")

# Inter-bout interval as a function of coherence.
pl.subplot(232)
pl.plot(coherence_levels, target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black')
pl.plot(coherence_levels, model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color=model_color)
pl.xlabel("Coherence (%)")
pl.ylabel("Inter-bout interval (s)")

# Time-binned correctness, one trace per stimulus/coherence level.
pl.subplot(233)
for stim_idx in range(4):
    pl.plot(target_df_binned_correctness.loc[stim_idx, :].droplevel("stim"), 'o-', color='black')
    pl.plot(model_df_binned_correctness.loc[stim_idx, :].droplevel("stim"), 'o--', color=model_color)
pl.xlabel("Time (s)")
pl.ylabel("Correctness (%)")

# Correctness as a function of time since the previous bout.
pl.subplot(234)
pl.plot(target_df_binned_same_direction, 'o-', color='black')
pl.plot(model_df_binned_same_direction, 'o--', color=model_color)
pl.xlabel("Time since last bout (s)")
pl.ylabel("Correctness (%)")

# Heading-angle-change histograms, one trace per stimulus/coherence level.
pl.subplot(235)
for stim_idx in range(4):
    pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[stim_idx, :].droplevel("stim"), color="black")
    pl.plot(model_df_binned_features_heading_angle_change_histograms.loc[stim_idx, :].droplevel("stim"), color=model_color, linestyle='--')
pl.xlabel("Heading angle change")
pl.ylabel("Probability")

pl.show()
found_parameters = []
# For every optimization run (12 repeats x 3 genotypes), load the final
# populations and pick one "compromise" individual from the last
# generation by minimizing a fixed weighted sum of the error functions.
for repeat in range(12):
    for genotype in ["wt", "het", "hom"]:
        # X: candidate parameter vectors; F: per-objective error values.
        # Indexing below uses X[-1]/F[-1], i.e. the final generation
        # (presumably shaped (generations, population, n_objectives) —
        # TODO confirm against the file that wrote these arrays).
        X = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}.npy")
        F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}.npy")
        # Normalize each of the 5 objectives by its 75th percentile so
        # the weighted sum below compares them on a common scale
        # (percentile chosen over median; see commented alternative).
        for i in range(5):
            #F[-1, :, i] = F[-1, :, i] / np.median(F[-1, :, i])
            F[-1, :, i] = F[-1, :, i] / np.percentile(F[-1, :, i], 75)
        # Index of the individual minimizing the current weighting of
        # the normalized objectives; the commented lines are earlier
        # weightings kept for reference.
        #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + 5 * F[-1, :, 3] + F[-1, :, 5] + 5 * F[-1, :, 6])
        #i6 = np.argmin(F[-1, :, 0] + 5 * F[-1, :, 1] + 20 * F[-1, :, 4] + F[-1, :, 5] + 5 * F[-1, :, 6])
        i6 = np.argmin(F[-1, :, 0] + 3 * F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4])
        #i6 = np.argmin(F[-1, :, 0] + 2 * F[-1, :, 1] + F[-1, :, 2] + 3 * F[-1, :, 3] + F[-1, :, 5] + F[-1, :, 6])
        #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 500 + F[-1, :, 3] + F[-1, :, 5] * 0.25 + F[-1, :, 6] * 500)
        #i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 1500)
        #i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)
        # Record [genotype, repeat, generation, *parameters]; 49 is the
        # hard-coded index of the last generation (50 generations total
        # — NOTE(review): keep in sync with the optimization run length).
        found_parameters.append([genotype, repeat, 49] + list(X[-1, i6, :]))
# Assemble the selected parameter sets into a DataFrame indexed by
# (genotype, repeat, gen) and persist it next to the optimization output.
parameter_columns = [
    "genotype",
    "repeat",
    "gen",
    "tau",
    "sigma",
    "T",
    "p_below",
    "p_above",
]
df = pd.DataFrame(found_parameters, columns=parameter_columns)
# "repeat" and "gen" arrive as Python ints inside mixed rows; pin them
# to int64 before they become index levels.
df = df.astype(dtype={"repeat": "int64", "gen": "int64"}, copy=False)
df.set_index(["genotype", "repeat", "gen"], inplace=True)
df.sort_index(inplace=True)
df.to_hdf(root_path / "found_parameters.h5", key="parameters", complevel=9)
| armin_analysis/model_tests.py | 22,793 | import random for dt in [0.001, 0.002, 0.005, 0.01, 0.1]: tau = 4 Is = np.arange(0, 30, dt) xs = np.empty_like(Is) xs[0] for i in range(1, len(Is)): dx = random.gauss(0.2, 5) - xs[i - 1] xs[i] = xs[i - 1] + dx * dt / tau pl.plot(Is, xs) pl.show() sdfroot_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/scn1lab_NIBR")root_path = Path("/Users/arminbahl/Desktop/mutant_behavior_data/disc1_hetinx") df_extracted_features, df_extracted_binned_features, \ df_extracted_binned_features_same_direction, \ df_extracted_binned_features_heading_angle_change_histograms, \ df_extracted_binned_features_inter_bout_interval_histograms = get_mean_fish_info(df) print(df_extracted_features) pl.plot(df_extracted_features.loc["wt", :]["correctness"]) pl.plot(df_extracted_features.loc["het", :]["correctness"]) pl.plot(df_extracted_features.loc["hom", :]["correctness"]) pl.figure() pl.plot(df_extracted_features.loc["wt", :]["inter_bout_interval"]) pl.plot(df_extracted_features.loc["het", :]["inter_bout_interval"]) pl.plot(df_extracted_features.loc["hom", :]["inter_bout_interval"]) pl.figure() pl.plot(df_extracted_binned_features.loc["wt", 0]) pl.plot(df_extracted_binned_features.loc["wt", 1]) pl.plot(df_extracted_binned_features.loc["wt", 2]) pl.plot(df_extracted_binned_features.loc["wt", 3]) pl.figure() pl.plot(df_extracted_binned_features_same_direction.loc["wt"]) pl.plot(df_extracted_binned_features_same_direction.loc["het"]) pl.plot(df_extracted_binned_features_same_direction.loc["hom"]) pl.figure() pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 0]) pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 1]) pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 2]) pl.plot(df_extracted_binned_features_heading_angle_change_histograms.loc["wt", 3]) pl.show() pl.show() print(df_extracted_features) gg sdf colors = ["000000", "330000", "990000", "CC3333"] for i 
in range(4): pl.plot(target_df_binned_features_heading_angle_change_histograms.loc[i, :].droplevel("stim"), label=f"Coherence {i*25}%", color=colors[i], linewidth=2) pl.xlabel("Heading angle change (deg)") pl.ylabel("Probability") pl.legend() fig = pl.figure() fig.suptitle("Target functions") pl.subplot(211) pl.plot(target_df_correctness_as_function_of_coherence, 'o-', color='black') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(212) pl.plot(target_df_inter_bout_interval_as_function_of_coherence, 'o-', color='black') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)")errornames = ["Mixed"] for i in range(7): F[-1, :, i] = F[-1, :, i] / np.max(F[-1, :, i]) print(F.shape) i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + F[-1, :, 2] + F[-1, :, 3] + F[-1, :, 4] + F[-1, :, 5] + F[-1, :, 6]) print(F[-1, i6, 0]) ddget_decomposition("asf").do(F[-1], [1, 1, 1, 1, 1, 1, 1]).argmin()print(I)sdfsdfX = np.load(root_path / f"leaky_integrator_model2_X_{genotype}_{repeat}_single_error.npy")F = np.load(root_path / f"leaky_integrator_model2_F_{genotype}_{repeat}_single_error.npy") from pymoo.factory import get_decision_making, get_reference_directions ref_dirs = get_reference_directions("das-dennis", 4, n_partitions=12) F = get_problem("dtlz1").pareto_front(ref_dirs) weights = np.array([10.25, 10.25, 0.25, 0.25]) a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F, return_pseudo_weights=True) pl.plot(F[:, 0], F[:,1], 'o') pl.plot(F[a, 0], F[a,1], 'o') pl.show() print(a, pseudo_weights, F.shape) ghjweights = [1000, 1000, 1000, 0, 0, 0, 0]a, pseudo_weights = get_decision_making("pseudo-weights", weights).do(F[-1], return_pseudo_weights=True)print(pseudo_weights[0])print(a, pseudo_weights)dfgpl.hist(F[-1, :, i])pl.show()print(np.percentile(F[-1, :, i], 75))print(np.max(F[-1, :, i]) - np.min(F[-1, :, i])) print(F.shape)i6 = ai1 = np.argmin(F[-1, :, 0]) i2 = np.argmin(F[-1, :, 1]) i3 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500) i4 = 
np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3]) i5 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25) i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25 + F[-1, :, 6]*5800) i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 6800) i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500)from pymoo.factory import get_decision_makingdm = get_decision_making("high-tradeoff")I = dm.do(pf) print(F.shape) np.set_printoptions(precision=4, suppress=True) print((X[-1, i])) gdfgh for error_i in range(len(errornames)): pl.figure() pl.title(errornames[error_i]) bp = pl.boxplot(F[:, :, error_i].T, whis=[5, 95], showfliers=False, medianprops=medianprops) for gen in range(50): sc = pl.scatter([gen+1], [F[gen, :, error_i].min()], s=5, marker='.', c='firebrick') pl.yscale("log") pl.xlabel("Generation") pl.ylabel("Log Error") pl.show() dd pl.figure() pl.title("Compromise between all error functions") error = F[:, :, 0] + F[:, :, 1]*500 + F[:, :, 3] + F[:, :, 5]*0.25 + F[:, :, 6]*500 error = F[:, :, 0] + F[:, :, 1]*2500 + F[:, :, 3]*5 + F[:, :, 5]*0.5 + F[:, :, 6]*1500 bp = pl.boxplot(error.T, whis=[5, 95], showfliers=False, medianprops=medianprops) for gen in range(50): sc = pl.scatter([gen + 1], [error[gen].min()], s=10, marker='.', c='firebrick') pl.yscale("log") pl.xlabel("Generation") pl.ylabel("Log Error") pl.show() pl.figure() pl.scatter(F[-1, :, 0], F[-1, :, 1], s=10, marker='.', c='C0', label='Individual') pl.scatter(F[-1, i1, 0], F[-1, i1, 1], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'") pl.scatter(F[-1, i2, 0], F[-1, i2, 1], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'") pl.scatter(F[-1, i3, 0], F[-1, i3, 1], s=15, marker='o', c='C3', label="Compromise") pl.legend() pl.xlabel(errornames[0]) pl.ylabel(errornames[1]) pl.figure() 
pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500, F[-1, :, 3], s=10, marker='.', c='C0', label='Individual') pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500, F[-1, i1, 3], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'") pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500, F[-1, i2, 3], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'") pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500, F[-1, i3, 3], s=15, marker='o', c='C3', label="Compromise between 1 and 2") pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500, F[-1, i4, 3], s=15, marker='o', c='C4', label="Compromise between all") pl.legend() pl.xlabel("Compromise between 1 and 2") pl.ylabel(errornames[3]) pl.figure() pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3], F[-1, :, 5], s=10, marker='.', c='C0', label='Individual') pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3], F[-1, i1, 5], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'") pl.scatter(F[-1, i2, 0] + F[-1, i2, 1]*500 + F[-1, i2, 3], F[-1, i2, 5], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'") pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3], F[-1, i3, 5], s=15, marker='o', c='C3', label="Compromise between 1 and 2") pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3], F[-1, i4, 5], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3") pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3], F[-1, i5, 5], s=15, marker='o', c='C5', label="Compromise between all") pl.legend() pl.xlabel("Compromise between 1, 2, and 3") pl.ylabel(errornames[5]) pl.figure() pl.scatter(F[-1, :, 0] + F[-1, :, 1]*500 + F[-1, :, 3] + F[-1, :, 5]*0.25, F[-1, :, 6], s=10, marker='.', c='C0', label='Individual') pl.scatter(F[-1, i1, 0] + F[-1, i1, 1]*500 + F[-1, i1, 3] + F[-1, i1, 5]*0.25, F[-1, i1, 6], s=15, marker='o', c='C1', label="Best for 'Correctness as function of coherence'") pl.scatter(F[-1, i2, 0] + F[-1, i2, 
1]*500 + F[-1, i2, 3] + F[-1, i2, 5]*0.25, F[-1, i2, 6], s=15, marker='o', c='C2', label="Best for 'Inter-bout interval as function of coherence'") pl.scatter(F[-1, i3, 0] + F[-1, i3, 1]*500 + F[-1, i3, 3] + F[-1, i3, 5]*0.25, F[-1, i3, 6], s=15, marker='o', c='C3', label="Compromise between 1 and 2") pl.scatter(F[-1, i4, 0] + F[-1, i4, 1]*500 + F[-1, i4, 3] + F[-1, i4, 5]*0.25, F[-1, i4, 6], s=15, marker='o', c='C4', label="Compromise between 1, 2, and 3") pl.scatter(F[-1, i5, 0] + F[-1, i5, 1]*500 + F[-1, i5, 3] + F[-1, i5, 5]*0.25, F[-1, i5, 6], s=15, marker='o', c='C5', label="Compromise between 1, 2, 3, and 4") pl.scatter(F[-1, i6, 0] + F[-1, i6, 1]*500 + F[-1, i6, 3] + F[-1, i6, 5]*0.25, F[-1, i6, 6], s=15, marker='o', c='C6', label="Compromise between all") pl.legend() pl.xlabel("Compromise between 1, 2, 3, and 4") pl.ylabel(errornames[6]) fig = pl.figure() model_df_correctness_as_function_of_coherence, \ model_df_inter_bout_interval_as_function_of_coherence, \ model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i1]) fig.suptitle("Best for 'Correctness as function of coherence'") pl.subplot(211) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C1') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(212) pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C1') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") fig = pl.figure() model_df_correctness_as_function_of_coherence, \ model_df_inter_bout_interval_as_function_of_coherence, \ 
model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i2]) fig.suptitle("Best for 'Inter-bout interval as function of coherence'") pl.subplot(211) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C2') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(212) pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C2') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") fig = pl.figure() model_df_correctness_as_function_of_coherence, \ model_df_inter_bout_interval_as_function_of_coherence, \ model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i3]) fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'") pl.subplot(211) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(212) pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") fig = pl.figure() model_df_correctness_as_function_of_coherence, \ 
model_df_inter_bout_interval_as_function_of_coherence, \ model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i3]) fig.suptitle("Compromise between 'Correctness and inter-bout interval as function of coherence'") pl.subplot(221) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C3') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(222) pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C3') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") pl.subplot(223) for i in range(4): pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black') pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C3') pl.xlabel("Correctness (%)") pl.ylabel("Time (s)") fig = pl.figure() model_df_correctness_as_function_of_coherence, \ model_df_inter_bout_interval_as_function_of_coherence, \ model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i4]) fig.suptitle("Compromise between all three error functions") pl.subplot(221) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C4') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(222) pl.plot([0, 25, 50, 100], 
target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C4') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") pl.subplot(223) for i in range(4): pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black') pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C4') pl.xlabel("Correctness (%)") pl.ylabel("Time (s)") fig = pl.figure() model_df_correctness_as_function_of_coherence, \ model_df_inter_bout_interval_as_function_of_coherence, \ model_df_binned_correctness, \ model_df_binned_same_direction, \ model_df_binned_features_heading_angle_change_histograms, \ model_df_binned_features_inter_bout_interval_histograms, \ model_df_gmm_fitting_results = get_model_result(X[-1, i5]) fig.suptitle("Compromise between all four error functions") pl.subplot(221) pl.plot([0, 25, 50, 100], target_df_correctness_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_correctness_as_function_of_coherence.values, 'o--', color='C5') pl.xlabel("Coherence (%)") pl.ylabel("Correctness (%)") pl.subplot(222) pl.plot([0, 25, 50, 100], target_df_inter_bout_interval_as_function_of_coherence.values, 'o-', color='black') pl.plot([0, 25, 50, 100], model_df_inter_bout_interval_as_function_of_coherence.values, 'o--', color='C5') pl.xlabel("Coherence (%)") pl.ylabel("Inter-bout interval (s)") pl.subplot(223) for i in range(4): pl.plot(target_df_binned_correctness.loc[i, :].droplevel("stim"), 'o-', color='black') pl.plot(model_df_binned_correctness.loc[i, :].droplevel("stim"), 'o--', color='C5') pl.xlabel("Correctness (%)") pl.ylabel("Time (s)") pl.subplot(224) pl.plot(target_df_binned_same_direction, 'o-', color='black') pl.plot(model_df_binned_same_direction, 'o--', color='C5') pl.xlabel("Time since last bout (s)") pl.ylabel("Correctness (%)") pl.subplot(235) 
pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_left"].values, '-o', color='black', label='s_left') pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_center"].values, '-o', color='black', label='s_center') pl.plot(target_df_gmm_fitting_results.index*25, target_df_gmm_fitting_results["w_right"].values, '-o', color='black', label='s_right') pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_left"].values, '--o', color='C6', label='s_left') pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_center"].values, '--o', color='C6', label='s_center') pl.plot(model_df_gmm_fitting_results.index*25, model_df_gmm_fitting_results["w_right"].values, '--o', color='C6', label='s_right') pl.xlabel("Coherence (%)") pl.ylabel("Weight") pl.legend()F[-1, :, i] = F[-1, :, i] / np.median(F[-1, :, i])i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] + 5 * F[-1, :, 3] + F[-1, :, 5] + 5 * F[-1, :, 6])i6 = np.argmin(F[-1, :, 0] + 5 * F[-1, :, 1] + 20 * F[-1, :, 4] + F[-1, :, 5] + 5 * F[-1, :, 6])i6 = np.argmin(F[-1, :, 0] + 2 * F[-1, :, 1] + F[-1, :, 2] + 3 * F[-1, :, 3] + F[-1, :, 5] + F[-1, :, 6])i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 500 + F[-1, :, 3] + F[-1, :, 5] * 0.25 + F[-1, :, 6] * 500)i6 = np.argmin(F[-1, :, 0] + F[-1, :, 1] * 2500 + F[-1, :, 3] * 5 + F[-1, :, 5] * 0.5 + F[-1, :, 6] * 1500)i6 = np.argmin(F[-1, :, 0]*500 + F[-1, :, 1]*2500 + F[-1, :, 3]*50 + F[-1, :, 5]*0.5 + F[-1, :, 6]*4500) | 17,476 | en | 0.284999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.