130649
from typing import Dict, Union, Optional
from kgx.utils.infores import InfoResContext
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
log = get_logger()
class Source(object):
"""
A Source is responsible for reading data as records
from a store where the store is a file or a database.
"""
def __init__(self):
self.graph_metadata: Dict = {}
self.node_filters = {}
self.edge_filters = {}
self.node_properties = set()
self.edge_properties = set()
self.prefix_manager = PrefixManager()
self.infores_context: Optional[InfoResContext] = None
def set_prefix_map(self, m: Dict) -> None:
"""
Update default prefix map.
Parameters
----------
m: Dict
A dictionary with prefix to IRI mappings
"""
self.prefix_manager.update_prefix_map(m)
def check_node_filter(self, node: Dict) -> bool:
"""
Check if a node passes defined node filters.
Parameters
----------
node: Dict
A node
Returns
-------
bool
Whether the given node has passed all defined node filters
"""
pass_filter = False
if self.node_filters:
for k, v in self.node_filters.items():
if k in node:
# filter key exists in node
if isinstance(v, (list, set, tuple)):
if any(x in node[k] for x in v):
pass_filter = True
else:
return False
elif isinstance(v, str):
if node[k] == v:
pass_filter = True
else:
return False
else:
log.error(f"Unexpected {k} node filter of type {type(v)}")
return False
else:
# filter key does not exist in node
return False
else:
# no node filters defined
pass_filter = True
return pass_filter
def check_edge_filter(self, edge: Dict) -> bool:
"""
Check if an edge passes defined edge filters.
Parameters
----------
edge: Dict
An edge
Returns
-------
bool
Whether the given edge has passed all defined edge filters
"""
pass_filter = False
if self.edge_filters:
for k, v in self.edge_filters.items():
if k in {"subject_category", "object_category"}:
pass_filter = True
continue
if k in edge:
# filter key exists in edge
if isinstance(v, (list, set, tuple)):
if any(x in edge[k] for x in v):
pass_filter = True
else:
return False
elif isinstance(v, str):
if edge[k] == v:
pass_filter = True
else:
return False
else:
log.error(f"Unexpected {k} edge filter of type {type(v)}")
return False
else:
# filter does not exist in edge
return False
else:
# no edge filters defined
pass_filter = True
return pass_filter
def set_node_filter(self, key: str, value: Union[str, set]) -> None:
"""
Set a node filter, as defined by a key and value pair.
These filters are used to filter (or reduce) the
search space when fetching nodes from the underlying store.
.. note::
When defining the 'category' filter, the value should be of type ``set``.
This method also sets the 'subject_category' and 'object_category'
edge filters, to get a consistent set of nodes in the subgraph.
Parameters
----------
key: str
The key for node filter
value: Union[str, set]
The value for the node filter.
Can be either a string or a set.
"""
if key == "category":
if isinstance(value, set):
if "subject_category" in self.edge_filters:
self.edge_filters["subject_category"].update(value)
else:
self.edge_filters["subject_category"] = value
if "object_category" in self.edge_filters:
self.edge_filters["object_category"].update(value)
else:
self.edge_filters["object_category"] = value
else:
raise TypeError(
"'category' node filter should have a value of type 'set'"
)
if key in self.node_filters:
self.node_filters[key].update(value)
else:
self.node_filters[key] = value
def set_node_filters(self, filters: Dict) -> None:
"""
Set node filters.
Parameters
----------
filters: Dict
Node filters
"""
if filters:
for k, v in filters.items():
if isinstance(v, (list, set, tuple)):
self.set_node_filter(k, set(v))
else:
self.set_node_filter(k, v)
def set_edge_filters(self, filters: Dict) -> None:
"""
Set edge filters.
Parameters
----------
filters: Dict
Edge filters
"""
if filters:
for k, v in filters.items():
if isinstance(v, (list, set, tuple)):
self.set_edge_filter(k, set(v))
else:
self.set_edge_filter(k, v)
def set_edge_filter(self, key: str, value: set) -> None:
"""
Set an edge filter, as defined by a key and value pair.
These filters are used to filter (or reduce) the
search space when fetching nodes from the underlying store.
.. note::
When defining the 'subject_category' or 'object_category' filter,
the value should be of type ``set``.
This method also sets the 'category' node filter, to get a
consistent set of nodes in the subgraph.
Parameters
----------
key: str
The key for edge filter
value: Union[str, set]
The value for the edge filter.
Can be either a string or a set.
"""
if key in {"subject_category", "object_category"}:
if isinstance(value, set):
if "category" in self.node_filters:
self.node_filters["category"].update(value)
else:
self.node_filters["category"] = value
else:
raise TypeError(
f"'{key}' edge filter should have a value of type 'set'"
)
if key in self.edge_filters:
self.edge_filters[key].update(value)
else:
self.edge_filters[key] = value
def clear_graph_metadata(self):
"""
        Clears a Source graph's internal graph_metadata. The value of such graph metadata is (now)
        generally a Callable function. This operation can be used when the metadata is
        no longer needed, but retaining it may cause peculiar Python object persistence problems downstream.
"""
self.infores_context = None
def set_provenance_map(self, kwargs):
"""
Set up a provenance (Knowledge Source to InfoRes) map
"""
self.infores_context = InfoResContext()
self.infores_context.set_provenance_map(kwargs)
def get_infores_catalog(self) -> Dict[str, str]:
"""
Return the InfoRes Context of the source
"""
if not self.infores_context:
return dict()
return self.infores_context.get_catalog()
def set_node_provenance(self, node_data):
"""
Set a specific node provenance value.
"""
self.infores_context.set_node_provenance(node_data)
def set_edge_provenance(self, edge_data):
"""
Set a specific edge provenance value.
"""
self.infores_context.set_edge_provenance(edge_data)
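# Hedged usage sketch for the filter API above; the node and edge dicts are
# illustrative stand-ins for records a concrete Source would read from a store.
if __name__ == "__main__":
    src = Source()
    # Setting the 'category' node filter also registers matching
    # 'subject_category'/'object_category' edge filters.
    src.set_node_filter("category", {"biolink:Gene"})
    print(src.check_node_filter({"id": "HGNC:11603", "category": ["biolink:Gene"]}))      # True
    print(src.check_node_filter({"id": "MONDO:0005148", "category": ["biolink:Disease"]}))  # False
    print(src.check_edge_filter({"subject": "a", "object": "b"}))  # True: category filters are enforced on nodes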
130652
from io import BytesIO
from nbtlib import CompoundSchema, File, schema
from nbtlib.tag import (
INT,
Byte,
Float,
Int,
List,
Long,
String,
read_numeric,
write_numeric,
)
# fmt: off
BedrockLevelData = schema("BedrockLevelData", {
"CenterMapsToOrigin": Byte,
"Difficulty": Int,
"FlatWorldLayers": String,
"ForceGameType": Byte,
"GameType": Int,
"Generator": Int,
"InventoryVersion": String,
"LANBroadcast": Byte,
"LastPlayed": Long,
"LevelName": String,
"LimitedWorldOriginX": Int,
"LimitedWorldOriginY": Int,
"LimitedWorldOriginZ": Int,
"MultiplayerGame": Byte,
"NetherScale": Int,
"NetworkVersion": Int,
"Platform": Int,
"PlatformBroadcast": Byte,
"PlatformBroadcastMode": Int,
"RandomSeed": Long,
"SpawnX": Int,
"SpawnY": Int,
"SpawnZ": Int,
"StorageVersion": Int,
"Time": Long,
"XBLBroadcast": Byte,
"XBLBroadcastIntent": Byte,
"XBLBroadcastMode": Int,
"abilities": schema("Abilities", {
"attackmobs": Byte,
"attackplayers": Byte,
"buildandmine": Byte,
"doorsandswitches": Byte,
"flySpeed": Float,
"flying": Byte,
"instabuild": Byte,
"invulnerable": Byte,
"lightning": Byte,
"mayfly": Byte,
"op": Byte,
"opencontainers": Byte,
"permissionsLevel": Int,
"playerPermissionsLevel": Int,
"teleport": Byte,
"walkSpeed": Float,
}),
"bonusChestEnabled": Byte,
"bonusChestSpawned": Byte,
"commandblockoutput": Byte,
"commandsEnabled": Byte,
"currentTick": Long,
"dodaylightcycle": Byte,
"doentitydrops": Byte,
"dofiretick": Byte,
"domobloot": Byte,
"domobspawning": Byte,
"dotiledrops": Byte,
"doweathercycle": Byte,
"drowningdamage": Byte,
"eduLevel": Byte,
"educationFeaturesEnabled": Byte,
"experimentalgameplay": Byte,
"falldamage": Byte,
"firedamage": Byte,
"hasBeenLoadedInCreative": Byte,
"hasLockedBehaviorPack": Byte,
"hasLockedResourcePack": Byte,
"immutableWorld": Byte,
"isFromLockedTemplate": Byte,
"keepinventory": Byte,
"lastOpenedWithVersion": List[Int],
"lightningLevel": Float,
"lightningTime": Int,
"maxcommandchainlength": Int,
"mobgriefing": Byte,
"naturalregeneration": Byte,
"prid": String,
"pvp": Byte,
"rainLevel": Float,
"rainTime": Int,
"sendcommandfeedback": Byte,
"serverChunkTickRange": Int,
"showcoordinates": Byte,
"spawnMobs": Byte,
"startWithMapEnabled": Byte,
"texturePacksRequired": Byte,
"tntexplodes": Byte,
"worldStartCount": Long,
})
# fmt: on
class BedrockLevelFile(File, CompoundSchema):
schema = {"": BedrockLevelData}
def __init__(
self, level_data=None, version=8, *, gzipped=False, byteorder="little"
):
super().__init__({"": level_data or {}}, gzipped=gzipped, byteorder=byteorder)
self.version = version
@classmethod
def parse(cls, buff, byteorder="little"):
version = read_numeric(INT, buff, byteorder)
_length = read_numeric(INT, buff, byteorder)
self = super().parse(buff, byteorder)
self.version = version
return self
def write(self, buff, byteorder="little"):
tmp = BytesIO()
super().write(tmp, byteorder)
tmp.seek(0)
data = tmp.read()
write_numeric(INT, self.version, buff, byteorder)
write_numeric(INT, len(data), buff, byteorder)
buff.write(data)
@classmethod
def from_buffer(cls, buff, byteorder="little"):
return super().from_buffer(buff, byteorder)
@classmethod
def load(cls, filename, gzipped=False, byteorder="little"):
return super().load(filename, gzipped, byteorder)
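# Hedged usage sketch: reading a Bedrock level.dat (file name illustrative).
# Bedrock level files are uncompressed little-endian NBT preceded by the
# version/length header that `parse` and `write` above handle.
if __name__ == "__main__":
    level = BedrockLevelFile.load("level.dat")
    print(level.version, level[""]["LevelName"])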
130658
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class TemporalVariableTransformer(BaseEstimator, TransformerMixin):
# Temporal elapsed time transformer
def __init__(self, variables, reference_variable):
if not isinstance(variables, list):
raise ValueError('variables should be a list')
self.variables = variables
self.reference_variable = reference_variable
def fit(self, X, y=None):
# we need this step to fit the sklearn pipeline
return self
def transform(self, X):
# so that we do not over-write the original dataframe
X = X.copy()
for feature in self.variables:
X[feature] = X[self.reference_variable] - X[feature]
return X
# categorical missing value imputer
class Mapper(BaseEstimator, TransformerMixin):
def __init__(self, variables, mappings):
if not isinstance(variables, list):
raise ValueError('variables should be a list')
self.variables = variables
self.mappings = mappings
def fit(self, X, y=None):
        # we need the fit statement to accommodate the sklearn pipeline
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature] = X[feature].map(self.mappings)
return X
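# Hedged usage sketch with illustrative column names and mappings:
if __name__ == "__main__":
    from sklearn.pipeline import Pipeline

    df = pd.DataFrame({
        "YrSold": [2008, 2010],
        "YearBuilt": [1990, 2005],
        "Quality": ["good", "poor"],
    })
    pipe = Pipeline([
        ("elapsed", TemporalVariableTransformer(variables=["YearBuilt"], reference_variable="YrSold")),
        ("quality_map", Mapper(variables=["Quality"], mappings={"poor": 0, "good": 1})),
    ])
    print(pipe.fit_transform(df))  # YearBuilt becomes elapsed years; Quality becomes 1/0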
130748
from ast import literal_eval
import copy
import yaml
import numpy as np
import os
import argparse
class AttrDict(dict):
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
__C = AttrDict()
cfg = __C
# --------------------------------------------------------------------------- #
# general options
# --------------------------------------------------------------------------- #
##OS options
__C.DATA_DIRECTORY = "parsed_dataset-p1"
__C.OUTPUT_DIRECTORY = "output"
__C.EXP_NAME = 'p1-run-1'
__C.OUTPUT_FILE_NAME = "predict.json"
__C.MODE = "train"
## Dataset options
__C.SPLIT = "test"  # NOT USED
__C.GENERATE_VOCABULARIES = False
__C.LOAD_VOCABULARIES = False
__C.INPUT_VOCAB_PATH = ""
__C.TARGET_VOCAB_PATH = ""
__C.INIT_WRD_EMB_FROM_FILE = False
__C.WRD_EMB_INIT_FILE = ''
# --------------------------------------------------------------------------- #
# model options
# --------------------------------------------------------------------------- #
## Command Encoder
__C.CMD_D_EMBED = 32
__C.CMD_D_ENC = 64
__C.CMD_D_H = 64 # Same as ENC_DIM
## Situation Encoder (LGCN)
__C.SITU_D_FEAT = 64 # 3 * D_CNN_OUTPUT if CNN then LGCN
__C.SITU_D_CTX = 64 # 512
__C.SITU_D_CMD = 64 # 512
__C.SITU_D_CNN_OUTPUT = 64
#1
## Decoder
__C.DEC_D_H = 64
__C.DEC_NUM_LAYER = 1
__C.DEC_CONDITIONAL_ATTENTION = True
__C.H_FEAT = 14
__C.W_FEAT = 14
__C.D_FEAT = 1152 # 1024+128
__C.T_ENCODER = 45
__C.ADD_POS_ENC = True
__C.PE_DIM = 128
__C.PE_SCALE = 1.
__C.MSG_ITER_NUM = 4
__C.STEM_NORMALIZE = True
__C.STEM_LINEAR = True
__C.STEM_CNN = False
__C.STEM_CNN_DIM = 512
__C.STEM_RENORMALIZE = False
# __C.WRD_EMB_DIM = 300
__C.WRD_EMB_FIXED = False
# __C.ENC_DIM = 512
__C.CMD_DIM = 512
__C.CMD_INPUT_ACT = 'ELU'
__C.CTX_DIM = 512
__C.OUT_QUESTION_MUL = True
__C.OUT_CLASSIFIER_DIM = 512
__C.USE_EMA = True
__C.EMA_DECAY_RATE = 0.999
# Dropouts
__C.encInputDropout = 0.8
__C.locDropout = 1.
__C.cmdDropout = 0.92
__C.memoryDropout = 0.85
__C.readDropout = 0.85
__C.outputDropout = 0.85
__C.decoderDropout = 0.85
__C.MASK_PADUNK_IN_LOGITS = True
__C.BUILD_VQA = True
__C.BUILD_REF = False
# CLEVR-Ref configs
__C.BBOX_IOU_THRESH = .5
__C.IMG_H = 320 # size in loc
__C.IMG_W = 480 # size in loc
# Loss option
__C.AUXILIARY_TASK = False
# --------------------------------------------------------------------------- #
# training options
# --------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
__C.TRAIN.BATCH_SIZE = 200
__C.VAL_BATCH_SIZE = 4000
__C.TRAIN.START_EPOCH = 0
__C.TRAIN.CLIP_GRADIENTS = True
__C.TRAIN.GRAD_MAX_NORM = 8.
__C.TRAIN.SOLVER = AttrDict()
# __C.TRAIN.SOLVER.LR = 3e-4
__C.TRAIN.SOLVER.LR = 8e-4
__C.TRAIN.SOLVER.LR_DECAY = 0.9
__C.TRAIN.SOLVER.ADAM_BETA1 = 0.9
__C.TRAIN.SOLVER.ADAM_BETA2 = 0.999
__C.TRAIN.SOLVER.LR_DECAY_STEP = 20000
__C.TRAIN.MAX_EPOCH = 100
__C.TRAIN.RUN_EVAL = True
__C.TRAIN.USE_MULTI_GPU = True
__C.PRINT_EVERY = 100
__C.EVALUATE_EVERY = 10
__C.SAVE_EVERY = 20
#GSCAN Specific
__C.TRAIN.K = 0
__C.TRAIN.WEIGHT_TARGET_LOSS = 0.3 # change only when auxiliary is used
# --------------------------------------------------------------------------- #
# test options
# --------------------------------------------------------------------------- #
__C.TEST = AttrDict()
__C.TEST.SPLIT = ""  # TODO: test split not initialized yet
__C.TEST.MAX_DECODING_STEP = 30
__C.TEST.BATCH_SIZE = 1
__C.TEST.EPOCH = -1 # Needs to be supplied
__C.TEST.DUMP_PRED = False
__C.TEST.RESULT_DIR = './exp_clevr/results/%s/%04d'
__C.TEST.NUM_VIS = 0
__C.TEST.VIS_DIR_PREFIX = 'vis'
__C.TEST.VIS_FILTER_EDGE = True
__C.TEST.VIS_EDGE_SCALE = 1.
__C.TEST.VIS_FINAL_REL_TH = .025
__C.TEST.VIS_FINAL_ABS_TH = .025
__C.TEST.VIS_MSG_TH = .1
# --------------------------------------------------------------------------- #
# post-processing configs after loading
# --------------------------------------------------------------------------- #
def _postprocess_cfg():  # NoQA
    # `GPUS` is only set via a config file or command-line override, so guard
    # the normalization to avoid an AttributeError when it is absent.
    if 'GPUS' in __C:
        __C.GPUS = __C.GPUS.replace(' ', '').replace('(', '').replace(')', '')
    assert __C.EXP_NAME != '<fill-with-filename>', 'EXP_NAME must be specified'
# --------------------------------------------------------------------------- #
def build_cfg_from_argparse(args_list=None):
"""Load config with command line options (`--cfg` and a list of options)"""
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args(args_list)
if args.cfg:
_merge_cfg_from_file(args.cfg)
if args.opts:
_merge_cfg_from_list(args.opts)
_postprocess_cfg()
return __C
def _merge_cfg_from_file(cfg_filename):
"""Load a yaml config file and merge it into the global config."""
with open(cfg_filename, 'r') as f:
        yaml_cfg = yaml.safe_load(f)
if yaml_cfg is not None:
_merge_a_into_b(AttrDict(yaml_cfg), __C)
if __C.EXP_NAME == '<fill-with-filename>':
__C.EXP_NAME = os.path.basename(cfg_filename).replace('.yaml', '')
def _merge_cfg_from_cfg(cfg_other):
"""Merge `cfg_other` into the global config."""
_merge_a_into_b(cfg_other, __C)
def _merge_cfg_from_list(cfg_list):
"""Merge config keys, values in a list (e.g., from command line) into the
global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
"""
assert len(cfg_list) % 2 == 0
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = full_key.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d, 'Non-existent key: {}'.format(full_key)
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'Non-existent key: {}'.format(full_key)
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(
value, d[subkey], subkey, full_key
)
d[subkey] = value
def _merge_a_into_b(a, b, stack=None):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
for k, v_ in a.items():
full_key = '.'.join(stack) + '.' + k if stack is not None else k
# a must specify keys that are in b
if k not in b:
raise KeyError('Non-existent config key: {}'.format(full_key))
v = copy.deepcopy(v_)
v = _decode_cfg_value(v)
v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
# Recursively merge dicts
if isinstance(v, AttrDict):
            stack_push = [k] if stack is None else stack + [k]
            _merge_a_into_b(v, b[k], stack=stack_push)
else:
b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, str):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
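# Hedged usage sketch: load a YAML experiment config and override one key from
# the command line (the file path and override are illustrative):
if __name__ == "__main__":
    cfg_loaded = build_cfg_from_argparse(["--cfg", "configs/p1-run-1.yaml", "TRAIN.BATCH_SIZE", "100"])
    print(cfg_loaded.TRAIN.BATCH_SIZE)  # -> 100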
130774
import numpy as np
def find_closest_quads(_point, _quadlist, _n_closest):
    d_sq_list = [None] * len(_quadlist)  # distances from the point to each quad's centroid
    for i in range(len(_quadlist)):  # iterate over all quads
d_sq = _quadlist[i].measure_centroid_distance_squared(_point) # distance to centroid
d_sq_list[i] = d_sq
idx_list = sorted(range(len(d_sq_list)), key=lambda k: d_sq_list[k])
n_closest_idx = idx_list[:_n_closest]
return n_closest_idx
def create_parameters(verts, quads):
param = []
N_closest_candidates = 4 # compute list of N_closest_candidates closest quads
    vertex_count = 0
    vertices_total = len(verts['fine'])
    for vertex in verts['fine']:
        vertex_count += 1
        if vertex_count % ((vertices_total + 100) // 100) == 0:
            print("%d %%: projecting vertex %d of %d..." % (100 * vertex_count // vertices_total, vertex_count, vertices_total))
closest_idx_candidates = find_closest_quads(vertex, quads['coarse'], N_closest_candidates) # find N closest quads with fast criterion: distance to centroid
distance_min = np.inf
for candidate_idx in closest_idx_candidates: # iterate over all candidates from coarse criterion
projected_point, distance, u, v = \
quads['coarse'][candidate_idx].projection_onto_quad(vertex) # find closest quad with fine criterion: projection onto quad
            if abs(distance) < distance_min:  # if this candidate is closer than every previous candidate, it becomes the new reference
distance_min = abs(distance)
u_min = u
v_min = v
idx_min = candidate_idx
param.append([idx_min,u_min,v_min])
return np.array(param)
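# Hedged sketch of `find_closest_quads` with a minimal stand-in quad class;
# `_CentroidQuad` is hypothetical and only provides the one method the
# coarse centroid-distance search needs.
class _CentroidQuad:
    def __init__(self, centroid):
        self.centroid = np.asarray(centroid, dtype=float)

    def measure_centroid_distance_squared(self, point):
        diff = self.centroid - np.asarray(point, dtype=float)
        return float(diff @ diff)

if __name__ == "__main__":
    quads = [_CentroidQuad(c) for c in [(0, 0, 0), (1, 0, 0), (5, 5, 0)]]
    print(find_closest_quads(np.array([0.9, 0.1, 0.0]), quads, 2))  # -> [1, 0]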
130775
import json
from typing import List
from injector import inject
from sqlalchemy import text
from infrastructure.dependency.scopes import IScoped
from infrastructure.json.JsonConvert import JsonConvert
from models.configs.ApplicationConfig import ApplicationConfig
@JsonConvert.register
class Pagination:
def __init__(self,
Filter: str = None,
Page: int = None,
PageUrl: str = None,
Limit: int = None,
TotalPage: int = None,
TotalCount: int = None
):
self.Filter: str = Filter
self.Page: int = Page
self.PageUrl: str = PageUrl
self.Limit: int = Limit
self.TotalPage: int = TotalPage
self.TotalCount: int = TotalCount
class HtmlTemplateService(IScoped):
@inject
def __init__(self,
application_config: ApplicationConfig
):
self.application_config: ApplicationConfig = application_config
@property
def default_css(self):
pagination_css = '''
.pagination {
display: table;
margin: 0 auto;
padding: 20px;
}
.pagination a {
color: black;
float: left;
padding: 8px 16px;
text-decoration: none;
transition: background-color .3s;
border: 1px solid #ddd;
}
.pagination a.active {
background-color: #4CAF50;
color: white;
border: 1px solid #4CAF50;
}
.pagination a:hover:not(.active) {background-color: #ddd;}
'''
return '''
.wrapper{
margin: 0 auto;
padding: 20px;
}
.container600 {
width: 300px;
max-width: 100%;
}
@media all and (max-width: 600px) {
.container600 {
width: 100% !important;
}
}
.col49 {
width: 49%;
}
.col2 {
width: 2%;
}
.col50 {
width: 50%;
}
@media all and (max-width: 599px) {
.fluid {
width: 100% !important;
}
.reorder {
width: 100% !important;
margin: 0 auto 10px;
}
.ghost-column {
display:none;
height:0;
width:0;
overflow:hidden;
max-height:0;
max-width:0;
}
}
.pdi-column{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 12px;
line-height:10px;
}
.pdi-row{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 10px;
line-height:10px;
}
.row-nowrap{
white-space: nowrap;
}
table {
border-collapse: collapse;
width: 100%;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f2f2f2;}
ul.breadcrumb {
padding: 10px 16px;
list-style: none;
background-color: #eee;
}
ul.breadcrumb li {
display: inline;
font-size: 18px;
}
ul.breadcrumb li+li:before {
padding: 8px;
color: black;
            content: "/\00a0";
}
ul.breadcrumb li a {
color: #0275d8;
text-decoration: none;
}
ul.breadcrumb li a:hover {
color: #01447e;
text-decoration: underline;
}
''' + pagination_css
def mail_html_template(self, body, mail_css=None):
css = mail_css if mail_css is not None else self.default_css
template = f'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title></title>
<style>{css}</style>
</head>
<body>
{body}
</body>
</html>
'''
return template
    def get_nullable_dict_value(self, source, key):
        if key in source:
            return source[key]
        return None

    def get_dict_value(self, source, key):
        if key in source and source[key] is not None:
            return source[key]
        return ''
def prepare_table_data_dynamic(self, query, headers, prepare_row, sortable=None, pagination: Pagination = None):
if sortable is not None:
query = query.order_by(text(sortable))
pagination_json = None
if pagination is not None:
total_count = query.count()
if pagination.Limit is None or pagination.Limit < 1 or pagination.Limit > 200:
pagination.Limit = 50
total_page = int(total_count / pagination.Limit) + 1
if pagination.Page is None or pagination.Page < 1 or total_page < pagination.Page:
pagination.Page = 1
if pagination.Limit:
query = query.limit(pagination.Limit)
if pagination.Page:
offset = (pagination.Page - 1) * pagination.Limit
if offset is None or offset <= 0:
offset = 0
query = query.offset(offset)
            pagination_json = {'PageUrl': pagination.PageUrl, 'PageNumber': pagination.Page, 'Limit': pagination.Limit,
                               'Count': total_count, 'TotalPage': total_page, 'Filter': pagination.Filter}
rows = []
for data in query:
row = prepare_row(data)
rows.append(row)
return {'columns': headers, 'rows': rows,
'pagination': pagination_json}
def render_table(self, source, width=None):
columns: List[str] = self.get_nullable_dict_value(source, 'columns')
rows: List[str] = self.get_nullable_dict_value(source, 'rows')
pagination_json = self.get_nullable_dict_value(source, 'pagination')
headers = ''
headers = headers + f'<th scope="col" class="pdi-column">#</th>'
for column in columns:
column_style = self.get_dict_value(column, 'style')
column_class = self.get_dict_value(column, 'class')
column_value = self.get_dict_value(column, 'value')
headers = headers + f'<th scope="col" style="{column_style}" class="pdi-column {column_class}">{column_value}</th>'
bodies = ''
index = 0
for row in rows:
bodies = bodies + '<tr>'
index = index + 1
bodies = bodies + f'<td valign="top" class="pdi-row ">{index}</td>'
for data in row['data']:
row_style = self.get_dict_value(data, 'style')
row_class = self.get_dict_value(data, 'class')
row_value = self.get_dict_value(data, 'value')
bodies = bodies + f'<td valign="top" style="{row_style}" class="pdi-row {row_class}">{row_value}</td>'
bodies = bodies + '</tr>'
table_width = width if width is not None else '100%'
pagination_html = ''
if pagination_json is not None:
page_data = ""
# JsonConvert.register(Pagination)
pagination = JsonConvert.FromJSON(json.dumps(pagination_json))
# TotalPage = self.get_nullable_dict_value(pagination, 'TotalPage')
for page in range(1, pagination.TotalPage + 1):
                filter = pagination.Filter if pagination.Filter is not None and pagination.Filter != '' else ''
page_url = pagination.PageUrl.format(f'?PageNumber={page}&Limit={pagination.Limit}&Filter={filter}')
if page == pagination.PageNumber:
page_data = f'{page_data}<a href="{page_url}" class="active">{page}</a>'
else:
page_data = f'{page_data}<a href="{page_url}" >{page}</a>'
pagination_html = f'''
<div class="pagination">
{page_data}
</div>
'''
table = f'''
<table width="{table_width}" cellpadding="0" cellspacing="0" style="min-width:100%;">
<thead>
{headers}
</thead>
<tbody>
{bodies}
</tbody>
</table>
{pagination_html}
'''
return table
def render_html(self,
content,
):
body_content = f'''
<div class="wrapper">
<div class="crumb">
<ul class="breadcrumb">
<li><a href="/Home">Home</a></li>
<li><a href="/Connection">Connections</a></li>
<li><a href="/DataOperation">DataOperations</a></li>
<li><a href="/DataOperation/Job">Jobs</a></li>
<li><a href="/DataOperation/Job/Execution">Executions</a></li>
<li><a href="/documentation">Documentation (Swagger UI)</a></li>
</ul>
</div>
{content}
</div>
'''
mail_body = self.mail_html_template(body_content)
return mail_body
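# Hedged usage sketch: render a small two-column table without pagination.
# `ApplicationConfig` is only stored by the constructor, so passing None is
# enough for this sketch.
if __name__ == "__main__":
    service = HtmlTemplateService(application_config=None)
    source = {
        'columns': [{'value': 'Job'}, {'value': 'Status'}],
        'rows': [{'data': [{'value': 'job-1'}, {'value': 'ok'}]}],
        'pagination': None,
    }
    print(service.render_html(service.render_table(source)))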
130809
def set_wait_for_kernel_finish(wait_for_kernel_finish: bool = None):
from ._program import OCLProgram
OCLProgram._wait_for_kernel_finish = wait_for_kernel_finish
130825
from acacia import git, utils
import os, sys
import plac
from pprint import pprint
from datetime import datetime
GIT_CACHE='/tmp/git-cache-3'
def extract_timing_data(commit_id, repo_url, verbose=False, git_cache=GIT_CACHE):
if not os.path.exists(git_cache):
print('Folder ' + git_cache + ' must exist!')
return None
# ensure the repository is available locally
git.clone_repo(repo_url, output_folder=GIT_CACHE, skip_existing=True)
cwd = os.path.join(git_cache, git.folder_name_from_url(repo_url))
# get tag info
tag = utils.execute('git tag --sort=taggerdate --contains ' + commit_id, cwd=cwd)[0]
if tag != '':
tag_date = utils.execute('git show -s --format="%at" ' + tag + '^{commit}', cwd=cwd)[0][1:-1]
else:
tag_date='0'
try:
commit_date = utils.execute('git show -s --format="%ct" ' + commit_id, cwd=cwd)[0][1:-1]
time_delta = int(tag_date) - int(commit_date)
    except Exception:
commit_date = '0'
time_delta = 0
# print("exception:", commit_id, repo_url, commit_date, tag_date)
if verbose:
print('repository: ' + repo_url)
print('commit: ' + commit_id)
print('commit_date: ' + commit_date)
print(' ' + datetime.utcfromtimestamp(int(commit_date)).strftime('%Y-%m-%d %H:%M:%S'))
print('tag: ' + tag)
print('tag_date: ' + tag_date)
print(' ' + datetime.utcfromtimestamp(int(tag_date)).strftime('%Y-%m-%d %H:%M:%S'))
print('Commit-to-release interval: {0:.2f} days'.format( time_delta/(3600 * 24) ))
result = (tag, tag_date, commit_date, time_delta )
print(result)
return ( result )
# (help, kind, abbrev, type, choices, metavar)
@plac.annotations(
repo_url=("Repository", "positional", None, str, None, 'REPOSITORY'),
commit_id=("Commit", "positional", None, str, None, 'COMMIT'),
verbose=("Verbose", "flag", 'v', bool),
git_cache= ("Git repository dir", "option", 'g', str, None, 'REPO_DIR')
)
def main(repo_url, commit_id, verbose=False, git_cache=GIT_CACHE):
return extract_timing_data(commit_id, repo_url, verbose, git_cache)
if __name__ == '__main__':
    plac.call(main)
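# Hedged example invocation (module name, repository URL, and commit are
# illustrative; the git cache folder /tmp/git-cache-3 must already exist):
#   python extract_timing_data.py https://github.com/org/project.git 3f2a1bc -v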
130836
from typing import Dict
from PIL import Image, ImageDraw, ImageFont
from fontTools.ttLib import TTFont
class AnimText:
font_array = [
# AA-Like > Pixel > Generic
# AA-like, Latin, hiragana, katakana, (part of) cyrillic
{'path': './assets/igiari/Igiari.ttf'},
# Pixel, Kanji, Hiragana, Katakana
{'path':'./assets/igiari/jackeyfont.ttf'},
# Arabic
{'path':'./assets/igiari/arabic-1.ttf', 'size': 12},
# Pixel-font, Hebrew
{'path':'./assets/igiari/STANRG__.ttf'},
# Generic
{'path':'./assets/igiari/NotoSans-Regular.ttf'},
# Pixel font, Arabic
{'path':'./assets/igiari/bitsy-font-with-arabic.ttf', 'size': 10},
]
def __init__(
self,
text: str,
*,
x: int = 0,
y: int = 0,
font_path: str = None,
font_size: int = 12,
typewriter_effect: bool = False,
colour: str = "#ffffff",
):
self.x = x
self.y = y
self.text = text
# Used for font handling internals
self._internal_text = text.replace('\n', '').replace('\r', '').replace('\t', '')
self.typewriter_effect = typewriter_effect
self.font_size = font_size
self.font = self._select_best_font()
self.font_path = self.font['path']
        if 'size' in self.font:
            self.font_size = self.font['size']
self.colour = colour
    def render(self, background: Image.Image, frame: int = 0):
draw = ImageDraw.Draw(background)
_text = self.text
if self.typewriter_effect:
_text = _text[:frame]
if self.font_path is not None:
font = ImageFont.truetype(self.font_path, self.font_size)
draw.text((self.x, self.y), _text, font=font, fill=self.colour)
else:
draw.text((self.x, self.y), _text, fill=self.colour)
return background
def _select_best_font(self):
best_font = self.font_array[-1]
best_font_points = 0
for font in self.font_array:
font_points = self._check_font(font)
if font_points > best_font_points:
best_font_points = font_points
best_font = font
            if best_font_points >= len(self._internal_text):
                return best_font
print(f'WARNING. NO OPTIMAL FONT FOUND, font score: {best_font_points}/{len(self._internal_text)}, text {self._internal_text}')
return best_font
def _check_font(self, font):
font_path = font['path']
font = TTFont(font_path)
# We check all chars for presence on the font
valid_char = 0
for char in self._internal_text:
# We check if the char is in any table of the font
for table in font['cmap'].tables:
if ord(char) in table.cmap:
valid_char += 1
break
return valid_char
def __str__(self):
return self.text
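# Hedged usage sketch: draw text onto a blank frame (assumes the font files
# under ./assets/igiari/ listed above are available on disk):
if __name__ == "__main__":
    canvas = Image.new("RGB", (256, 64), "#000000")
    text = AnimText("Objection!", x=10, y=20, colour="#ffffff")
    text.render(canvas).save("objection.png")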
130864
from sys import stdout, exit
from textwrap import dedent
from copy import copy
from clingo.application import Application
from clingo import SymbolType, Number, Function, ast, clingo_main
class TermTransformer(ast.Transformer):
def __init__(self, parameter):
self.parameter = parameter
def __get_param(self, name, location):
n = name.replace('\'', '')
primes = len(name) - len(n)
param = ast.SymbolicTerm(location, self.parameter)
if primes > 0:
param = ast.BinaryOperation(location, ast.BinaryOperator.Minus, param, ast.SymbolicTerm(location, Number(primes)))
return n, param
def visit_Function(self, term):
name, param = self.__get_param(term.name, term.location)
term = term.update(name=name)
term.arguments.append(param)
return term
def visit_SymbolicTerm(self, term):
# this function is not necessary if gringo's parser is used
# but this case could occur in a valid AST
raise RuntimeError("not implemented")
class ProgramTransformer(ast.Transformer):
def __init__(self, parameter):
self.final = False
self.parameter = parameter
self.term_transformer = TermTransformer(parameter)
def visit(self, x, *args, **kwargs):
ret = super().visit(x, *args, **kwargs)
if self.final and hasattr(ret, "body"):
if x is ret:
ret = copy(x)
loc = ret.location
fun = ast.Function(loc, "finally", [ast.SymbolicTerm(loc, self.parameter)], False)
atm = ast.SymbolicAtom(fun)
lit = ast.Literal(loc, ast.Sign.NoSign, atm)
ret.body.append(lit)
return ret
def visit_SymbolicAtom(self, atom):
return atom.update(symbol=self.term_transformer(atom.symbol))
def visit_Program(self, prg):
self.final = prg.name == "final"
prg = copy(prg)
if self.final:
prg.name = "static"
prg.parameters.append(ast.Id(prg.location, self.parameter.name))
return prg
def visit_ShowSignature(self, sig):
return sig.update(arity=sig.arity + 1)
def visit_ProjectSignature(self, sig):
return sig.update(arity=sig.arity + 1)
class TModeApp(Application):
def __init__(self):
self._imin = 0
self._imax = None
self._istop = "SAT"
self._horizon = 0
def _parse_imin(self, value):
try:
self._imin = int(value)
except ValueError:
return False
return self._imin >= 0
def _parse_imax(self, value):
if value.upper() in ("INF", "INFINITY"):
self._imax = None
return True
try:
self._imax = int(value)
except ValueError:
return False
return self._imax >= 0
def _parse_istop(self, value):
self._istop = value.upper()
return self._istop in ["SAT", "UNSAT", "UNKNOWN"]
def register_options(self, options):
group = "Incremental Options"
options.add(group, "imin", "Minimum number of solving steps [0]",
self._parse_imin, argument="<n>")
options.add(group, "imax", "Maximum number of solving steps [infinity]",
self._parse_imax, argument="<n>")
options.add(group, "istop", dedent("""\
Stop criterion [sat]
<arg>: {sat|unsat|unknown}"""), self._parse_istop)
def print_model(self, model, printer):
table = {}
for sym in model.symbols(shown=True):
if sym.type == SymbolType.Function and len(sym.arguments) > 0:
table.setdefault(sym.arguments[-1], []).append(Function(sym.name, sym.arguments[:-1]))
for step, symbols in sorted(table.items()):
stdout.write(" State {}:".format(step))
sig = None
for sym in sorted(symbols):
if (sym.name, len(sym.arguments)) != sig:
stdout.write("\n ")
sig = (sym.name, len(sym.arguments))
stdout.write(" {}".format(sym))
stdout.write("\n")
def _main(self, ctl):
step, ret = 0, None
while ((self._imax is None or step < self._imax) and
(step == 0 or step < self._imin or (
(self._istop == "SAT" and not ret.satisfiable) or
(self._istop == "UNSAT" and not ret.unsatisfiable) or
(self._istop == "UNKNOWN" and not ret.unknown)))):
parts = []
parts.append(("base", [Number(step)]))
parts.append(("static", [Number(step)]))
if step > 0:
ctl.release_external(Function("finally", [Number(step-1)]))
parts.append(("dynamic", [Number(step)]))
else:
parts.append(("initial", [Number(0)]))
ctl.ground(parts)
ctl.assign_external(Function("finally", [Number(step)]), True)
ret, step = ctl.solve(), step+1
def main(self, ctl, files):
with ast.ProgramBuilder(ctl) as bld:
ptf = ProgramTransformer(Function("__t"))
ast.parse_files(files, lambda stm: bld.add(ptf(stm)))
ctl.add("initial", ["t"], "initially(t).")
ctl.add("static", ["t"], "#external finally(t).")
self._main(ctl)
exit(clingo_main(TModeApp()))
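# Hedged example invocation (script and encoding file names are illustrative;
# the option names follow `register_options` above):
#   python tmode.py transition-system.lp --imin=1 --imax=20 --istop=UNSAT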
130962
import sys, os
if '..' not in sys.path:
sys.path.append('..')
import subprocess
import pickle, multiprocessing, copy
import pandas as pd
import numpy as np
from collections import namedtuple, defaultdict
import botorch.utils.transforms as transforms
import argparse
from lib.calibrationFunctions import (
pdict_to_parr, parr_to_pdict, save_state, load_state,
get_calibrated_params, gen_initial_seeds, get_test_capacity, downsample_cases, extract_seeds_from_summary)
from lib.mobilitysim import MobilitySimulator
from lib.parallel import launch_parallel_simulations
from lib.distributions import CovidDistributions
from lib.data import collect_data_from_df
from lib.measures import *
from lib.calibrationSettings import (
calibration_lockdown_dates,
calibration_states,
calibration_lockdown_dates,
calibration_testing_params,
calibration_lockdown_beta_multipliers,
calibration_mob_paths)
from lib.summary import *
TO_HOURS = 24.0
ROOT = 'summaries'
"""Tuples representing various objects concerning a simulation and experiment"""
Simulation = namedtuple('Simulation', (
# Generic information
'experiment_info', # Description of the experiment that contains the simulation
'simulation_info', # Description of the simulation itself
'start_date', # Start date
'end_date', # End date
'sim_days', # Days of simulation
'country', # Country
'area', # Area
'random_repeats', # Random repeats of simulation
# Mobility and measures
'mob_settings_file', # Mobility settings
'full_scale', # Whether or not simulation is done at full scale
'measure_list', # Measure list
'testing_params', # Testing params
'store_mob', # Indicator of whether to return and store MobilitySimulator object and measure bernoullis
# Model
'model_params', # Model parameters (from calibration)
'distributions', # Transition distributions
'initial_seeds', # Simulation seeds
## default arguments
'num_age_groups', # Number of age groups
'beacon_config', # dictionary containing information regarding beacon implementation
'thresholds_roc', # threshold values for ROC curve computation
), defaults=(None, None, None))  # NOTE: `defaults` applies to the last three fields, so only `num_age_groups`, `beacon_config`, and `thresholds_roc` have defaults
Plot = namedtuple('Plot', (
    'path',             # path to the result file of this simulation, containing a pickled `Result` namedtuple
'label', # label of this plot on the legend
))
"""Helper functions"""
def get_properties(objs, property):
'''Retrieves list of properties for list of namedtuples'''
out = []
for o in objs:
if isinstance(o, dict):
out.append(o[property])
        elif isinstance(o, (Simulation, Plot, Result)):
out.append(getattr(o, property))
else:
raise ValueError(f'Unknown type of elements in `objs`. Type is {type(o).__name__}')
return out
def options_to_str(**options):
return '-'.join(['{}={}'.format(k, v) for k, v in options.items()])
def process_command_line(return_parser=False):
'''Returns command line parser for experiment configuration'''
parser = argparse.ArgumentParser()
parser.add_argument("--country", required=True,
help="specify country indicator for experiment")
parser.add_argument("--area", required=True,
help="specify area indicator for experiment")
parser.add_argument("--cpu_count", type=int, default=multiprocessing.cpu_count(),
help="update default number of cpus used for parallel simulation rollouts")
parser.add_argument("--smoke_test", action="store_true",
help="flag to quickly finish runs to see if something breaks")
parser.add_argument("--append_name", type=str,
help="appends name to experiment")
parser.add_argument("--p_adoption", type=float,
help="only run experiment with a single adoption level")
parser.add_argument("--beta_dispersion", type=float,
help="only run experiment with a single beta dispersion level")
parser.add_argument("--beacon_proportion", type=float,
help="only run experiment with a single beacon proportion")
parser.add_argument("--beacon_mode",
help="only run experiment with a single beacon mode")
parser.add_argument("--test_lag", type=float,
help="only run experiment with the specified test lag")
parser.add_argument("--background_exposures", type=float,
help="set number of background exposures per week")
parser.add_argument("--tracing_threshold", type=float,
help="set smart tracing threshold")
parser.add_argument("--isolation_cap", type=float,
help="set maximum of newly isolated people per day")
parser.add_argument("--beta_normalization", type=float,
help="")
parser.add_argument("--p_social_distancing", type=float,
help="mobility reduction for all")
parser.add_argument("--calibration_state", type=str,
help="specify path of calibration state")
parser.add_argument("--mobility_reduction", action="store_true",
help="flag to turn off mobility reduction")
parser.add_argument("--continued", action="store_true",
help="skips sub-experiments for which summaries already exist")
if return_parser:
return parser
args = parser.parse_args()
country = args.country
area = args.area
# check calibration state
try:
calibration_state_strg = calibration_states[country][area]
if not os.path.isfile(calibration_states[country][area]):
raise FileNotFoundError
except KeyError:
print(f'{country}-{area} is unknown country-area combination.')
exit(1)
except FileNotFoundError:
print(f'{country}-{area} calibration not found.')
exit(1)
return args
def get_version_tag():
git_commit = subprocess.check_output(["git", "describe", "--always"]).strip().decode(sys.stdout.encoding)
return git_commit
"""Experiment class for structured experimentation with simulations"""
class Experiment(object):
"""
Class to organize a set of experiment simulations. One experiment objects
contains several simulations that are stored and can be analyzed collectively.
"""
def __init__(self, *,
experiment_info,
start_date,
end_date,
random_repeats,
full_scale,
verbose,
cpu_count=None,
multi_beta_calibration=False,
condensed_summary=True,
continued_run=False):
self.experiment_info = experiment_info
self.start_date = start_date
self.end_date = end_date
self.random_repeats = random_repeats
self.cpu_count = cpu_count if cpu_count else multiprocessing.cpu_count()
self.full_scale = full_scale
self.multi_beta_calibration = multi_beta_calibration
self.condensed_summary = condensed_summary
self.continued_run = continued_run
self.verbose = verbose
# list simulations of experiment
self.sims = []
def get_sim_path(self, sim):
version_tag = get_version_tag()
return sim.experiment_info + '-' + version_tag + '/' + sim.experiment_info + '-' + sim.simulation_info
def save_condensed_summary(self, sim, summary):
filepath = os.path.join('condensed_summaries', self.get_sim_path(sim) + '_condensed.pk')
condensed_summary = condense_summary(summary=summary, metadata=sim)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'wb') as fp:
pickle.dump(condensed_summary, fp)
return
def check_summary_existence(self, sim):
filepath = os.path.join('condensed_summaries', self.get_sim_path(sim) + '_condensed.pk')
return os.path.isfile(filepath)
def save_run(self, sim, summary):
filename = self.get_sim_path(sim) + '.pk'
obj = Result(
metadata=sim,
summary=summary,
)
with open(os.path.join(ROOT, filename), 'wb') as fp:
pickle.dump(obj, fp)
return
def add(self, *,
simulation_info,
country,
area,
measure_list,
full_scale=True,
test_update=None,
seed_summary_path=None,
set_calibrated_params_to=None,
set_initial_seeds_to=None,
expected_daily_base_expo_per100k=0,
beacon_config=None,
thresholds_roc=None,
estimate_mobility_reduction=False,
store_mob=False):
# Set time window based on experiment start and end date
sim_days = (pd.to_datetime(self.end_date) - pd.to_datetime(self.start_date)).days
max_time = TO_HOURS * sim_days # in hours
# extract lockdown period
lockdown_start_date = pd.to_datetime(
calibration_lockdown_dates[country]['start'])
lockdown_end_date = pd.to_datetime(
calibration_lockdown_dates[country]['end'])
days_until_lockdown_start = (lockdown_start_date - pd.to_datetime(self.start_date)).days
days_until_lockdown_end = (lockdown_end_date - pd.to_datetime(self.start_date)).days
# Load mob settings
mob_settings_file = calibration_mob_paths[country][area][1 if full_scale else 0]
with open(mob_settings_file, 'rb') as fp:
mob_settings = pickle.load(fp)
num_age_groups = len(mob_settings['mob_rate_per_age_per_type'])
# Obtain COVID19 case date for country and area to estimate testing capacity and heuristic seeds if necessary
unscaled_area_cases = collect_data_from_df(country=country, area=area, datatype='new',
start_date_string=self.start_date, end_date_string=self.end_date)
assert(len(unscaled_area_cases.shape) == 2)
# Scale down cases based on number of people in town and region
sim_cases = downsample_cases(unscaled_area_cases, mob_settings)
# Instantiate correct state transition distributions (estimated from literature)
distributions = CovidDistributions(country=country)
# Expected base rate infections
if expected_daily_base_expo_per100k > 0.0:
# Scale expectation to simulation size
num_people = len(mob_settings['home_loc'])
lambda_base_expo_population = expected_daily_base_expo_per100k * (num_people / 100000)
# Convert to individual base rate by dividing by population size; priority queue handles superposition
lambda_base_expo_indiv = lambda_base_expo_population / num_people
# Poisson process with rate lambda: interarrival times are Exponential r.v. with mean = 1 / lambda
# Hence set rate of Expo r.v.s to 1 / (1 / lambda) = lambda
distributions.lambda_0 = lambda_base_expo_indiv
# Get initial seeds for simulation
# (a) Define heuristically based on true cases and literature distribution estimates
if seed_summary_path is None:
# Generate initial seeds based on unscaled case numbers in town
initial_seeds = gen_initial_seeds(
sim_cases, day=0)
if sum(initial_seeds.values()) == 0:
print('No states seeded at start time; cannot start simulation.\n'
'Consider setting a later start date for calibration using the "--start" flag.')
sys.exit(0)
# (b) Define based state of previous batch of simulations,
# using the random rollout that best matched the true cases in terms of squared error
else:
seed_summary_ = load_summary(seed_summary_path)
seed_day_ = seed_summary_.max_time # take seeds at the end of simulation
initial_seeds = extract_seeds_from_summary(
seed_summary_, seed_day_, sim_cases)
if set_initial_seeds_to is not None:
initial_seeds = set_initial_seeds_to
if set_calibrated_params_to is not None:
calibrated_params = set_calibrated_params_to
else:
# Load calibrated model parameters for this area
calibrated_params = get_calibrated_params(
country=country, area=area, multi_beta_calibration=self.multi_beta_calibration,
estimate_mobility_reduction=estimate_mobility_reduction)
if self.multi_beta_calibration:
betas = calibrated_params['betas']
else:
betas = {
'education': calibrated_params['beta_site'],
'social': calibrated_params['beta_site'],
'bus_stop': calibrated_params['beta_site'],
'office': calibrated_params['beta_site'],
'supermarket': calibrated_params['beta_site'],
}
model_params = {
'betas': betas,
'beta_household': calibrated_params['beta_household'],
}
# Add standard measure of positives staying isolated
measure_list += [
# standard behavior of positively tested: full isolation
SocialDistancingForPositiveMeasure(
t_window=Interval(0.0, max_time), p_stay_home=1.0),
SocialDistancingForPositiveMeasureHousehold(
t_window=Interval(0.0, max_time), p_isolate=1.0),
]
measure_list = MeasureList(measure_list)
testing_params = copy.deepcopy(calibration_testing_params)
testing_params['testing_t_window'] = [0.0, max_time]
if test_update:
testing_params = test_update(testing_params)
# store simulation
sim_kwargs = dict(
# Generic information
experiment_info=self.experiment_info,
simulation_info=simulation_info,
start_date=self.start_date,
end_date=self.end_date,
sim_days=sim_days,
country=country,
area=area,
random_repeats=self.random_repeats,
# Mobility and measures
mob_settings_file=mob_settings_file,
full_scale=full_scale,
measure_list=measure_list,
testing_params=testing_params,
store_mob=store_mob,
# Model
model_params=model_params,
distributions=distributions,
initial_seeds=initial_seeds,
)
# Beacon
# fields are added here (even though defaulting to `None`) to double check backwards compatibility
# with stored `Result` objects prior to implementing beacon functionality
if beacon_config is not None:
sim_kwargs['beacon_config'] = beacon_config
if thresholds_roc is not None:
sim_kwargs['thresholds_roc'] = thresholds_roc
sim = Simulation(**sim_kwargs)
if self.continued_run and self.check_summary_existence(sim):
if self.verbose:
print(f'[Skipped Sim] {self.get_sim_path(sim)}')
else:
self.sims.append(sim)
if self.verbose:
print(f'[Added Sim] {self.get_sim_path(self.sims[-1])}')
def run_all(self):
'''
Runs all simulations that were provided via the `add` method and stored in `self.sims`
'''
# generate experiment folder
current_directory = os.getcwd()
directory = os.path.join(current_directory, ROOT, self.experiment_info + '-' + get_version_tag())
# directory = os.path.join(current_directory, ROOT, self.get_sim_path(self.sims[0]))
if not os.path.exists(directory):
os.makedirs(directory)
# run all simulations
for sim in self.sims:
with open(sim.mob_settings_file, 'rb') as fp:
mob_settings = pickle.load(fp)
summary = launch_parallel_simulations(
mob_settings=sim.mob_settings_file,
distributions=sim.distributions,
random_repeats=sim.random_repeats,
cpu_count=self.cpu_count,
params=sim.model_params,
initial_seeds=sim.initial_seeds,
testing_params=sim.testing_params,
measure_list=sim.measure_list,
max_time=TO_HOURS * sim.sim_days,
home_loc=mob_settings['home_loc'],
num_people=len(mob_settings['home_loc']),
site_loc=mob_settings['site_loc'],
num_sites=len(mob_settings['site_loc']),
beacon_config=sim.beacon_config,
thresholds_roc=sim.thresholds_roc if sim.thresholds_roc is not None else [], # convert to [] if None
store_mob=sim.store_mob,
store_measure_bernoullis=sim.store_mob,
verbose=False)
if self.condensed_summary is True:
self.save_condensed_summary(sim, summary)
else:
self.save_run(sim, summary)
if self.verbose:
print(f'[Finished Sim] {self.get_sim_path(sim)}')
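# Hedged usage sketch (dates, country/area codes, and the empty measure list
# are placeholders; real runs require calibration states and mobility settings):
#
#   exp = Experiment(experiment_info='example', start_date='2020-03-01',
#                    end_date='2020-04-01', random_repeats=4,
#                    full_scale=False, verbose=True)
#   exp.add(simulation_info='baseline', country='GER', area='TU',
#           measure_list=[])
#   exp.run_all()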
130972
import os
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "<KEY>"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'OPTIONS': {
}
}
}
if os.environ.get('GITHUB_WORKFLOW', False):
DATABASE_ENGINE = os.environ.get('DATABASE_ENGINE', 'sqlite')
if 'mysql' in DATABASE_ENGINE:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
},
}
elif 'postgres' in DATABASE_ENGINE:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': '127.0.0.1',
'PORT': '5432',
},
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"django_scrubber",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
130995
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from smoothing_actions import *
# `qe` and `la` are used below but were not imported in the original snippet
import quantecon as qe
import scipy.linalg as la
N_simul = 150
def complete_ss(beta, b0, x0, A, C, S_y, T=12):
"""
Computes the path of consumption and debt for the previously described
complete markets model where exogenous income follows a linear
state space
"""
# Create a linear state space for simulation purposes
# This adds "b" as a state to the linear state space system
# so that setting the seed places shocks in same place for
# both the complete and incomplete markets economy
# Atilde = np.vstack([np.hstack([A, np.zeros((A.shape[0], 1))]),
# np.zeros((1, A.shape[1] + 1))])
# Ctilde = np.vstack([C, np.zeros((1, 1))])
# S_ytilde = np.hstack([S_y, np.zeros((1, 1))])
lss = qe.LinearStateSpace(A, C, S_y, mu_0=x0)
# Add extra state to initial condition
# x0 = np.hstack([x0, np.zeros(1)])
# Compute the (I - beta*A)^{-1}
rm = la.inv(np.eye(A.shape[0]) - beta*A)
# Constant level of consumption
cbar = (1-beta) * (S_y @ rm @ x0 - b0)
c_hist = np.ones(T)*cbar
# Debt
x_hist, y_hist = lss.simulate(T)
b_hist = np.squeeze(S_y @ rm @ x_hist - cbar/(1-beta))
return c_hist, b_hist, np.squeeze(y_hist), x_hist
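# Reference for the closed forms used above: with income y_t = S_y x_t and
# state x_{t+1} = A x_t + C w_{t+1}, expected discounted income from x_0 is
# S_y (I - beta*A)^{-1} x_0, so complete markets support constant consumption
#   cbar = (1 - beta) * (S_y (I - beta*A)^{-1} x_0 - b_0)
# and debt along the simulated path satisfies
#   b_t = S_y (I - beta*A)^{-1} x_t - cbar / (1 - beta).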
if __name__ == '__main__':
# Define parameters
alpha, rho1, rho2 = 10.0, 0.9, 0.0
sigma = 1.0
# N_simul = 1
# T = N_simul
A = np.array([[1., 0., 0.],
[alpha, rho1, rho2],
[0., 1., 0.]])
C = np.array([[0.], [sigma], [0.]])
S_y = np.array([[1, 1.0, 0.]])
beta, b0 = 0.95, -10.0
x0 = np.array([1.0, alpha/(1-rho1), alpha/(1-rho1)])
# Do simulation for complete markets
s = np.random.randint(0, 10000)
np.random.seed(s) # Seeds get set the same for both economies
out = complete_ss(beta, b0, x0, A, C, S_y, 150)
c_hist_com, b_hist_com, y_hist_com, x_hist_com = out
fig, ax = plt.subplots(1, 2, figsize = (15, 5))
# Consumption plots
    ax[0].set_title('Consumption and income', fontsize = 17)
ax[0].plot(np.arange(N_simul), c_hist_com, label = 'consumption', lw = 3)
ax[0].plot(np.arange(N_simul), y_hist_com, label = 'income',
lw = 2, color = sb.color_palette()[3], alpha = .6, linestyle = '--')
ax[0].legend(loc = 'best', fontsize = 15)
ax[0].set_xlabel('Periods', fontsize = 13)
ax[0].set_ylim([-5.0, 110])
# Debt plots
ax[1].set_title('Debt and income', fontsize = 17)
ax[1].plot(np.arange(N_simul), b_hist_com, label = 'debt', lw = 2)
ax[1].plot(np.arange(N_simul), y_hist_com, label = 'Income',
lw = 2, color = sb.color_palette()[3], alpha = .6, linestyle = '--')
ax[1].legend(loc = 'best', fontsize = 15)
ax[1].axhline(0, color = 'k', lw = 1)
ax[1].set_xlabel('Periods', fontsize = 13)
plt.show()
131021
class Track(object):
id = 0
header_line = 1
filename = ""
key_color = "#FF4D55"
name = ""
description = ""
track_image_url = "http://lorempixel.com/400/200"
location = ""
gid = ""
order = -1
def __init__(self, id, name, header_line, key_color, location, gid, order):
super(Track, self).__init__()
self.id = id
self.name = name
self.header_line = header_line
self.key_color = key_color
self.track_image_url = "http://lorempixel.com/400/200"
self.location = location
self.gid = gid
self.order = order
class Service(object):
id = 0
service = ""
url = ""
def __init__(self, id, service, url):
super(Service, self).__init__()
self.id = id
self.service = service
self.url = url
class LogoIco(object):
logo_url = ""
ico_url = ""
main_page_url = ""
def __init__(self, logo_url, ico_url, main_page_url):
super(LogoIco, self).__init__()
self.logo_url = logo_url
self.ico_url = ico_url
self.main_page_url = main_page_url
class Speaker(object):
def __init__(self):
super(Speaker, self).__init__()
class Copyright(object):
def __init__(self):
super(Copyright, self).__init__()
class Session(object):
def __init__(self):
super(Session, self).__init__()
class Sponsor(object):
def __init__(self):
super(Sponsor, self).__init__()
class Microlocation(object):
def __init__(self):
super(Microlocation, self).__init__()
131061
from typing import Optional, Text
import sys
import json
import argparse
import onnx
from torch.utils.data import DataLoader
from torchvision.datasets.folder import ImageFolder
from torchvision.datasets import ImageNet
import furiosa_sdk_quantizer.frontend.onnx
from furiosa_sdk_quantizer.evaluator.data_loader import random_subset
from furiosa_sdk_quantizer.evaluator.model_caller import ModelCaller
from furiosa_sdk_quantizer.frontend.onnx.quantizer import quantizer
def main():
args = parse_args()
if args.command == "export_spec":
export_spec(args.input, args.output)
elif args.command == "optimize":
optimize(args.input, args.output)
elif args.command == "build_calibration_model":
build_calibration_model(args.input, args.output)
elif args.command == "quantize":
quantize(args.input, args.output, args.dynamic_ranges)
elif args.command == "post_training_quantization_with_random_calibration":
post_training_quantization_with_random_calibration(args.input, args.output, args.num_data)
elif args.command == "calibrate_with_random":
calibrate_with_random(args.input, args.output, args.num_data)
elif args.command == "calibrate_with_data_loader":
calibrate_with_data_loader(
args.input,
args.output,
args.dataset_path,
args.dataset_type,
args.num_data,
args.preprocess_from_onnx_model_exporter_registry,
)
elif args.command == "evaluate":
evaluate(
args.input,
args.output,
args.dataset_path,
args.dataset_type,
args.num_data,
args.preprocess_from_onnx_model_exporter_registry,
args.batch_size,
)
elif args.command == "evaluate_with_fake_quantization":
evaluate_with_fake_quantization(
args.input,
args.output,
args.dynamic_ranges,
args.dataset_path,
args.dataset_type,
args.num_data,
args.preprocess_from_onnx_model_exporter_registry,
args.batch_size,
)
else:
raise Exception(f"Unsupported command, {args.command}")
def parse_args():
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument(
"-i", "--input", type=str, help="Path to Model file (tflite, onnx are supported)"
)
common_parser.add_argument("-o", "--output", type=str, help="Path to Output file")
dataset_parser = argparse.ArgumentParser(add_help=False)
dataset_parser.add_argument("--dataset-path", type=str, help="Path to dataset")
dataset_parser.add_argument("--dataset-type", type=str, help="Type of dataset")
dataset_parser.add_argument("-n", "--num-data", type=int, help="The number of data")
dataset_parser.add_argument("-p", "--preprocess-from-onnx-model-exporter-registry", type=str)
parser = argparse.ArgumentParser(description="Furiosa AI quantizer")
subparsers = parser.add_subparsers(dest="command")
export_spec_cmd = subparsers.add_parser(
"export_spec", help="export_spec help", parents=[common_parser]
)
build_calibration_model_cmd = subparsers.add_parser(
"build_calibration_model", help="build calibrate model help", parents=[common_parser]
)
optimize_cmd = subparsers.add_parser("optimize", help="optimize help", parents=[common_parser])
quantize_cmd = subparsers.add_parser("quantize", help="quantize help", parents=[common_parser])
quantize_cmd.add_argument("-d", "--dynamic-ranges", type=str, help="Dynamic ranges")
    ptq_with_random_calibration_cmd = subparsers.add_parser(
        "post_training_quantization_with_random_calibration",
        help="post-training quantization with random calibration help",
        parents=[common_parser],
    )
    ptq_with_random_calibration_cmd.add_argument(
        "-n", "--num-data", type=int, help="The number of random data"
    )
calibrate_with_random_cmd = subparsers.add_parser(
"calibrate_with_random", help="Output: dynamic ranges", parents=[common_parser]
)
calibrate_with_random_cmd.add_argument(
"-n", "--num-data", type=int, help="The number of random data"
)
calibrate_cmd = subparsers.add_parser(
"calibrate_with_data_loader", parents=[common_parser, dataset_parser]
)
evaluate_cmd = subparsers.add_parser("evaluate", parents=[common_parser, dataset_parser])
evaluate_cmd.add_argument("-b", "--batch-size", type=int)
evaluate_with_fake_quantization_cmd = subparsers.add_parser(
"evaluate_with_fake_quantization", parents=[common_parser, dataset_parser]
)
evaluate_with_fake_quantization_cmd.add_argument("-b", "--batch-size", type=int)
evaluate_with_fake_quantization_cmd.add_argument(
"-d", "--dynamic-ranges", type=str, help="Dynamic ranges)"
)
return parser.parse_args()
def export_spec(input: Optional[Text] = None, output: Optional[Text] = None):
model = _read_model(input)
if output is not None:
with open(output, "w") as writable:
furiosa_sdk_quantizer.frontend.onnx.export_spec(model, writable)
else:
furiosa_sdk_quantizer.frontend.onnx.export_spec(model, sys.stdout)
def optimize(input: Optional[Text] = None, output: Optional[Text] = None):
model = _read_model(input)
model = furiosa_sdk_quantizer.frontend.onnx.optimize_model(model)
if output is not None:
onnx.save_model(model, output)
else:
        onnx.save_model(model, sys.stdout.buffer)  # serialized protos are bytes, so write to the binary stdout stream
def build_calibration_model(input: Optional[Text] = None, output: Optional[Text] = None):
model = _read_model(input)
model = furiosa_sdk_quantizer.frontend.onnx.build_calibration_model(model)
if output is not None:
onnx.save_model(model, output)
else:
        onnx.save_model(model, sys.stdout.buffer)
def quantize(
    input: Optional[Text] = None, output: Optional[Text] = None, dynamic_ranges: Optional[str] = None
):
model = _read_model(input)
model = furiosa_sdk_quantizer.frontend.onnx.optimize_model(model)
with open(dynamic_ranges, "r") as readable:
dynamic_ranges = json.load(readable)
model = furiosa_sdk_quantizer.frontend.onnx.quantize(
model,
per_channel=True,
static=True,
mode=quantizer.QuantizationMode.dfg,
dynamic_ranges=dynamic_ranges,
)
if output is not None:
onnx.save_model(model, output)
else:
        onnx.save_model(model, sys.stdout.buffer)
def _read_model(input: Optional[Text] = None) -> onnx.ModelProto:
if input is not None:
with open(input, "rb") as readable:
model = onnx.load_model(readable, onnx.helper.ModelProto)
else:
        model = onnx.load_model(sys.stdin.buffer, onnx.helper.ModelProto)  # read bytes, not text
return model
def _load_dataset(
dataset_path: str,
dataset_type: str,
num_data: int,
preprocess_from_registry: str,
batch_size: int,
) -> DataLoader:
# FIXME: onnx-model-exporter takes too long to load
from onnx_model_exporter.models import registry
model_cls = registry.model_entrypoint(preprocess_from_registry)
_, transform = ModelCaller(model_cls, "onnx").call()
# TODO: support various type of dataset
if dataset_type == "ImageFolder":
dataset = ImageFolder(dataset_path, transform)
elif dataset_type == "ImageNetValDataset":
dataset = ImageNet(dataset_path, split="val", transform=transform)
else:
raise ValueError(f"Unexpected dataset type: {dataset_type}.")
if num_data == 0:
num_data = len(dataset)
# `seed` is fixed to 1 because we need to get the same subset of
# `dataset` across multiple executions.
dataset = random_subset(dataset, num_data, seed=1)
# The `shuffle` argument of DataLoader.__init__ does not have to be
# set to True because we are calibrating or evaluating, not
# training, the model.
loader = DataLoader(dataset, batch_size)
return loader
def post_training_quantization_with_random_calibration(
input: Optional[Text] = None, output: Optional[Text] = None, num_data: Optional[int] = None
):
model = _read_model(input)
model = furiosa_sdk_quantizer.frontend.onnx.post_training_quantization_with_random_calibration(
model,
static=True,
per_channel=True,
mode=quantizer.QuantizationMode.dfg,
num_data=num_data,
)
if output is not None:
onnx.save_model(model, output)
else:
        onnx.save_model(model, sys.stdout.buffer)
def calibrate_with_random(
input: Optional[Text] = None, output: Optional[Text] = None, num_data: Optional[int] = None
):
model = _read_model(input)
dynamic_ranges = furiosa_sdk_quantizer.frontend.onnx.calibrate_with_random(model, num_data)
if output is not None:
with open(output, "w") as f:
json.dump(dynamic_ranges, f, ensure_ascii=True, indent=2)
else:
json.dump(dynamic_ranges, sys.stdout, ensure_ascii=True, indent=2)
def calibrate_with_data_loader(
input: Optional[Text],
output: Optional[Text],
dataset_path: Optional[Text],
dataset_type: Text,
num_data: Optional[int],
preprocess: Optional[Text],
) -> None:
model = _read_model(input)
loader = _load_dataset(dataset_path, dataset_type, num_data, preprocess, 1)
dynamic_ranges = furiosa_sdk_quantizer.frontend.onnx.calibrate_with_data_loader(model, loader)
if output is not None:
with open(output, "w") as f:
json.dump(dynamic_ranges, f, ensure_ascii=True, indent=2)
else:
json.dump(dynamic_ranges, sys.stdout, ensure_ascii=True, indent=2)
def evaluate(
input: Optional[Text],
output: Optional[Text],
dataset_path: Optional[Text],
dataset_type: Text,
num_data: Optional[int],
preprocess: Optional[Text],
batch_size: Optional[int] = 1,
) -> None:
import furiosa_sdk_quantizer.evaluator.eval_imagenet
model = _read_model(input)
loader = _load_dataset(dataset_path, dataset_type, num_data, preprocess, batch_size)
accuracy = furiosa_sdk_quantizer.evaluator.eval_imagenet.evaluate(model, loader)
if output is not None:
with open(output, "w") as f:
json.dump(accuracy, f, ensure_ascii=True, indent=2)
else:
json.dump(accuracy, sys.stdout, ensure_ascii=True, indent=2)
def evaluate_with_fake_quantization(
input: Optional[Text],
output: Optional[Text],
dynamic_ranges: str,
dataset_path: Optional[Text],
dataset_type: Text,
num_data: Optional[int],
    preprocess: Optional[Text],
batch_size: Optional[int] = 1,
) -> None:
import furiosa_sdk_quantizer.evaluator.eval_imagenet
with open(dynamic_ranges) as readable:
dynamic_ranges = json.load(readable)
model = _read_model(input)
model = furiosa_sdk_quantizer.frontend.onnx.optimize_model(model)
model = furiosa_sdk_quantizer.frontend.onnx.quantize(
model,
per_channel=True,
static=True,
mode=quantizer.QuantizationMode.fake,
dynamic_ranges=dynamic_ranges,
)
loader = _load_dataset(dataset_path, dataset_type, num_data, preprocess, batch_size)
accuracy = furiosa_sdk_quantizer.evaluator.eval_imagenet.evaluate(model, loader)
if output is not None:
with open(output, "w") as f:
json.dump(accuracy, f, ensure_ascii=True, indent=2)
else:
json.dump(accuracy, sys.stdout, ensure_ascii=True, indent=2)
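# Example invocations (hypothetical file names) for the subcommands defined in
# parse_args() above:
#   python quantizer_cli.py optimize -i model.onnx -o model_opt.onnx
#   python quantizer_cli.py calibrate_with_random -i model_opt.onnx -o ranges.json -n 8
#   python quantizer_cli.py quantize -i model_opt.onnx -o model_i8.onnx -d ranges.json
if __name__ == "__main__":
    main()  # assumed entry point; the snippet defines main() but never calls it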
|
131099
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
class STN3D(nn.Module):
def __init__(self, input_channels=3):
super(STN3D, self).__init__()
self.input_channels = input_channels
self.mlp1 = nn.Sequential(
nn.Conv1d(input_channels, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 128, 1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Conv1d(128, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU(),
)
self.mlp2 = nn.Sequential(
nn.Linear(1024, 512),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.Linear(256, input_channels * input_channels),
)
def forward(self, x):
batch_size = x.size(0)
num_points = x.size(2)
x = self.mlp1(x)
x = F.max_pool1d(x, num_points).squeeze(2)
x = self.mlp2(x)
I = torch.eye(self.input_channels).view(-1).to(x.device)
x = x + I
x = x.view(-1, self.input_channels, self.input_channels)
return x
class PointNetEncoder(nn.Module):
def __init__(self, embedding_size, input_channels=3):
super(PointNetEncoder, self).__init__()
self.input_channels = input_channels
self.stn1 = STN3D(input_channels)
self.stn2 = STN3D(64)
self.mlp1 = nn.Sequential(
nn.Conv1d(input_channels, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
)
self.mlp2 = nn.Sequential(
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 128, 1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Conv1d(128, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU(),
)
self.fc = nn.Linear(1024, embedding_size)
def forward(self, x):
batch_size = x.shape[0]
num_points = x.shape[1]
x = x[:, :, : self.input_channels]
x = x.transpose(2, 1) # transpose to apply 1D convolution
x = self.mlp1(x)
x = self.mlp2(x)
x = F.max_pool1d(x, num_points).squeeze(2) # max pooling
x = self.fc(x)
return x
class TargetEncoder(nn.Module):
def __init__(self, embedding_size, input_channels=3):
super(TargetEncoder, self).__init__()
self.input_channels = input_channels
self.stn1 = STN3D(input_channels)
self.stn2 = STN3D(64)
self.mlp1 = nn.Sequential(
nn.Conv1d(input_channels, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
)
self.mlp2 = nn.Sequential(
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 128, 1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Conv1d(128, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU(),
)
self.fc = nn.Linear(1024, embedding_size)
def forward(self, x):
batch_size = x.shape[0]
num_points = x.shape[1]
x = x[:, :, : self.input_channels]
x = x.transpose(2, 1) # transpose to apply 1D convolution
x = self.mlp1(x)
x = self.mlp2(x)
x = F.max_pool1d(x, num_points).squeeze(2) # max pooling
x = self.fc(x)
return x
class Classification_Layer(nn.Module):
def __init__(self, input_dim, num_class, use_bn=False):
super(Classification_Layer, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, num_class)
if (use_bn):
            self.bn1 = nn.BatchNorm1d(num_class)  # was BatchNorm1d(intermediate_layer), which is undefined in this class
def forward(self, x, use_bn=False):
if use_bn:
x = F.relu(self.bn1(self.fc1(x)))
else:
x = self.fc1(x)
return F.log_softmax(x, dim=1)
class ParamDecoder(nn.Module):
def __init__(self, input_dim, intermediate_layer, embedding_size, use_bn=False):
super(ParamDecoder, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, intermediate_layer)
self.fc2 = nn.Linear(intermediate_layer, embedding_size)
if (use_bn):
self.bn1 = nn.BatchNorm1d(intermediate_layer)
def forward(self, x, use_bn=False):
if use_bn:
x = F.relu(self.bn1(self.fc1(x)))
else:
x = self.fc1(x)
x = self.fc2(x)
return x
class ParamDecoder2(nn.Module):
def __init__(self, input_dim, intermediate_layer, embedding_size, use_bn=False):
super(ParamDecoder2, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, 512)
self.fc2 = nn.Linear(512, intermediate_layer)
self.fc3 = nn.Linear(intermediate_layer, embedding_size)
if (use_bn):
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(intermediate_layer)
def forward(self, x, use_bn=False):
if use_bn:
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.fc2(x)))
else:
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class LatentDecoder(nn.Module):
def __init__(self, input_dim, embedding_size):
super(LatentDecoder, self).__init__()
self.input_dim = input_dim
self.fc1 = nn.Linear(input_dim, 256)
self.bn1 = nn.BatchNorm1d(256)
self.fc_mu = nn.Linear(256, embedding_size)
self.fc_sigma = nn.Linear(256, embedding_size)
def forward(self, x, use_bn=False):
x = F.relu(self.bn1(self.fc1(x)))
mu = self.fc_mu(x)
sigma = self.fc_sigma(x)
return mu, sigma
class TargetDecoder(nn.Module):
def __init__(self, input_dim, num_points):
super(TargetDecoder, self).__init__()
self.input_dim = input_dim
self.num_points = num_points
self.fc1 = nn.Linear(input_dim, 1024)
self.fc2 = nn.Linear(1024, 1024)
self.fc3 = nn.Linear(1024, num_points*3)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = x.view(-1, self.num_points, 3)
return x
class ECCV(nn.Module):
def __init__(self, embedding_size, input_channels=3):
super(ECCV, self).__init__()
self.input_channels = input_channels
self.stn1 = STN3D(input_channels)
self.stn2 = STN3D(64)
self.mlp1 = nn.Sequential(
nn.Conv1d(input_channels, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
)
self.mlp2 = nn.Sequential(
nn.Conv1d(64, 64, 1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.Conv1d(64, 128, 1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Conv1d(128, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU(),
)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc_mu = nn.Linear(256, embedding_size)
self.fc_sigma = nn.Linear(256, embedding_size)
def forward(self, x):
batch_size = x.shape[0]
num_points = x.shape[1]
x = x[:, :, : self.input_channels]
x = x.transpose(2, 1) # transpose to apply 1D convolution
x = self.mlp1(x)
x = self.mlp2(x)
x = F.max_pool1d(x, num_points).squeeze(2) # max pooling
x = self.fc1(x)
x = self.fc2(x)
mu = self.fc_mu(x)
sigma = self.fc_sigma(x)
return mu, sigma
### For Images ###
#
def set_parameter_requires_grad(model, is_fixed):
if is_fixed:
for param in model.parameters():
param.requires_grad = False
## Set layer4 to trainable
for name, param in model.layer4[0].named_parameters():
param.requires_grad = True
for name, param in model.layer4[1].named_parameters():
param.requires_grad = True
class ImageEncoder(nn.Module):
def __init__(self, embedding_size, is_fixed, use_pretrained=True):
super(ImageEncoder, self).__init__()
#ResNet18
model_ft = models.resnet18(pretrained=use_pretrained)
# Set trainable parameters
set_parameter_requires_grad(model_ft, is_fixed)
# Set output feature dim
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, embedding_size)
input_size = 224
self.input_size = input_size
self.model = model_ft
# ###Debug
# for name, param in model_ft.named_parameters():
# print(name)
# print(param.requires_grad)
# exit()
def forward(self, x):
batch_size = x.shape[0]
x = self.model(x)
return x
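# Usage sketch: PointNetEncoder.forward() above expects point clouds shaped
# (batch, num_points, channels) and returns (batch, embedding_size).
if __name__ == "__main__":
    encoder = PointNetEncoder(embedding_size=128)
    points = torch.rand(4, 1024, 3)  # 4 clouds of 1024 xyz points
    print(encoder(points).shape)     # torch.Size([4, 128])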
|
131102
|
from setuptools import find_packages, setup
url = "https://github.com/SherylHYX/pytorch_geometric_signed_directed"
__version__ = '0.8.0'
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
install_requires = [
"torch",
"torch_sparse",
"torch_scatter",
"sklearn",
"torch_geometric",
"numpy",
"networkx==2.6.3",
"scipy"
]
setup_requires = ["pytest-runner"]
tests_require = ["pytest", "pytest-cov", "mock"]
keywords = [
"machine-learning",
"deep-learning",
"deeplearning",
"deep learning",
"machine learning",
"signal processing",
"signed graph",
"graph",
"directed graph",
"embedding",
"clustering",
"graph convolution",
"graph neural network",
"representation learning",
"learning",
]
setup(
name="torch_geometric_signed_directed",
packages=find_packages(),
version=__version__,
license="MIT",
description="An Extension Library for PyTorch Geometric on signed and directed networks.",
long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
author="<NAME>",
author_email="<EMAIL>",
url=url,
download_url='{}/archive/{}.tar.gz'.format(url, __version__),
keywords=keywords,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
python_requires=">=3.6",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
131120
|
import numpy as np
def R2_nom_denom(y, yhat):
""" Calculates the nominator and denomitor for calculating R-squared
Args:
y (array): data
yhat (array): predicted data data
Returns:
nominator (float or array), denominator (float or array)
"""
y, yhat = np.array(y), np.array(yhat)
with np.errstate(divide="ignore", invalid="ignore"):
nom = np.sum((y - yhat) ** 2, axis=0)
        denom = np.sum(y ** 2, axis=0)  # Kendrick's denominator (total sum of squares about zero)
return nom, denom
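# Usage sketch: the two pieces combine into the zero-baseline R-squared used
# here, R2 = 1 - nom / denom. Plain numbers for illustration:
if __name__ == "__main__":
    y, yhat = [1.0, 2.0, 3.0], [1.1, 1.9, 3.2]
    nom, denom = R2_nom_denom(y, yhat)
    print(1 - nom / denom)  # 0.9957... (nom = 0.06, denom = 14.0)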
|
131125
|
import sys
import cv2
import numpy as np
from line_boundary_check import *
# ----------------------------------------------------------------------------
g_mouse_pos = [0, 0]
# Mouse event handler
def onMouse(event, x, y, flags, param):
global g_mouse_pos
g_mouse_pos = [x, y]
# ----------------------------------------------------------------------------
class boundaryLine:
def __init__(self, line=(0,0,0,0)):
self.p0 = (line[0], line[1])
self.p1 = (line[2], line[3])
self.color = (0,255,255)
        self.lineThickness = 4
        self.textColor = (0,255,255)
        self.textSize = 4
        self.textThickness = 2
self.count1 = 0
self.count2 = 0
# Draw single boundary line
def drawBoundaryLine(img, line):
x1, y1 = line.p0
x2, y2 = line.p1
    cv2.line(img, (x1, y1), (x2, y2), line.color, line.lineThickness)
    cv2.putText(img, str(line.count1), (x1, y1), cv2.FONT_HERSHEY_PLAIN, line.textSize, line.textColor, line.textThickness)
    cv2.putText(img, str(line.count2), (x2, y2), cv2.FONT_HERSHEY_PLAIN, line.textSize, line.textColor, line.textThickness)
cv2.drawMarker(img, (x1, y1),line.color, cv2.MARKER_TRIANGLE_UP, 16, 4)
cv2.drawMarker(img, (x2, y2),line.color, cv2.MARKER_TILTED_CROSS, 16, 4)
# Draw multiple boundary lines
def drawBoundaryLines(img, boundaryLines):
for line in boundaryLines:
drawBoundaryLine(img, line)
# in: boundary_line = boundaryLine class object
# trajectory = (x1, y1, x2, y2)
def checkLineCross(boundary_line, trajectory_line):
traj_p0 = trajectory_line[0] # Trajectory of an object
traj_p1 = trajectory_line[1]
bLine_p0 = (boundary_line.p0[0], boundary_line.p0[1]) # Boundary line
bLine_p1 = (boundary_line.p1[0], boundary_line.p1[1])
intersect = checkIntersect(traj_p0, traj_p1, bLine_p0, bLine_p1) # Check if intersect or not
if intersect == True:
angle = calcVectorAngle(traj_p0, traj_p1, bLine_p0, bLine_p1) # Calculate angle between trajectory and boundary line
if angle<180:
boundary_line.count1 += 1
else:
boundary_line.count2 += 1
#cx, cy = calcIntersectPoint(traj_p0, traj_p1, bLine_p0, bLine_p1) # Calculate the intersect coordination
#------------------------------------
# Area intrusion detection
class area:
def __init__(self, contour):
self.contour = np.array(contour, dtype=np.int32)
self.count = 0
# Draw areas (polygons)
def drawAreas(img, areas):
for area in areas:
if area.count>0:
color=(0,0,255)
else:
color=(255,0,0)
cv2.polylines(img, [area.contour], True, color,4)
cv2.putText(img, str(area.count), (area.contour[0][0], area.contour[0][1]), cv2.FONT_HERSHEY_PLAIN, 4, color, 2)
# Area intrusion check
def checkAreaIntrusion(area, points):
area.count = 0
for pt in points:
if pointPolygonTest(area.contour, pt):
area.count += 1
# ----------------------------------------------------------------------------
# boundary lines
boundaryLines = [
boundaryLine([ 300, 40, 20, 400 ]),
boundaryLine([ 440, 40, 700, 400 ])
]
# Areas
areas = [
area([ [200,200], [500,180], [600,400], [300,300], [100,360] ])
]
def main():
cv2.namedWindow('test')
cv2.setMouseCallback('test', onMouse)
prev_mouse_pos = [0, 0]
trace = []
trace_length = 25
key = -1
while key != 27: # ESC key
img = np.zeros((600, 800, 3), dtype=np.uint8)
for line in boundaryLines:
checkLineCross(line, (prev_mouse_pos, g_mouse_pos))
drawBoundaryLines(img, boundaryLines)
for area in areas:
checkAreaIntrusion(area, (g_mouse_pos,))
drawAreas(img, areas)
trace.append(g_mouse_pos)
if len(trace)>trace_length:
trace = trace[-trace_length:]
cv2.polylines(img, np.array([trace], dtype=np.int32), False, (255,255,0), 1, cv2.LINE_AA)
prev_mouse_pos = g_mouse_pos
cv2.imshow('test', img)
key = cv2.waitKey(50)
return 0
if __name__ == '__main__':
sys.exit(main())
|
131145
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import model
from data_reader import load_data, DataReader
flags = tf.flags
# data
flags.DEFINE_string('data_dir', 'data', 'data directory. Should contain train.txt/valid.txt/test.txt with input data')
flags.DEFINE_string('train_dir', 'cv', 'training directory (models and summaries are saved there periodically)')
flags.DEFINE_string('load_model', None, '(optional) filename of the model to load. Useful for re-starting training from a checkpoint')
# model params
flags.DEFINE_integer('rnn_size', 650, 'size of LSTM internal state')
flags.DEFINE_integer('highway_layers', 2, 'number of highway layers')
flags.DEFINE_integer('char_embed_size', 15, 'dimensionality of character embeddings')
flags.DEFINE_string ('kernels', '[1,2,3,4,5,6,7]', 'CNN kernel widths')
flags.DEFINE_string ('kernel_features', '[50,100,150,200,200,200,200]', 'number of features in the CNN kernel')
flags.DEFINE_integer('rnn_layers', 2, 'number of layers in the LSTM')
flags.DEFINE_float ('dropout', 0.5, 'dropout. 0 = no dropout')
# optimization
flags.DEFINE_float ('learning_rate_decay', 0.5, 'learning rate decay')
flags.DEFINE_float ('learning_rate', 1.0, 'starting learning rate')
flags.DEFINE_float ('decay_when', 1.0, 'decay if validation perplexity does not improve by more than this much')
flags.DEFINE_float ('param_init', 0.05, 'initialize parameters at')
flags.DEFINE_integer('num_unroll_steps', 35, 'number of timesteps to unroll for')
flags.DEFINE_integer('batch_size', 20, 'number of sequences to train on in parallel')
flags.DEFINE_integer('max_epochs', 25, 'number of full passes through the training data')
flags.DEFINE_float ('max_grad_norm', 5.0, 'normalize gradients at')
flags.DEFINE_integer('max_word_length', 65, 'maximum word length')
# bookkeeping
flags.DEFINE_integer('seed', 3435, 'random number generator seed')
flags.DEFINE_integer('print_every', 5, 'how often to print current loss')
flags.DEFINE_string ('EOS', '+', '<EOS> symbol. should be a single unused character (like +) for PTB and blank for others')
FLAGS = flags.FLAGS
def run_test(session, m, data, batch_size, num_steps):
"""Runs the model on the given data."""
costs = 0.0
iters = 0
state = session.run(m.initial_state)
    for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)):  # NOTE: no `reader` module is imported here; run_test() is unused as written
cost, state = session.run([m.cost, m.final_state], {
m.input_data: x,
m.targets: y,
m.initial_state: state
})
costs += cost
iters += 1
return costs / iters
def main(_):
''' Trains model from data '''
if not os.path.exists(FLAGS.train_dir):
os.mkdir(FLAGS.train_dir)
print('Created training directory', FLAGS.train_dir)
word_vocab, char_vocab, word_tensors, char_tensors, max_word_length = \
load_data(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS)
train_reader = DataReader(word_tensors['train'], char_tensors['train'],
FLAGS.batch_size, FLAGS.num_unroll_steps)
valid_reader = DataReader(word_tensors['valid'], char_tensors['valid'],
FLAGS.batch_size, FLAGS.num_unroll_steps)
test_reader = DataReader(word_tensors['test'], char_tensors['test'],
FLAGS.batch_size, FLAGS.num_unroll_steps)
print('initialized all dataset readers')
with tf.Graph().as_default(), tf.Session() as session:
# tensorflow seed must be inside graph
tf.set_random_seed(FLAGS.seed)
np.random.seed(seed=FLAGS.seed)
''' build training graph '''
initializer = tf.random_uniform_initializer(-FLAGS.param_init, FLAGS.param_init)
with tf.variable_scope("Model", initializer=initializer):
train_model = model.inference_graph(
char_vocab_size=char_vocab.size,
word_vocab_size=word_vocab.size,
char_embed_size=FLAGS.char_embed_size,
batch_size=FLAGS.batch_size,
num_highway_layers=FLAGS.highway_layers,
num_rnn_layers=FLAGS.rnn_layers,
rnn_size=FLAGS.rnn_size,
max_word_length=max_word_length,
kernels=eval(FLAGS.kernels),
kernel_features=eval(FLAGS.kernel_features),
num_unroll_steps=FLAGS.num_unroll_steps,
dropout=FLAGS.dropout)
train_model.update(model.loss_graph(train_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
# scaling loss by FLAGS.num_unroll_steps effectively scales gradients by the same factor.
            # We need it to reproduce how the original Torch code optimizes. Without this, our gradients would be
            # much smaller (i.e. 35 times smaller) and, to get the system to learn, we'd have to scale the learning rate and max_grad_norm appropriately.
            # Thus, we scale the gradients so that this trainer is exactly compatible with the original.
train_model.update(model.training_graph(train_model.loss * FLAGS.num_unroll_steps,
FLAGS.learning_rate, FLAGS.max_grad_norm))
# create saver before creating more graph nodes, so that we do not save any vars defined below
saver = tf.train.Saver(max_to_keep=50)
''' build graph for validation and testing (shares parameters with the training graph!) '''
with tf.variable_scope("Model", reuse=True):
valid_model = model.inference_graph(
char_vocab_size=char_vocab.size,
word_vocab_size=word_vocab.size,
char_embed_size=FLAGS.char_embed_size,
batch_size=FLAGS.batch_size,
num_highway_layers=FLAGS.highway_layers,
num_rnn_layers=FLAGS.rnn_layers,
rnn_size=FLAGS.rnn_size,
max_word_length=max_word_length,
kernels=eval(FLAGS.kernels),
kernel_features=eval(FLAGS.kernel_features),
num_unroll_steps=FLAGS.num_unroll_steps,
dropout=0.0)
valid_model.update(model.loss_graph(valid_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps))
if FLAGS.load_model:
saver.restore(session, FLAGS.load_model)
print('Loaded model from', FLAGS.load_model, 'saved at global step', train_model.global_step.eval())
else:
tf.global_variables_initializer().run()
session.run(train_model.clear_char_embedding_padding)
print('Created and initialized fresh model. Size:', model.model_size())
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=session.graph)
''' take learning rate from CLI, not from saved graph '''
session.run(
tf.assign(train_model.learning_rate, FLAGS.learning_rate),
)
''' training starts here '''
best_valid_loss = None
rnn_state = session.run(train_model.initial_rnn_state)
for epoch in range(FLAGS.max_epochs):
epoch_start_time = time.time()
avg_train_loss = 0.0
count = 0
for x, y in train_reader.iter():
count += 1
start_time = time.time()
loss, _, rnn_state, gradient_norm, step, _ = session.run([
train_model.loss,
train_model.train_op,
train_model.final_rnn_state,
train_model.global_norm,
train_model.global_step,
train_model.clear_char_embedding_padding
], {
train_model.input : x,
train_model.targets: y,
train_model.initial_rnn_state: rnn_state
})
avg_train_loss += 0.05 * (loss - avg_train_loss)
time_elapsed = time.time() - start_time
if count % FLAGS.print_every == 0:
print('%6d: %d [%5d/%5d], train_loss/perplexity = %6.8f/%6.7f secs/batch = %.4fs, grad.norm=%6.8f' % (step,
epoch, count,
train_reader.length,
loss, np.exp(loss),
time_elapsed,
gradient_norm))
print('Epoch training time:', time.time()-epoch_start_time)
# epoch done: time to evaluate
avg_valid_loss = 0.0
count = 0
rnn_state = session.run(valid_model.initial_rnn_state)
for x, y in valid_reader.iter():
count += 1
start_time = time.time()
loss, rnn_state = session.run([
valid_model.loss,
valid_model.final_rnn_state
], {
valid_model.input : x,
valid_model.targets: y,
valid_model.initial_rnn_state: rnn_state,
})
if count % FLAGS.print_every == 0:
print("\t> validation loss = %6.8f, perplexity = %6.8f" % (loss, np.exp(loss)))
avg_valid_loss += loss / valid_reader.length
print("at the end of epoch:", epoch)
print("train loss = %6.8f, perplexity = %6.8f" % (avg_train_loss, np.exp(avg_train_loss)))
print("validation loss = %6.8f, perplexity = %6.8f" % (avg_valid_loss, np.exp(avg_valid_loss)))
save_as = '%s/epoch%03d_%.4f.model' % (FLAGS.train_dir, epoch, avg_valid_loss)
saver.save(session, save_as)
print('Saved model', save_as)
''' write out summary events '''
summary = tf.Summary(value=[
tf.Summary.Value(tag="train_loss", simple_value=avg_train_loss),
tf.Summary.Value(tag="valid_loss", simple_value=avg_valid_loss)
])
summary_writer.add_summary(summary, step)
''' decide if need to decay learning rate '''
if best_valid_loss is not None and np.exp(avg_valid_loss) > np.exp(best_valid_loss) - FLAGS.decay_when:
print('validation perplexity did not improve enough, decay learning rate')
current_learning_rate = session.run(train_model.learning_rate)
print('learning rate was:', current_learning_rate)
current_learning_rate *= FLAGS.learning_rate_decay
if current_learning_rate < 1.e-5:
print('learning rate too small - stopping now')
break
session.run(train_model.learning_rate.assign(current_learning_rate))
print('new learning rate is:', current_learning_rate)
else:
best_valid_loss = avg_valid_loss
if __name__ == "__main__":
tf.app.run()
|
131151
|
import json
from graphite.util import jsonResponse, HttpResponse, HttpError
from graphite.functions import SeriesFunctions, SeriesFunction, PieFunctions, PieFunction, functionInfo
class jsonInfinityEncoder(json.JSONEncoder):
def encode(self, o):
return super(jsonInfinityEncoder, self).encode(o).replace('Infinity,', '1e9999,')
def default(self, o):
if hasattr(o, 'toJSON'):
return o.toJSON()
return o.__dict__
@jsonResponse(encoder=jsonInfinityEncoder)
def functionList(request, queryParams):
if request.method != 'GET':
return HttpResponse(status=405)
if queryParams.get('type') == 'pie':
funcs = PieFunctions()
else:
funcs = SeriesFunctions()
grouped = queryParams.get('grouped', '').lower() in ['1', 'true']
group = queryParams.get('group')
result = {}
for (name, func) in funcs.items():
info = functionInfo(name, func)
if group is not None and group != info['group']:
continue
if grouped:
if info['group'] not in result:
result[info['group']] = {}
result[info['group']][name] = info
else:
result[name] = info
return result
@jsonResponse(encoder=jsonInfinityEncoder)
def functionDetails(request, queryParams, name):
if request.method != 'GET':
return HttpResponse(status=405)
try:
if queryParams.get('type') == 'pie':
func = PieFunction(name)
else:
func = SeriesFunction(name)
except KeyError:
raise HttpError('Function not found: %s' % name, status=404)
return functionInfo(name, func)
|
131155
|
import re
import copy
import pickle
import numpy as np
from collections import OrderedDict
import torch
from torch.autograd import Variable
import global_variables as g
def save_checkpoint(state, filename='./checkpoints/checkpoint.pth.tar'):
print('save model!', filename)
torch.save(state, filename)
def save_pickle(d, path):
print('save pickle to', path)
with open(path, mode='wb') as f:
pickle.dump(d, f)
def load_pickle(path):
print('load', path)
with open(path, mode='rb') as f:
return pickle.load(f)
def get_entities(fpath):
entities = OrderedDict({'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []})
with open(fpath, 'r') as file:
lines = file.readlines()
for l in lines:
wds = l.rstrip().split(' ')[2].split('\t')
slot_type = wds[0] # ex) R_price
slot_val = wds[1] # ex) cheap
# if slot_type not in entities:
# entities[slot_type] = []
if slot_type in entities:
if slot_val not in entities[slot_type]:
entities[slot_type].append(slot_val)
return entities
def load_embd_weights(word2vec, vocab_size, embd_size, w2i):
embedding_matrix = np.zeros((vocab_size, embd_size))
print('embed_matrix.shape', embedding_matrix.shape)
found_ct = 0
for word, idx in w2i.items():
# words not found in embedding index will be all-zeros.
if word in word2vec.wv:
embedding_matrix[idx] = word2vec.wv[word]
found_ct += 1
print(found_ct, 'words are found in word2vec. vocab_size is', vocab_size)
return torch.from_numpy(embedding_matrix).type(torch.FloatTensor)
def preload(fpath, vocab, system_acts):
with open(fpath, 'r') as f:
lines = f.readlines()
for idx, l in enumerate(lines):
l = l.rstrip()
if l != '':
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
if len(ls) == 2: # includes user and system utterance
for w in uttr:
if w not in vocab:
vocab.append(w)
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
if sys_act not in system_acts: system_acts.append(sys_act)
vocab = sorted(vocab)
system_acts = sorted(system_acts)
return vocab, system_acts
def load_data(fpath, entities, w2i, system_acts):
'''
store data as dialog (multi turns)
'''
data = []
with open(fpath, 'r') as f:
lines = f.readlines()
# x: user uttr, y: sys act, c: context, b: BoW, p: previous sys act, f: action filter
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
for idx, l in enumerate(lines):
l = l.rstrip()
if l == '':
data.append((x, y, c, b, p, f))
# reset
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
else:
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
update_context(context, uttr, entities)
act_filter = generate_act_filter(len(system_acts), context)
bow = get_bow(uttr, w2i)
sys_act = g.SILENT
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
else:
continue # TODO
x.append(uttr)
if len(y) == 0:
p.append(g.SILENT)
else:
p.append(y[-1])
y.append(sys_act)
c.append(copy.deepcopy(context))
b.append(bow)
f.append(act_filter)
return data, system_acts
def update_context(context, sentence, entities):
for idx, (ent_key, ent_vals) in enumerate(entities.items()):
for w in sentence:
if w in ent_vals:
context[idx] = 1
def generate_act_filter(action_size, context):
mask = [0] * action_size
# TODO hard coding
# 0 <SILENT>
# 1 any preference on a type of cuisine
# 2 api_call
# 3 great let me do the reservation
# 4 hello what can i help you with today
# 5 here it is
# 6 how many people would be in your party
# 7 i'm on it
# 8 is there anything i can help you with
# 9 ok let me look into some options for you
# 10 sure is there anything else to update
# 11 sure let me find an other option for you
# 12 what do you think of this option:
# 13 where should it be
# 14 which price range are looking for
# 15 you're welcome
# context: {'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []}
mask[0] = 1
mask[7] = 1
mask[8] = 1
if context == [0, 0, 0, 0]:
mask[4] = 1
if context == [1, 1, 1, 1]:
mask[2] = 1
mask[3] = 1
mask[5] = 1
mask[8] = 1
mask[9] = 1
mask[10] = 1
mask[11] = 1
mask[12] = 1
mask[15] = 1
if context[0] == 0: # R_cuisine
mask[1] = 1
if context[1] == 0: # R_location
mask[13] = 1
if context[2] == 0: # R_price
mask[14] = 1
if context[3] == 0: # R_number
mask[6] = 1
return mask
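# Example (follows from the rules above): an empty context permits the greeting
# and every slot question, alongside the always-on actions 0, 7 and 8:
#   >>> generate_act_filter(16, [0, 0, 0, 0])
#   [1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0]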
def get_bow(sentence, w2i):
bow = [0] * len(w2i)
for word in sentence:
if word in w2i:
bow[w2i[word]] += 1
return bow
def add_padding(data, seq_len):
pad_len = max(0, seq_len - len(data))
data += [0] * pad_len
data = data[:seq_len]
return data
def make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen):
dialog_list = []
for uttrs in uttrs_list:
dialog = []
for sentence in uttrs:
sent_vec = [w2i[w] if w in w2i else w2i[g.UNK] for w in sentence]
sent_vec = add_padding(sent_vec, uttr_maxlen)
dialog.append(sent_vec)
for _ in range(dialog_maxlen - len(dialog)):
dialog.append([0] * uttr_maxlen)
dialog = torch.LongTensor(dialog[:dialog_maxlen])
dialog_list.append(dialog)
return to_var(torch.stack(dialog_list, 0))
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def padding(data, default_val, maxlen, pad_seq_len):
for i, d in enumerate(data):
pad_len = maxlen - len(d)
for _ in range(pad_len):
data[i].append([default_val] * pad_seq_len)
return to_var(torch.FloatTensor(data))
def get_data_from_batch(batch, w2i, act2i):
uttrs_list = [d[0] for d in batch]
dialog_maxlen = max([len(uttrs) for uttrs in uttrs_list])
uttr_maxlen = max([len(u) for uttrs in uttrs_list for u in uttrs])
uttr_var = make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen)
batch_labels = [d[1] for d in batch]
labels_var = []
for labels in batch_labels:
vec_labels = [act2i[l] for l in labels]
pad_len = dialog_maxlen - len(labels)
for _ in range(pad_len):
vec_labels.append(act2i[g.SILENT])
labels_var.append(torch.LongTensor(vec_labels))
labels_var = to_var(torch.stack(labels_var, 0))
batch_prev_acts = [d[4] for d in batch]
prev_var = []
for prev_acts in batch_prev_acts:
vec_prev_acts = []
for act in prev_acts:
tmp = [0] * len(act2i)
tmp[act2i[act]] = 1
vec_prev_acts.append(tmp)
pad_len = dialog_maxlen - len(prev_acts)
for _ in range(pad_len):
vec_prev_acts.append([0] * len(act2i))
prev_var.append(torch.FloatTensor(vec_prev_acts))
prev_var = to_var(torch.stack(prev_var, 0))
context = copy.deepcopy([d[2] for d in batch])
context = padding(context, 1, dialog_maxlen, len(context[0][0]))
bow = copy.deepcopy([d[3] for d in batch])
bow = padding(bow, 0, dialog_maxlen, len(bow[0][0]))
act_filter = copy.deepcopy([d[5] for d in batch])
act_filter = padding(act_filter, 0, dialog_maxlen, len(act_filter[0][0]))
return uttr_var, labels_var, context, bow, prev_var, act_filter
|
131173
|
import Web3Wrapper
import InputGenerator
import FormulaSolidityPort
MINIMUM_VALUE_BALANCE = 100
MAXIMUM_VALUE_BALANCE = 10 ** 34
GROWTH_FACTOR_BALANCE = 2.5
MINIMUM_VALUE_WEIGHT = 100000
MAXIMUM_VALUE_WEIGHT = 900000
GROWTH_FACTOR_WEIGHT = 1.5
MINIMUM_VALUE_AMOUNT = 1
MAXIMUM_VALUE_AMOUNT = 10 ** 34
GROWTH_FACTOR_AMOUNT = 2.5
def Main():
rangeBalance1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
rangeWeight1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
rangeBalance2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
rangeWeight2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
rangeAmount = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_AMOUNT, MAXIMUM_VALUE_AMOUNT, GROWTH_FACTOR_AMOUNT)
testNum = 0
numOfTests = len(rangeBalance1) * len(rangeWeight1) * len(rangeBalance2) * len(rangeWeight2) * len(rangeAmount)
FormulaContract = Web3Wrapper.Contract('BancorFormula')
FormulaContract.setter().init()
FormulaContractAddr = FormulaContract.getter()
for balance1 in rangeBalance1:
for weight1 in rangeWeight1:
for balance2 in rangeBalance2:
for weight2 in rangeWeight2:
for amount in rangeAmount:
                        testNum += 1
                        resultSolidityPort = Run(FormulaSolidityPort, balance1, weight1, balance2, weight2, amount)
                        resultContractAddr = Run(FormulaContractAddr, balance1, weight1, balance2, weight2, amount)
                        print('Test {} out of {}: resultSolidityPort = {}, resultContractAddr = {}'.format(testNum, numOfTests, resultSolidityPort, resultContractAddr))
                        if resultSolidityPort != resultContractAddr:
                            print('Emulation Error:')
                            print('balance1 = {}'.format(balance1))
                            print('weight1 = {}'.format(weight1))
                            print('balance2 = {}'.format(balance2))
                            print('weight2 = {}'.format(weight2))
                            print('amount = {}'.format(amount))
                            return
def Run(module, balance1, weight1, balance2, weight2, amount):
try:
return module.crossReserveTargetAmount(balance1, weight1, balance2, weight2, amount)
    except Exception:  # any failure maps to -1 so the comparison loop keeps going
return -1
Main()
|
131256
|
import yaml
from scipy.signal import firwin, lfilter
class SpeakerYML(object):
def __init__(self, ymlf):
# open yml file
with open(ymlf) as yf:
conf = yaml.safe_load(yf)
# read parameter from yml file
self.wav_fs = conf['wav']['fs']
self.wav_bit = conf['wav']['bit']
self.wav_fftl = conf['wav']['fftl']
self.wav_shiftms = conf['wav']['shiftms']
self.f0_minf0 = conf['f0']['minf0']
self.f0_maxf0 = conf['f0']['maxf0']
assert self.f0_minf0 < self.f0_maxf0, "should be minf0 < maxf0 in yml file"
self.mcep_dim = conf['mcep']['dim']
self.mcep_alpha = conf['mcep']['alpha']
self.power_threshold = conf['power']['threshold']
self.analyzer = conf['analyzer']
def print_params(self):
pass
class PairYML(object):
def __init__(self, ymlf):
# open yml file
with open(ymlf) as yf:
conf = yaml.safe_load(yf)
self.jnt_n_iter = conf['jnt']['n_iter']
self.GMM_mcep_n_mix = conf['GMM']['mcep']['n_mix']
self.GMM_mcep_n_iter = conf['GMM']['mcep']['n_iter']
self.GMM_mcep_covtype = conf['GMM']['mcep']['covtype']
self.GMM_mcep_cvtype = conf['GMM']['mcep']['cvtype']
self.GMM_codeap_n_mix = conf['GMM']['codeap']['n_mix']
self.GMM_codeap_n_iter = conf['GMM']['codeap']['n_iter']
self.GMM_codeap_covtype = conf['GMM']['codeap']['covtype']
self.GMM_codeap_cvtype = conf['GMM']['codeap']['cvtype']
self.GV_morph_coeff = conf['GV']['morph_coeff']
def print_params(self):
        pass
def low_cut_filter(x, fs, cutoff=70):
nyquist = fs // 2
norm_cutoff = cutoff / nyquist
# low cut filter
fil = firwin(255, norm_cutoff, pass_zero=False)
lcf_x = lfilter(fil, 1, x)
return lcf_x
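# Usage sketch (synthetic signal): attenuate content below 70 Hz in a 16 kHz
# waveform; numpy is imported here only for the demo.
if __name__ == "__main__":
    import numpy as np
    fs = 16000
    t = np.arange(fs) / fs
    x = np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 440 * t)
    lcf_x = low_cut_filter(x, fs)  # the 50 Hz component is suppressed
    print(lcf_x.shape)             # (16000,)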
|
131267
|
BASE_DIR=""
DATA_DIR="/mnt/nfs/work1/akshay/akshay/projects/oracle_cb/data/"
REMOTE_PATH_TO_PYTHON="/mnt/nfs/work1/akshay/akshay/anaconda3/python3"
REMOTE_BASE_DIR="/mnt/nfs/work1/akshay/akshay/projects/oracle_cb/"
|
131280
|
import os
import sys
from philosophy import (valid_page_name,
strip_parentheses)
def test_valid_page_name():
NON_MAINSPACE = ['File:',
'File talk:',
'Wikipedia:',
'Wikipedia talk:',
'Project:',
'Project talk:',
'Portal:',
'Portal talk:',
'Special:',
'Help:',
'Help talk:',
'Template:',
'Template talk:',
'Talk:',
'Category:',
'Category talk:']
assert all(not valid_page_name(non_main) for non_main in NON_MAINSPACE)
def test_strip_parentheses():
test_cases = {
'Hello (world)!': 'Hello !',
'The <a href="https://en.wikipedia.org/wiki/Encyclopedia_(disambiguation)">encyclopedia</a> looks pretty (((good))).': 'The <a href="https://en.wikipedia.org/wiki/Encyclopedia_(disambiguation)">encyclopedia</a> looks pretty .',
'< (hello)': None,
'< (goodbye) >': None,
'(hello) there': ' there',
'(sometimes) <(things) get> <complicated (do they?)': ' <(things) get> <complicated (do they?)',
'(an entire string contained within parentheses)': '',
"This isn't (my)) fault, okay?": "This isn't ) fault, okay?",
'There ((you are), my friend.': 'There ',
'You can (ignore all of this. Even this, (and this.) All of it.': 'You can ',
'<a b(rules are for everyone)': None,
"<a <b (doesn't matter <who you are>> everyone has to follow rules)": None,
'<a<coach goes (over there)> (and)> (he seems to <find>) <(nothing)': '<a<coach goes (over there)> (and)> <(nothing)',
}
for param, expected_output in test_cases.items():
if expected_output is None:
expected_output = param
assert strip_parentheses(param) == expected_output
|
131331
|
import torch
import torch.nn as nn
import torch.distributions as dist
from torch.distributions.kl import kl_divergence
class RBFKernel(nn.Module):
def __init__(self, in_size, prior_log_mean=None, prior_log_logvar=None, map_est=False):
super().__init__()
self.map_est = map_est
# Variational Parameters (log lengthscale and log scale factor)
log_init = torch.tensor(.5).log() * torch.ones(in_size + 1) \
+ .05 * torch.randn(in_size + 1)
self.log_mean = nn.Parameter(log_init)
self.log_logvar = nn.Parameter(-2 * torch.ones(in_size + 1))
self.register_buffer('prior_log_mean', prior_log_mean if prior_log_mean is not None \
else torch.zeros_like(self.log_mean))
self.register_buffer('prior_log_logvar', prior_log_logvar if prior_log_logvar is not None \
else torch.zeros_like(self.log_logvar))
def compute(self, kern_samples, x, y=None):
'''
Generic batch kernel evaluation. Send
y = None for efficient re-use of computations
in case x = y.
Arguments:
kern_samples: n_hypers x (in_size + 1)
x: ...batches x M x D
y: ...batches x N x D, if None, assumed equals x
Returns:
n_hypers x ...batches x M x N
'''
n_hypers = kern_samples.size(0)
kern_samples = kern_samples.view(n_hypers, 1, *([1] * len(x.shape[:-2])), -1)
sigma = kern_samples[..., :-1].exp()
gamma2 = (kern_samples[..., -1:] * 2.).exp()
sx = x.unsqueeze(0) / sigma
xx = torch.einsum('...ji,...ki->...jk', sx, sx)
if y is None:
yy = xy = xx
else:
sy = y.unsqueeze(0) / sigma
yy = torch.einsum('...ji,...ki->...jk', sy, sy)
xy = torch.einsum('...ji,...ki->...jk', sx, sy)
dnorm2 = - 2. * xy + xx.diagonal(dim1=-2, dim2=-1).unsqueeze(-1) + yy.diagonal(dim1=-2, dim2=-1).unsqueeze(-2)
return gamma2 * (-.5 * dnorm2).exp()
def compute_diag(self, kern_samples):
gamma2 = (kern_samples[..., -1:] * 2.).exp().unsqueeze(-2)
return gamma2
def sample_hypers(self, n_hypers):
if self.map_est:
return self.log_mean.unsqueeze(0)
log_dist = dist.Normal(self.log_mean, self.log_logvar.exp().sqrt())
log_hypers = log_dist.rsample(torch.Size([n_hypers]))
return log_hypers
def kl_hypers(self):
if self.map_est:
return torch.tensor(0.0, device=self.log_mean.device)
var_dist = dist.Normal(self.log_mean, self.log_logvar.exp().sqrt())
prior_dist = dist.Normal(self.prior_log_mean, self.prior_log_logvar.exp().sqrt())
total_kl = kl_divergence(var_dist, prior_dist).sum(dim=0)
return total_kl
class DeepRBFKernel(RBFKernel):
def __init__(self, in_size, feature_size=64, **kwargs):
super().__init__(feature_size, **kwargs)
self.phi = nn.Sequential(
nn.Linear(in_size, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, feature_size),
)
def compute(self, kern_samples, x, y=None):
x = self.phi(x)
if y is not None:
y = self.phi(y)
return super().compute(kern_samples, x, y=y)
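# Usage sketch: sample kernel hyperparameters and evaluate the batched kernel;
# shapes follow the docstring of RBFKernel.compute() above.
if __name__ == "__main__":
    kern = RBFKernel(in_size=3)
    x = torch.randn(8, 5, 3)         # batches x M x D
    samples = kern.sample_hypers(4)  # n_hypers x (in_size + 1)
    K = kern.compute(samples, x)     # torch.Size([4, 8, 5, 5])
    print(K.shape, kern.kl_hypers().item())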
|
131335
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sys
import json
#from caresjpsutil import PythonLogger
FOSSIL_CAP = "/images/1_top_ten_emission_countries_fossil_ele_cap.png"
FOSSIL_CAP_PER = "/images/1_top_ten_emission_countries_fossil_ele_cap_per.png"
FOSSIL_GEN = "/images/1_top_ten_emission_countries_fossil_ele_gen.png"
FOSSIL_GEN_PER = "/images/1_top_ten_emission_countries_fossil_ele_gen_per.png"
if __name__ == "__main__":
#pythonLogger = PythonLogger('EmissionPlotter.py')
#pythonLogger.postInfoToLogServer('start of EmissionPlotter.py')
try:
modelsPath = json.loads(sys.argv[1])
# ### plot top ten emission countries versus fossil powerplant capacity
df = pd.read_csv(modelsPath + 'data/input/top_ten_emission_countries.csv', header='infer', sep=',')
df1 = df[df.Year == 2000]
my_list = df1["Country"].tolist()
my_list1 = ['CN', 'US', 'IN', 'RU', 'JP', 'DE', 'IR', 'KR', 'CA', 'SA']
df = df.sort_values(by=['Country', 'Year'], ascending=[True, True])
# ### plot1 - ele_cap versus year
df1 = df.loc[:, ['Country', 'Year', 'ele_cap_fossil_mw']]
df1.columns = ['Fossil fuel power capacity (MW)', 'Year', 'ele_cap_fossil_mw']
vmax1 = df1[['ele_cap_fossil_mw']].max(axis=0)
vmin1 = df1[['ele_cap_fossil_mw']].min(axis=0)
df1['Year'] = df1['Year'].astype(int)
df1 = df1.pivot(index='Fossil fuel power capacity (MW)', columns='Year', values='ele_cap_fossil_mw')
df1 = df1.reindex(my_list)
df1 = df1.reindex(my_list1)
csfont = {'fontname':'Times New Roman'}
plt.clf()
plt.figure(figsize=(3.3, 1.6))
sns.set_style("dark")
sns.set_context("paper", font_scale=0.9)
ax = sns.heatmap(df1, cmap='Greens', cbar=False, vmin=20000, vmax=1200000)
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_label("GW")
cbar.set_ticks([20000, 1200000])
cbar.set_ticklabels(["20", "1200"])
ax.set_xlabel('')
ax.set_ylabel('Installed capacity \n')
ax.set(xticklabels=[])
plt.xticks(rotation=0)
plt.yticks(rotation=0)
plt.tight_layout()
plt.savefig(modelsPath + 'public' + FOSSIL_CAP, dpi=1000)
plt.close()
# ### plot2 - ele_cap percentage versus year
df2 = df.loc[:, ['Country', 'Year', 'ele_cap_fossil_per']]
df2.columns = ['Fossil fuel power capacity percentage', 'Year', 'ele_cap_fossil_per']
df2['Year'] = df2['Year'].astype(int)
df2 = df2.pivot(index='Fossil fuel power capacity percentage', columns='Year', values='ele_cap_fossil_per')
df2 = df2.reindex(my_list)
plt.clf()
plt.figure(figsize=(3.1, 1.6))
sns.set_context("paper", font_scale=0.9)
ax = sns.heatmap(df2, cmap='Greens', cbar=False, vmin=0, vmax=100)
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_label("Percentage")
cbar.set_ticks([0, 100])
cbar.set_ticklabels(["0%", "100%"])
ax.set_xlabel('')
ax.set_ylabel('')
ax.set(xticklabels=[])
ax.set(yticklabels=[])
# plt.xticks(rotation=45)
plt.tight_layout()
plt.savefig(modelsPath + 'public' + FOSSIL_CAP_PER, dpi=1000)
plt.close()
# ### plot3 - ele_gen versus year
df3 = df.loc[:, ['Country', 'Year', 'ele_gen_fossil_gwh']]
vmax3 = df3[['ele_gen_fossil_gwh']].max(axis=0)
vmin3 = df3[['ele_gen_fossil_gwh']].min(axis=0)
df3.columns = ['Fossil fuel power generation (GWh)', 'Year', 'ele_gen_fossil_gwh']
df3['Year'] = df3['Year'].astype(int)
df3 = df3.pivot(index='Fossil fuel power generation (GWh)', columns='Year', values='ele_gen_fossil_gwh')
df3 = df3.reindex(my_list)
plt.clf()
plt.figure(figsize=(3.3, 1.7))
sns.set_context("paper", font_scale=0.9)
ax = sns.heatmap(df3, cmap='Greens', cbar=False, vmin=110000, vmax=4300000)
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_label("TWh")
cbar.set_ticks([110000, 4300000])
cbar.set_ticklabels(["110", "4300"])
ax.set_xticklabels(['2000', '', '', '', '', '2005', '', '', '', '', '2010', '', '', '', '2014'])
ax.set_xlabel('Year')
ax.set_ylabel('Annual generation \n')
# ax.set(yticklabels=[])
plt.xticks(rotation=0)
plt.tight_layout()
plt.savefig(modelsPath + 'public' + FOSSIL_GEN, dpi=1000)
plt.close()
# ### plot4 - ele_gen percentage versus year
df4 = df.loc[:, ['Country', 'Year', 'ele_gen_fossil_per']]
df4.columns = ['Fossil fuel power generation percentage', 'Year', 'ele_gen_fossil_per']
df4['Year'] = df4['Year'].astype(int)
df4 = df4.pivot(index='Fossil fuel power generation percentage', columns = 'Year', values = 'ele_gen_fossil_per')
df4 = df4.reindex(my_list)
plt.clf()
plt.figure(figsize=(3.3, 1.7))
sns.set_context("paper", font_scale=0.9)
ax = sns.heatmap(df4, cmap='Greens', cbar=False, vmin=0, vmax=100)
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_label("Percentage")
cbar.set_ticks([0, 100])
cbar.set_ticklabels(["0%", "100%"])
ax.set_xlabel('Year')
ax.set_ylabel('')
ax.set(yticklabels=[])
ax.set_xticklabels(['2000', '', '', '', '', '2005', '', '', '', '', '2010', '', '', '', '2014'])
plt.xticks(rotation=0)
plt.tight_layout()
plt.savefig(modelsPath + 'public' + FOSSIL_GEN_PER, dpi=1000)
plt.close()
# #### color palette test
plt.clf()
plt.figure(figsize=(8, 4))
sns.set_context("paper", font_scale=1.3)
ax = sns.heatmap(df4, cmap='PuBu', cbar=False, vmin=0, vmax=100)
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_ticks([0, 100])
cbar.set_ticklabels(["0%", "100%"])
plt.tight_layout()
plt.savefig(modelsPath + 'public/images/color_palette_test/PuBu_rect.png', dpi=1000)
plt.close()
plt.clf()
sns.set_context("paper")
ax = sns.heatmap(df4, cmap='PuBu', cbar=False, vmin=0, vmax=100)
ax.set_ylabel('')
# ax.set(yticklabels=[])
# ax.set_aspect("equal")
cbar = ax.figure.colorbar(ax.collections[0])
cbar.set_ticks([0, 100])
cbar.set_ticklabels(["0%", "100%"])
plt.tight_layout()
plt.savefig(modelsPath + 'public/images/color_palette_test/PuBu_square.png', dpi=1000)
plt.close()
# ## overall figure
plt.close()
plt.clf()
plt.figure(figsize=(30,6))
sns.set_context("paper", font_scale=1.2)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=True)
g1 = sns.heatmap(df1, cmap='Greens', vmin=20000, vmax=1200000, ax=ax1)
# cbar = g1.figure.colorbar(g1.collections[0])
# cbar.set_ticks([20000, 1200000])
# cbar.set_ticklabels(["20", "1200"])
g1.set_xlabel('')
g1.set_ylabel('')
g2 = sns.heatmap(df3, cmap='Greens', ax=ax2)
# cbar = g2.figure.colorbar(g2.collections[0])
# cbar.set_ticks([20000, 1200000])
# cbar.set_ticklabels(["10", "1200"])
g2.set_xlabel('')
g2.set_ylabel('')
# g2.set_yticks([])
g3 = sns.heatmap(df2, cmap='Greens', ax=ax3)
# cbar = g3.figure.colorbar(g3.collections[0])
# cbar.set_ticks([20000, 1200000])
# cbar.set_ticklabels(["30", "1200"])
g3.set_xlabel('')
g3.set_ylabel('')
g4 = sns.heatmap(df4, cmap='Greens', ax=ax4)
# cbar = g4.figure.colorbar(g4.collections[0])
# cbar.set_ticks([20000, 1200000])
# cbar.set_ticklabels(["40", "1200"])
g4.set_xlabel('')
g4.set_ylabel('')
# g4.set_yticks([])
plt.tight_layout()
pathsDict = {
"fossilCap": FOSSIL_CAP,
"fossilCapPer": FOSSIL_CAP_PER,
"fossilGen": FOSSIL_GEN,
"fossilGenPer": FOSSIL_GEN_PER
}
print(json.dumps(pathsDict))
except Exception as e:
print(e)
#pythonLogger.postInfoToLogServer('end of EmissionPlotter.py')
|
131378
|
from ursina import *
# Cf. LINE 84: temp_entity = Entity(parent=entity.model, ignore=True)
def new_combine(entity, analyze=False, auto_destroy=True, ignore=[]):
verts = list()
tris = list()
norms = list()
uvs = list()
cols = list()
to_destroy = list()
o = 0
for e in scene.entities:
if e in ignore:
continue
if e.has_ancestor(entity) or e == entity:
            if not hasattr(e, 'model') or e.model is None or e.scripts or e.eternal:
continue
if not e.model.vertices:
continue
if analyze:
print('combining:', e)
verts += get_vertices(e, entity)
if not e.model.triangles:
new_tris = [i for i in range(len(e.model.vertices))]
else:
new_tris = list()
for t in e.model.triangles:
if isinstance(t, int):
new_tris.append(t)
elif len(t) == 3:
new_tris.extend(t)
elif len(t) == 4: # turn quad into tris
new_tris.extend([t[0], t[1], t[2], t[2], t[3], t[0]])
new_tris = [t+o for t in new_tris]
new_tris = [(new_tris[i], new_tris[i+1], new_tris[i+2]) for i in range(0, len(new_tris), 3)]
o += len(e.model.vertices)
tris += new_tris
# if e.model.normals:
# norms += e.model.normals
if e.model.uvs:
uvs += e.model.uvs
if e.model.colors: # if has vertex colors
cols.extend(e.model.colors)
else:
cols.extend((e.color, ) * len(e.model.vertices))
if auto_destroy and e != entity:
to_destroy.append(e)
if auto_destroy:
from ursina import destroy
[destroy(e) for e in to_destroy]
entity.model = Mesh(vertices=verts, triangles=tris, normals=norms, uvs=uvs, colors=cols, mode='triangle')
print('combined relative to model - 3.6.0 version')
# entity.model = Mesh(vertices=verts, mode='triangle')
# entity.flatten_strong()
if analyze:
render.analyze()
return entity.model
def get_vertices(entity, relative_to=None):
if relative_to is None:
return entity.model.vertices
vertices = list()
temp_entity = Entity(parent=entity.model, ignore=True)
for v in entity.model.vertices:
temp_entity.position = v
vertices.append(temp_entity.get_position(relative_to))
from ursina import destroy
destroy(temp_entity)
return vertices
if __name__ == '__main__':
from ursina import *
app = Ursina()
p = Entity()
e1 = Entity(parent=p, model='sphere', y=1.5, color=color.pink)
e2 = Entity(parent=p, model='cube', color=color.yellow, x=1, origin_y=-.5)
e3 = Entity(parent=e2, model='cube', color=color.yellow, y=2, scale=.5)
p.combine()
# p.y=2
# p.model.save()
# ursina_mesh_to_obj(p.model, name='combined_model_test', out_path=application.asset_folder)
EditorCamera()
app.run()
|
131385
|
import logging
import mysql.connector
from powergslb.database.mysql.powerdns import PowerDNSDatabaseMixIn
from powergslb.database.mysql.w2ui import W2UIDatabaseMixIn
__all__ = ['MySQLDatabase']
class MySQLDatabase(PowerDNSDatabaseMixIn, W2UIDatabaseMixIn, mysql.connector.MySQLConnection):
"""
MySQLDatabase class
"""
Error = mysql.connector.Error
def __enter__(self):
return self
def __exit__(self, *_):
self.disconnect()
@staticmethod
def join_operation(operation):
return ' '.join(filter(None, (line.strip() for line in operation.splitlines())))
def _execute(self, operation, params=()):
operation = self.join_operation(operation)
if params:
logging.debug('{}: "{}" % {}'.format(type(self).__name__, operation, params))
else:
logging.debug('{}: "{}"'.format(type(self).__name__, operation))
cursor = self.cursor(buffered=True)
try:
cursor.execute(operation, params)
if operation.startswith('SELECT'):
logging.debug('{}: {} rows returned'.format(type(self).__name__, cursor.rowcount))
column_names = [description[0] for description in cursor.description]
result = [dict(zip(column_names, row)) for row in cursor]
else:
logging.debug('{}: {} rows affected'.format(type(self).__name__, cursor.rowcount))
result = cursor.rowcount
finally:
cursor.close()
return result
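# --- Usage sketch (illustrative, not part of the original module) ---
# MySQLDatabase inherits its constructor from mysql.connector.MySQLConnection,
# so the usual connection kwargs apply; the values below are placeholders.
#
#     with MySQLDatabase(host="127.0.0.1", user="powergslb",
#                        password="secret", database="powergslb") as db:
#         rows = db._execute("SELECT id FROM records WHERE disabled = %s", (0,))
#
# __exit__ calls disconnect(), so the connection is always closed.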
|
131405
|
import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from .recall import eval_recalls
def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in [
'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
]
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
if result_types == ['proposal_fast']:
ar = fast_eval_recall(result_files, coco, np.array(max_dets))
for i, num in enumerate(max_dets):
print('AR@{}\t= {:.4f}'.format(num, ar[i]))
return
for res_type in result_types:
if isinstance(result_files, str):
result_file = result_files
elif isinstance(result_files, dict):
result_file = result_files[res_type]
else:
            raise TypeError('result_files must be a str or dict')
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
def fast_eval_recall(results,
coco,
max_dets,
iou_thrs=np.arange(0.5, 0.96, 0.05)):
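    # iou_thrs defaults to the COCO IoU thresholds 0.50, 0.55, ..., 0.95.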
if mmcv.is_str(results):
assert results.endswith('.pkl')
results = mmcv.load(results)
elif not isinstance(results, list):
raise TypeError(
'results must be a list of numpy arrays or a filename, not {}'.
format(type(results)))
gt_bboxes = []
img_ids = coco.getImgIds()
for i in range(len(img_ids)):
ann_ids = coco.getAnnIds(imgIds=img_ids[i])
ann_info = coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
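# Worked example of the inclusive-coordinate convention behind the "+ 1":
#   xyxy2xywh(np.array([10., 20., 19., 39.])) -> [10.0, 20.0, 10.0, 20.0]
# i.e. a 10x20 box whose corner pixels are both counted.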
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
def segm2json(dataset, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
result_files = dict()
if isinstance(results[0], list):
json_results = det2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
|
131419
|
import asyncio
from pathlib import Path
from copy import deepcopy
import pytest
from pytest import fixture, mark
from ..query import QueryResult, QueryLevel
from ..net import LocalEntity
from ..route import StaticRoute, DynamicRoute, Router
from ..sync import SyncManager
from ..store import TransferMethod
from ..store.net_repo import NetRepo
from ..store.local_dir import LocalDir
from ..util import json_serializer
from .conftest import (
has_dcmtk,
DCMTK_VERSION,
dcmtk_priv_sop_retr_xfail,
dcmtk_priv_sop_send_xfail,
)
priv_sop_marks = [dcmtk_priv_sop_retr_xfail, dcmtk_priv_sop_send_xfail]
def make_lookup(dest1, dest2):
def lookup_func(ds):
if ds.PatientID == "TestPat1":
return [dest1]
else:
return [dest2]
return lookup_func
repo_to_repo_subsets = [
pytest.param([None] * 3, marks=priv_sop_marks),
["all"] * 3,
["PATIENT-0"] * 3,
["PATIENT-0/STUDY-1"] * 3,
pytest.param(["PATIENT-0/STUDY-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3, marks=priv_sop_marks),
pytest.param(["PATIENT-1"] * 3, marks=priv_sop_marks),
]
bucket_to_repo_subsets = [
pytest.param([None] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["all"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-1"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(["PATIENT-0/STUDY-0/SERIES-0"] * 3, marks=dcmtk_priv_sop_send_xfail),
pytest.param(
["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3, marks=dcmtk_priv_sop_send_xfail
),
pytest.param(["PATIENT-1"] * 3, marks=dcmtk_priv_sop_send_xfail),
]
@mark.parametrize(
"subset_specs",
[
[None] * 3,
["all"] * 3,
["PATIENT-0"] * 3,
["PATIENT-0/STUDY-0"] * 3,
["PATIENT-0/STUDY-0/SERIES-0"] * 3,
["PATIENT-0/STUDY-0/SERIES-0/IMAGE-0"] * 3,
["PATIENT-1"] * 3,
],
)
@mark.asyncio
@has_dcmtk
async def test_gen_transfers(make_local_node, make_dcmtk_net_repo, subset_specs):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, dest1_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[0]
)
dest2_repo, dest2_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[1]
)
dest3_repo, dest3_init_qr, _ = make_dcmtk_net_repo(
local_node, subset=subset_specs[2]
)
static_route = StaticRoute([dest1_repo])
dyn_lookup = make_lookup(dest2_repo, dest3_repo)
dyn_route = DynamicRoute(dyn_lookup, required_elems=["PatientID"])
dests = [static_route, dyn_route]
# Build QRs of what we expect to be transfered to each dest
expect_qrs = {
dest1_repo: full_qr - dest1_init_qr,
dest2_repo: QueryResult(QueryLevel.IMAGE),
dest3_repo: QueryResult(QueryLevel.IMAGE),
}
for ds in full_qr:
dyn_dests = dyn_lookup(ds)
for dest in dyn_dests:
expect_qrs[dest].add(ds)
trans_qrs = {}
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
trans_level = transfer.chunk.qr.level
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"\n{dest} :\n{transfer.chunk.qr.to_tree()}")
if dest not in trans_qrs:
trans_qrs[dest] = {}
if trans_level not in trans_qrs[dest]:
trans_qrs[dest][trans_level] = deepcopy(transfer.chunk.qr)
else:
for ds in transfer.chunk.qr:
# Check this data is expected
assert ds in expect_qrs[dest]
# Check for duplicate transfers
for lvl_qr in trans_qrs[dest].values():
assert ds not in lvl_qr
trans_qrs[dest][trans_level].add(ds)
@mark.parametrize("subset_specs", repo_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_repo_sync_single_static(
make_local_node, make_dcmtk_net_repo, subset_specs
):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
static_route = StaticRoute([dest1_repo])
dests = [static_route]
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"{dest} : {json_serializer.dumps(transfer.chunk.qr)}")
await sm.exec_transfer(transfer)
print(sm.report)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(full_qr)
@mark.parametrize("subset_specs", repo_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_repo_sync_multi(make_local_node, make_dcmtk_net_repo, subset_specs):
local_node = make_local_node()
src_repo, full_qr, _ = make_dcmtk_net_repo(local_node, subset="all")
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
dest2_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[1])
dest3_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[2])
static_route = StaticRoute([dest1_repo])
dyn_route = DynamicRoute(
make_lookup(dest2_repo, dest3_repo), required_elems=["PatientID"]
)
dests = [static_route, dyn_route]
async with SyncManager(src_repo, dests) as sm:
async for transfer in sm.gen_transfers():
for route in transfer.method_routes_map[TransferMethod.PROXY]:
for dest in route.dests:
print(f"{dest} : {transfer.chunk.qr}")
await sm.exec_transfer(transfer)
print(sm.report)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(full_qr)
# TODO: Check that dynamic routing worked correctly
@mark.parametrize("subset_specs", bucket_to_repo_subsets)
@mark.asyncio
@has_dcmtk
async def test_bucket_sync(
make_local_dir, make_local_node, make_dcmtk_net_repo, subset_specs
):
src_bucket, init_qr, _ = make_local_dir("all", max_chunk=2)
local_node = make_local_node()
dest1_repo, _, dest1_dir = make_dcmtk_net_repo(local_node, subset=subset_specs[0])
dest2_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[1])
dest3_repo, _, _ = make_dcmtk_net_repo(local_node, subset=subset_specs[2])
static_route = StaticRoute([dest1_repo])
dyn_route = DynamicRoute(
make_lookup(dest2_repo, dest3_repo), required_elems=["PatientID"]
)
dests = [static_route, dyn_route]
async with SyncManager(src_bucket, dests) as sm:
async for transfer in sm.gen_transfers():
await sm.exec_transfer(transfer)
dest1_dir = Path(dest1_dir)
found_files = [x for x in dest1_dir.glob("**/*.dcm")]
print(found_files)
assert len(found_files) == len(init_qr)
|
131460
|
from sanic import HTTPResponse, Request, Sanic, text
app = Sanic(__name__)
@app.get("/<name>")
async def handler(request: Request, name: str) -> HTTPResponse:
return text(f"Hi {name}")
# DO NOT DO THIS
# @app.on_response
# async def cors(_, resp):
# resp.headers["Access-Control-Allow-Origin"] = "*"
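# (A blanket "Access-Control-Allow-Origin: *" response header, as sketched
# above, would relax the browser's same-origin protections for every route.
# Scope CORS headers to the handlers that need them, e.g. with sanic-ext.)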
|
131464
|
import os
import itertools
import nbformat
from nbformat.v4.nbbase import new_markdown_cell
from generate_contents import NOTEBOOK_DIR, REG, iter_notebooks, get_notebook_title
def prev_this_next(it):
a, b, c = itertools.tee(it, 3)
next(c)
return zip(itertools.chain([None], a), b, itertools.chain(c, [None]))
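# Example: prev_this_next("abc") yields
#   (None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)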
PREV_TEMPLATE = "< [{title}]({url}) "
CONTENTS = "| [Contents](Index.ipynb) |"
NEXT_TEMPLATE = " [{title}]({url}) >"
NAV_COMMENT = "<!--NAVIGATION-->\n"
COLAB_LINK = """
<a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/{notebook_filename}"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
"""
def iter_navbars():
for prev_nb, nb, next_nb in prev_this_next(iter_notebooks()):
navbar = NAV_COMMENT
if prev_nb:
navbar += PREV_TEMPLATE.format(
title=get_notebook_title(prev_nb), url=prev_nb
)
navbar += CONTENTS
if next_nb:
navbar += NEXT_TEMPLATE.format(
title=get_notebook_title(next_nb), url=next_nb
)
navbar += COLAB_LINK.format(notebook_filename=os.path.basename(nb))
yield os.path.join(NOTEBOOK_DIR, nb), navbar
def write_navbars():
for nb_name, navbar in iter_navbars():
nb = nbformat.read(nb_name, as_version=4)
nb_file = os.path.basename(nb_name)
        def is_comment(cell):
            return cell.source.startswith(NAV_COMMENT)
if is_comment(nb.cells[1]):
print("- amending navbar for {0}".format(nb_file))
nb.cells[1].source = navbar
else:
print("- inserting navbar for {0}".format(nb_file))
nb.cells.insert(1, new_markdown_cell(source=navbar))
if is_comment(nb.cells[-1]):
nb.cells[-1].source = navbar
else:
nb.cells.append(new_markdown_cell(source=navbar))
nbformat.write(nb, nb_name)
if __name__ == "__main__":
write_navbars()
|
131472
|
import sys
import win32com.server.util
from win32com.util import IIDToInterfaceName
from win32com.client.util import Enumerator
from win32com.server.exception import COMException
import pythoncom
from win32com.axdebug import axdebug, gateways, contexts, stackframe, documents, adb
from win32com.axdebug.codecontainer import SourceCodeContainer
from win32com.axdebug.util import _wrap, _wrap_remove
import win32com.client.connect
import win32api, winerror
import os
# Should we print "trace" output?
debuggingTrace = "DEBUG_AXDEBUG" in os.environ
def trace(*args):
    """A function used instead of "print" for debugging output."""
    if not debuggingTrace:
        return
    print(win32api.GetCurrentThreadId(), *args)
# Note that the DebugManager is not a COM gateway class for the
# debugger - but it does create and manage them.
class DebugManager:
_debugger_interfaces_ = [axdebug.IID_IActiveScriptDebug]
def __init__(self, scriptEngine):
self.scriptEngine = scriptEngine
self.adb = adb.Debugger()
self.rootNode = None
self.debugApplication = None
self.ccProvider = documents.CodeContainerProvider()
try:
self.scriptSiteDebug = scriptEngine.GetScriptSite(axdebug.IID_IActiveScriptSiteDebug)
except pythoncom.com_error:
# No debugger interface (ie, dumb host). Do the extra work.
trace("Scripting site has no debugger interface")
self.scriptSiteDebug = None
# Get the debug application object.
self.debugApplication = None
if self.scriptSiteDebug is not None:
# Spec says that we should test for this, and if it fails revert to
# PDM application.
try:
self.debugApplication = self.scriptSiteDebug.GetApplication()
self.rootNode = self.scriptSiteDebug.GetRootApplicationNode()
except pythoncom.com_error:
self.debugApplication = None
if self.debugApplication is None:
# Try to get/create the default one
# NOTE - Dont catch exceptions here - let the parent do it,
# so it knows debug support is available.
pdm=pythoncom.CoCreateInstance(axdebug.CLSID_ProcessDebugManager,None,pythoncom.CLSCTX_ALL, axdebug.IID_IProcessDebugManager)
self.debugApplication = pdm.GetDefaultApplication()
self.rootNode = self.debugApplication.GetRootNode()
assert self.debugApplication is not None, "Need to have a DebugApplication object by now!"
self.activeScriptDebug = None
if self.debugApplication is not None:
self.adb.AttachApp(self.debugApplication, self.ccProvider)
self.codeContainers = {}
self.activeScriptDebug = _wrap(ActiveScriptDebug(self, self.codeContainers), axdebug.IID_IActiveScriptDebug)
def Close(self):
# Called by the language engine when it receives a close request
if self.activeScriptDebug is not None:
_wrap_remove(self.activeScriptDebug)
self.activeScriptDebug = None
self.scriptEngine = None
self.rootNode = None
self.debugApplication = None
self.scriptSiteDebug = None
if self.ccProvider is not None:
self.ccProvider.Close()
self.ccProvider = None
self.codeContainers = {}
if self.adb:
self.adb.CloseApp()
self.adb = None
# print "Close complete"
def IsAnyHost(self):
"Do we have _any_ debugging interfaces installed?"
return self.debugApplication is not None
def IsSimpleHost(self):
return self.scriptSiteDebug is None
def HandleRuntimeError( self ):
"""Called by the engine when a runtime error occurs. If we have a debugger,
we let it know.
The result is a boolean which indicates if the error handler should call
IActiveScriptSite::OnScriptError()
"""
# if self.IsAnyHost:
# site = _wrap(self, axdebug.IID_IActiveScriptSite)
# breakResume, errorResume, fCallOnError = self.debugApplication(activeScriptErrorDebug, site)
# Do something with these!
# else:
trace("HandleRuntimeError")
fCallOnError = 1
return fCallOnError
def _query_interface_for_debugger_(self, iid):
if iid in self._debugger_interfaces_:
return self.activeScriptDebug
trace("DebugManager QI - unknown IID", iid)
return 0
def OnEnterScript(self):
trace("OnEnterScript")
        try:
            1 / 0
        except ZeroDivisionError:
            # Bit of a hack - reach into engine.
            baseFrame = sys.exc_info()[2].tb_frame.f_back
self.adb.SetupAXDebugging(baseFrame)
def OnLeaveScript(self):
trace("OnLeaveScript")
self.adb.ResetAXDebugging()
def AddScriptBlock(self, codeBlock):
# If we dont have debugging support, dont bother.
cc = DebugCodeBlockContainer(codeBlock, self.scriptSiteDebug)
if self.IsSimpleHost():
document = documents.DebugDocumentText(cc)
document = _wrap(document, axdebug.IID_IDebugDocument)
provider = documents.DebugDocumentProvider(document)
provider = _wrap(provider, axdebug.IID_IDebugDocumentProvider)
cc.debugDocument = document
newNode = self.debugApplication.CreateApplicationNode()
newNode.SetDocumentProvider(provider)
newNode.Attach(self.rootNode)
else:
newNode = None # Managed by smart host.
self.codeContainers[cc.sourceContext] = cc
self.ccProvider.AddCodeContainer(cc, newNode)
class DebugCodeBlockContainer(SourceCodeContainer):
def __init__(self, codeBlock, site):
self.codeBlock = codeBlock
SourceCodeContainer.__init__(self, codeBlock.codeText, codeBlock.GetFileName(), codeBlock.sourceContextCookie, codeBlock.startLineNumber, site)
def GetName(self, dnt):
if dnt==axdebug.DOCUMENTNAMETYPE_APPNODE:
return self.codeBlock.GetDisplayName()
elif dnt==axdebug.DOCUMENTNAMETYPE_TITLE:
return self.codeBlock.GetDisplayName()
# elif dnt==axdebug.DOCUMENTNAMETYPE_FILE_TAIL:
# elif dnt==axdebug.DOCUMENTNAMETYPE_URL:
else:
raise COMException(scode=winerror.S_FALSE)
class EnumDebugCodeContexts(gateways.EnumDebugCodeContexts):
def _wrap(self, ob):
return ob
class ActiveScriptDebug:
"""The class which implements the IActiveScriptDebug interface for the Active Script engine.
Only ever used by smart hosts.
"""
_public_methods_ = ["GetScriptTextAttributes", "GetScriptletTextAttributes", "EnumCodeContextsOfPosition"]
_com_interfaces_ = [axdebug.IID_IActiveScriptDebug]
def __init__(self, debugMgr, codeContainers):
self.debugMgr = debugMgr
self.scriptSiteDebug = debugMgr.scriptSiteDebug
self.codeContainers = codeContainers
def _Close(self):
self.debugMgr = None
self.scriptSiteDebug = None
self.codeContainers = {}
def _query_interface_(self, iid):
trace("DebuggerQI with", iid)
return _wrap(self.debugMgr.scriptEngine, iid)
def GetScriptTextAttributes(self, code, delim, flags):
container = SourceCodeContainer(code, "<Temp Code Block>")
return container.GetSyntaxColorAttributes()
def GetScriptletTextAttributes(self, code, delim, flags):
trace ("GetScriptletTextAttributes", code, delim, flags)
container = SourceCodeContainer(code, "<Temp Code Block>")
return container.GetSyntaxColorAttributes()
def EnumCodeContextsOfPosition(self, context, charOffset, numChars):
trace("EnumCodeContextsOfPosition", context, charOffset, numChars)
try:
context = self.codeContainers[context].GetCodeContextAtPosition(charOffset)
except KeyError:
raise COMException(scode=winerror.E_UNEXPECTED)
enum = EnumDebugCodeContexts([context])
return _wrap(enum, axdebug.IID_IEnumDebugCodeContexts)
|
131479
|
from matrixstore.db import get_db
def simplify_bnf_codes(bnf_codes):
"""Given list of BNF codes, return list of BNF prefixes for BNF subsections such
that:
1. every BNF code that belongs to one of these prefixes is in the original list,
2. every code in the original list belongs to exactly one prefix,
    3. no prefix is a prefix of another prefix (this follows from 2, since every prefix covers at least one code),
4. every prefix is present in the prescribing data.
A BNF prefix may actually be a full BNF code.
"""
all_bnf_codes = get_all_bnf_codes()
# Drop any BNF codes for which we don't have prescribing.
bnf_codes_with_prescribing = set(bnf_codes) & all_bnf_codes
# In end-to-end tests there may be no prescribing for certain measures. Rather than
# adding new test prescribing data whenever a new measure is added, we return early
# here.
if not bnf_codes_with_prescribing:
return sorted(bnf_codes)
prefixes = []
for prefix in _prune_paths(bnf_codes_with_prescribing, all_bnf_codes):
prefixes.extend(get_subsection_prefixes(prefix))
return sorted(prefixes)
def get_all_bnf_codes():
"""Return list of all BNF codes for which we have prescribing."""
db = get_db()
return {r[0] for r in db.query("SELECT bnf_code FROM presentation")}
def get_subsection_prefixes(prefix):
"""Return BNF codes/prefixes of BNF subsections that begin with `prefix`.
For instance, if `prefix` is "0703021", we find all prefixes corresponding to
chemicals beginning 0703021.
"""
for length in [
2, # Chapter
4, # Section
6, # Paragraph
9, # Chemical
11, # Product
15, # Presentation
]:
if len(prefix) <= length:
break
db = get_db()
sql = (
"SELECT DISTINCT substr(bnf_code, 1, ?) FROM presentation WHERE bnf_code LIKE ?"
)
return {r[0] for r in db.query(sql, [length, prefix + "%"])}
def _prune_paths(paths, all_paths):
"""Given two lists of paths (`paths` and `all_paths`) from the root of a tree to its
leaves (where every path in `paths` is in `all_paths`) return a new list of
paths from the root of the tree to either a leaf or a branch (`pruned_paths`) such
that:
1. every path in `all_paths` that is reachable from a path in `pruned_paths` is
in `paths`,
2. every path in `paths` is reachable from exactly one path in `pruned_paths`,
3. no path in `pruned_paths` is reachable from another path in `pruned_paths`.
These three conditions correspond to those in the docstring for simplify_bnf_codes.
For instance:
all_paths: [AAA, AAB, ABA, ABB, BAA, BAB, BBA, BBB]
paths: [AAA, ABA, ABB, BAA, BAB, ]
To do this, we convert `paths` and `all_paths` into trees:
all_paths: paths:
A A
/ /
A A
/ \ /
/ B /
A A
/ \ A / \ A
/ \ / / \ /
/ B / B
/ \ / \
/ B / B
o o
\ A \ A
\ / \ /
\ A \ A
\ / \ \ / \
\ / B \ / B
B B
\ A
\ /
B
\
B
We then prune the `paths` tree, where for each node, we remove all of the node's
children if all of the children are in the `all_paths` tree.
So we remove the children of AB (since ABA and ABB are in `paths`) and BA (since BAA
and BAB are in `paths`).
This leaves:
A
/
A
/
/
A
/ \
/ \
/ B
/
/
o
\
\
\ A
\ /
\ /
B
Finally, we walk this pruned tree to give:
pruned_paths: [AAA, AB, BA]
    A tree is represented as a nested dictionary. For instance, the `paths` tree is
represented as:
{
"A": {
"A": {
"A": {},
},
"B": {
"A": {},
"B": {}
}
},
"B": {
"A": {
"A": {},
"B": {}
}
},
}
And the `pruned_paths` tree:
{
"A": {
"A": {
"A": {}
},
"B": {}
},
"B": {
"A": {}
}
}
There is a test case for this example at TestPrunePaths.test_example.
"""
assert set(paths) < set(all_paths) # `paths` must be a subset of `all_paths`
full_tree = _paths_to_tree(all_paths)
subtree = _paths_to_tree(paths)
pruned_subtree = _prune_tree(subtree, full_tree)
pruned_paths = _tree_to_paths(pruned_subtree)
# We now verify the three conditions in the docstring hold.
# 1. every path in `all_paths` that is reachable from a path in `pruned_paths` is in
# `paths`.
for path in all_paths:
if any(path.startswith(pruned_path) for pruned_path in pruned_paths):
assert path in paths
# 2. every path in `paths` is reachable from exactly one path in `pruned_paths`.
for path in paths:
# print(path)
parent_pruned_paths = [
pruned_path for pruned_path in pruned_paths if path.startswith(pruned_path)
]
assert len(parent_pruned_paths) == 1
# 3. no path in `pruned_paths` is reachable from another path in `pruned_paths`.
for pruned_path_1 in pruned_paths:
for pruned_path_2 in pruned_paths:
if pruned_path_1 != pruned_path_2:
assert not pruned_path_1.startswith(pruned_path_2)
return pruned_paths
def _prune_tree(subtree, tree):
"""Do the work.
I'm struggling to write a good docstring here.
"""
if subtree == tree:
return {}
return {c: _prune_tree(subtree[c], tree[c]) for c in subtree}
def _paths_to_tree(paths):
"""Build a tree from the given paths.
The structure of the tree is described in the docstring for _prune_paths.
"""
tree = {}
for path in paths:
t = tree
for c in path:
if c not in t:
t[c] = {}
t = t[c]
return tree
def _tree_to_paths(tree):
"""Build a list of paths made by walking from the root of the tree to each leaf."""
if len(tree) == 0:
return [""]
return [
c + path
for c, subtree in sorted(tree.items())
for path in _tree_to_paths(subtree)
]
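if __name__ == "__main__":
    # Minimal self-check (an illustrative addition, not in the original
    # module): reproduces the worked example from the _prune_paths docstring
    # using only the pure tree helpers, without touching the database.
    all_paths = ["AAA", "AAB", "ABA", "ABB", "BAA", "BAB", "BBA", "BBB"]
    paths = ["AAA", "ABA", "ABB", "BAA", "BAB"]
    assert _prune_paths(paths, all_paths) == ["AAA", "AB", "BA"]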
|
131483
|
from pathlib import Path
import tempfile
from transformers.convert_graph_to_onnx import convert, quantize
# requires:
# transformers==4.0.0
# torch==1.7.1
dest = Path(tempfile.mkdtemp(), "text-generation.onnx")
convert(
pipeline_name="text-generation",
model="gpt2",
output=dest,
framework="pt",
opset=11
)
print(dest)
|
131527
|
from recipe_scrapers.tineno import TineNo
from tests import ScraperTest
class TestTineNoScraper(ScraperTest):
scraper_class = TineNo
def test_host(self):
self.assertEqual("tine.no", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.tine.no/oppskrifter/middag-og-hovedretter/kylling-og-fjarkre/rask-kylling-tikka-masala",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Rask kylling tikka masala")
def test_total_time(self):
self.assertEqual(30, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("4", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://www.tine.no/_/recipeimage/w_2880%2Ch_1620%2Cc_fill%2Cx_764%2Cy_430%2Cg_xy_center/recipeimage/yshftxnhdmojzhelrupo.png",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"Ris:",
"4 dl basmatiris",
"Tikka masala:",
"400 g kyllingfileter",
"1 ss TINE Meierismørtil steking",
"1 stk paprika",
"½ dl chili",
"3 stk vårløk",
"1 ts hvitløksfedd",
"1 ss hakket, friskingefær",
"½ dl hakket, friskkoriander",
"2 ts garam masala",
"3 dl TINE Lett Crème Fraîche 18 %",
"3 ss tomatpuré",
"½ ts salt",
"¼ ts pepper",
"Raita:",
"½ dl slangeagurk",
"3 dl TINE Yoghurt Naturell",
"½ dl friskmynte",
"1 ts hvitløksfedd",
"½ ts salt",
"¼ ts pepper",
],
self.harvester_class.ingredients(),
)
    def test_instructions(self):
        self.assertEqual(
"Kok ris etter anvisningen på pakken.\nTikka masala: Del kylling i biter. Brun kyllingen i smør i en stekepanne på middels varme. Rens og hakk paprika, chili, vårløk og hvitløk og ha det i stekepannen sammen med kyllingen. Rens og finhakk ingefær og frisk koriander. Krydre med garam masala, koriander og ingefær. Hell i crème fraîche og tomatpuré, og la småkoke i 5 minutter. Smak til med salt og pepper.\nRaita: Riv agurk og bland den med yoghurt. Hakk mynte og hvitløk og bland det i. Smak til med salt og pepper.",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(3.9, self.harvester_class.ratings())
def test_description(self):
self.assertEqual(
"En god og rask oppskrift på en kylling tikka masala. Dette er en rett med små smakseksplosjoner som sender tankene til India.",
self.harvester_class.description(),
)
|
131546
|
import os
import json
from cx_Freeze import setup, Executable
import urllib.request, ssl, certifi
request = urllib.request.Request(
f"https://api.type.world/latestUnpublishedVersion/world.type.guiapp/windows/?APPBUILD_KEY={os.environ['APPBUILD_KEY']}"
)
sslcontext = ssl.create_default_context(cafile=certifi.where())
response = urllib.request.urlopen(request, context=sslcontext)
version = response.read().decode()
with open(os.path.join(os.path.dirname(__file__), "buildProfile.json")) as f:
    profile = json.load(f)
baseFolder = "wxPython"
destinationFolder = "build"
executables = [
Executable(
os.path.join(baseFolder, "app.py"),
base=os.getenv("BUILDBASE"),
copyright="Copyright 2018 by Yanone",
targetName="TypeWorld.exe",
icon=os.path.join(baseFolder, "icon", "tw.ico"),
),
Executable(
os.path.join(baseFolder, "agent.py"),
base=os.getenv("BUILDBASE"),
copyright="Copyright 2018 by Yanone",
targetName="TypeWorld Subscription Opener.exe",
icon=os.path.join(baseFolder, "icon", "tw.ico"),
),
]
if "agent" in profile:
executables.append(
Executable(
os.path.join(baseFolder, "daemon.py"),
base=os.getenv("BUILDBASE"),
copyright="Copyright 2018 by Yanone",
targetName="TypeWorld Taskbar Agent.exe",
icon=os.path.join(baseFolder, "icon", "tw.ico"),
)
)
setup(
name="Type.World",
version=version.split("-")[0],
description="Type.World – One Click Font-Installer",
options={
"build_exe": {
"include_files": [
os.path.join(baseFolder, "htmlfiles"),
os.path.join(baseFolder, "locales"),
os.path.join(baseFolder, "icon"),
os.path.join(baseFolder, "patrons"),
os.path.join(baseFolder, "intercom"),
os.path.join(baseFolder, "typeworldguiapp"),
],
"excludes": ["win32ctypes", "tkinter", "test", "numpy", "pytz"],
"packages": [
"appdirs",
"zmq",
"typeworld",
"packaging",
"requests",
"idna",
"pyasn1",
"rsa",
"cachetools",
"fontTools",
"pyasn1_modules",
"typeworld",
"keyring",
"markdown2",
# "pytz",
"winreg",
"win32api",
"plyer",
"flask",
"pywinsparkle",
"win32timezone",
"pkg_resources",
# "numpy",
],
"optimize": 1,
"build_exe": destinationFolder,
}
},
executables=executables,
)
|
131554
|
from pydantic import BaseModel
from enum import Enum
class DogBreed(str, Enum):
mutt = "mutt"
labrador = "labrador"
golden_retriever = "golden retriever"
class Dog(BaseModel):
name: str
age: int
breed: DogBreed
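# Minimal usage sketch (an illustrative addition): pydantic coerces the raw
# string into the DogBreed enum and raises ValidationError on invalid input.
if __name__ == "__main__":
    rex = Dog(name="Rex", age=3, breed="golden retriever")
    print(rex.breed)         # DogBreed.golden_retriever
    print(rex.breed.value)   # golden retriever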
|
131590
|
import numpy as np
import pandas as pd
import pyro
import pyro.distributions as dist
import torch
from pyro.nn import PyroModule
from scvi import _CONSTANTS
from scvi.data._anndata import get_from_registry
from scvi.nn import one_hot
# class NegativeBinomial(TorchDistributionMixin, ScVINegativeBinomial):
# pass
class LocationModelMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel(PyroModule):
"""
Cell2location models the elements of :math:`D` as Negative Binomial distributed,
given an unobserved gene expression level (rate) :math:`mu` and a gene- and batch-specific
over-dispersion parameter :math:`\alpha_{e,g}` which accounts for unexplained variance:
.. math::
D_{s,g} \sim \mathtt{NB}(\mu_{s,g}, \alpha_{e,g})
The expression level of genes :math:`\mu_{s,g}` in the mRNA count space is modelled
as a linear function of expression signatures of reference cell types :math:`g_{f,g}`:
.. math::
\mu_{s,g} = (m_{g} \left (\sum_{f} {w_{s,f} \: g_{f,g}} \right) + s_{e,g}) y_{s}
Here, :math:`w_{s,f}` denotes regression weight of each reference signature :math:`f` at location :math:`s`,
which can be interpreted as the expected number of cells at location :math:`s`
that express reference signature :math:`f`;
    :math:`g_{f,g}` denotes the reference signatures of cell types :math:`f` for each gene :math:`g`,
    the `cell_state_df` input;
:math:`m_{g}` denotes a gene-specific scaling parameter which adjusts for global differences in sensitivity
between technologies (platform effect);
:math:`y_{s}` denotes a location/observation-specific scaling parameter which adjusts for differences in sensitivity
between observations and batches;
    :math:`s_{e,g}` is an additive component that accounts for a gene- and location-specific shift,
    such as contaminating or free-floating RNA.
To account for the similarity of location patterns across cell types, :math:`w_{s,f}` is modelled using
another layer of decomposition (factorization) using :math:`r={1, .., R}` groups of cell types,
that can be interpreted as cellular compartments or tissue zones. Unless stated otherwise, R is set to 50.
Corresponding graphical model can be found in supplementary methods:
https://www.biorxiv.org/content/10.1101/2020.11.15.378125v1.supplementary-material
Approximate Variational Inference is used to estimate the posterior distribution of all model parameters.
    Estimation of absolute cell abundance :math:`w_{s,f}` is guided using an informed prior on the number of cells
(argument called `N_cells_per_location`). It is a tissue-level global estimate, which can be derived from histology
images (H&E or DAPI), ideally paired to the spatial expression data or at least representing the same tissue type.
    This parameter can be estimated by manually counting nuclei in 10-20 locations in the histology image
    (e.g. using the 10X Loupe browser), and computing the average cell abundance.
An appropriate setting of this prior is essential to inform the estimation of absolute cell type abundance values,
however, the model is robust to a range of similar values.
In settings where suitable histology images are not available, the size of capture regions relative to
the expected size of cells can be used to estimate `N_cells_per_location`.
    The prior on detection efficiency per location :math:`y_s` is selected to discourage over-normalisation, such that
    unless the data shows evidence of a strong technical effect, the effect is assumed to be small and close to
    the mean sensitivity for each batch :math:`y_e`:
.. math::
        y_s \sim \mathtt{Gamma}(\mathtt{detection\_alpha}, \mathtt{detection\_alpha} / y_e)
    where :math:`y_e` is the unknown (latent) average detection efficiency in each batch/experiment:
.. math::
        y_e \sim \mathtt{Gamma}(10, 10 / \mathtt{detection\_mean})
"""
def __init__(
self,
n_obs,
n_vars,
n_factors,
n_batch,
cell_state_mat,
n_groups: int = 50,
detection_mean=1 / 2,
detection_alpha=200.0,
m_g_gene_level_prior={"mean": 1, "mean_var_ratio": 1.0, "alpha_mean": 3.0},
N_cells_per_location=8.0,
A_factors_per_location=7.0,
N_cells_mean_var_ratio=1.0,
alpha_g_phi_hyp_prior={"alpha": 9.0, "beta": 3.0},
gene_add_alpha_hyp_prior={"alpha": 9.0, "beta": 3.0},
gene_add_mean_hyp_prior={
"alpha": 1.0,
"beta": 100.0,
},
detection_hyp_prior={"mean_alpha": 10.0},
w_sf_mean_var_ratio=5.0,
):
super().__init__()
self.n_obs = n_obs
self.n_vars = n_vars
self.n_factors = n_factors
self.n_batch = n_batch
self.n_groups = n_groups
self.m_g_gene_level_prior = m_g_gene_level_prior
self.alpha_g_phi_hyp_prior = alpha_g_phi_hyp_prior
self.w_sf_mean_var_ratio = w_sf_mean_var_ratio
self.gene_add_alpha_hyp_prior = gene_add_alpha_hyp_prior
self.gene_add_mean_hyp_prior = gene_add_mean_hyp_prior
detection_hyp_prior["mean"] = detection_mean
detection_hyp_prior["alpha"] = detection_alpha
self.detection_hyp_prior = detection_hyp_prior
self.register_buffer(
"detection_hyp_prior_alpha",
torch.tensor(self.detection_hyp_prior["alpha"]),
)
self.register_buffer(
"detection_mean_hyp_prior_alpha",
torch.tensor(self.detection_hyp_prior["mean_alpha"]),
)
self.register_buffer(
"detection_mean_hyp_prior_beta",
torch.tensor(self.detection_hyp_prior["mean_alpha"] / self.detection_hyp_prior["mean"]),
)
# compute hyperparameters from mean and sd
self.register_buffer("m_g_mu_hyp", torch.tensor(self.m_g_gene_level_prior["mean"]))
self.register_buffer(
"m_g_mu_mean_var_ratio_hyp",
torch.tensor(self.m_g_gene_level_prior["mean_var_ratio"]),
)
self.register_buffer("m_g_alpha_hyp_mean", torch.tensor(self.m_g_gene_level_prior["alpha_mean"]))
self.cell_state_mat = cell_state_mat
self.register_buffer("cell_state", torch.tensor(cell_state_mat.T))
self.register_buffer("N_cells_per_location", torch.tensor(N_cells_per_location))
self.register_buffer("A_factors_per_location", torch.tensor(A_factors_per_location))
self.register_buffer("N_cells_mean_var_ratio", torch.tensor(N_cells_mean_var_ratio))
self.register_buffer(
"alpha_g_phi_hyp_prior_alpha",
torch.tensor(self.alpha_g_phi_hyp_prior["alpha"]),
)
self.register_buffer(
"alpha_g_phi_hyp_prior_beta",
torch.tensor(self.alpha_g_phi_hyp_prior["beta"]),
)
self.register_buffer(
"gene_add_alpha_hyp_prior_alpha",
torch.tensor(self.gene_add_alpha_hyp_prior["alpha"]),
)
self.register_buffer(
"gene_add_alpha_hyp_prior_beta",
torch.tensor(self.gene_add_alpha_hyp_prior["beta"]),
)
self.register_buffer(
"gene_add_mean_hyp_prior_alpha",
torch.tensor(self.gene_add_mean_hyp_prior["alpha"]),
)
self.register_buffer(
"gene_add_mean_hyp_prior_beta",
torch.tensor(self.gene_add_mean_hyp_prior["beta"]),
)
self.register_buffer("w_sf_mean_var_ratio_tensor", torch.tensor(self.w_sf_mean_var_ratio))
self.register_buffer("n_factors_tensor", torch.tensor(self.n_factors))
self.register_buffer("n_groups_tensor", torch.tensor(self.n_groups))
self.register_buffer("ones", torch.ones((1, 1)))
self.register_buffer("ones_1_n_groups", torch.ones((1, self.n_groups)))
self.register_buffer("ones_1_n_factors", torch.ones((1, self.n_factors)))
self.register_buffer("ones_n_batch_1", torch.ones((self.n_batch, 1)))
self.register_buffer("eps", torch.tensor(1e-8))
@staticmethod
def _get_fn_args_from_batch(tensor_dict):
x_data = tensor_dict[_CONSTANTS.X_KEY]
ind_x = tensor_dict["ind_x"].long().squeeze()
batch_index = tensor_dict[_CONSTANTS.BATCH_KEY]
return (x_data, ind_x, batch_index), {}
def create_plates(self, x_data, idx, batch_index):
return pyro.plate("obs_plate", size=self.n_obs, dim=-2, subsample=idx)
def list_obs_plate_vars(self):
"""Create a dictionary with:
1. "name" - the name of observation/minibatch plate;
2. "input" - indexes of model args to provide to encoder network when using amortised inference;
3. "sites" - dictionary with
keys - names of variables that belong to the observation plate (used to recognise
and merge posterior samples for minibatch variables)
values - the dimensions in non-plate axis of each variable (used to construct output
layer of encoder network when using amortised inference)
"""
return {
"name": "obs_plate",
"input": [0, 2], # expression data + (optional) batch index
"input_transform": [
torch.log1p,
lambda x: x,
], # how to transform input data before passing to NN
"sites": {
"w_sf": self.n_factors,
"detection_y_s": 1,
},
}
def forward(self, x_data, idx, batch_index):
obs2sample = one_hot(batch_index, self.n_batch)
obs_plate = self.create_plates(x_data, idx, batch_index)
# =====================Gene expression level scaling m_g======================= #
# Explains difference in sensitivity for each gene between single cell and spatial technology
m_g_mean = pyro.sample(
"m_g_mean",
dist.Gamma(
self.m_g_mu_mean_var_ratio_hyp * self.m_g_mu_hyp,
self.m_g_mu_mean_var_ratio_hyp,
)
.expand([1, 1])
.to_event(2),
) # (1, 1)
m_g_alpha_e_inv = pyro.sample(
"m_g_alpha_e_inv",
dist.Exponential(self.m_g_alpha_hyp_mean).expand([1, 1]).to_event(2),
) # (1, 1)
m_g_alpha_e = self.ones / m_g_alpha_e_inv.pow(2)
m_g = pyro.sample(
"m_g",
dist.Gamma(m_g_alpha_e, m_g_alpha_e / m_g_mean).expand([1, self.n_vars]).to_event(2), # self.m_g_mu_hyp)
) # (1, n_vars)
# =====================Cell abundances w_sf======================= #
# factorisation prior on w_sf models similarity in locations
# across cell types f and reflects the absolute scale of w_sf
n_cells_per_location = pyro.sample(
"n_cells_per_location",
dist.Gamma(
self.N_cells_per_location * self.N_cells_mean_var_ratio,
self.N_cells_mean_var_ratio,
),
)
a_factors_per_location = pyro.sample(
"a_factors_per_location",
dist.Gamma(self.A_factors_per_location, self.ones),
)
# cell group loadings
shape = self.ones_1_n_factors * a_factors_per_location / self.n_factors_tensor
rate = self.ones_1_n_factors / (n_cells_per_location / a_factors_per_location)
with obs_plate:
w_sf = pyro.sample(
"w_sf",
dist.Gamma(
shape,
rate,
),
) # (self.n_obs, self.n_factors)
# =====================Location-specific detection efficiency ======================= #
# y_s with hierarchical mean prior
detection_mean_y_e = pyro.sample(
"detection_mean_y_e",
dist.Gamma(
self.ones * self.detection_mean_hyp_prior_alpha,
self.ones * self.detection_mean_hyp_prior_beta,
)
.expand([self.n_batch, 1])
.to_event(2),
)
detection_hyp_prior_alpha = pyro.deterministic(
"detection_hyp_prior_alpha",
self.ones_n_batch_1 * self.detection_hyp_prior_alpha,
)
beta = (obs2sample @ detection_hyp_prior_alpha) / (obs2sample @ detection_mean_y_e)
with obs_plate:
detection_y_s = pyro.sample(
"detection_y_s",
dist.Gamma(obs2sample @ detection_hyp_prior_alpha, beta),
) # (self.n_obs, 1)
# =====================Gene-specific additive component ======================= #
# per gene molecule contribution that cannot be explained by
# cell state signatures (e.g. background, free-floating RNA)
s_g_gene_add_alpha_hyp = pyro.sample(
"s_g_gene_add_alpha_hyp",
dist.Gamma(self.gene_add_alpha_hyp_prior_alpha, self.gene_add_alpha_hyp_prior_beta),
)
s_g_gene_add_mean = pyro.sample(
"s_g_gene_add_mean",
dist.Gamma(
self.gene_add_mean_hyp_prior_alpha,
self.gene_add_mean_hyp_prior_beta,
)
.expand([self.n_batch, 1])
.to_event(2),
) # (self.n_batch)
s_g_gene_add_alpha_e_inv = pyro.sample(
"s_g_gene_add_alpha_e_inv",
dist.Exponential(s_g_gene_add_alpha_hyp).expand([self.n_batch, 1]).to_event(2),
) # (self.n_batch)
s_g_gene_add_alpha_e = self.ones / s_g_gene_add_alpha_e_inv.pow(2)
s_g_gene_add = pyro.sample(
"s_g_gene_add",
dist.Gamma(s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean)
.expand([self.n_batch, self.n_vars])
.to_event(2),
) # (self.n_batch, n_vars)
# =====================Gene-specific overdispersion ======================= #
alpha_g_phi_hyp = pyro.sample(
"alpha_g_phi_hyp",
dist.Gamma(self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta),
)
alpha_g_inverse = pyro.sample(
"alpha_g_inverse",
dist.Exponential(alpha_g_phi_hyp).expand([self.n_batch, self.n_vars]).to_event(2),
) # (self.n_batch, self.n_vars)
# =====================Expected expression ======================= #
# expected expression
mu = ((w_sf @ self.cell_state) * m_g + (obs2sample @ s_g_gene_add)) * detection_y_s
alpha = obs2sample @ (self.ones / alpha_g_inverse.pow(2))
# convert mean and overdispersion to total count and logits
# total_count, logits = _convert_mean_disp_to_counts_logits(
# mu, alpha, eps=self.eps
# )
# =====================DATA likelihood ======================= #
# Likelihood (sampling distribution) of data_target & add overdispersion via NegativeBinomial
with obs_plate:
pyro.sample(
"data_target",
dist.GammaPoisson(concentration=alpha, rate=alpha / mu),
# dist.NegativeBinomial(total_count=total_count, logits=logits),
obs=x_data,
)
# =====================Compute mRNA count from each factor in locations ======================= #
with obs_plate:
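            # Expected mRNA count contributed by each cell-type factor at each
            # location: abundance w_sf scaled by the factor's total detectable
            # signature, i.e. the sum over genes of g_{f,g} * m_g.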
mRNA = w_sf * (self.cell_state * m_g).sum(-1)
pyro.deterministic("u_sf_mRNA_factors", mRNA)
def compute_expected(self, samples, adata, ind_x=None):
r"""Compute expected expression of each gene in each location. Useful for evaluating how well
the model learned expression pattern of all genes in the data.
"""
if ind_x is None:
ind_x = np.arange(adata.n_obs).astype(int)
else:
ind_x = ind_x.astype(int)
obs2sample = get_from_registry(adata, _CONSTANTS.BATCH_KEY)
obs2sample = pd.get_dummies(obs2sample.flatten()).values[ind_x, :]
mu = (
np.dot(samples["w_sf"][ind_x, :], self.cell_state_mat.T) * samples["m_g"]
+ np.dot(obs2sample, samples["s_g_gene_add"])
) * samples["detection_y_s"][ind_x, :]
alpha = np.dot(obs2sample, 1 / np.power(samples["alpha_g_inverse"], 2))
return {"mu": mu, "alpha": alpha, "ind_x": ind_x}
|
131610
|
from abc import ABC
from typing import Optional
from recipe_db.analytics.spotlight.style import StyleAnalysis
from recipe_db.models import Style
from web_app.charts.utils import NoDataException, Chart, ChartDefinition
from web_app.meta import OPEN_GRAPH_IMAGE_WIDTH, OPEN_GRAPH_IMAGE_HEIGHT
from web_app.plot import LinesChart, PreAggregatedBoxPlot, \
PreAggregateHistogramChart, PreAggregatedPairsBoxPlot
class StyleChart(ChartDefinition, ABC):
CHART_TITLE = None
IMAGE_ALT = None
def __init__(self, style: Style, filter_param: Optional[str]) -> None:
self.style = style
self.filter_param = filter_param
def get_chart_title(self) -> str:
return self.CHART_TITLE % self.style.name
def get_image_alt(self) -> str:
return self.IMAGE_ALT % self.style.name
class StyleAbvChart(StyleChart):
CHART_TITLE = "Alcohol per Volume of <b>%s</b>"
IMAGE_ALT = "Histogram of Alcohol per Volume (ABV) in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).metric_histogram('abv')
if len(df) == 0:
raise NoDataException()
figure = PreAggregateHistogramChart().plot(df, 'abv', 'count')
return Chart(figure, 500, 350, title=self.get_chart_title())
class StyleIbuChart(StyleChart):
CHART_TITLE = "Bitterness of <b>%s</b>"
IMAGE_ALT = "Histogram of bitterness (IBU) in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).metric_histogram('ibu')
if len(df) == 0:
raise NoDataException()
figure = PreAggregateHistogramChart().plot(df, 'ibu', 'count')
return Chart(figure, 500, 350, title=self.get_chart_title())
class StyleColorChart(StyleChart):
CHART_TITLE = "Color of <b>%s</b>"
IMAGE_ALT = "Histogram of beer color (SRM) in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).metric_histogram('srm')
if len(df) == 0:
raise NoDataException()
figure = PreAggregateHistogramChart().plot(df, 'srm', 'count')
return Chart(figure, 500, 350, title=self.get_chart_title())
class StyleOGChart(StyleChart):
CHART_TITLE = "Original Gravity of <b>%s</b>"
IMAGE_ALT = "Histogram of original gravity (OG) in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).metric_histogram('og')
if len(df) == 0:
raise NoDataException()
figure = PreAggregateHistogramChart().plot(df, 'og', 'count')
return Chart(figure, 500, 350, title=self.get_chart_title())
class StyleFGChart(StyleChart):
CHART_TITLE = "Final Gravity of <b>%s</b>"
IMAGE_ALT = "Histogram of final gravity (FG) in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).metric_histogram('fg')
if len(df) == 0:
raise NoDataException()
figure = PreAggregateHistogramChart().plot(df, 'fg', 'count')
return Chart(figure, 500, 350, title=self.get_chart_title())
class StylePopularityChart(StyleChart):
CHART_TITLE = "Popularity of <b>%s</b> over time"
IMAGE_ALT = "Popularity of the %s beer style over time"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popularity()
if len(df) <= 1: # 1, because a single data point is also meaningless
raise NoDataException()
figure = LinesChart().plot(df, 'month', 'recipes_percent', 'beer_style', None, '% of All Recipes')
return Chart(figure, height=Chart.DEFAULT_HEIGHT * 0.66, title=self.get_chart_title())
class StyleTrendingYeastsChart(StyleChart):
CHART_TITLE = "Trending yeasts in <b>%s</b>"
IMAGE_ALT = "Trending yeasts in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).trending_yeasts()
if len(df) == 0:
raise NoDataException()
figure = LinesChart(force_legend=True).plot(df, 'month', 'recipes_percent', 'yeast', None, '% of Style Recipes')
return Chart(figure, title=self.get_chart_title())
class StylePopularYeastsChart(StyleChart):
CHART_TITLE = "Popular yeasts used in <b>%s</b>"
IMAGE_ALT = "Popular yeasts used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popular_yeasts()
if len(df) == 0:
raise NoDataException()
figure = LinesChart(force_legend=True).plot(df, 'month', 'recipes_percent', 'yeast', None, '% of Style Recipes')
return Chart(figure, height=Chart.DEFAULT_HEIGHT * 0.66, title=self.get_chart_title())
class StyleTrendingHopsChart(StyleChart):
CHART_TITLE = "Trending hops in <b>%s</b>"
IMAGE_ALT = "Trending hops in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).trending_hops()
if len(df) == 0:
raise NoDataException()
figure = LinesChart(force_legend=True).plot(df, 'month', 'recipes_percent', 'hop', None, '% of Style Recipes')
return Chart(figure, title=self.get_chart_title())
class StylePopularHopsChart(StyleChart):
CHART_TITLE = "Popular hops used in <b>%s</b>"
IMAGE_ALT = "Popular hops used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popular_hops(use_filter=self.filter_param)
if len(df) == 0:
raise NoDataException()
figure = LinesChart(force_legend=True).plot(df, 'month', 'recipes_percent', 'hop', None, '% of Style Recipes')
return Chart(figure, height=Chart.DEFAULT_HEIGHT * 0.66, title=self.get_chart_title())
class StylePopularHopsAmountChart(StyleChart):
CHART_TITLE = "Typical amount of hops used in <b>%s</b>"
IMAGE_ALT = "Typical amount of hops used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popular_hops_amount(use_filter=self.filter_param)
if len(df) == 0:
raise NoDataException()
figure = PreAggregatedBoxPlot().plot(df, 'hop', 'amount_percent', None, '% of Weight in Recipe')
return Chart(figure, title=self.get_chart_title())
class StyleHopPairingsChart(StyleChart):
CHART_TITLE = "Popular hop pairings used in <b>%s</b>"
IMAGE_ALT = "Popular hop pairings used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).hop_pairings()
if len(df) == 0:
raise NoDataException()
figure = PreAggregatedPairsBoxPlot().plot(df, 'pairing', 'hop', 'amount_percent', None, '% of Weight in Recipe')
return Chart(figure, title=self.get_chart_title())
class StylePopularFermentablesChart(StyleChart):
CHART_TITLE = "Popular fermentables/malts used in <b>%s</b>"
IMAGE_ALT = "Popular fermentables/malts used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popular_fermentables(type_filter=self.filter_param)
if len(df) == 0:
raise NoDataException()
figure = LinesChart(force_legend=True).plot(df, 'month', 'recipes_percent', 'fermentable', None, '% of Style Recipes')
return Chart(figure, height=Chart.DEFAULT_HEIGHT * 0.66, title=self.get_chart_title())
class StylePopularFermentablesAmountChart(StyleChart):
CHART_TITLE = "Typical amount of fermentables/malts used in <b>%s</b>"
IMAGE_ALT = "Typical amount of fermentables/malts used in the %s beer style"
def plot(self) -> Chart:
df = StyleAnalysis(self.style).popular_fermentables_amount(type_filter=self.filter_param)
if len(df) == 0:
raise NoDataException()
figure = PreAggregatedBoxPlot().plot(df, 'fermentable', 'amount_percent', None, '% of Weight in Recipe')
return Chart(figure, title=self.get_chart_title())
class StyleOpenGraphChart(StylePopularityChart):
def plot(self) -> Chart:
chart = super().plot()
chart.width = OPEN_GRAPH_IMAGE_WIDTH
chart.height = OPEN_GRAPH_IMAGE_HEIGHT
return chart
class StyleChartFactory:
CHARTS = dict(
og=StyleOpenGraphChart,
abv_histogram=StyleAbvChart,
ibu_histogram=StyleIbuChart,
color_srm_histogram=StyleColorChart,
original_gravity_histogram=StyleOGChart,
final_gravity_histogram=StyleFGChart,
popularity=StylePopularityChart,
# Yeasts
popular_yeasts=StylePopularYeastsChart,
trending_yeasts=StyleTrendingYeastsChart,
# Hops
popular_hops=StylePopularHopsChart,
popular_hops_amount=StylePopularHopsAmountChart,
trending_hops=StyleTrendingHopsChart,
hop_pairings=StyleHopPairingsChart,
# Fermentables
popular_fermentables=StylePopularFermentablesChart,
popular_fermentables_amount=StylePopularFermentablesAmountChart,
)
@classmethod
def get_chart(cls, style: Style, chart_type: str, filter_param: Optional[str] = "") -> ChartDefinition:
chart_type = cls.normalize_type(chart_type)
chart = cls.CHARTS[chart_type]
return chart(style, filter_param)
@classmethod
def plot_chart(cls, style: Style, chart_type: str, filter_param: Optional[str] = "") -> Chart:
return cls.get_chart(style, chart_type, filter_param).plot()
@classmethod
def is_supported_chart(cls, chart_type: str) -> bool:
chart_type = cls.normalize_type(chart_type)
return chart_type in cls.CHARTS
@classmethod
def normalize_type(cls, chart_type: str) -> str:
return chart_type.replace('-', '_')
@classmethod
def get_types(cls):
return list(cls.CHARTS.keys())
|
131619
|
from cyclone import web
from oonib import log
from oonib.config import config
class _LaxDict(dict):
"""
This is like a dictionary, but when a key is missing it returns the
empty string.
"""
def __missing__(self, _):
return ""
def log_function(handler):
values = _LaxDict({
'request_time': 1000.0 * handler.request.request_time(),
'protocol': handler.request.protocol,
'status': str(handler.get_status()),
'request_method': handler.request.method,
'request_uri': handler.request.uri,
'remote_ip': handler.request.remote_ip
})
log_format = config.main.log_format
if not log_format:
log_format = ("[{protocol}] {status} {request_method} {request_uri} "
"127.0.0.1 {request_time}ms")
    # format_map() (rather than format(**values)) lets _LaxDict.__missing__
    # substitute "" for any unknown fields in a custom log_format; **-unpacking
    # would copy the mapping into a plain dict and bypass __missing__.
    log.msg(log_format.format_map(values))
class OONICollector(web.Application):
def __init__(self):
from oonib.main.api import mainAPI
from oonib.deck.api import deckAPI
from oonib.report.api import reportAPI
from oonib.input.api import inputAPI
from oonib.policy.api import policyAPI
from oonib.report.handlers import checkForStaleReports
handlers = []
handlers += reportAPI
if config.main.input_dir:
handlers += inputAPI
if config.main.deck_dir:
handlers += deckAPI
if config.main.policy_file:
handlers += policyAPI
handlers += mainAPI
checkForStaleReports()
web.Application.__init__(self, handlers, name='collector',
log_function=log_function)
class OONIBouncer(web.Application):
def __init__(self):
from oonib.bouncer.api import bouncerAPI
handlers = []
handlers += bouncerAPI
# Follows the same pattern as the above so we can put some
# initialisation logic for the bouncer as well in here perhaps in
# the future.
web.Application.__init__(self, handlers, name='bouncer',
log_function=log_function)
|
131645
|
from .deepfm import DeepFM
from .deepmf import DMF
from .fism import FISM
from .ncf import NCF
from .xdeepfm import xDeepFM
from .dssm import DSSM
from .afm import AFM
from .dcn import DCN
from .widedeep import WideDeep
from .nais import NAIS
from .cccf import CCCFNet
from .ddtcdr import DDTCDR
from .autoint import AutoInt
from .duration import DURation
from .utils import ModelType
model_map = {
# General Model
"DMF": DMF,
"FISM": FISM,
"NCF": NCF,
# Context Model
"DeepFM": DeepFM,
"xDeepFM": xDeepFM,
"DCN": DCN,
"AFM": AFM,
"DSSM": DSSM,
"WideDeep": WideDeep,
"NAIS": NAIS,
"AutoInt": AutoInt,
# Heterogeneous Model
"CCCF": CCCFNet,
"DDTCDR": DDTCDR,
"DURation": DURation,
}
|
131674
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import RedirectView
import djangosaml2_spid.urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include((djangosaml2_spid.urls, 'djangosaml2_spid',))),
path('', RedirectView.as_view(url=settings.SPID_URLS_PREFIX), name='example-index'),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
131732
|
from abc import ABC
from pathlib import Path
from collections import defaultdict
import random
import numpy as np
from enum import Enum
import torch
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from plyfile import PlyData
import lib.transforms as t
from lib.dataloader import InfSampler
from lib.voxelizer import Voxelizer
class DatasetPhase(Enum):
Train = 0
Val = 1
Val2 = 2
TrainVal = 3
Test = 4
def datasetphase_2str(arg):
if arg == DatasetPhase.Train:
return 'train'
elif arg == DatasetPhase.Val:
return 'val'
elif arg == DatasetPhase.Val2:
return 'val2'
elif arg == DatasetPhase.TrainVal:
return 'trainval'
elif arg == DatasetPhase.Test:
return 'test'
else:
raise ValueError('phase must be one of dataset enum.')
def str2datasetphase_type(arg):
if arg.upper() == 'TRAIN':
return DatasetPhase.Train
elif arg.upper() == 'VAL':
return DatasetPhase.Val
elif arg.upper() == 'VAL2':
return DatasetPhase.Val2
elif arg.upper() == 'TRAINVAL':
return DatasetPhase.TrainVal
elif arg.upper() == 'TEST':
return DatasetPhase.Test
else:
raise ValueError('phase must be one of train/val/test')
def cache(func):
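    """Memoise a loader method's per-index result on the dataset instance.

    Results are stored in `self.cache_dict[func.__name__][index]` and are
    only cached when the dataset was constructed with `cache=True`.
    """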
def wrapper(self, *args, **kwargs):
# Assume that args[0] is index
index = args[0]
if self.cache:
if index not in self.cache_dict[func.__name__]:
results = func(self, *args, **kwargs)
self.cache_dict[func.__name__][index] = results
return self.cache_dict[func.__name__][index]
else:
return func(self, *args, **kwargs)
return wrapper
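# Minimal usage sketch for the cache decorator above (illustrative only; the
# demo class is hypothetical and merely provides the `cache` and `cache_dict`
# attributes the wrapper expects on `self`).
if __name__ == '__main__':
  class _CacheDemo:
    def __init__(self):
      self.cache = True
      self.cache_dict = defaultdict(dict)

    @cache
    def load(self, index):
      return index * 2

  demo = _CacheDemo()
  assert demo.load(3) == 6  # computed on the first call
  assert demo.cache_dict['load'][3] == 6  # served from the cache afterwards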
class DictDataset(Dataset, ABC):
IS_FULL_POINTCLOUD_EVAL = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/'):
"""
data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]]
"""
Dataset.__init__(self)
# Allows easier path concatenation
if not isinstance(data_root, Path):
data_root = Path(data_root)
self.data_root = data_root
self.data_paths = sorted(data_paths)
self.prevoxel_transform = prevoxel_transform
self.input_transform = input_transform
self.target_transform = target_transform
# dictionary of input
self.data_loader_dict = {
'input': (self.load_input, self.input_transform),
'target': (self.load_target, self.target_transform)
}
# For large dataset, do not cache
self.cache = cache
self.cache_dict = defaultdict(dict)
self.loading_key_order = ['input', 'target']
def load_input(self, index):
raise NotImplementedError
def load_target(self, index):
raise NotImplementedError
def get_classnames(self):
pass
def reorder_result(self, result):
return result
def __getitem__(self, index):
out_array = []
for k in self.loading_key_order:
loader, transformer = self.data_loader_dict[k]
v = loader(index)
if transformer:
v = transformer(v)
out_array.append(v)
return out_array
def __len__(self):
return len(self.data_paths)
class VoxelizationDatasetBase(DictDataset, ABC):
IS_TEMPORAL = False
CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000)
ROTATION_AXIS = None
NUM_IN_CHANNEL = None
NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes
IGNORE_LABELS = None # labels that are not evaluated
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/',
ignore_mask=255,
return_transformation=False,
**kwargs):
"""
ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation.
"""
DictDataset.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
cache=cache,
data_root=data_root)
self.ignore_mask = ignore_mask
self.return_transformation = return_transformation
def __getitem__(self, index):
raise NotImplementedError
def load_ply(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
labels = np.array(data['label'], dtype=np.int32)
return coords, feats, labels, None
def __len__(self):
num_data = len(self.data_paths)
return num_data
class VoxelizationDataset(VoxelizationDatasetBase):
"""This dataset loads RGB point clouds and their labels as a list of points
and voxelizes the pointcloud with sufficient data augmentation.
"""
# Voxelization arguments
VOXEL_SIZE = 0.05 # 5cm
# Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
# augmentation has to be done before voxelization
SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2))
ELASTIC_DISTORT_PARAMS = None
# MISC.
PREVOXELIZATION_VOXEL_SIZE = None
# Augment coords to feats
AUGMENT_COORDS_TO_FEATS = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
data_root='/',
ignore_label=255,
return_transformation=False,
augment_data=False,
config=None,
**kwargs):
self.augment_data = augment_data
self.config = config
VoxelizationDatasetBase.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
        cache=kwargs.get('cache', False),  # arrives via **kwargs; the bare name 'cache' would resolve to the module-level decorator
data_root=data_root,
ignore_mask=ignore_label,
return_transformation=return_transformation)
# Prevoxel transformations
self.voxelizer = Voxelizer(
voxel_size=self.VOXEL_SIZE,
clip_bound=self.CLIP_BOUND,
use_augmentation=augment_data,
scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
ignore_label=ignore_label)
    # map labels not evaluated to ignore_label; guard against IGNORE_LABELS being None
    ignore_labels = self.IGNORE_LABELS if self.IGNORE_LABELS is not None else set()
    label_map = {}
    n_used = 0
    for l in range(self.NUM_LABELS):
      if l in ignore_labels:
        label_map[l] = self.ignore_mask
      else:
        label_map[l] = n_used
        n_used += 1
    label_map[self.ignore_mask] = self.ignore_mask
    self.label_map = label_map
    self.NUM_LABELS -= len(ignore_labels)
def _augment_coords_to_feats(self, coords, feats, labels=None):
norm_coords = coords - coords.mean(0)
# color must come first.
if isinstance(coords, np.ndarray):
feats = np.concatenate((feats, norm_coords), 1)
else:
feats = torch.cat((feats, norm_coords), 1)
return coords, feats, labels
def convert_mat2cfl(self, mat):
# Generally, xyz,rgb,label
return mat[:, :3], mat[:, 3:-1], mat[:, -1]
def __getitem__(self, index):
coords, feats, labels, center = self.load_ply(index)
# Downsample the pointcloud with finer voxel size before transformation for memory and speed
if self.PREVOXELIZATION_VOXEL_SIZE is not None:
inds = ME.utils.sparse_quantize(
coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
coords = coords[inds]
feats = feats[inds]
labels = labels[inds]
# Prevoxel transformations
if self.prevoxel_transform is not None:
coords, feats, labels = self.prevoxel_transform(coords, feats, labels)
coords, feats, labels, transformation = self.voxelizer.voxelize(
coords, feats, labels, center=center)
# map labels not used for evaluation to ignore_label
if self.input_transform is not None:
coords, feats, labels = self.input_transform(coords, feats, labels)
if self.target_transform is not None:
coords, feats, labels = self.target_transform(coords, feats, labels)
if self.IGNORE_LABELS is not None:
      labels = np.array([self.label_map[x] for x in labels], dtype=np.int64)
# Use coordinate features if config is set
if self.AUGMENT_COORDS_TO_FEATS:
coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels)
return_args = [coords, feats, labels]
if self.return_transformation:
return_args.append(transformation.astype(np.float32))
return tuple(return_args)
class TemporalVoxelizationDataset(VoxelizationDataset):
IS_TEMPORAL = True
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
data_root='/',
ignore_label=255,
temporal_dilation=1,
temporal_numseq=3,
return_transformation=False,
augment_data=False,
config=None,
**kwargs):
VoxelizationDataset.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
data_root=data_root,
ignore_label=ignore_label,
return_transformation=return_transformation,
augment_data=augment_data,
config=config,
**kwargs)
self.temporal_dilation = temporal_dilation
self.temporal_numseq = temporal_numseq
temporal_window = temporal_dilation * (temporal_numseq - 1) + 1
self.numels = [len(p) - temporal_window + 1 for p in self.data_paths]
if any([numel <= 0 for numel in self.numels]):
raise ValueError('Your temporal window configuration is too wide for '
'this dataset. Please change the configuration.')
def load_world_pointcloud(self, filename):
raise NotImplementedError
def __getitem__(self, index):
for seq_idx, numel in enumerate(self.numels):
if index >= numel:
index -= numel
else:
break
numseq = self.temporal_numseq
if self.augment_data and self.config.temporal_rand_numseq:
numseq = random.randrange(1, self.temporal_numseq + 1)
dilations = [self.temporal_dilation for i in range(numseq - 1)]
if self.augment_data and self.config.temporal_rand_dilation:
dilations = [random.randrange(1, self.temporal_dilation + 1) for i in range(numseq - 1)]
files = [self.data_paths[seq_idx][index + sum(dilations[:i])] for i in range(numseq)]
world_pointclouds = [self.load_world_pointcloud(f) for f in files]
ptcs, centers = zip(*world_pointclouds)
# Downsample pointcloud for speed and memory
if self.PREVOXELIZATION_VOXEL_SIZE is not None:
new_ptcs = []
for ptc in ptcs:
inds = ME.utils.sparse_quantize(
ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
new_ptcs.append(ptc[inds])
ptcs = new_ptcs
    # Apply prevoxel transformations (skip when none are configured)
    if self.prevoxel_transform is not None:
      ptcs = [self.prevoxel_transform(ptc) for ptc in ptcs]
coords, feats, labels = zip(*ptcs)
outs = self.voxelizer.voxelize_temporal(
coords, feats, labels, centers=centers, return_transformation=self.return_transformation)
if self.return_transformation:
coords_t, feats_t, labels_t, transformation_t = outs
else:
coords_t, feats_t, labels_t = outs
joint_coords = np.vstack([
np.hstack((coords, np.ones((coords.shape[0], 1)) * i)) for i, coords in enumerate(coords_t)
])
joint_feats = np.vstack(feats_t)
joint_labels = np.hstack(labels_t)
# map labels not used for evaluation to ignore_label
if self.input_transform is not None:
joint_coords, joint_feats, joint_labels = self.input_transform(joint_coords, joint_feats,
joint_labels)
if self.target_transform is not None:
joint_coords, joint_feats, joint_labels = self.target_transform(joint_coords, joint_feats,
joint_labels)
if self.IGNORE_LABELS is not None:
      joint_labels = np.array([self.label_map[x] for x in joint_labels], dtype=np.int64)
return_args = [joint_coords, joint_feats, joint_labels]
if self.return_transformation:
pointclouds = np.vstack([
np.hstack((pointcloud[0][:, :6], np.ones((pointcloud[0].shape[0], 1)) * i))
for i, pointcloud in enumerate(world_pointclouds)
])
transformations = np.vstack(
[np.hstack((transformation, [i])) for i, transformation in enumerate(transformation_t)])
return_args.extend([pointclouds.astype(np.float32), transformations.astype(np.float32)])
return tuple(return_args)
def __len__(self):
num_data = sum(self.numels)
return num_data
def initialize_data_loader(DatasetClass,
config,
phase,
num_workers,
shuffle,
repeat,
augment_data,
batch_size,
limit_numpoints,
input_transform=None,
target_transform=None):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if config.return_transformation:
collate_fn = t.cflt_collate_fn_factory(limit_numpoints)
else:
collate_fn = t.cfl_collate_fn_factory(limit_numpoints)
prevoxel_transform_train = []
if augment_data:
prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS))
if len(prevoxel_transform_train) > 0:
prevoxel_transforms = t.Compose(prevoxel_transform_train)
else:
prevoxel_transforms = None
input_transforms = []
if input_transform is not None:
input_transforms += input_transform
if augment_data:
input_transforms += [
t.RandomDropout(0.2),
t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
t.ChromaticAutoContrast(),
t.ChromaticTranslation(config.data_aug_color_trans_ratio),
t.ChromaticJitter(config.data_aug_color_jitter_std),
# t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
]
if len(input_transforms) > 0:
input_transforms = t.Compose(input_transforms)
else:
input_transforms = None
dataset = DatasetClass(
config,
prevoxel_transform=prevoxel_transforms,
input_transform=input_transforms,
target_transform=target_transform,
cache=config.cache_data,
augment_data=augment_data,
phase=phase)
data_args = {
'dataset': dataset,
'num_workers': num_workers,
'batch_size': batch_size,
'collate_fn': collate_fn,
}
if repeat:
data_args['sampler'] = InfSampler(dataset, shuffle)
else:
data_args['shuffle'] = shuffle
data_loader = DataLoader(**data_args)
return data_loader
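# Hypothetical usage sketch (names are assumptions, not part of this file):
# given a concrete VoxelizationDataset subclass and a config object exposing
# the attributes read above (return_transformation, cache_data,
# data_aug_color_trans_ratio, data_aug_color_jitter_std, ...), a training
# loader could be built as:
#
#   train_loader = initialize_data_loader(
#       MyVoxelizationDataset,  # hypothetical subclass
#       config,
#       phase='train',
#       num_workers=4,
#       shuffle=True,
#       repeat=True,  # wraps the dataset in InfSampler for endless iteration
#       augment_data=True,
#       batch_size=8,
#       limit_numpoints=1200000)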
|
131753
|
from fastapi import FastAPI
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination, paginate
from .base import BasePaginationTestCase, SafeTestClient, UserOut
from .utils import faker
app = FastAPI()
entities = [UserOut(name=faker.name()) for _ in range(100)]
@app.get("/default", response_model=Page[UserOut])
@app.get("/limit-offset", response_model=LimitOffsetPage[UserOut])
async def route():
return paginate(entities)
add_pagination(app)
class TestPaginationParams(BasePaginationTestCase):
@fixture(scope="session")
def entities(self):
return entities
@fixture(scope="session")
def client(self):
with SafeTestClient(app) as c:
yield c
|
131762
|
import FWCore.ParameterSet.Config as cms
import os
from Configuration.Eras.Era_Phase2C9_cff import Phase2C9
process = cms.Process('CLIENT',Phase2C9)
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load('Configuration.Geometry.GeometryExtended2026D46Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2026D46_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 1
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['upgradePLS3']
process.load("Validation.HGCalValidation.HGCalRecHitsClient_cfi")
process.hgcalRecHitClientEE.Verbosity = 2
process.hgcalRecHitClientHEF = process.hgcalRecHitClientEE.clone(
DetectorName = cms.string("HGCalHESiliconSensitive"))
process.hgcalRecHitClientHEB = process.hgcalRecHitClientEE.clone(
DetectorName = cms.string("HGCalHEScintillatorSensitive"))
process.load("DQMServices.Core.DQM_cfg")
process.DQM.collectorHost = ''
# summary
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) ##
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:./test_output_rechitVal.root')
)
process.load("Configuration.StandardSequences.EDMtoMEAtRunEnd_cff")
process.dqmSaver.referenceHandling = cms.untracked.string('all')
cmssw_version = os.environ.get('CMSSW_VERSION','CMSSW_X_Y_Z')
Workflow = '/HGCalValidation/'+'Harvesting/'+str(cmssw_version)
process.dqmSaver.workflow = Workflow
process.load("Validation.HGCalValidation.HGCalRecHitsClient_cfi")
process.p = cms.Path(process.EDMtoME *
process.hgcalRecHitClientEE *
process.hgcalRecHitClientHEF *
process.hgcalRecHitClientHEB *
process.dqmSaver)
|
131772
|
import os
import astropy.constants as const
import astropy.units as u
import numpy as np
from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun
from astropy.time import Time
from sora.config import input_tests
__all__ = ['plot_occ_map']
def xy2latlon(x, y, loncen, latcen, time):
"""Calculates the longitude and latitude given projected positions x and y.
Parameters
----------
x : `int`, `float`
Projected position in x, in the GCRS, in meters.
y : `int`, `float`
Projected position in y, in the GCRS, in meters.
loncen : `int`, `float`
Center longitude of projection, in degrees.
latcen : `int`, `float`
Center latitude of projection, in degrees.
time : `astropy.time.Time`
Time of referred projection.
Returns
-------
lon, lat : `list`
        Longitude and Latitude whose projection at loncen, latcen results
in x, y. (deg).
"""
r = const.R_earth.to(u.m).value
site_cen = EarthLocation(loncen*u.deg, latcen*u.deg)
itrs_cen = site_cen.get_itrs(obstime=time)
gcrs_cen = itrs_cen.transform_to(GCRS(obstime=time))
z = np.array(y, ndmin=1)
y = np.array(x, ndmin=1)
x2 = r*r-y*y-z*z
a = np.where(x2 >= 0.0)
x = np.sqrt(x2[a])
y = y[a]
z = z[a]
lon = np.repeat(1e+31, len(x2))
lat = np.repeat(1e+31, len(x2))
center_frame = SkyOffsetFrame(origin=gcrs_cen)
if len(x) > 0:
n = 0
if not time.isscalar and len(time) == len(x2):
time = time[a]
while True:
n += 1
new_pos = SkyCoord(x*u.m, y*u.m, z*u.m, representation_type='cartesian', frame=center_frame[a])
n_coord = new_pos.transform_to(GCRS(obstime=time))
n_itrs = n_coord.transform_to(ITRS(obstime=time))
n_site = n_itrs.earth_location
n_site = EarthLocation(n_site.lon, n_site.lat, 0)
itrs_site = n_site.get_itrs(obstime=time)
gcrs_site = itrs_site.transform_to(GCRS(obstime=time))
target1 = gcrs_site.transform_to(center_frame[a])
if n == 4:
lon[a] = n_site.lon.deg
lat[a] = n_site.lat.deg
break
x = target1.cartesian.x.to(u.m).value
return lon, lat
def latlon2xy(lon, lat, loncen, latcen):
"""Calculates the projection of longitude and latitude in the loncen,
latcen direction.
Parameters
----------
lon : `int`, `float`
Longitude to calculate projection.
lat : `int`, `float`
Latitude to calculate projection.
loncen : `int`, `float`
Center longitude of projection, in degrees.
latcen : `int`, `float`
Center latitude of projection, in degrees.
Returns
-------
x, y : `list`
Projection of lon, lat at loncen, latcen, in the ITRS (meters).
"""
site_cen = EarthLocation(loncen*u.deg, latcen*u.deg)
itrs_cen = site_cen.get_itrs()
lon = np.array(lon, ndmin=1)
lat = np.array(lat, ndmin=1)
site = EarthLocation(lon*u.deg, lat*u.deg, height=0*u.m)
itrs_site = site.get_itrs()
target = itrs_site.transform_to(SkyOffsetFrame(origin=itrs_cen))
y = target.cartesian.y.to(u.m).value
z = target.cartesian.z.to(u.m).value
k = np.where(target.cartesian.x.to(u.m).value < 0.0)
y[k] = 1e+31
z[k] = 1e+31
return y, z
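# Round-trip sketch for the two helpers above (hypothetical usage; points on
# the far hemisphere come back as the 1e+31 sentinel instead):
#
#   t = Time('2020-01-01 00:00:00')
#   x, y = latlon2xy(lon=0.0, lat=51.5, loncen=0.0, latcen=0.0)
#   lon, lat = xy2latlon(x, y, loncen=0.0, latcen=0.0, time=t)
#   # lon, lat should be close to (0.0, 51.5)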
def plot_occ_map(name, radius, coord, time, ca, pa, vel, dist, mag=0, longi=0, **kwargs):
"""Plots the map of the occultation.
Parameters
----------
name : `str`
Name of the object.
radius : `int`, `float`
Radius of the object, in km.
coord : `str`, `astropy.coordinates.SkyCoord`
Coordinates of the star (``"hh mm ss.sss dd mm ss.sss"`` or
``"hh.hhhhhhhh dd.dddddddd"``).
time : `str`, `astropy.time.Time`
Instant of Closest Approach (iso or isot format).
ca : `int`, `float`
Closest Approach Distance, in arcsec.
pa : `int`, `float`
Position Angle at C/A, in degrees.
vel : `int`, `float`
Velocity of the event, in km/s.
dist : `int`, `float`
Object distance at C/A, in AU.
mag : `int`, `float`, default=0
Mag* = Normalized magnitude to vel=20km/s.
longi : `int`, `float`, default=0
East longitude of sub-planet point, deg, positive towards East.
nameimg : `str`
        Change the name of the image saved.
path : `str`
Path to a directory where to save map.
resolution : `int`, default=2
Cartopy feature resolution.\n
- ``1`` means a resolution of "10m";\n
- ``2`` a resolution of "50m";\n
        - ``3`` a resolution of "110m".
states : `bool`
If True, plots the states borders of the countries. The states
of some countries will only be shown depending on the resolution.
zoom : `int`, `float`
Zooms in or out of the map.
centermap_geo : `list`, default=None
Center the map given coordinates in longitude and latitude. It must be
a list with two numbers.
centermap_delta : `list`, default=None
Displace the center of the map given displacement in X and Y, in km.
It must be a list with two numbers.
centerproj : `list`
Rotates the Earth to show occultation with the center projected at a
given longitude and latitude. It must be a list with two numbers.
labels : `bool`, default=True
Plots text above and below the map with the occultation parameters.
meridians : `int`, default=30
Plots lines representing the meridians for given interval, in degrees.
parallels : `int`, default=30
Plots lines representing the parallels for given interval, in degrees.
sites : `dict`
Plots site positions in map. It must be a python dictionary where the
key is the `name` of the site, and the value is a list with `longitude`,
`latitude`, `delta_x`, `delta_y` and `color`. `delta_x` and `delta_y`
are displacement, in km, from the point position of the site in the map
and the `name`. `color` is the color of the point.
site_name : `bool`
If True, it prints the name of the sites given, else it plots only the points.
site_box_alpha : `int`, `float`, default=0
Sets the transparency of a box surrounding each station name. 0 equals to
transparent, and 1 equals to opaque.
countries : `dict`
Plots the names of countries. It must be a python dictionary where the
key is the name of the country and the value is a list with longitude
and latitude of the lower left part of the text.
offset : `list`
Applies an offset to the ephemeris, calculating new CA and instant of
CA. It is a pair of `delta_RA*cosDEC` and `delta_DEC`.
mapstyle : `int`, default=1
Define the color style of the map. ``'1'`` is the default black
and white scale. ``'2'`` is a colored map.
error : `int`, `float`
Ephemeris error in mas. It plots a dashed line representing radius + error.
ercolor : `str`
Changes the color of the lines of the error bar.
ring : `int`, `float`
Plots a dashed line representing the location of a ring. It is given
in km, from the center.
rncolor : `str`
Changes the color of ring lines.
atm : `int`, `float`
Plots a dashed line representing the location of an atmosphere. It is
given in km, from the center.
atcolor : `str`
Changes the color of atm lines.
chord_delta : `list`
List with distances from center to plot chords.
chord_geo : `2d-list`
List with pairs of coordinates to plot chords.
chcolor : `str`, default='grey'
Color of the line of the chords.
heights : `list`
It plots a circular dashed line showing the locations where the observer
would observe the occultation at a given height above the horizons.
This must be a list.
hcolor : `str`
Changes the color of the height lines.
mapsize : `list`, default= [46.0, 38.0]
The size of figure, in cm. It must be a list with two values.
cpoints : `int`, `float`, default=60
Interval for the small points marking the center of shadow, in seconds.
ptcolor : `str`
Change the color of the center points.
alpha : `float`, default=0.2
The transparency of the night shade, where 0.0 is full transparency and
1.0 is full black.
fmt : `str`, default:'png'
The format to save the image. It is parsed directly by `matplotlib.pyplot`.
dpi : `int`, default=100
Resolution in "dots per inch". It defines the quality of the image.
lncolor : `str`
Changes the color of the line that represents the limits of the shadow
over Earth.
outcolor :`str`
Changes the color of the lines that represents the limits of the shadow
outside Earth.
scale : `int`, `float`
Arbitrary scale for the size of the name of the site.
cscale : `int`, `float`
Arbitrary scale for the name of the country.
sscale : `int`, `float`
Arbitrary scale for the size of point of the site.
pscale : `int`, `float`
Arbitrary scale for the size of the points that represent the center of
the shadow.
arrow : `bool`
If True, it plots the arrow with the occultation direction.
Important
---------
Required parameters to plot an occultation map: 'name', 'radius', 'coord',
'time', 'ca', 'pa', 'vel', and 'dist'.
Note
----
The parameters 'mag' and 'longi' are optional and only printed in label.
All other remaining parameters can be used to further customize the Map
configuration.
When producing the map, only one of 'centermap_geo' or 'centermap_delta'
options can be used at a time.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
allowed_kwargs = ['alpha', 'arrow', 'atcolor', 'atm', 'centermap_delta', 'centermap_geo', 'centerproj',
'chcolor', 'chord_delta', 'chord_geo', 'countries', 'cpoints', 'cscale', 'dpi', 'ercolor',
'error', 'fmt', 'hcolor', 'heights', 'labels', 'lncolor', 'mapsize', 'mapstyle', 'meridians',
'nameimg', 'nscale', 'offset', 'outcolor', 'parallels', 'path', 'pscale', 'ptcolor',
'resolution', 'ring', 'rncolor', 'site_name', 'sites', 'sscale', 'states', 'zoom',
'site_box_alpha']
input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
    if not isinstance(name, str):
        raise TypeError('name keyword must be a string')
radius = radius*u.km
occs = {}
try:
occs['stars'] = SkyCoord(coord, frame='icrs', unit=(u.hourangle, u.degree))
    except Exception:
        raise KeyError('"coord" keyword is not in the format: "hh mm ss.sss dd mm ss.sss" or "hh.hhhhhhhh dd.dddddddd"')
try:
occs['datas'] = Time(time)
    except Exception:
        raise KeyError('"time" keyword is not an iso or isot time format')
occs['ca'] = ca*u.arcsec
occs['posa'] = pa*u.deg
occs['vel'] = vel*(u.km/u.s)
occs['dist'] = dist*u.AU
occs['magG'] = mag
occs['longi'] = longi
mapstyle = kwargs.get('mapstyle', 1)
if mapstyle not in [1, 2]:
        raise ValueError('mapstyle must be 1 or 2')
resolution = kwargs.get('resolution', 2)
if resolution not in [1, 2, 3]:
        raise TypeError('resolution keyword must be one of these: [1, 2, 3] where 1=10m, 2=50m and 3=110m')
res = ['10m', '50m', '110m']
resolution = res[resolution-1]
nameimg = kwargs.get('nameimg', '{}_{}'.format(name, occs['datas'].isot.replace(':', '_')))
fmt = kwargs.get('fmt', 'png')
dpi = kwargs.get('dpi', 100)
step = kwargs.get('step', 1)
mapsize = kwargs.get('mapsize', [46.0, 38.0])*u.cm
erro = kwargs.get('error', None)
ring = kwargs.get('ring', None)
atm = kwargs.get('atm', None)
cpoints = kwargs.get('cpoints', 60)
states = kwargs.get('states', True)
labels = kwargs.get('labels', True)
meridians = kwargs.get('meridians', 30)
parallels = kwargs.get('parallels', 30)
nscale = kwargs.get('nscale', 1)
cscale = kwargs.get('cscale', 1)
sscale = kwargs.get('sscale', 1)
pscale = kwargs.get('pscale', 1)
    heights = np.array(kwargs.get('heights', []), ndmin=1)
alpha = kwargs.get('alpha', 0.2)
site_box_alpha = kwargs.get('site_box_alpha', 0.0)
centermap_geo = kwargs.get('centermap_geo', None)
centermap_delta = kwargs.get('centermap_delta', None)
if 'centermap_geo' in kwargs and 'centermap_delta' in kwargs:
raise ValueError('User must give "centermap_geo" OR "centermap_delta"')
zoom = kwargs.get('zoom', 1)
if zoom <= 0:
raise ValueError('zoom can not be equal or smaller than 0.')
off_ra, off_de = kwargs.get('offset', [0.0, 0.0])*u.mas
arrow = kwargs.get('arrow', True)
site_name = kwargs.get('site_name', True)
path = kwargs.get('path', '.')
if not os.path.exists(path):
        raise IOError('Path does not exist')
chord_delta = np.array(kwargs.get('chord_delta', []), ndmin=1)*u.km
chord_geo = kwargs.get('chord_geo', [])
if len(chord_geo) > 0:
try:
b = np.array(chord_geo, ndmin=2)
chord_geo = b.reshape(len(b), 2)
        except Exception:
            raise ValueError('chord_geo must be a set of pairs with longitude and latitude')
chord_geo = EarthLocation(*chord_geo.T)
sites = {}
if 'sites' in kwargs.keys():
if type(kwargs['sites']) == str and os.path.isfile(kwargs['sites']):
data = np.loadtxt(kwargs['sites'], dtype={'names': ('name', 'lon', 'lat', 'offx', 'offy', 'color'),
'formats': ('S30', 'f8', 'f8', 'f8', 'f8', 'S30')},
delimiter=',', ndmin=1)
for i, s in enumerate(data):
sites[s['name'].strip().decode()] = [s['lon'], s['lat'], s['offx'], s['offy'], s['color'].strip().decode()]
elif type(kwargs['sites']) == dict:
sites = kwargs['sites']
else:
raise TypeError('sites keyword must be a file or a dictionary')
countries = {}
if 'countries' in kwargs.keys():
if type(kwargs['countries']) == str and os.path.isfile(kwargs['countries']):
data = np.loadtxt(kwargs['countries'], dtype={'names': ('name', 'lon', 'lat'), 'formats': ('S30', 'f8', 'f8')},
delimiter=',', ndmin=1)
for i, c in enumerate(data):
countries[c['name'].strip().decode()] = [c['lon'], c['lat']]
elif type(kwargs['countries']) == dict:
countries = kwargs['countries']
else:
raise TypeError('country keyword must be a file or a dictionary')
# calculates offsets
dca = off_ra*np.sin(occs['posa']) + off_de*np.cos(occs['posa'])
dt = (-(off_ra * np.cos(occs['posa']) - off_de * np.sin(occs['posa'])).to(u.rad) * occs['dist'].to(u.km) / np.absolute(
occs['vel'])).value * u.s
ca1 = occs['ca'] + dca
data = occs['datas'] + dt
# define map parameters
center_gcrs = GCRS(occs['stars'].ra, occs['stars'].dec, 1*u.R_earth, obstime=data)
center_itrs = center_gcrs.transform_to(ITRS(obstime=data))
center_map = center_itrs.earth_location
centert = True
if 'centerproj' in kwargs.keys():
if type(kwargs['centerproj']) == EarthLocation:
center_map = kwargs['centerproj']
elif np.array(kwargs['centerproj']).shape == (2,):
center_map = EarthLocation.from_geodetic(*kwargs['centerproj'], 0.0)
else:
raise TypeError('centerproj must be an Astropy EarthLocation Object or an array with Longitude and Latitude only')
centert = False
    fig = plt.figure(figsize=mapsize.to(u.imperial.inch).value, facecolor='w')
projection = ccrs.Orthographic(central_longitude=center_map.lon.value, central_latitude=center_map.lat.value)
if labels:
axf = plt.axes(projection=projection)
else:
axf = plt.axes([-0.001, -0.001, 1.002, 1.002], projection=projection)
axf.set_global()
# calculates regions for zoom
limits = None
r = const.R_earth.to(u.m).value
if centermap_geo is not None:
cx, cy = latlon2xy(centermap_geo[0], centermap_geo[1], center_map.lon.value, center_map.lat.value)
limits = [cx/1000.0, cy/1000.0]
if np.any(np.absolute(limits) > r):
raise ValueError('Coordinates for centermap_geo are outside the visible range.')
elif centermap_delta is not None:
limits = centermap_delta
elif zoom != 1:
limits = [0, 0]
if limits is not None:
dr = r/zoom
l0 = (limits[0]*u.km).to(u.m).value
l1 = (limits[1]*u.km).to(u.m).value
dmsize = mapsize[0]/mapsize[1]
if mapsize[1] < mapsize[0]:
lx = l0 - dr*dmsize
ux = l0 + dr*dmsize
ly = l1 - dr
uy = l1 + dr
else:
lx = l0 - dr
ux = l0 + dr
ly = l1 - dr/dmsize
uy = l1 + dr/dmsize
axf.set_xlim(lx, ux)
axf.set_ylim(ly, uy)
if labels and zoom > 1:
centert = False
# plots features
axf.coastlines(resolution=resolution, color='0.3')
ocean = cfeature.NaturalEarthFeature('physical', 'ocean', resolution)
land = cfeature.NaturalEarthFeature('physical', 'land', resolution)
border = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', resolution)
if mapstyle == 1:
axf.add_feature(ocean, zorder=0, color='0.9')
axf.add_feature(land, zorder=0, edgecolor='None', color='1.0')
axf.add_feature(border, zorder=0.1, edgecolor='0.4', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0, edgecolor='0.7')
axf.add_feature(cfeature.LAKES, zorder=0, color='0.7')
ptcolor = 'black'
lncolor = 'blue'
ercolor = 'blue'
rncolor = 'blue'
atcolor = 'blue'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
elif mapstyle == 2:
axf.add_feature(ocean, zorder=0, facecolor=cfeature.COLORS['water'])
axf.add_feature(land, zorder=0, edgecolor='None', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0, edgecolor='0.5', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0.1, edgecolor='0.5', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0)
axf.add_feature(cfeature.LAKES, zorder=0)
ptcolor = 'red'
lncolor = 'blue'
ercolor = 'red'
rncolor = 'black'
atcolor = 'black'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
if states:
states_r = cfeature.NaturalEarthFeature('cultural', 'admin_1_states_provinces', resolution)
axf.add_feature(states_r, zorder=0, edgecolor='0.6', facecolor='None')
gl = axf.gridlines(xlocs=np.arange(-180, 180.001, meridians), ylocs=np.arange(-90, 90.001, parallels))
gl.n_steps = 180
sun = get_sun(data)
sun_lat = sun.dec
sun_lon = sun.ra - data.sidereal_time('mean', 'greenwich')
pole_lon = sun_lon.deg
pole_lat = sun_lat.deg
proj_sun = ccrs.Orthographic(central_longitude=pole_lon+180, central_latitude=-pole_lat)
bordx = r*np.cos(np.arange(0, 361, 0.5)*u.deg)
bordy = r*np.sin(np.arange(0, 361, 0.5)*u.deg)
axf.fill(bordx, bordy, transform=proj_sun, linewidth=0, color='black', alpha=alpha)
axf.fill(bordx*np.cos(18*u.deg), bordy*np.cos(18*u.deg), transform=proj_sun, linewidth=0, color='black', alpha=alpha)
ptcolor = kwargs.get('ptcolor', ptcolor)
lncolor = kwargs.get('lncolor', lncolor)
ercolor = kwargs.get('ercolor', ercolor)
rncolor = kwargs.get('rncolor', rncolor)
atcolor = kwargs.get('atcolor', atcolor)
outcolor = kwargs.get('outcolor', outcolor)
hcolor = kwargs.get('hcolor', hcolor)
chcolor = kwargs.get('chcolor', chcolor)
# calculates path
vec = np.arange(0, int(8000/(np.absolute(occs['vel'].value))), step)
vec = np.sort(np.concatenate((vec, -vec[1:]), axis=0))
pa = Angle(occs['posa'])
pa.wrap_at('180d', inplace=True)
if pa > 90*u.deg:
paplus = pa - 180*u.deg
elif pa < -90*u.deg:
paplus = pa + 180*u.deg
else:
paplus = pa
deltatime = vec*u.s
datas1 = data + deltatime
centers_gcrs = GCRS(np.repeat(occs['stars'].ra, len(datas1)), np.repeat(occs['stars'].dec, len(datas1)),
1*u.R_earth, obstime=datas1)
centers_itrs = centers_gcrs.transform_to(ITRS(obstime=datas1))
centers = centers_itrs.earth_location
dista = (occs['dist'].to(u.km)*ca1.to(u.rad)).value*u.km
ax = dista*np.sin(pa) + (deltatime*occs['vel'])*np.cos(paplus)
by = dista*np.cos(pa) - (deltatime*occs['vel'])*np.sin(paplus)
ax2 = ax - radius * np.sin(paplus)
by2 = by - radius * np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon1 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax2[j].to(u.m).value, by2[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
ax3 = ax + radius * np.sin(paplus)
by3 = by + radius * np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon2 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax3[j].to(u.m).value, by3[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
# plots chords_delta
for val in chord_delta:
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots chords_geo
for coord_geo in chord_geo:
xt, yt = latlon2xy(coord_geo.lon.deg, coord_geo.lat.deg, centers.lon.value, centers.lat.value)*u.m
val = np.sqrt((xt-ax)**2 + (yt-by)**2)
k = val.argmin()
ang = np.arctan2((yt-by)[k], (xt-ax)[k])
val = np.sign(np.sin(ang))*val[k]
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots error
if erro is not None:
err = erro*u.mas
errd = (occs['dist'].to(u.km)*err.to(u.rad)).value*u.km
ax2 = ax - errd*np.sin(paplus) - radius*np.sin(paplus)
by2 = by - errd*np.cos(paplus) - radius*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=ercolor)
ax3 = ax + errd*np.sin(paplus) + radius*np.sin(paplus)
by3 = by + errd*np.cos(paplus) + radius*np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=ercolor)
# plots ring
if ring is not None:
rng = ring*u.km
ax2 = ax - rng*np.sin(paplus)
by2 = by - rng*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=rncolor)
ax3 = ax + rng*np.sin(paplus)
by3 = by + rng*np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=rncolor)
# plots atm
if atm is not None:
atmo = atm*u.km
ax2 = ax - atmo*np.sin(paplus)
by2 = by - atmo*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=atcolor)
ax3 = ax + atmo*np.sin(paplus)
by3 = by + atmo*np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=atcolor)
# plots center points
vec = np.arange(0, int(8000/(np.absolute(occs['vel'].value))), cpoints)
deltatime = np.sort(np.concatenate((vec, -vec[1:]), axis=0))*u.s
axc = dista*np.sin(pa) + (deltatime*occs['vel'])*np.cos(paplus)
byc = dista*np.cos(pa) - (deltatime*occs['vel'])*np.sin(paplus)
plt.plot(axc.to(u.m).value, byc.to(u.m).value, 'o', color=ptcolor, clip_on=(not centert),
markersize=mapsize[0].value*pscale*8.0/46.0, zorder=-0.2)
datas2 = data + deltatime
centers_p_gcrs = GCRS(np.repeat(occs['stars'].ra, len(datas2)), np.repeat(occs['stars'].dec, len(datas2)),
1*u.R_earth, obstime=datas2)
centers_p_itrs = centers_p_gcrs.transform_to(ITRS(obstime=datas2))
centers_p = centers_p_itrs.earth_location
clon1, clat1 = xy2latlon(axc.to(u.m).value, byc.to(u.m).value, centers_p.lon.value, centers_p.lat.value, datas2)
j = np.where(clon1 < 1e+30)
axf.plot(clon1[j], clat1[j], 'o', transform=ccrs.Geodetic(), color=ptcolor, clip_on=True,
markersize=mapsize[0].value*pscale*8.0/46.0)
datas1 = data + deltatime
center_gcrs = GCRS(np.repeat(occs['stars'].ra, 1), np.repeat(occs['stars'].dec, 1),
1*u.R_earth, obstime=data)
center_itrs = center_gcrs.transform_to(ITRS(obstime=data))
center = center_itrs.earth_location
xp = [(dista.to(u.m)*np.sin(pa)).value]
yp = [(dista.to(u.m)*np.cos(pa)).value]
loncen, latcen = xy2latlon(xp, yp, center.lon.value, center.lat.value, data)
j = np.where(loncen < 1e+30)
    if len(j[0]) > 0:
axf.plot(loncen[j], latcen[j], 'o', transform=ccrs.Geodetic(), color=ptcolor, clip_on=True,
markersize=mapsize[0].value*pscale*24.0/46.0)
elif not centert:
plt.plot(xp, yp, 'o', color=ptcolor, clip_on=False, markersize=mapsize[0].value*pscale*24.0/46.0)
# plots the heights
if 'heights' in kwargs.keys():
for h in heights:
lonb, latb = xy2latlon(bordx * np.cos(h * u.deg), bordy * np.cos(h * u.deg), center.lon.value,
center.lat.value, data)
axf.plot(lonb, latb, transform=ccrs.Geodetic(), linestyle='dotted', color=hcolor)
    # plots the direction arrow
if arrow:
if limits is None:
dx = 1000000*(np.sin(paplus+90*u.deg)*np.sign(occs['vel'])).value
dy = 1000000*(np.cos(paplus+90*u.deg)*np.sign(occs['vel'])).value
plt.annotate('', xy=(5500000+dx, -5500000+dy), xycoords='data',
xytext=(5500000, -5500000), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top', annotation_clip=False
)
else:
dx = (1000000/zoom) * (np.sin(paplus + 90 * u.deg) * np.sign(occs['vel'])).value
dy = (1000000/zoom) * (np.cos(paplus + 90 * u.deg) * np.sign(occs['vel'])).value
plt.annotate('', xy=(lx + (ux-lx)*0.9 + dx, ly + (uy-ly)*0.1 + dy), xycoords='data',
xytext=(lx + (ux-lx)*0.9, ly + (uy-ly)*0.1), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top', annotation_clip=False
)
# plots the countries names
for country in countries.keys():
plt.text(countries[country][0], countries[country][1], country, transform=ccrs.Geodetic(),
weight='bold', color='grey', fontsize=30*cscale, family='monospace')
# plots the sites
for site in sites.keys():
s = EarthLocation.from_geodetic(sites[site][0], sites[site][1], 0.0*u.km)
axf.plot(s.lon.deg, s.lat.deg, 'o', transform=ccrs.Geodetic(),
markersize=mapsize[0].value*sscale*10.0/46.0, color=sites[site][4])
if site_name:
xt, yt = latlon2xy(s.lon.deg, s.lat.deg, center_map.lon.value, center_map.lat.value)
axf.text(xt + sites[site][2]*1000, yt+sites[site][3]*1000, site, weight='bold',
fontsize=25*nscale, family='monospace',
bbox={'facecolor': 'white', 'alpha': site_box_alpha, 'pad': 2, 'edgecolor':'none'})
# Define the title and label of the output
title = ('Object Diam Tmax dots <> ra_offset_dec\n'
'{:10s} {:4.0f} km {:5.1f}s {:02d} s <>{:+6.1f} {:+6.1f} \n'.
format(name, 2*radius.value, (2*radius/np.absolute(occs['vel'])).value,
cpoints, off_ra.value, off_de.value))
labelx = ("\n year-m-d h:m:s UT ra__dec__J2000__candidate C/A P/A vel Delta G* long\n"
"{} {:02d} {:02d} {:07.4f} {:+03d} {:02d} {:06.3f} {:6.3f} {:6.2f} {:6.2f} {:5.2f} {:5.1f} {:3.0f}".
format(data.iso, int(occs['stars'].ra.hms.h), int(occs['stars'].ra.hms.m), occs['stars'].ra.hms.s,
int(occs['stars'].dec.dms.d), np.absolute(int(occs['stars'].dec.dms.m)),
np.absolute(occs['stars'].dec.dms.s), ca1.value, occs['posa'].value,
occs['vel'].value, occs['dist'].value, occs['magG'], occs['longi']))
# plots the map
if labels:
axf.set_title(title, family='monospace', weight='bold', fontsize=22)
axf.text(0.5, -0.1, labelx, va='bottom', ha='center', rotation='horizontal', rotation_mode='anchor',
transform=axf.transAxes, family='monospace', weight='bold', fontsize=22)
filepath = os.path.join(path, '{}.{}'.format(nameimg, fmt))
plt.savefig(filepath, format=fmt, dpi=dpi)
print('{}.{} generated'.format(nameimg, fmt))
plt.clf()
plt.close()
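# Hypothetical call sketch (all values are made-up placeholders; the required
# keywords mirror the docstring above):
#
#   plot_occ_map('ObjectName', 100.0,
#                coord='18 55 15.65 -31 31 21.67',
#                time='2021-01-01 00:00:00.000',
#                ca=0.2, pa=160.0, vel=-20.0, dist=15.0,
#                mag=14.0, longi=290.0)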
|
131782
|
import os
from keras.datasets import mnist
from autokeras.image.image_supervised import ImageClassifier
from autokeras.utils import pickle_from_file
from graphviz import Digraph
def to_pdf(graph, path):
dot = Digraph(comment='The Round Table')
for index, node in enumerate(graph.node_list):
dot.node(str(index), str(node.shape))
for u in range(graph.n_nodes):
for v, layer_id in graph.adj_list[u]:
dot.edge(str(u), str(v), str(graph.layer_list[layer_id]))
dot.render(path)
def visualize(path):
cnn_module = pickle_from_file(os.path.join(path, 'module'))
cnn_module.searcher.path = path
for item in cnn_module.searcher.history:
model_id = item['model_id']
graph = cnn_module.searcher.load_model_by_id(model_id)
to_pdf(graph, os.path.join(path, str(model_id)))
if __name__ == '__main__':
    # Put the dataset into ~/.keras/dataset beforehand, otherwise the download will fail
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape)
# (60000, 28, 28)
    print('Adding a channel dimension to match the expected input format')
x_train = x_train.reshape(x_train.shape + (1,))
print(x_train.shape)
# (60000, 28, 28, 1)
x_test = x_test.reshape(x_test.shape + (1,))
    # Directory where the search writes its models
clf = ImageClassifier(path="automodels/", verbose=True)
    # Architecture search, limited to 4 hours
gap = 6
clf.fit(x_train[::gap], y_train[::gap], time_limit=4*60*60)
    # Retrain once using the best-performing architecture
    clf.final_fit(x_train[::gap], y_train[::gap], x_test, y_test, retrain=True)
y = clf.evaluate(x_test, y_test)
print(y)
print("导出训练好的模型")
clf.export_autokeras_model("automodels/auto_mnist_model")
print("可视化模型")
visualize("automodels/")
|
131807
|
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.netsparker.parser import NetsparkerParser
class TestNetsparkerParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/netsparker/netsparker_one_finding.json")
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(16, finding.cwe)
self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y"))
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/netsparker/netsparker_many_findings.json")
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(16, len(findings))
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(16, finding.cwe)
self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y"))
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Critical", finding.severity)
self.assertEqual(89, finding.cwe)
self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y"))
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H", finding.cvssv3)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual(str(endpoint), "http://php.testsparker.com/artist.php?id=-1%20OR%2017-7=10")
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Medium", finding.severity)
self.assertEqual(205, finding.cwe)
self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y"))
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C", finding.cvssv3)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual(str(endpoint), "http://php.testsparker.com")
|
131809
|
import pyautogui
import PySimpleGUI as sg
import cv2
import numpy as np
"""
Demo program that displays a webcam using OpenCV
"""
def main():
sg.theme('Black')
# define the window layout
layout = [[sg.Text('OpenCV Demo', size=(40, 1), justification='center', font='Helvetica 20')],
[sg.Image(filename='', key='image')],
[sg.Button('Record', size=(10, 1), font='Arial 14'),
sg.Button('Stop', size=(10, 1), font='Arial 14'),
sg.Button('Exit', size=(10, 1), font='Arial 14'),
sg.Button('Screenshot',size=(10,1),font='Arial 14') ]]
# create the window and show it without the plot
window = sg.Window('Demo Application - OpenCV Integration',
layout, location=(800, 400))
# ---===--- Event LOOP Read and display frames, operate the GUI --- #
cap = cv2.VideoCapture(0)
recording = False
while True:
event, values = window.read(timeout=20)
if event == 'Exit' or event == sg.WIN_CLOSED:
return
elif event == 'Record':
recording = True
elif event=='Screenshot':
myScreenshot = pyautogui.screenshot()
myScreenshot.save(r'shot.png')
elif event == 'Stop':
recording = False
            img = np.full((480, 640), 255, dtype=np.uint8)  # uint8 so cv2.imencode accepts it
# this is faster, shorter and needs less includes
imgbytes = cv2.imencode('.png', img)[1].tobytes()
window['image'].update(data=imgbytes)
if recording:
ret, frame = cap.read()
imgbytes = cv2.imencode('.png', frame)[1].tobytes() # ditto
window['image'].update(data=imgbytes)
if __name__ == '__main__':
    main()
|
131878
|
import iam_floyd as statement
import importlib
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
helperDir = '%s/../../helper/python' % currentdir
sys.path.insert(0, helperDir)
test = importlib.import_module('python_test')
out = getattr(test, 'out')
deploy = getattr(test, 'deploy')
s = (
# doc-start
statement.Ec2() \
.allow() \
.all_write_actions()
# doc-end
)
all = [s]
out(all)
# deploy(all) disabled, bc exceeds policy size limit
|
131915
|
import unittest
import os
import sys
sys.path.append('../../../../source/code/tools/scx_prune_repository')
from moffile import MofFile
class MofFileTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
try:
os.remove('EmptyFile.mof')
os.remove('FileWithNoDependentClasses.mof')
os.remove('FileWithOneDependentClass.mof')
os.remove('TwoSubClasses.mof')
os.remove('SameFileDependency.mof')
except OSError:
pass
def testNoSuchFile(self):
moffile = MofFile('ThisFileShouldNotExist.mof')
self.assertEqual(moffile.GetDependentClasses(), [])
def testEmptyFile(self):
        open('EmptyFile.mof', 'w').close()
moffile = MofFile('EmptyFile.mof')
self.assertEqual(moffile.GetDependentClasses(), [])
def testMofFileWithNoDependentClasses(self):
out = open('FileWithNoDependentClasses.mof', 'w')
out.write('class TestClass {\n')
out.write(' string Caption\n')
out.write('}')
out.close()
moffile = MofFile('FileWithNoDependentClasses.mof')
self.assertEqual(moffile.GetDependentClasses(), [])
def testMofFileWithOneDependentClass(self):
out = open('FileWithOneDependentClass.mof', 'w')
out.write('class TestClass : DependentClass {\n')
out.write(' string Caption\n')
out.write('}')
out.close()
moffile = MofFile('FileWithOneDependentClass.mof')
self.assertEqual(moffile.GetDependentClasses(), ['DependentClass'])
def testMofFileWithTwoClassesWithOneBaseClass(self):
out = open('TwoSubClasses.mof', 'w')
out.write('class SubClass1 : BaseClass {\n')
out.write('}')
out.write('class SubClass2 : BaseClass {\n')
out.write('}')
out.close()
moffile = MofFile('TwoSubClasses.mof')
self.assertEqual(moffile.GetDependentClasses(), ['BaseClass'])
def testMofFileWithInterDependency(self):
out = open('SameFileDependency.mof', 'w')
out.write('class BaseClass {\n')
out.write('}')
out.write('class SubClass2 : BaseClass {\n')
out.write('}')
out.close()
moffile = MofFile('SameFileDependency.mof')
self.assertEqual(moffile.GetDependentClasses(), [])
def testDefinedClasses(self):
out = open('SameFileDependency.mof', 'w')
out.write('class BaseClass {\n')
out.write('}')
out.write('class SubClass1 : BaseClass {\n')
out.write('}')
out.write('class SubClass2 : BaseClass {\n')
out.write('}')
out.write('class SubClass3 : SubClass1 {\n')
out.write('}')
out.close()
moffile = MofFile('SameFileDependency.mof')
self.assertEqual(moffile.GetDefinedClasses(), ['BaseClass', 'SubClass1', 'SubClass2', 'SubClass3'])
|
131953
|
import sys
import os
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
from RFEM.Loads.freeLoad import FreeLoad
from RFEM.enums import *
from RFEM.initModel import Model
from RFEM.BasicObjects.material import Material
from RFEM.BasicObjects.thickness import Thickness
from RFEM.BasicObjects.node import Node
from RFEM.BasicObjects.line import Line
from RFEM.BasicObjects.surface import Surface
from RFEM.TypesForNodes.nodalSupport import NodalSupport
from RFEM.LoadCasesAndCombinations.staticAnalysisSettings import StaticAnalysisSettings
from RFEM.LoadCasesAndCombinations.loadCase import LoadCase
if Model.clientModel is None:
Model()
def test_free_load():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
Material(1, 'S235')
Node(1, 0.0, 0.0, 0.0)
Node(2, 10.0, 0.0, 0.0)
Node(3, 10.0, 10.0, 0.0)
Node(4, 0.0, 10.0, 0.0)
Node(5, 0.0, 0.0, 10.0)
Node(6, 0.0, 10.0, 10.0)
Line(1, '1 2')
Line(2, '2 3')
Line(3, '3 4')
Line(4, '4 1')
Thickness(1, 'Dicke', 1, 0.05)
Surface(1, '1-4', 1)
NodalSupport(1, '1', NodalSupportType.HINGED)
NodalSupport(2, '2', NodalSupportType.HINGED)
NodalSupport(3, '3', NodalSupportType.HINGED)
NodalSupport(4, '4', NodalSupportType.HINGED)
StaticAnalysisSettings(1, 'Geometrisch-linear', StaticAnalysisType.GEOMETRICALLY_LINEAR)
LoadCase(1 , 'Einzell- u. Linienlast')
LoadCase(2 , 'Rechtecklast 1')
LoadCase(3 , 'Rechtecklast 2')
LoadCase(4 , 'Kreislast')
LoadCase(5 , 'Polygonlast')
    # Test of the free concentrated loads
FreeLoad.ConcentratedLoad(1, 1, load_parameter= [5000, 4, 2])
FreeLoad.ConcentratedLoad(2, 1, load_parameter= [50, 8, 8], load_type= FreeConcentratedLoadLoadType.LOAD_TYPE_MOMENT, load_direction= FreeConcentratedLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Y)
    # Test of the free line loads
FreeLoad.LineLoad(3, 1, '1',
FreeLineLoadLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeLineLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000, 2, 2, 4, 4])
    # Test of the free rectangular loads
## LOAD_LOCATION_RECTANGLE_CORNER_POINTS
FreeLoad.RectangularLoad(1, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[1, 8, 3, 10, 0])
FreeLoad.RectangularLoad(2, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_FIRST,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000, 2000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[4, 8, 6, 10, 0])
FreeLoad.RectangularLoad(3, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_SECOND,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000, 2000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[7, 8, 9, 10, 0])
FreeLoad.RectangularLoad(4, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_IN_Z,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[1, 5, 3, 7, [[-3, 0.3], [-1, 0.4], [0, 1]]])
FreeLoad.RectangularLoad(5, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_ALONG_PERIMETER,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[4, 5, 6, 7, [5, 7, 0], [5, 9, 2], 0, [[0, 0.5], [90, 1.75], [180, 1.25], [270, 1], [360, 0.5]]])
FreeLoad.RectangularLoad(6, 2, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_IN_Z_AND_ALONG_PERIMETER,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CORNER_POINTS,
[7, 5, 9, 7, [[-3, 0.3], [-1, 0.4], [0, 1]], [5, 7, 0], [5, 9, 2], 0, [[0, 0.5], [90, 1.75], [180, 1.25], [270, 1], [360, 0.5]]])
## LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES
FreeLoad.RectangularLoad(1, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[2, 9, 2, 2, 0])
FreeLoad.RectangularLoad(2, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_FIRST,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000, 2000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[5, 9, 2, 2, 0])
FreeLoad.RectangularLoad(3, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_SECOND,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000, 2000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[8, 9, 2, 2, 0])
FreeLoad.RectangularLoad(4, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_IN_Z,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[2, 6, 2, 2, [[-3, 0.3], [-1, 0.4], [0, 1]]])
FreeLoad.RectangularLoad(5, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_ALONG_PERIMETER,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[5, 6, 2, 2, [5, 7, 0], [5, 9, 2], 0, [[0, 0.5], [90, 1.75], [180, 1.25], [270, 1], [360, 0.5]]])
FreeLoad.RectangularLoad(6, 3, '1',
FreeRectangularLoadLoadDistribution.LOAD_DISTRIBUTION_VARYING_IN_Z_AND_ALONG_PERIMETER,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeRectangularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[5000],
FreeRectangularLoadLoadLocationRectangle.LOAD_LOCATION_RECTANGLE_CENTER_AND_SIDES,
[8, 6, 2, 2, [[-3, 0.3], [-1, 0.4], [0, 1]], [5, 7, 0], [5, 9, 2], 0, [[0, 0.5], [90, 1.75], [180, 1.25], [270, 1], [360, 0.5]]])
    # Test of the free circular loads
FreeLoad.CircularLoad(1, 4, '1',
FreeCircularLoadLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeCircularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[10000, 7.5, 5, 2])
FreeLoad.CircularLoad(2, 4, '1',
FreeCircularLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreeCircularLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[10000, 2500, 2.5, 5, 2])
    # Test of the free polygon loads
FreeLoad.PolygonLoad(1, 5, '1',
FreePolygonLoadLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreePolygonLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[[1, 0], [0, 2], [2, 2]],
[5000])
FreeLoad.PolygonLoad(2, 5, '1',
FreePolygonLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreePolygonLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[[6, 0], [4, 2], [8, 2]],
[5000, 2500, 1000, 1, 2, 3])
FreeLoad.PolygonLoad(3, 5, '1',
FreePolygonLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_FIRST,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreePolygonLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[[6, 4], [4, 6], [8, 6]],
[5000, 2500, 1, 3])
FreeLoad.PolygonLoad(4, 5, '1',
FreePolygonLoadLoadDistribution.LOAD_DISTRIBUTION_LINEAR_SECOND,
FreeLoadLoadProjection.LOAD_PROJECTION_XY_OR_UV,
FreePolygonLoadLoadDirection.LOAD_DIRECTION_GLOBAL_Z_TRUE,
[[1, 4], [0, 6], [2, 6]],
[1500, 7500, 2, 1])
# Calculate_all()  # Don't use in unit tests. See template for more info.
Model.clientModel.service.finish_modification()
|
131991
|
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test(self):
self.expect_expr("int i; __typeof__(i) j = 1; j", result_type="typeof (i)", result_value="1")
self.expect_expr("int i; typeof(i) j = 1; j", result_type="typeof (i)", result_value="1")
self.expect_expr("int i; decltype(i) j = 1; j", result_type="decltype(i)", result_value="1")
|
131996
|
import logging
from datetime import timedelta
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airmaps.instruments import settings
from airmaps.instruments import storage
from airmaps.instruments.utils import make_rm_build_task
from airmaps.instruments.utils import run_generation_from_first_stage
from maps_generator.generator import stages_declaration as sd
from maps_generator.generator.env import Env
from maps_generator.generator.env import PathProvider
from maps_generator.generator.env import get_all_countries_list
from maps_generator.maps_generator import run_generation
logger = logging.getLogger("airmaps")
MAPS_STORAGE_PATH = f"{settings.STORAGE_PREFIX}/maps"
class MapsGenerationDAG(DAG):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
build_prolog_task = PythonOperator(
task_id="Build_prolog_task",
provide_context=True,
python_callable=MapsGenerationDAG.build_prolog,
dag=self,
)
build_epilog_task = PythonOperator(
task_id="Build_epilog_task",
provide_context=True,
python_callable=MapsGenerationDAG.build_epilog,
dag=self,
)
publish_maps_task = PythonOperator(
task_id="Publish_maps_task",
provide_context=True,
python_callable=MapsGenerationDAG.publish_maps,
dag=self,
)
rm_build_task = make_rm_build_task(self)
build_epilog_task >> publish_maps_task >> rm_build_task
for country in get_all_countries_list(PathProvider.borders_path()):
build_prolog_task >> self.make_mwm_operator(country) >> build_epilog_task
@staticmethod
def get_params(namespace="env", **kwargs):
return kwargs.get("params", {}).get(namespace, {})
@staticmethod
def build_prolog(**kwargs):
params = MapsGenerationDAG.get_params(**kwargs)
env = Env(**params)
kwargs["ti"].xcom_push(key="build_name", value=env.build_name)
run_generation(
env,
(
sd.StageDownloadAndConvertPlanet(),
sd.StageCoastline(),
sd.StagePreprocess(),
sd.StageFeatures(),
sd.StageDownloadDescriptions(),
),
)
@staticmethod
def make_build_mwm_func(country):
def build_mwm(**kwargs):
build_name = kwargs["ti"].xcom_pull(key="build_name")
params = MapsGenerationDAG.get_params(**kwargs)
params.update({"build_name": build_name, "countries": [country,]})
env = Env(**params)
            # We need to check for the existence of mwm.tmp. This is needed
            # when building mwms from only part of the planet.
tmp_mwm_name = env.get_tmp_mwm_names()
assert len(tmp_mwm_name) <= 1
if not tmp_mwm_name:
logger.warning(f"mwm.tmp does not exist for {country}.")
return
run_generation_from_first_stage(env, (sd.StageMwm(),), build_lock=False)
return build_mwm
@staticmethod
def build_epilog(**kwargs):
build_name = kwargs["ti"].xcom_pull(key="build_name")
params = MapsGenerationDAG.get_params(**kwargs)
params.update({"build_name": build_name})
env = Env(**params)
run_generation_from_first_stage(
env,
(
sd.StageCountriesTxt(),
sd.StageExternalResources(),
sd.StageLocalAds(),
sd.StageStatistics(),
sd.StageCleanup(),
),
)
env.finish()
@staticmethod
def publish_maps(**kwargs):
build_name = kwargs["ti"].xcom_pull(key="build_name")
params = MapsGenerationDAG.get_params(**kwargs)
params.update({"build_name": build_name})
env = Env(**params)
subdir = MapsGenerationDAG.get_params(namespace="storage", **kwargs)["subdir"]
storage_path = f"{MAPS_STORAGE_PATH}/{subdir}"
storage.wd_publish(env.paths.mwm_path, f"{storage_path}/{env.mwm_version}/")
def make_mwm_operator(self, country):
normalized_name = "__".join(country.lower().split())
return PythonOperator(
task_id=f"Build_country_{normalized_name}_task",
provide_context=True,
python_callable=MapsGenerationDAG.make_build_mwm_func(country),
dag=self,
)
PARAMS = {"storage": {"subdir": "open_source"}}
if settings.DEBUG:
PARAMS["env"] = {
# The planet file in debug mode does not contain Russia_Moscow territory.
        # Russia_Moscow is needed for testing.
"countries": ["Cuba", "Haiti", "Jamaica", "Cayman Islands", "Russia_Moscow"]
}
OPEN_SOURCE_MAPS_GENERATION_DAG = MapsGenerationDAG(
"Generate_open_source_maps",
schedule_interval=timedelta(days=7),
default_args={
"owner": "OMaps",
"depends_on_past": True,
"start_date": days_ago(0),
"email": settings.EMAILS,
"email_on_failure": True,
"email_on_retry": False,
"retries": 0,
"retry_delay": timedelta(minutes=5),
"priority_weight": 1,
"params": PARAMS,
},
)
|
132009
|
import numpy as np
import torch
import ptan
def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
"""
Convert batch into training tensors
:param batch:
:param net:
:return: states variable, actions tensor, reference values variable
"""
states = []
actions = []
rewards = []
not_done_idx = []
last_states = []
for idx, exp in enumerate(batch):
states.append(exp.state)
actions.append(exp.action)
rewards.append(exp.reward)
if exp.last_state is not None:
not_done_idx.append(idx)
last_states.append(exp.last_state)
states_v = ptan.agent.float32_preprocessor(states).to(device)
actions_v = torch.FloatTensor(actions).to(device)
# handle rewards
rewards_np = np.array(rewards, dtype=np.float32)
if not_done_idx:
last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
last_vals_v = net(last_states_v)
last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
rewards_np[not_done_idx] += last_val_gamma * last_vals_np
ref_vals_v = torch.FloatTensor(rewards_np).to(device)
return states_v, actions_v, ref_vals_v
|
132020
|
import os
import glob
import importlib
import logging
from six import string_types
from jinja2 import DictLoader
import jsonschema
import nbconvert # noqa: F401
from ipypublish.utils import (
pathlib,
handle_error,
get_module_path,
read_file_from_directory,
read_file_from_module,
)
from ipypublish import export_plugins
from ipypublish import schema
from ipypublish.templates.create_template import create_template
_TEMPLATE_KEY = "new_template"
_EXPORT_SCHEMA_FILE = "export_config.schema.json"
_EXPORT_SCHEMA = None
logger = logging.getLogger("configuration")
def get_export_config_path(export_key, config_folder_paths=()):
    # type: (str, Tuple[str]) -> Union[pathlib.Path, None]
    """Search for an export configuration whose name matches the supplied
    export key.
    """
for name, jsonpath in iter_all_export_paths(config_folder_paths):
if name == export_key:
return pathlib.Path(jsonpath)
return None
def iter_all_export_paths(config_folder_paths=(), regex="*.json"):
"""we iterate through all json files in the
supplied plugin_folder_paths, and then in the `export_plugins` folder
"""
for plugin_folder_path in config_folder_paths:
for jsonpath in glob.glob(os.path.join(plugin_folder_path, regex)):
name = os.path.splitext(os.path.basename(jsonpath))[0]
yield name, pathlib.Path(jsonpath)
module_path = get_module_path(export_plugins)
for jsonpath in glob.glob(os.path.join(str(module_path), regex)):
name = os.path.splitext(os.path.basename(jsonpath))[0]
yield name, pathlib.Path(jsonpath)
def load_export_config(export_config_path):
"""load the export configuration"""
if isinstance(export_config_path, string_types):
export_config_path = pathlib.Path(export_config_path)
data = read_file_from_directory(
export_config_path.parent,
export_config_path.name,
"export configuration",
logger,
interp_ext=True,
)
# validate against schema
global _EXPORT_SCHEMA
if _EXPORT_SCHEMA is None:
# lazy load schema once
_EXPORT_SCHEMA = read_file_from_directory(
get_module_path(schema),
_EXPORT_SCHEMA_FILE,
"export configuration schema",
logger,
interp_ext=True,
)
try:
jsonschema.validate(data, _EXPORT_SCHEMA)
except jsonschema.ValidationError as err:
handle_error(
"validation of export config {} failed against {}: {}".format(
export_config_path, _EXPORT_SCHEMA_FILE, err.message
),
jsonschema.ValidationError,
logger=logger,
)
return data
def iter_all_export_infos(config_folder_paths=(), regex="*.json", get_mime=False):
"""iterate through all export configuration and yield a dict of info"""
for name, path in iter_all_export_paths(config_folder_paths, regex):
data = load_export_config(path)
info = dict(
[
("key", str(name)),
("class", data["exporter"]["class"]),
("path", str(path)),
("description", data["description"]),
]
)
if get_mime:
info["mime_type"] = create_exporter_cls(
data["exporter"]["class"]
).output_mimetype
yield info
def create_exporter_cls(class_str):
# type: (str) -> nbconvert.exporters.Exporter
"""dynamically load export class"""
export_class_path = class_str.split(".")
module_path = ".".join(export_class_path[0:-1])
class_name = export_class_path[-1]
try:
export_module = importlib.import_module(module_path)
except ModuleNotFoundError: # noqa: F821
handle_error(
"module {} containing exporter class {} not found".format(
module_path, class_name
),
ModuleNotFoundError,
logger=logger,
) # noqa: F821
if hasattr(export_module, class_name):
export_class = getattr(export_module, class_name)
else:
handle_error(
"module {} does not contain class {}".format(module_path, class_name),
ImportError,
logger=logger,
)
return export_class
def get_export_extension(export_config_path):
"""return the file extension of the exporter class"""
data = load_export_config(export_config_path)
exporter_cls = create_exporter_cls(data["exporter"]["class"])
return exporter_cls.file_extension
def str_to_jinja(template_str, template_key="jinja_template"):
return DictLoader({template_key: template_str})
def load_template(template_key, template_dict):
if template_dict is None:
return None
if "directory" in template_dict["outline"]:
outline_template = read_file_from_directory(
template_dict["outline"]["directory"],
template_dict["outline"]["file"],
"template outline",
logger,
interp_ext=False,
)
outline_name = "{0}/{1}".format(
template_dict["outline"]["directory"], template_dict["outline"]["file"]
)
else:
outline_template = read_file_from_module(
template_dict["outline"]["module"],
template_dict["outline"]["file"],
"template outline",
logger,
interp_ext=False,
)
outline_name = "{0}/{1}".format(
template_dict["outline"]["module"], template_dict["outline"]["file"]
)
segments = []
for snum, segment in enumerate(template_dict.get("segments", [])):
if "file" not in segment:
handle_error("'file' expected in segment {}".format(snum), KeyError, logger)
if "directory" in segment:
seg_data = read_file_from_directory(
segment["directory"],
segment["file"],
"template segment",
logger,
interp_ext=True,
)
elif "module" in segment:
seg_data = read_file_from_module(
segment["module"],
segment["file"],
"template segment",
logger,
interp_ext=True,
)
else:
handle_error(
"'directory' or 'module' expected in segment {}".format(snum),
KeyError,
logger,
)
segments.append(seg_data)
template_str = create_template(outline_template, outline_name, segments)
return str_to_jinja(template_str, template_key)
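# A minimal usage sketch (hypothetical module/file names, shown only to
# illustrate the expected template_dict shape):
#   template_dict = {
#       "outline": {"module": "my_pkg.templates", "file": "outline.tex.j2"},
#       "segments": [{"module": "my_pkg.templates", "file": "segment.yaml.j2"}],
#   }
#   loader = load_template("new_template", template_dict)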
|
132050
|
from baremetal import *
from math import ceil, log
def accumulator(clk, delta, channels):
t = delta.subtype
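    # tree() builds a pipelined doubling tree: each level registers the running
    # value x alongside x + delta*n, then recurses with n halved, so the final
    # return is `channels` phase-offset accumulators spaced delta apart that
    # all advance by delta*channels per clock cycle.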
def tree(delta, x, n):
x, xn=t.register(clk, d=x), t.register(clk, d=x+delta*n)
delta=t.register(clk, d=delta)
if n == 1:
return [x, xn]
else:
return tree(delta, x, n//2) + tree(delta, xn, n//2)
counter = t.register(clk, init=0)
    shifts = int(ceil(log(channels, 2)))  # log base 2: shifting left by this multiplies by the channel count
counter.d(counter+(delta<<shifts))
counters = tree(delta, counter, channels//2)
return counters
if __name__ == "__main__":
clk = Clock("clk")
counters = accumulator(clk, Unsigned(10).constant(1), 8)
results = []
clk.initialise()
clk.tick()
clk.tick()
for i in range(16):
for j in range(8):
clk.tick()
for channel in counters:
sample = channel.get()
            print(sample)
            results.append(sample)
    if results == list(range(1024)):
        print("pass")
    else:
        print("fail")
counters = [Unsigned(16).output("i_%u"%i, x) for i, x in enumerate(counters)]
netlist = Netlist("accum",[clk], [], counters)
# print(netlist.generate())
|
132052
|
from . import extracts, published_branches, release, steps
__all__ = ["steps", "extracts", "release", "published_branches"]
|
132075
|
from time import time
import os
from typing import Union
from pathlib import Path
import click
import csv
from elasticsearch import helpers, Elasticsearch
from .conf import Conf
@click.command()
@click.option(
'-d',
'--directory',
help="The location of the csv file(s) to be indexed. Can be a single file or a directory",
required=True,
type=str,
)
@click.option(
"--es-index",
help="The index name to be use. If it does not exist it will be created",
required=True,
type=str,
)
@click.option(
"--es-host",
help="The host name to be used.",
required=True,
type=str,
)
@click.option(
"--es-port",
help="The port name to be used.",
required=True,
type=str,
)
@click.option(
'--alias',
required=False,
default="None",
type=str,
help="set alias"
)
def run(es_index: str, directory: str, es_host: str, es_port: str, alias: str):
print("Starting Gamechanger Hermes Pipeline")
start = time()
    # Index the csv file(s) into Elasticsearch
csv_reader(directory, es_host, es_port, es_index, alias)
end = time()
print(f'Total time -- It took {end - start} seconds!')
print("DONE!!!!!!")
def csv_reader(file_name, host, port, index, alias):
    print(index)
    es = Conf.ch.es_client
    if str(file_name).endswith("csv"):
        with open(file_name, 'r') as outfile:
            reader = csv.DictReader(outfile)
            helpers.bulk(es, reader, index=index)
    elif str(file_name).endswith("/"):
        pathlist = Path(file_name).rglob('*.csv')
        for path in pathlist:
            with open(path, 'r') as outfile:
                reader = csv.DictReader(outfile)
                helpers.bulk(es, reader, index=index)
    if alias != "None":
        es.indices.put_alias(index=index, name=alias)
if __name__ == '__main__':
run()
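# Example invocation (hypothetical script name, paths, and hosts, shown for
# illustration only):
#   python hermes.py -d ./data/ --es-index gamechanger --es-host localhost \
#       --es-port 9200 --alias gc-alias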
|
132091
|
import xmlrpc.client
s = xmlrpc.client.ServerProxy('http://localhost:9000')
print("Available Methods:")
print(s.system.listMethods())
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_1")
s.wait(200)
s.mouseClick("mainWindow/Button_2")
s.wait(200)
resultText = s.getStringProperty("mainWindow/results", "text")
print("Result:\n{}".format(resultText))
s.quit()
|
132095
|
from json import dumps
from typing import Callable, Dict, Final, List
from uuid import uuid4
import sqlalchemy as sa
from databases.core import Database
from oaff.app.data.retrieval.feature_set_provider import FeatureSetProvider
from oaff.app.data.sources.postgresql.stac_hybrid.postgresql_layer import PostgresqlLayer
from oaff.app.responses.models.collection_items_html import CollectionItemsHtml
from oaff.app.responses.models.link import Link, PageLinkRel
from oaff.app.util import now_as_rfc3339
class PostgresqlFeatureSetProvider(FeatureSetProvider):
FEATURES_PLACEHOLDER: Final = str(uuid4())
def __init__(
self,
db: Database,
id_set: sa.sql.expression.Select,
layer: PostgresqlLayer,
total_count: int,
):
self.db = db
self.id_set = id_set
self.layer = layer
self.total_count = total_count
async def as_geojson(
self,
links: List[Link],
page_links_provider: Callable[[int, int], Dict[PageLinkRel, Link]],
) -> str:
rows = [
row[0]
for row in await self.db.fetch_all(
# fmt: off
sa.select([
sa.text(f"""
JSON_BUILD_OBJECT(
'type', 'Feature',
'id', source."{self.layer.unique_field_name}",
'geometry', ST_AsGeoJSON(
source."{self.layer.geometry_field_name}"
)::JSONB,
'properties', TO_JSONB(source) - '{
self.layer.unique_field_name
}' - '{
self.layer.geometry_field_name
}'
)
""")
])
.select_from(
self.layer.model.alias("source").join(
self.id_set,
self.layer.model.alias("source").c[self.layer.unique_field_name]
== self.id_set.c["id"],
)
)
# fmt: on
)
]
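        # The rows fetched above are already JSON strings built by Postgres'
        # JSON_BUILD_OBJECT; dumps() would re-escape them, so a unique
        # placeholder is serialized instead and then string-replaced with the
        # raw JSON array at the end.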
return dumps(
{
"type": "FeatureCollection",
"features": self.FEATURES_PLACEHOLDER,
"links": [
dict(link)
for link in links
+ list(page_links_provider(self.total_count, len(rows)).values())
],
"numberMatched": self.total_count,
"numberReturned": len(rows),
"timeStamp": now_as_rfc3339(),
}
).replace(f'"{self.FEATURES_PLACEHOLDER}"', f'[{",".join(rows)}]')
async def as_html_compatible(
self, links: List[Link], page_links_provider: Callable[[int, int], List[Link]]
) -> CollectionItemsHtml:
rows = [
dict(row)
for row in await self.db.fetch_all(
sa.select(
[
col
for col in self.layer.model.c
if col.name != self.layer.geometry_field_name
]
).select_from(
self.layer.model.join(
self.id_set,
self.layer.model.primary_key.columns[self.layer.unique_field_name]
== self.id_set.c["id"],
)
)
)
]
page_links = page_links_provider(self.total_count, len(rows))
return CollectionItemsHtml(
format_links=links,
next_link=page_links[PageLinkRel.NEXT]
if PageLinkRel.NEXT in page_links
else None,
prev_link=page_links[PageLinkRel.PREV]
if PageLinkRel.PREV in page_links
else None,
features=rows,
collection_id=self.layer.id,
unique_field_name=self.layer.unique_field_name,
)
|
132102
|
import pickle
from datetime import timedelta
from django import forms
from django.core.exceptions import ValidationError
from django.db import (
connection,
models,
)
from django.test import override_settings
from gcloudc.db.models.fields.charfields import (
CharField,
CharOrNoneField,
)
from gcloudc.db.models.fields.computed import (
ComputedBooleanField,
ComputedCharField,
ComputedIntegerField,
ComputedPositiveIntegerField,
ComputedTextField,
)
from gcloudc.db.models.fields.iterable import SetField, ListField
from gcloudc.db.models.fields.related import RelatedSetField, RelatedListField, GenericRelationField
from gcloudc.db.models.fields.json import JSONField
from . import TestCase
from .models import (
BasicTestModel,
BinaryFieldModel,
ModelWithCharField,
NonIndexedModel,
PFAwards,
PFAuthor,
PFPost,
ISOther,
ISStringReferenceModel,
)
class BasicTest(TestCase):
def test_basic_connector_usage(self):
# Create
instance = BasicTestModel.objects.create(field1="Hello World!", field2=1998)
# Count
self.assertEqual(1, BasicTestModel.objects.count())
# Get
self.assertEqual(instance, BasicTestModel.objects.get())
# Update
instance.field1 = "Hello Mars!"
instance.save()
# Query
instance2 = BasicTestModel.objects.filter(field1="Hello Mars!")[0]
self.assertEqual(instance, instance2)
self.assertEqual(instance.field1, instance2.field1)
# Query by PK
instance2 = BasicTestModel.objects.filter(pk=instance.pk)[0]
self.assertEqual(instance, instance2)
self.assertEqual(instance.field1, instance2.field1)
# Non-existent PK
instance3 = BasicTestModel.objects.filter(pk=999).first()
self.assertIsNone(instance3)
# Unique field
instance2 = BasicTestModel.objects.filter(field2=1998)[0]
self.assertEqual(instance, instance2)
self.assertEqual(instance.field1, instance2.field1)
class CharFieldModelTests(TestCase):
def test_char_field_with_max_length_set(self):
test_bytestrings = [(u"01234567891", 11), (u"ążźsęćńół", 17)]
for test_text, byte_len in test_bytestrings:
test_instance = ModelWithCharField(char_field_with_max=test_text)
self.assertRaisesMessage(
ValidationError,
"Ensure this value has at most 10 bytes (it has %d)." % byte_len,
test_instance.full_clean,
)
def test_char_field_with_not_max_length_set(self):
longest_valid_value = u"0123456789" * 150
too_long_value = longest_valid_value + u"more"
test_instance = ModelWithCharField(char_field_without_max=longest_valid_value)
test_instance.full_clean() # max not reached so it's all good
test_instance.char_field_without_max = too_long_value
self.assertRaisesMessage(
ValidationError, u"Ensure this value has at most 1500 bytes (it has 1504).", test_instance.full_clean
)
def test_additional_validators_work(self):
test_instance = ModelWithCharField(char_field_as_email="bananas")
self.assertRaisesMessage(ValidationError, "failed", test_instance.full_clean)
    def test_too_long_max_value_set(self):
        try:
            class TestModel(models.Model):
                test_char_field = CharField(max_length=1501)
        except AssertionError as e:
            self.assertEqual(str(e), "CharFields max_length must not be greater than 1500 bytes.")
        else:
            self.fail("CharField should reject max_length greater than 1500 bytes")
class ModelWithCharOrNoneField(models.Model):
char_or_none_field = CharOrNoneField(max_length=100)
class CharOrNoneFieldTests(TestCase):
def test_char_or_none_field(self):
# Ensure that empty strings are coerced to None on save
obj = ModelWithCharOrNoneField.objects.create(char_or_none_field="")
obj.refresh_from_db()
self.assertIsNone(obj.char_or_none_field)
class StringReferenceRelatedSetFieldModelTests(TestCase):
def test_can_update_related_field_from_form(self):
related = ISOther.objects.create()
thing = ISStringReferenceModel.objects.create(related_things={related})
before_set = thing.related_things
thing.related_list.field.save_form_data(thing, set())
thing.save()
self.assertNotEqual(before_set.all(), thing.related_things.all())
def test_saving_forms(self):
class TestForm(forms.ModelForm):
class Meta:
model = ISStringReferenceModel
fields = ("related_things",)
related = ISOther.objects.create()
post_data = {"related_things": [str(related.pk)]}
form = TestForm(post_data)
self.assertTrue(form.is_valid())
instance = form.save()
self.assertEqual({related.pk}, instance.related_things_ids)
class RelatedFieldPrefetchTests(TestCase):
def test_prefetch_related(self):
award = PFAwards.objects.create(name="award")
author = PFAuthor.objects.create(awards={award})
PFPost.objects.create(authors={author})
posts = list(PFPost.objects.all().prefetch_related("authors__awards"))
with self.assertNumQueries(0):
list(posts[0].authors.all()[0].awards.all())
class PickleTests(TestCase):
def test_all_fields_are_pickleable(self):
""" In order to work with Djangae's migrations, all fields must be pickeable. """
fields = [
CharField(),
CharOrNoneField(),
ComputedBooleanField("method_name"),
ComputedCharField("method_name"),
ComputedIntegerField("method_name"),
ComputedPositiveIntegerField("method_name"),
ComputedTextField("method_name"),
GenericRelationField(),
JSONField(default=list),
ListField(CharField(), default=["badger"]),
SetField(CharField(), default=set(["badger"])),
]
fields.extend(
[RelatedListField(ModelWithCharField), RelatedSetField(ModelWithCharField)]
)
for field in fields:
try:
pickle.dumps(field)
except (pickle.PicklingError, TypeError) as e:
self.fail("Could not pickle %r: %s" % (field, e))
class BinaryFieldModelTests(TestCase):
binary_value = b"\xff"
def test_insert(self):
obj = BinaryFieldModel.objects.create(binary=self.binary_value)
obj.save()
readout = BinaryFieldModel.objects.get(pk=obj.pk)
assert readout.binary == self.binary_value
def test_none(self):
obj = BinaryFieldModel.objects.create()
obj.save()
readout = BinaryFieldModel.objects.get(pk=obj.pk)
assert readout.binary is None
def test_update(self):
obj = BinaryFieldModel.objects.create()
obj.save()
toupdate = BinaryFieldModel.objects.get(pk=obj.pk)
toupdate.binary = self.binary_value
toupdate.save()
readout = BinaryFieldModel.objects.get(pk=obj.pk)
assert readout.binary == self.binary_value
class CharFieldModel(models.Model):
char_field = models.CharField(max_length=500)
class CharFieldModelTest(TestCase):
def test_query(self):
instance = CharFieldModel(char_field="foo")
instance.save()
readout = CharFieldModel.objects.get(char_field="foo")
self.assertEqual(readout, instance)
def test_query_unicode(self):
name = u"Jacqu\xe9s"
instance = CharFieldModel(char_field=name)
instance.save()
readout = CharFieldModel.objects.get(char_field=name)
self.assertEqual(readout, instance)
@override_settings(DEBUG=True)
def test_query_unicode_debug(self):
""" Test that unicode query can be performed in DEBUG mode,
which will use CursorDebugWrapper and call last_executed_query.
"""
name = u"Jacqu\xe9s"
instance = CharFieldModel(char_field=name)
instance.save()
readout = CharFieldModel.objects.get(char_field=name)
self.assertEqual(readout, instance)
class DurationFieldModelWithDefault(models.Model):
duration = models.DurationField(default=timedelta(1, 0))
class DurationFieldModelTests(TestCase):
def test_creates_with_default(self):
instance = DurationFieldModelWithDefault()
self.assertEqual(instance.duration, timedelta(1, 0))
instance.save()
readout = DurationFieldModelWithDefault.objects.get(pk=instance.pk)
self.assertEqual(readout.duration, timedelta(1, 0))
def test_none_saves_as_default(self):
instance = DurationFieldModelWithDefault()
# this could happen if we were reading an existing instance out of the database that didn't have this field
instance.duration = None
instance.save()
readout = DurationFieldModelWithDefault.objects.get(pk=instance.pk)
self.assertEqual(readout.duration, timedelta(1, 0))
class ModelWithNonNullableFieldAndDefaultValue(models.Model):
some_field = models.IntegerField(null=False, default=1086)
class NonIndexedModelFieldsTests(TestCase):
def test_long_textfield(self):
long_text = "A" * 1501
instance = NonIndexedModel()
instance.content = long_text
instance.save()
def test_big_binaryfield(self):
long_binary = ("A" * 1501).encode('utf-8')
instance = NonIndexedModel()
instance.binary = long_binary
instance.save()
# ModelWithNonNullableFieldAndDefaultValueTests verifies that we maintain same
# behavior as Django with respect to a model field that is non-nullable with default value.
class ModelWithNonNullableFieldAndDefaultValueTests(TestCase):
def _create_instance_with_null_field_value(self):
instance = ModelWithNonNullableFieldAndDefaultValue.objects.create(some_field=1)
client = connection.connection.gclient
entity = client.get(
client.key(
ModelWithNonNullableFieldAndDefaultValue._meta.db_table,
instance.pk,
namespace=connection.settings_dict.get("NAMESPACE", ""),
)
)
del entity["some_field"]
client.put(entity)
instance.refresh_from_db()
return instance
def test_none_in_db_reads_as_none_in_model(self):
instance = self._create_instance_with_null_field_value()
self.assertIsNone(instance.some_field)
def test_none_in_model_saved_as_default(self):
instance = self._create_instance_with_null_field_value()
instance.save()
instance.refresh_from_db()
self.assertEqual(instance.some_field, 1086)
|
132182
|
import math
import multiprocessing as mp
import os
import torch
import torch.distributed as dist
from torch.nn import Module
from torch.utils.data import Sampler
class DistModule(Module):
def __init__(self, module):
super(DistModule, self).__init__()
self.module = module
broadcast_params(self.module)
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def train(self, mode=True):
super(DistModule, self).train(mode)
self.module.train(mode)
def average_gradients(model):
""" average gradients """
for param in model.parameters():
if param.requires_grad and param.grad is not None:
dist.all_reduce(param.grad.data)
def broadcast_params(model):
""" broadcast model parameters """
for p in model.state_dict().values():
dist.broadcast(p, 0)
def average_params(model):
""" broadcast model parameters """
worldsize = dist.get_world_size()
for p in model.state_dict().values():
dist.all_reduce(p)
p /= worldsize
def dist_init(port):
if mp.get_start_method(allow_none=True) != 'spawn':
mp.set_start_method('spawn')
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
if '[' in node_list:
beg = node_list.find('[')
pos1 = node_list.find('-', beg)
if pos1 < 0:
pos1 = 1000
pos2 = node_list.find(',', beg)
if pos2 < 0:
pos2 = 1000
node_list = node_list[:min(pos1, pos2)].replace('[', '')
addr = node_list[8:].replace('-', '.')
print(addr)
os.environ['MASTER_PORT'] = port
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend='nccl')
rank = dist.get_rank()
world_size = dist.get_world_size()
return rank, world_size
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError(
"Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError(
"Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(
math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
        # NOTE: unlike torch's built-in DistributedSampler, no epoch-based
        # shuffling is performed here; indices are yielded in order
indices = [i for i in range(len(self.dataset))]
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank * self.num_samples:(self.rank + 1) *
self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
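# A minimal usage sketch (assumed dataset/loop names, for illustration only):
#   rank, world_size = dist_init("23456")
#   sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, sampler=sampler)
#   for epoch in range(num_epochs):
#       sampler.set_epoch(epoch)
#       for batch in loader:
#           ...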
|
132184
|
import numpy as np
from scipy.special import expit
from librosa.core import midi_to_hz
from omnizart.constants.midi import LOWEST_MIDI_NOTE
def inference(feature, model, timestep=128, batch_size=10, feature_num=384):
assert len(feature.shape) == 2
# Padding
total_samples = len(feature)
pad_bottom = (feature_num - feature.shape[1]) // 2
pad_top = feature_num - feature.shape[1] - pad_bottom
pad_len = timestep - 1
feature = np.pad(feature, ((pad_len, pad_len), (pad_bottom, pad_top)))
# Prepare for prediction
output = np.zeros(feature.shape + (2,))
total_batches = int(np.ceil(total_samples / batch_size))
last_batch_idx = len(feature) - pad_len
for bidx in range(total_batches):
print(f"batch: {bidx+1}/{total_batches}", end="\r")
# Collect batch feature
start_idx = bidx * batch_size
end_idx = min(start_idx + batch_size, last_batch_idx)
batch = np.array([feature[idx:idx+timestep] for idx in range(start_idx, end_idx)]) # noqa: E226
batch = np.expand_dims(batch, axis=3)
# Predict contour
batch_pred = model.predict(batch)
        batch_pred = expit(batch_pred)  # logistic sigmoid; expit already computes 1 / (1 + exp(-x))
# Add the batch results to the output container.
for idx, pred in enumerate(batch_pred):
slice_start = start_idx + idx
slice_end = slice_start + timestep
output[slice_start:slice_end] += pred
output = output[pad_len:-pad_len, pad_bottom:-pad_top, 1] # Remove padding
# Filter values
avg_max_val = np.mean(np.max(output, axis=1))
output = np.where(output > avg_max_val, output, 0)
# Generate final output F0
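    # 384 feature bins / 4 bins per semitone = 96 semitones of range, hence the
    # pidx / 4 mapping back to MIDI numbers below.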
f0 = [] # pylint: disable=invalid-name
for pitches in output:
if np.sum(pitches) > 0:
pidx = np.argmax(pitches)
f0.append(midi_to_hz(pidx / 4 + LOWEST_MIDI_NOTE))
else:
f0.append(0)
return np.array(f0)
|
132194
|
import numpy as np
from numpy.linalg import norm
from joblib import Parallel, delayed
import pandas
from bcdsugar.utils import Monitor
from sparse_ho.ho import grad_search
from itertools import product
from sparse_ho.criterion import HeldOutSmoothedHinge
from sparse_ho.models import SVM
from sparse_ho.forward import Forward
from sparse_ho.implicit_forward import ImplicitForward
from sparse_ho.implicit import Implicit
from sparse_ho.datasets.real import get_data
from sparse_ho.grid_search import grid_search
# from my_data import get_data
dataset_names = ["real-sim"]
# methods = ["implicit_forward", "implicit"]
methods = ["forward", "implicit_forward"]
# "grid_search",
tolerance_decreases = ["constant"]
tols = 1e-5
n_outers = [1]
dict_t_max = {}
dict_t_max["rcv1"] = 50
dict_t_max["real-sim"] = 100
dict_t_max["leukemia"] = 10
dict_t_max["20news"] = 500
def parallel_function(
dataset_name, method, tol=1e-5, n_outer=50,
tolerance_decrease='exponential'):
# load data
X_train, X_val, X_test, y_train, y_val, y_test = get_data(dataset_name, csr=True)
n_samples, n_features = X_train.shape
print('n_samples', n_samples)
print('n_features', n_features)
y_train[y_train == 0.0] = -1.0
y_val[y_val == 0.0] = -1.0
y_test[y_test == 0.0] = -1.0
C_max = 100
logC = np.log(1e-2)
n_outer = 5
if dataset_name == "rcv1":
size_loop = 1
else:
size_loop = 1
model = SVM(
X_train, y_train, logC, max_iter=10000, tol=tol)
for i in range(size_loop):
monitor = Monitor()
if method == "implicit_forward":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = ImplicitForward(criterion, tol_jac=1e-3, n_iter_jac=100)
_, _, _ = grad_search(
algo=algo, verbose=False,
log_alpha0=logC, tol=tol,
n_outer=n_outer, monitor=monitor,
t_max=dict_t_max[dataset_name],
tolerance_decrease=tolerance_decrease)
elif method == "forward":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = Forward(criterion)
_, _, _ = grad_search(
algo=algo,
log_alpha0=logC, tol=tol,
n_outer=n_outer, monitor=monitor,
t_max=dict_t_max[dataset_name],
tolerance_decrease=tolerance_decrease)
elif method == "implicit":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = Implicit(criterion)
_, _, _ = grad_search(
algo=algo,
log_alpha0=logC, tol=tol,
n_outer=n_outer, monitor=monitor,
t_max=dict_t_max[dataset_name],
tolerance_decrease=tolerance_decrease)
elif method == "grid_search":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = Forward(criterion)
log_alpha_min = np.log(1e-2)
log_alpha_opt, min_g_func = grid_search(
algo, log_alpha_min, np.log(C_max), monitor, max_evals=25,
tol=tol, samp="grid")
print(log_alpha_opt)
elif method == "random":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = Forward(criterion)
log_alpha_min = np.log(1e-2)
log_alpha_opt, min_g_func = grid_search(
algo, log_alpha_min, np.log(C_max), monitor, max_evals=25,
tol=tol, samp="random")
print(log_alpha_opt)
elif method == "lhs":
criterion = HeldOutSmoothedHinge(X_val, y_val, model, X_test=X_test, y_test=y_test)
algo = Forward(criterion)
log_alpha_min = np.log(1e-2)
log_alpha_opt, min_g_func = grid_search(
algo, log_alpha_min, np.log(C_max), monitor, max_evals=25,
tol=tol, samp="lhs")
print(log_alpha_opt)
monitor.times = np.array(monitor.times)
monitor.objs = np.array(monitor.objs)
monitor.objs_test = np.array(monitor.objs_test)
monitor.log_alphas = np.array(monitor.log_alphas)
return (dataset_name, method, tol, n_outer, tolerance_decrease,
monitor.times, monitor.objs, monitor.objs_test,
monitor.log_alphas, norm(y_val), norm(y_test))
print("enter parallel")
backend = 'loky'
n_jobs = 1
results = Parallel(n_jobs=n_jobs, verbose=100, backend=backend)(
delayed(parallel_function)(
dataset_name, method, n_outer=n_outer,
tolerance_decrease=tolerance_decrease, tol=tols)
for dataset_name, method, n_outer,
tolerance_decrease in product(
dataset_names, methods, n_outers, tolerance_decreases))
print('OK finished parallel')
df = pandas.DataFrame(results)
df.columns = [
'dataset', 'method', 'tol', 'n_outer', 'tolerance_decrease',
'times', 'objs', 'objs_test', 'log_alphas', 'norm y_val',
'norm y_test']
for dataset_name in dataset_names:
df[df['dataset'] == dataset_name].to_pickle(
"%s.pkl" % dataset_name)
|
132212
|
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from experiment import ComponentBase
from experiment.qa.model import weight_variable
from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax
class LSTMBasedImportanceWeighting(ComponentBase):
def __init__(self, config, config_global, logger):
super(LSTMBasedImportanceWeighting, self).__init__(config, config_global, logger)
self.__lstm_history = set()
self.only_reduction = self.config.get('only_reduction', True)
@property
def lstm_pooling_cell_size(self):
return self.config.get('lstm_pooling_cell_size', 50)
def importance_weighting(self, raw_representation, indices, item_type, apply_softmax=True):
re_use = item_type in self.__lstm_history
self.__lstm_history.add(item_type)
if item_type == 'question':
lstm_cell_fw = self.lstm_cell_weighting_Q_fw
lstm_cell_bw = self.lstm_cell_weighting_Q_bw
mul_weight = self.mul_Q
reduction_weight = self.reduction_Q
else:
lstm_cell_fw = self.lstm_cell_weighting_A_fw
lstm_cell_bw = self.lstm_cell_weighting_A_bw
mul_weight = self.mul_A
reduction_weight = self.reduction_A
tensor_non_zero_token = non_zero_tokens(tf.to_float(indices))
sequence_length = tf.to_int64(tf.reduce_sum(tensor_non_zero_token, 1))
with tf.variable_scope('positional_weighting_lstm_{}'.format(item_type), reuse=re_use):
lstm_outputs, _last = tf.nn.bidirectional_dynamic_rnn(
lstm_cell_fw,
lstm_cell_bw,
raw_representation,
dtype=tf.float32,
sequence_length=sequence_length
)
lstm_output = tf.concat(axis=2, values=lstm_outputs)
# apply dense over each individual lstm output
if self.only_reduction:
flat_lstm_output = tf.reshape(lstm_output, [-1, self.lstm_pooling_cell_size * 2])
dense_mul_flat = tf.matmul(flat_lstm_output, reduction_weight)
h1_layer = tf.reshape(dense_mul_flat, [-1, tf.shape(raw_representation)[1]])
else:
flat_lstm_output = tf.reshape(raw_representation, [-1, self.lstm_pooling_cell_size * 2])
dense_mul_flat = tf.nn.tanh(tf.matmul(flat_lstm_output, mul_weight))
reduction = tf.matmul(dense_mul_flat, reduction_weight)
h1_layer = tf.reshape(reduction, [-1, tf.shape(raw_representation)[1]])
if apply_softmax:
return attention_softmax(h1_layer, tensor_non_zero_token)
else:
return h1_layer
def initialize_weights(self):
cell_size = self.lstm_pooling_cell_size
self.mul_Q = weight_variable('mul_Q', [cell_size * 2, cell_size * 2])
self.reduction_Q = weight_variable('reduction_Q', [cell_size * 2, 1])
self.mul_A = weight_variable('mul_A', [cell_size * 2, cell_size * 2])
self.reduction_A = weight_variable('reduction_A', [cell_size * 2, 1])
with tf.variable_scope('lstm_cell_weighting_Q_fw'):
self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
with tf.variable_scope('lstm_cell_weighting_Q_bw'):
self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
with tf.variable_scope('lstm_cell_weighting_A_fw'):
self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
with tf.variable_scope('lstm_cell_weighting_A_bw'):
self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
|
132258
|
import pandas as pd
import numpy as np
import mplfinance as mpf # pip install mplfinance
import akshare as ak
import talib as ta
# Step 1: fetch the data and compute the indicators
def get_data(item_code):
stock_df = ak.stock_zh_a_hist(symbol=item_code, adjust="qfq")
stock_df['日期'] = pd.to_datetime(stock_df['日期'])
stock_df.rename(columns={"日期": "date", '开盘': "open", "收盘": "close", "最高": "high", "最低": "low",
"成交量": "volume", "成交额": "value", "振幅": "amplitude", "涨跌额": "change",
"涨跌幅": "pct_change", "换手率": "turnover_rate"},
inplace=True)
stock_df.set_index("date", inplace=True)
    stock_df['average'] = round((stock_df['high'] + stock_df['low']) / 2, 2)
stock_df['upper_lim'] = round(stock_df['open'] * 1.1, 2)
stock_df['lower_lim'] = round(stock_df['open'] * 0.9, 2)
stock_df['last_close'] = stock_df['close'].shift(1)
stock_df['MA5'] = ta.MA(stock_df['close'], timeperiod=5)
stock_df['MA10'] = ta.MA(stock_df['close'], timeperiod=10)
stock_df['MA20'] = ta.MA(stock_df['close'], timeperiod=20)
stock_df['MA60'] = ta.MA(stock_df['close'], timeperiod=60)
    # Compute MACD
stock_df['macd-m'], stock_df['macd-s'], stock_df['macd-h'] = ta.MACD(stock_df['close'], fastperiod=12,
slowperiod=26, signalperiod=9)
    # Compute Bollinger Bands
stock_df['bb-upper'], stock_df['bb-middle'], stock_df['bb-lower'] = ta.BBANDS(stock_df['close'], timeperiod=5,
nbdevup=2, nbdevdn=2, matype=0)
# dema
stock_df['dema'] = ta.DEMA(stock_df['close'], timeperiod=30)
# rsi
stock_df['rsi'] = ta.RSI(stock_df['close'], timeperiod=14)
return stock_df
# Plot the candlestick chart
symbol = "000001"
stock_name = "平安银行"
data = get_data(symbol)
# Take a slice of the data for plotting
data = data.loc["2021-05-20":, :]
my_color = mpf.make_marketcolors(up='r',
down='g',
edge='inherit',
wick='inherit',
volume='inherit')
my_style = mpf.make_mpf_style(marketcolors=my_color,
rc={'font.family': 'SimHei', 'axes.unicode_minus': 'False'},
figcolor='(0.82, 0.83, 0.85)',
gridcolor='(0.82, 0.83, 0.85)')
title_font = {
'size': '16',
'color': 'black',
'weight': 'bold',
'va': 'bottom',
'ha': 'center'}
large_red_font = {
'fontname': 'Arial',
'size': '24',
'color': 'red',
'weight': 'bold',
'va': 'bottom'}
large_green_font = {
'fontname': 'Arial',
'size': '24',
'color': 'green',
'weight': 'bold',
'va': 'bottom'}
small_red_font = {
'fontname': 'Arial',
'size': '12',
'color': 'red',
'weight': 'bold',
'va': 'bottom'}
small_green_font = {
'fontname': 'Arial',
'size': '12',
'color': 'green',
'weight': 'bold',
'va': 'bottom'}
normal_label_font = {
'size': '12',
'color': 'black',
'weight': 'normal',
'va': 'bottom',
'ha': 'right'}
normal_font = {
'fontname': 'Arial',
'size': '12',
'color': 'black',
'weight': 'normal',
'va': 'bottom',
'ha': 'left'}
# Initialize the figure object, create three Axes objects on it, and set each
# one's position and basic properties
fig = mpf.figure(style=my_style, figsize=(12, 8), facecolor=(0.82, 0.83, 0.85))
ax1 = fig.add_axes([0.08, 0.25, 0.88, 0.60])
ax2 = fig.add_axes([0.08, 0.15, 0.88, 0.10], sharex=ax1)
ax2.set_ylabel('volume')
ax3 = fig.add_axes([0.08, 0.05, 0.88, 0.10], sharex=ax1)
ax3.set_ylabel('macd')
# Pre-place the text objects on the figure and set their formats; the text
# contents are updated from the data to be displayed
t1 = fig.text(0.50, 0.94, '{} - {}'.format(symbol, stock_name), **title_font)
t2 = fig.text(0.12, 0.90, '开/收: ', **normal_label_font)
t3 = fig.text(0.14, 0.89, f'', **large_red_font)
t4 = fig.text(0.14, 0.86, f'', **small_red_font)
t5 = fig.text(0.22, 0.86, f'', **small_red_font)
t6 = fig.text(0.12, 0.86, f'', **normal_label_font)
t7 = fig.text(0.40, 0.90, '高: ', **normal_label_font)
t8 = fig.text(0.40, 0.90, f'', **small_red_font)
t9 = fig.text(0.40, 0.86, '低: ', **normal_label_font)
t10 = fig.text(0.40, 0.86, f'', **small_green_font)
t11 = fig.text(0.55, 0.90, '量(万手): ', **normal_label_font)
t12 = fig.text(0.55, 0.90, f'', **normal_font)
t13 = fig.text(0.55, 0.86, '额(亿元): ', **normal_label_font)
t14 = fig.text(0.55, 0.86, f'', **normal_font)
t15 = fig.text(0.70, 0.90, '涨停: ', **normal_label_font)
t16 = fig.text(0.70, 0.90, f'', **small_red_font)
t17 = fig.text(0.70, 0.86, '跌停: ', **normal_label_font)
t18 = fig.text(0.70, 0.86, f'', **small_green_font)
t19 = fig.text(0.85, 0.90, '换手: ', **normal_label_font)
t20 = fig.text(0.85, 0.90, f'', **normal_font)
t21 = fig.text(0.85, 0.86, '昨收: ', **normal_label_font)
t22 = fig.text(0.85, 0.86, f'', **normal_font)
""" 更新K线图上的价格文本
"""
# data.iloc[-1]是一个交易日内的所有数据,将这些数据分别填入figure对象上的文本中
t3.set_text(f'{np.round(data.iloc[-1]["open"], 3)} / {np.round(data.iloc[-1]["close"], 3)}')
t4.set_text(f'{np.round(data.iloc[-1]["change"], 3)}')
t5.set_text(f'[{np.round(data.iloc[-1]["pct_change"], 3)}%]')
t6.set_text(f'{data.iloc[-1].name.date()}')
t8.set_text(f'{np.round(data.iloc[-1]["high"], 3)}')
t10.set_text(f'{np.round(data.iloc[-1]["low"], 3)}')
t12.set_text(f'{np.round(data.iloc[-1]["volume"] / 10000, 3)}')
t14.set_text(f'{np.round(data.iloc[-1]["value"]/100000000, 3)}')
t16.set_text(f'{np.round(data.iloc[-1]["upper_lim"], 3)}')
t18.set_text(f'{np.round(data.iloc[-1]["lower_lim"], 3)}')
t20.set_text(f'{np.round(data.iloc[-1]["turnover_rate"], 3)}')
t22.set_text(f'{np.round(data.iloc[-1]["last_close"], 3)}')
# Choose the display color of the open/close prices from today's price change
if data.iloc[-1]['change'] > 0:  # change > 0: today closed above yesterday, show in red
    close_number_color = 'red'
elif data.iloc[-1]['change'] < 0:  # change < 0: today closed below yesterday, show in green
close_number_color = 'green'
else:
close_number_color = 'black'
t3.set_color(close_number_color)
t4.set_color(close_number_color)
t5.set_color(close_number_color)
avg_type = "bb"
indicator = "macd"
ap = []
# Overlay averages on the candlesticks: moving averages or Bollinger Bands
# depending on avg_type
if avg_type == 'ma':
ap.append(mpf.make_addplot(data['MA5'], ax=ax1, color="#000000"))
ap.append(mpf.make_addplot(data['MA10'], ax=ax1, color="#ff0000"))
ap.append(mpf.make_addplot(data['MA20'], ax=ax1, color="#00ff00"))
ap.append(mpf.make_addplot(data['MA60'], ax=ax1, color="#0000ff"))
elif avg_type == 'bb':
ap.append(mpf.make_addplot(data[['bb-upper', 'bb-middle', 'bb-lower']], ax=ax1))
# Add the indicator panel: MACD, RSI, or DEMA depending on indicator
if indicator == 'macd':
ap.append(mpf.make_addplot(data[['macd-m', 'macd-s']], ylabel='macd', ax=ax3))
bar_r = np.where(data['macd-h'] > 0, data['macd-h'], 0)
bar_g = np.where(data['macd-h'] <= 0, data['macd-h'], 0)
ap.append(mpf.make_addplot(bar_r, type='bar', color='red', ax=ax3))
ap.append(mpf.make_addplot(bar_g, type='bar', color='green', ax=ax3))
elif indicator == 'rsi':
ap.append(mpf.make_addplot([75] * len(data), color=(0.75, 0.6, 0.6), ax=ax3))
ap.append(mpf.make_addplot([30] * len(data), color=(0.6, 0.75, 0.6), ax=ax3))
ap.append(mpf.make_addplot(data['rsi'], ylabel='rsi', ax=ax3))
else: # 'dema'
ap.append(mpf.make_addplot(data['dema'], ylabel='dema', ax=ax3))
# Draw the chart
mpf.plot(data,
ax=ax1,
volume=ax2,
addplot=ap,
type='candle',
style=my_style,
datetime_format='%Y-%m-%d',
xrotation=0)
# fig.show()
mpf.show()
# Save to a local file
# fig.savefig('a.png')
|
132282
|
import datetime
from graphene_sqlalchemy import SQLAlchemyObjectType
from graphene import Mutation, Boolean, String, Field
from app.models.User import User as UserModel
from graphql import GraphQLError
class User(SQLAlchemyObjectType):
class Meta:
model = UserModel
exclude_fields = ("password",)
class LoginUser(Mutation):
ok = Boolean(description="Request status")
message = String(description="Request message")
access_token = String(description="User's Access Token")
refresh_token = String(description="User's Refresh Token")
user = Field(User)
class Input:
email = String(description="User's email address")
password = String(description="<PASSWORD>'s password")
def mutate(self, info, email, password):
user = UserModel.query.filter_by(email=email).scalar()
if user and user._verify_password(password):
user.last_logged_in = datetime.datetime.now()
try:
user.save()
except Exception as e:
raise GraphQLError('Unable to update user', e)
else:
ok = True
message = "User has successfully logged in"
return LoginUser(
access_token=user.generate_access_token(),
refresh_token=user.generate_refresh_token(),
ok=ok,
message=message,
user=user
)
else:
raise Exception('Invalid Login Credentials')
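# A minimal example of invoking this mutation (field names inferred from the
# class above; the actual schema wiring lives elsewhere in the app):
#   mutation {
#     loginUser(email: "user@example.com", password: "secret") {
#       ok
#       message
#       accessToken
#       refreshToken
#     }
#   }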
|
132304
|
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import StreamTableEnvironment, DataTypes
from pyflink.table.descriptors import Schema, OldCsv, FileSystem
from pyflink.table.udf import udf
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(env)
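# Note: the lambda-based `add` below is immediately shadowed by the decorated
# `add` that follows; only the decorated version (which pulls in the mpmath
# dependency) is ultimately registered.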
add = udf(lambda i, j: i + j, [DataTypes.BIGINT(), DataTypes.BIGINT()], DataTypes.BIGINT())
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add(i, j):
from mpmath import fadd # add third-party dependency
return int(fadd(1, 2))
t_env.set_python_requirements("/opt/examples/data/requirements.txt")
t_env.register_function("add", add)
t_env.connect(FileSystem().path('/opt/examples/data/udf_add_input')) \
.with_format(OldCsv()
.field('a', DataTypes.BIGINT())
.field('b', DataTypes.BIGINT())) \
.with_schema(Schema()
.field('a', DataTypes.BIGINT())
.field('b', DataTypes.BIGINT())) \
.create_temporary_table('mySource')
t_env.connect(FileSystem().path('/opt/examples/data/udf_add_output')) \
.with_format(OldCsv()
.field('sum', DataTypes.BIGINT())) \
.with_schema(Schema()
.field('sum', DataTypes.BIGINT())) \
.create_temporary_table('mySink')
t_env.from_path('mySource')\
.select("add(a, b)") \
.insert_into('mySink')
t_env.execute("4-udf_add_with_dependency")
|
132307
|
import os
import pytest
import math
from astropy.io import fits
from astropy.table import Table
import numpy as np
import stwcs
from stwcs import updatewcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata_root
from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
try:
from ci_watson.artifactory_helpers import check_url
except ImportError:
from ci_watson.artifactory_helpers import _is_url as check_url
from .base_classes import BaseTest
__all__ = ['BaseHLATest', 'BaseHLAParTest', 'centroid_compare', 'BaseUnit']
@pytest.mark.usefixtures('_jail')
class BaseHLATest(BaseTest):
ignore_hdus = []
input_repo = 'hst-hla-pipeline'
results_root = 'hst-hla-pipeline-results'
output_shift_file = None
fit_limit = 0.010 # 10 milli-arcseconds
docopy = False # Do not make additional copy by default
rtol = 1e-6
refstr = 'jref'
prevref = os.environ.get(refstr)
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
reffile_lookup = ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE']
def set_environ(self):
# Enforce copies of data when TEST_BIGDATA is URL
input_dir = get_bigdata_root()
if input_dir and check_url(input_dir):
self.docopy = True
# NOTE: This could be explicitly controlled using pytest fixture
# but too many ways to do the same thing would be confusing.
# Refine this logic if using pytest fixture.
# HSTCAL cannot open remote CRDS on FTP but central storage is okay.
# So use central storage if available to avoid FTP.
        if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
            os.environ[self.refstr] = self.curdir + os.sep
            self.use_ftp_crds = True
        else:
            self.use_ftp_crds = False
# Turn off Astrometry updates
os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
    def raw_from_asn(self, asn_file, suffix='_flt.fits'):
        return raw_from_asn(asn_file, suffix=suffix)
def get_input_file(self, *args, refsep='$', **kwargs):
# If user has specified action for docopy, apply it with
# default behavior being whatever was defined in the base class.
docopy = kwargs.get('docopy', self.docopy)
# Download or copy input file (e.g., RAW) into the working directory.
# The associated CRDS reference files in ``refstr`` are also
# downloaded, if necessary.
curdir = os.getcwd()
filenames = self.get_data(*args, docopy=docopy)
for filename in filenames:
ref_files = ref_from_image(filename, reffile_lookup=self.reffile_lookup)
print("Looking for {} REF_FILES: {}".format(filename, ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
self.get_data('customRef', ref_file, docopy=docopy)
else:
# Start by checking to see whether IRAF variable *ref/*tab
# has been added to os.environ
refdir, refname = ref_file.split(refsep)
refdir_parent = os.path.split(refdir)[0]
# Define refdir to point to current directory if:
# i. refdir is not defined in environment already
# ii. refdir in os.environ points to another test directory
# This logic should leave refdir unchanged if it already
# points to a globally defined directory.
if refdir not in os.environ or refdir_parent in curdir:
os.environ[refdir] = curdir + os.sep
# Download from FTP, if applicable
if self.use_ftp_crds:
download_crds(ref_file, timeout=self.timeout)
return filenames
# Pytest function to support the parameterization of these classes
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
idlist = [funcargs['id'] for funcargs in funcarglist]
del argnames[argnames.index('id')]
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
for funcargs in funcarglist], ids=idlist)
@pytest.mark.usefixtures('_jail')
class BaseHLAParTest(BaseHLATest):
params = {'test_modes':[dict(input="",
test_dir=None,
step_class=None,
step_pars=dict(),
output_truth="",
output_hdus=[])
]
}
def test_modes(self, input, test_dir, step_class, step_pars,
output_truth, output_hdus):
"""
Template method for parameterizing some tests based on JWST code.
"""
if test_dir is None:
return
self.test_dir = test_dir
self.ref_loc = [self.test_dir, 'truth']
# can be removed once all truth files have been updated
self.ignore_keywords += ['FILENAME']
input_file = self.get_data(self.test_dir, input)
result = step_class.call(input_file, **step_pars)
output_file = result.meta.filename
result.save(output_file)
result.close()
output_pars = None
if isinstance(output_truth, tuple):
output_pars = output_truth[1]
output_truth = output_truth[0]
if not output_pars:
if output_hdus:
output_spec = (output_file, output_truth, output_hdus)
else:
output_spec = (output_file, output_truth)
else:
output_spec = {'files':(output_file, output_truth),
'pars':output_pars}
outputs = [output_spec]
self.compare_outputs(outputs)
def centroid_compare(centroid):
return centroid[1]
class BaseUnit(BaseHLATest):
buff = 0
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
atol = 1.0e-5
def bound_image(self, image):
"""
Compute region where image is non-zero
"""
coords = np.nonzero(image)
ymin = coords[0].min()
ymax = coords[0].max()
xmin = coords[1].min()
xmax = coords[1].max()
return (ymin, ymax, xmin, xmax)
def centroid(self, image, size, center):
"""
Compute the centroid of a rectangular area
"""
ylo = int(center[0]) - size // 2
yhi = min(ylo + size, image.shape[0])
xlo = int(center[1]) - size // 2
xhi = min(xlo + size, image.shape[1])
center = [0.0, 0.0, 0.0]
for y in range(ylo, yhi):
for x in range(xlo, xhi):
center[0] += y * image[y,x]
center[1] += x * image[y,x]
center[2] += image[y,x]
if center[2] == 0.0: return None
center[0] /= center[2]
center[1] /= center[2]
return center
def centroid_close(self, list_of_centroids, size, point):
"""
Find if any centroid is close to a point
"""
for i in range(len(list_of_centroids)-1, -1, -1):
if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
abs(list_of_centroids[i][1] - point[1]) < size / 2):
return 1
return 0
def centroid_distances(self, image1, image2, amp, size):
"""
Compute a list of centroids and the distances between them in two images
"""
distances = []
list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
for center2, pt in zip(list_of_centroids, lst_pts):
center1 = self.centroid(image1, size, pt)
if center1 is None: continue
disty = center2[0] - center1[0]
distx = center2[1] - center1[1]
dist = math.sqrt(disty * disty + distx * distx)
dflux = abs(center2[2] - center1[2])
distances.append([dist, dflux, center1, center2])
distances.sort(key=centroid_compare)
return distances
def centroid_list(self, image, amp, size):
"""
Find the next centroid
"""
list_of_centroids = []
list_of_points = []
points = np.transpose(np.nonzero(image > amp))
for point in points:
if not self.centroid_close(list_of_centroids, size, point):
center = self.centroid(image, size, point)
list_of_centroids.append(center)
list_of_points.append(point)
return list_of_centroids, list_of_points
def centroid_statistics(self, title, fname, image1, image2, amp, size):
"""
        Write centroid statistics comparing the differences between two images
"""
stats = ("minimum", "median", "maximum")
images = (None, None, image1, image2)
im_type = ("", "", "test", "reference")
diff = []
distances = self.centroid_distances(image1, image2, amp, size)
indexes = (0, len(distances)//2, len(distances)-1)
fd = open(fname, 'w')
fd.write("*** %s ***\n" % title)
if len(distances) == 0:
diff = [0.0, 0.0, 0.0]
fd.write("No matches!!\n")
elif len(distances) == 1:
diff = [distances[0][0], distances[0][0], distances[0][0]]
fd.write("1 match\n")
fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
for j in range(2, 4):
ylo = int(distances[0][j][0]) - (1+self.buff)
yhi = int(distances[0][j][0]) + (2+self.buff)
xlo = int(distances[0][j][1]) - (1+self.buff)
xhi = int(distances[0][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
(im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
fd.write(str(subimage) + "\n")
else:
fd.write("%d matches\n" % len(distances))
for k in range(0,3):
i = indexes[k]
diff.append(distances[i][0])
fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
for j in range(2, 4):
ylo = int(distances[i][j][0]) - (1+self.buff)
yhi = int(distances[i][j][0]) + (2+self.buff)
xlo = int(distances[i][j][1]) - (1+self.buff)
xhi = int(distances[i][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
(stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
fd.write(str(subimage) + "\n")
fd.close()
return tuple(diff)
def make_point_image(self, input_image, point, value):
"""
Create an image with a single point set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
output_image[point] = value
return output_image
def make_grid_image(self, input_image, spacing, value):
"""
Create an image with points on a grid set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
shape = output_image.shape
for y in range(spacing//2, shape[0], spacing):
for x in range(spacing//2, shape[1], spacing):
output_image[y,x] = value
return output_image
def print_wcs(self, title, wcs):
"""
Print the wcs header cards
"""
print("=== %s ===" % title)
print(wcs.to_header_string())
def read_image(self, filename):
"""
Read the image from a fits file
"""
hdu = fits.open(filename)
image = hdu[1].data
hdu.close()
return image
def read_wcs(self, filename):
"""
Read the wcs of a fits file
"""
hdu = fits.open(filename)
wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
hdu.close()
return wcs
def write_wcs(self, hdu, image_wcs):
"""
Update header with WCS keywords
"""
hdu.header['ORIENTAT'] = image_wcs.orientat
hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
hdu.header['VAFACTOR'] = 1.0
def write_image(self, filename, wcs, *args):
"""
        Write an image to a FITS file as a multi-extension (MEF) file
"""
extarray = ['SCI', 'WHT', 'CTX']
pimg = fits.HDUList()
phdu = fits.PrimaryHDU()
phdu.header['NDRIZIM'] = 1
phdu.header['ROOTNAME'] = filename
pimg.append(phdu)
for img in args:
# Create a MEF file with the specified extname
extn = extarray.pop(0)
extname = fileutil.parseExtn(extn)
ehdu = fits.ImageHDU(data=img)
ehdu.header['EXTNAME'] = extname[0]
ehdu.header['EXTVER'] = extname[1]
self.write_wcs(ehdu, wcs)
pimg.append(ehdu)
pimg.writeto(filename)
del pimg
|
132312
|
import secrets
from base64 import urlsafe_b64decode, urlsafe_b64encode
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
class Secret:
"""Encrypt and decrypt secrets."""
__slots__ = ("secret", "passphrase")
def __init__(self, secret: bytes, passphrase: str):
self.secret = secret
self.passphrase = passphrase
def _derive_key(self, salt: bytes, iterations: int) -> bytes:
"""Derive a secret key from a given passphrase and salt."""
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=iterations,
backend=default_backend(),
)
return urlsafe_b64encode(kdf.derive(self.passphrase.encode()))
def encrypt(self, iterations: int = 100_000) -> bytes:
"""Encrypt secret."""
salt = secrets.token_bytes(16)
key = self._derive_key(salt, iterations)
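        # Packed layout below: 16-byte salt || 4-byte big-endian iteration
        # count || raw Fernet ciphertext, all wrapped in urlsafe base64.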
return urlsafe_b64encode(
b"%b%b%b"
% (
salt,
iterations.to_bytes(4, "big"),
urlsafe_b64decode(Fernet(key).encrypt(self.secret)),
)
)
def decrypt(self) -> str:
"""Decrypt secret."""
decoded = urlsafe_b64decode(self.secret)
salt, iteration, message = (
decoded[:16],
decoded[16:20],
urlsafe_b64encode(decoded[20:]),
)
iterations = int.from_bytes(iteration, "big")
key = self._derive_key(salt, iterations)
return Fernet(key).decrypt(message).decode("utf-8")
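# A minimal round-trip sketch (illustrative addition, not part of the original
# module): the token produced by encrypt() is exactly what decrypt() expects
# as its `secret` argument.
if __name__ == "__main__":
    token = Secret(b"attack at dawn", "hunter2").encrypt()
    assert Secret(token, "hunter2").decrypt() == "attack at dawn"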
|
132327
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from random import shuffle, random
from collections import defaultdict
import itertools
import mazebase.items as mi
from .mazeutils import MazeException
def sprinkle(game, tiles, tilemask=None):
    '''
    Sprinkles blocks into a map. tiles is given in the format
    [(MazeItem, float), ...], e.g. [(Block, .5)],
    where each MazeItem is sprinkled with the probability given by the
    second element. Defaults to generating on empty tiles, but you can
    override this with tilemask and specify a list of locations.
    Returns list of item ids.
    '''
if tilemask is None:
tilemask = empty_locations(game)
ids = []
for (x, y) in tilemask:
shuffle(tiles)
for tile, pct in tiles:
if random() < pct:
ids.append(game._add_item(tile(location=(x, y))))
break
return ids
def empty_locations(game, bad_blocks=None, mask=lambda x, y: True):
'''By default, finds empty locations in the map.
    If bad_blocks is not None, then finds locations without any bad_blocks,
but maybe with other block types
mask is a function that provides valid coordinates
'''
empties = []
for x, y in itertools.product(range(game.width), range(game.height)):
if not mask(x, y):
continue
itemlst = game._map[x][y]
if bad_blocks is None and itemlst == []:
empties.append((x, y))
elif bad_blocks is not None and not any(
isinstance(item, typ) for
item, typ in itertools.product(itemlst, bad_blocks)):
empties.append((x, y))
return empties
def dijkstra(game, initial, movefunc, weighted=False):
'''
Accepts:
game
initial: (x, y) tuple of start location
movefunc: f(loc) determines the locations you can move to from loc
weighted: use the _approx_reward_map instead of # of moves
Returns:
visited: dictionary of {location: distance} pairs
path: dictionary of {location: previous_location} pairs
'''
    visited = defaultdict(lambda: float("inf"))  # unvisited = infinite distance
visited[initial] = 0
path = {}
nodes = set(itertools.product(range(game.width), range(game.height)))
while nodes:
current = nodes.intersection(visited.keys())
if not current:
break
min_node = min(current, key=visited.get)
nodes.remove(min_node)
current_weight = visited[min_node]
x, y = min_node
for edge in movefunc(game, min_node):
# Maximize reward by minimizing "distance = - reward"
w = -game._approx_reward_map[edge[0]][edge[1]] if weighted else 1
weight = current_weight + w
if edge not in visited or weight < visited[edge]:
visited[edge] = weight
path[edge] = min_node
return visited, path
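def reconstruct_path(path, start, goal):
    '''Illustrative helper (an addition, not part of the original module):
    walk the predecessor map returned by dijkstra back from goal to start.
    '''
    if goal != start and goal not in path:
        return None  # goal was never reached
    route = [goal]
    while route[-1] != start:
        route.append(path[route[-1]])
    route.reverse()
    return route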
def __movefunc_helper(game, loc, movefunc_helper):
res = []
x, y = loc
for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
nx, ny = x + dx, y + dy
if not game._in_bounds((nx, ny)):
continue
if movefunc_helper(game, loc, (dx, dy)):
res.append((nx, ny))
return res
def agent_movefunc(game, loc):
''' Can move to non-block spaces '''
def helper(game, loc, dloc):
x, y = loc
dx, dy = dloc
nx, ny = x + dx, y + dy
return game._tile_get_block((nx, ny), mi.Block) is None
return __movefunc_helper(game, loc, helper)
def pushblock_movefunc(game, loc):
''' Can move if tile behind and in front are not blocked (so agent can push
from behind) '''
def helper(game, loc, dloc):
x, y = loc
dx, dy = dloc
tx, ty = x - dx, y - dy
nx, ny = x + dx, y + dy
return (game._in_bounds((tx, ty)) and
game._tile_get_block((nx, ny), mi.Block) is None and
game._tile_get_block((tx, ty), mi.Block) is None)
return __movefunc_helper(game, loc, helper)
|
132335
|
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver
cls = get_driver(Provider.POINTDNS)
driver = cls('username', 'apikey')
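# From here one would typically use the standard libcloud DNS driver calls,
# e.g. listing zones (list_zones is part of libcloud's base DNS driver API):
# zones = driver.list_zones()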
|
132336
|
from ctypes import c_uint
from ctypes.wintypes import HWND, WPARAM
from travertino.size import at_least
from travertino.constants import TRANSPARENT
from toga_winforms.colors import native_color
from toga_winforms.libs import HorizontalTextAlignment, WinForms, user32
from .base import Widget
class TextInput(Widget):
def create(self):
self.native = WinForms.TextBox()
self.native.Multiline = False
self.native.DoubleClick += self.winforms_double_click
self.native.TextChanged += self.winforms_text_changed
self.native.Validated += self.winforms_validated
self.native.GotFocus += self.winforms_got_focus
self.native.LostFocus += self.winforms_lost_focus
self.error_provider = WinForms.ErrorProvider()
self.error_provider.SetIconAlignment(
self.native, WinForms.ErrorIconAlignment.MiddleRight
)
self.error_provider.SetIconPadding(self.native, -20)
self.error_provider.BlinkStyle = WinForms.ErrorBlinkStyle.NeverBlink
def set_readonly(self, value):
self.native.ReadOnly = value
def set_placeholder(self, value):
# This solution is based on https://stackoverflow.com/questions/4902565/watermark-textbox-in-winforms
if self.interface.placeholder:
# Message Code for setting Cue Banner (Placeholder)
EM_SETCUEBANNER = c_uint(0x1501)
# value 0 means placeholder is hidden as soon the input gets focus
# value 1 means placeholder is hidden only after something is typed into input
show_placeholder_on_focus = WPARAM(1)
window_handle = HWND(self.native.Handle.ToInt32())
user32.SendMessageW(window_handle, EM_SETCUEBANNER, show_placeholder_on_focus, self.interface.placeholder)
def get_value(self):
return self.native.Text
def set_value(self, value):
self.native.Text = value
def set_alignment(self, value):
self.native.TextAlign = HorizontalTextAlignment(value)
def set_font(self, font):
if font:
self.native.Font = font.bind(self.interface.factory).native
def set_color(self, color):
if color:
self.native.ForeColor = native_color(color)
else:
self.native.ForeColor = self.native.DefaultForeColor
def set_background_color(self, value):
if value:
self.native.BackColor = native_color(value)
else:
self.native.BackColor = native_color(TRANSPARENT)
def rehint(self):
# Height of a text input is known and fixed.
# Width must be > 100
# print("REHINT TextInput", self, self.native.PreferredSize)
self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
self.interface.intrinsic.height = self.native.PreferredSize.Height
def set_on_change(self, handler):
# No special handling required
pass
def set_on_gain_focus(self, handler):
# No special handling required
pass
def set_on_lose_focus(self, handler):
# No special handling required
pass
def winforms_text_changed(self, sender, event):
if self.interface._on_change:
self.interface.on_change(self.interface)
def winforms_validated(self, sender, event):
self.interface.validate()
def winforms_got_focus(self, sender, event):
if self.container and self.interface.on_gain_focus:
self.interface.on_gain_focus(self.interface)
def winforms_lost_focus(self, sender, event):
if self.container and self.interface.on_lose_focus:
self.interface.on_lose_focus(self.interface)
def clear_error(self):
self.error_provider.SetError(self.native, "")
def set_error(self, error_message):
self.error_provider.SetError(self.native, error_message)
def winforms_double_click(self, sender, event):
self.native.SelectAll()
|
132341
|
import os, sys, logging
import json
import numpy as np
import random
from collections import defaultdict, Counter
import pickle
import cProfile, pstats
import threading
import time
import multiprocessing
import math
from sklearn import metrics
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from src.datasets import data_utils
from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder
from src.datasets.imdb import IMDB
from src.datasets.sentiment140 import Sentiment140
from src.datasets.amazon_reviews import AmazonReviews
from src.datasets.open_weiboscope import OpenWeibo
from src.datasets.arabic_twitter import ArabicTwitter
from src.datasets.word_vector_embedder import WordVectorEmbedder
data_fraction_test = 0.20
data_fraction_train = 0.80
num_threads = multiprocessing.cpu_count()
threadLock = threading.Lock()
# setup logging
logger = data_utils.syslogger(__name__)
# set output directory
dir_data = "/data"
try:
dir_results = os.path.join(dir_data, os.path.dirname(os.path.realpath(__file__)), 'results')
except NameError:
dir_results = os.path.join(dir_data, 'results')
# data inputs
datasets = [
# { 'sentiment140': {
# 'class': Sentiment140,
# 'path': os.path.join(dir_data, 'sentiment140.csv'),
# 'args': { 'load': { 'rng_seed': 13337 },
# 'embed': { 'type': 'averaged' },
# 'normalize': { 'min_length': 70,
# 'max_length': 150,
# 'reverse': False,
# 'pad_out': False
# },
# 'shuffle_after_load': False,
# 'models': [
# 'glove',
# 'word2vec'
# ]
# }
# }
# },
# { 'imdb': {
# 'class': IMDB,
# 'path': os.path.join(dir_data, 'imdb'),
# 'args': { 'load': { 'rng_seed': 13337 },
# 'embed': { 'type': 'averaged' },
# 'normalize': { 'encoding': None,
# 'reverse': False,
# 'pad_out': False,
# 'min_length': 0,
# 'max_length': 9999999
# },
# 'shuffle_after_load': False,
# 'models': [
# 'glove',
# 'word2vec'
# ]
# }
# }
# },
# { 'amazon': {
# 'class': AmazonReviews,
# 'path': os.path.join(dir_data, 'amazonreviews.gz'),
# 'args': { 'load': { 'rng_seed': 13337 },
# 'embed': { 'type': 'averaged' },
# 'normalize': { 'encoding': None,
# 'reverse': False,
# 'min_length': 0,
# 'max_length': 9999999,
# 'pad_out': False
# },
# 'shuffle_after_load': True,
# 'models': [
# 'glove',
# 'word2vec',
# {
# 'word2vec': { 'model': '/data/amazon/amazon_800000.bin' }
# }
# ]
# }
# }
# },
# { 'openweibo': {
# 'class': OpenWeibo,
# 'path': os.path.join(dir_data, 'openweibo'),
# 'args': { 'load': { 'rng_seed': 13337 },
# 'embed': { 'type': 'averaged' },
# 'shuffle_after_load': True,
# 'models': [
# 'glove',
# 'word2vec',
# {
# 'word2vec': { 'model': '/data/openweibo/openweibo_800000.bin' }
# }
# ]
# }
# }
# },
# { 'openweibo': {
# 'class': OpenWeibo,
# 'path': os.path.join(dir_data, 'openweibocensored'),
# 'args': { 'load': { 'form': 'hanzi',
# 'rng_seed': 13337,
# 'label_type': 'denied'
# },
# 'embed': { 'type': 'averaged' },
# 'shuffle_after_load': True,
# 'models': [
# 'glove',
# 'word2vec',
# {
# 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' }
# }
# ]
# }
# }
# },
# { 'openweibo': {
# 'class': OpenWeibo,
# 'path': os.path.join(dir_data, 'openweibo'),
# 'args': { 'load': { 'form': 'hanzi',
# 'rng_seed': 13337
# },
# 'embed': { 'type': 'averaged' },
# 'shuffle_after_load': True,
# 'models': [
# 'glove',
# 'word2vec',
# {
# 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' }
# }
# ]
# }
# }
# },
# { 'openweibo': {
# 'class': OpenWeibo,
# 'path': os.path.join(dir_data, 'openweibo'),
# 'args': { 'load': { 'form': 'hanzi',
# 'rng_seed': 13337
# },
# 'embed': { 'type': 'averaged' },
# 'shuffle_after_load': True,
# 'models': [
# {
# 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin',
# 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
# 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin',
# 'args': { 'binary': 'True' }
# }
# },
# {
# 'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
# 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
# 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
# }
# },
# {
# 'glove': {
# 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
# 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
# }
# },
# {
# 'word2vec': {
# 'model': '/data/sentiment140_800000.bin',
# 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
# 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
# }
# }
# ]
# }
# }
# },
# { 'openweibo': {
# 'class': OpenWeibo,
# 'path': os.path.join(dir_data, 'openweibo'),
# 'args': { 'load': { 'form': 'hanzi',
# 'rng_seed': 13337,
# 'label_type': 'denied'
# },
# 'embed': { 'type': 'averaged' },
# 'shuffle_after_load': True,
# 'models': [
# {
# 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin',
# 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
# 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
# 'args': { 'binary': 'True' }
# }
# },
# {
# 'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
# 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
# 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
# 'args': { 'binary': 'True' }
# }
# },
# {
# 'glove': {
# 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
# 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
# }
# },
# {
# 'word2vec': {
# 'model': '/data/sentiment140_800000.bin',
# 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
# 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
# }
# }
# ]
# }
# }
# },
{ 'arabic_twitter': {
'class': ArabicTwitter,
'path': os.path.join(dir_data, 'arabic_twitter'),
'args': { 'load': { 'form': 'arabic',
'rng_seed': 13337
},
'embed': { 'type': 'averaged' },
'shuffle_after_load': True,
'models': [
# {
# 'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_min10vocab_vocab1520226.bin',
# 'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
# 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
# 'args': { 'binary': 'True' }
# }
# },
{
'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
'args': { 'binary': 'True' }
}
},
{
'glove': {
'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
}
},
{
'word2vec': {
'model': '/data/sentiment140_800000.bin',
'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
}
},
{
'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_NLTK_min10vocab_vocab981429.bin',
'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
'args': { 'binary': 'True' }
}
}
]
}
}
}
]
def classifiers():
"""
Returns a list of classifier tuples (name, model)
for use in training
"""
return [("LogisticRegression", LogisticRegression(C=1.0,
class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
penalty='l2',
random_state=None,
tol=0.0001)),
("RandomForests", RandomForestClassifier(n_jobs=-1,
n_estimators = 15,
max_features = 'sqrt')),
("Gaussian NaiveBayes", GaussianNB())] #,
#("LinearSVM", svm.LinearSVC())]
# profiled methods
@timed
def timed_training(classifier, values, labels):
return classifier.fit(values, labels)
@timed
def timed_testing(classifier, values):
return classifier.predict(values)
@timed
def timed_dataload(loader, data, args, embedder, values, labels):
# use separate counter to account for invalid input along the way
counter = 0
for text,sentiment in data:
try:
if (counter % 10000 == 0):
print("Loading at {}".format(counter))
# normalize and tokenize if necessary
            if 'normalize' in args:
text_normalized = data_utils.normalize(text, **args['normalize'])
else:
text_normalized = text
# tokenize
if args.get('load', {}).get('form', None) == 'hanzi':
tokens = data_utils.tokenize_hanzi(text_normalized)
elif args.get('load', {}).get('form', None) == 'arabic':
text_stripped = loader.twitter_strip(text_normalized)
tokens = loader.tokenize_arabic(text_stripped)
else:
tokens = data_utils.tokenize(text_normalized)
# choose embedding type
vector = None
if args['embed']['type'] == 'concatenated':
                vector = embedder.embed_words_into_vectors_concatenated(tokens, **args['embed'])
elif args['embed']['type'] == 'averaged':
vector = embedder.embed_words_into_vectors_averaged(tokens)
else:
pass
# data labeled by sentiment score (thread-safe with lock)
if vector is not None:
values.append(vector)
labels.append(sentiment)
counter += 1
except TextTooShortException as e:
pass
# iterate all datasources
for dataset in datasets:
    for data_source, data_params in dataset.items():
# prepare data loader
klass = data_params['class']
loader = klass(data_params['path'])
data_args = data_params['args']
load_args = data_args.get('load', {})
data = loader.load_data(**load_args)
# test all vector models
for embedder_model in data_args['models']:
# identify prebuilt model if exists
if isinstance(embedder_model, dict):
# initialize word vector embedder
            embedder_model, prebuilt_model_params = next(iter(embedder_model.items()))
prebuilt_path_model = prebuilt_model_params.get('model', None)
model_args = prebuilt_model_params.get('args', {})
embedder = WordVectorEmbedder(embedder_model, model_fullpath=prebuilt_path_model, model_args=model_args)
# update embedder parameters
if prebuilt_path_model:
model_path_dir, model_path_filename, model_path_filext = WordVectorBuilder.filename_components(prebuilt_path_model)
embedder.model_subset = model_path_filename
# training data (custom or default)
if prebuilt_model_params.get('train', None):
prebuilt_path_train = prebuilt_model_params.get('train')
else:
prebuilt_path_train = WordVectorBuilder.filename_train(prebuilt_path_model)
with open(prebuilt_path_train, 'rb') as f:
data_train = pickle.load(f)
# testing data (custom or default)
if prebuilt_model_params.get('test', None):
prebuilt_path_test = prebuilt_model_params.get('test')
else:
prebuilt_path_test = WordVectorBuilder.filename_test(prebuilt_path_model)
with open(prebuilt_path_test, 'rb') as f:
data_test = pickle.load(f)
# initialize lists (will be converted later into numpy arrays)
values_train = []
labels_train = []
values_test = []
labels_test = []
# initialize timer
seconds_loading = 0
logger.info("processing {} samples from {}...".format(len(data_train)+len(data_test), prebuilt_path_model))
# load training dataset
profile_results = timed_dataload(loader, data_train, data_args, embedder, values_train, labels_train)
seconds_loading += profile_results.timer.total_tt
            # load testing dataset
profile_results = timed_dataload(loader, data_test, data_args, embedder, values_test, labels_test)
seconds_loading += profile_results.timer.total_tt
# shuffle if necessary
if data_args['shuffle_after_load']:
# store new lists
values_train_shuffled = []
labels_train_shuffled = []
values_test_shuffled = []
labels_test_shuffled = []
# generate subsample of random indices out of total available
random.seed(data_args.get('load', {}).get('rng_seed', None))
                indices_train = list(range(len(values_train)))
                indices_test = list(range(len(values_test)))
random.shuffle(indices_train)
random.shuffle(indices_test)
# keep entries at those random indices
for i in indices_train:
values_train_shuffled.append(values_train[i])
labels_train_shuffled.append(labels_train[i])
for i in indices_test:
values_test_shuffled.append(values_test[i])
labels_test_shuffled.append(labels_test[i])
# keep shuffled lists
values_train = values_train_shuffled
labels_train = labels_train_shuffled
values_test = values_test_shuffled
labels_test = labels_test_shuffled
# create numpy arrays for classifier input
values_train = np.array(values_train, dtype='float32')
labels_train = np.array(labels_train, dtype='float32')
values_test = np.array(values_test, dtype='float32')
labels_test = np.array(labels_test, dtype='float32')
else:
# initialize word vector embedder
embedder = WordVectorEmbedder(embedder_model)
# initialize lists (will be converted later into numpy arrays)
values = []
labels = []
# get equal-sized subsets of each class
data_sampler = DataSampler(klass, file_path=data_params['path'], num_classes=2)
data = data_sampler.sample_balanced(min_samples=data_args.get('min_samples', None), rng_seed=data_args.get('load', {}).get('rng_seed', None))
# load dataset
logger.info("processing {} samples from {}...".format(len(data), data_params['path']))
profile_results = timed_dataload(loader, data, data_args, embedder, values, labels)
# store loading time
seconds_loading = profile_results.timer.total_tt
# shuffle if necessary
if data_args['shuffle_after_load']:
# store new lists
values_shuffled = []
labels_shuffled = []
# generate subsample of random indices out of total available
random.seed(data_args.get('load', {}).get('rng_seed', None))
                indices = list(range(len(values)))
random.shuffle(indices)
# keep entries at those random indices
for i in indices:
values_shuffled.append(values[i])
labels_shuffled.append(labels[i])
# keep shuffled lists
values = values_shuffled
labels = labels_shuffled
# convert into nparray for sklearn
values = np.nan_to_num(np.array(values, dtype="float32"))
labels = np.nan_to_num(np.array(labels, dtype="float32"))
logger.info("Loaded {} samples...".format(len(values)))
# split into training and test data
logger.info("splitting dataset into training and testing sets...")
labels_train, labels_dev, labels_test = data_utils.split_data(labels, train=data_fraction_train, dev=0, test=data_fraction_test)
values_train, values_dev, values_test = data_utils.split_data(values, train=data_fraction_train, dev=0, test=data_fraction_test)
# calculate distribution
dist = Counter()
dist.update(labels_test)
# setup classifier
logger.info("Training on {}, Testing on {}...".format(len(values_train), len(values_test)))
for classifier_name,classifier in classifiers():
# profiled training
logger.info("Training %s classifier..." % classifier.__class__.__name__)
profile_results = timed_training(classifier, values_train, labels_train)
seconds_training = profile_results.timer.total_tt
# profiled testing
logger.info("Testing %s classifier..." % classifier.__class__.__name__)
profile_results = timed_testing(classifier, values_test)
predictions = profile_results.results
seconds_testing = profile_results.timer.total_tt
# calculate metrics
data_size = len(labels_test)
data_positive = np.sum(labels_test)
data_negative = data_size - data_positive
confusion_matrix = metrics.confusion_matrix(labels_test, predictions)
TN = confusion_matrix[0][0]
FP = confusion_matrix[0][1]
FN = confusion_matrix[1][0]
TP = confusion_matrix[1][1]
accuracy = metrics.accuracy_score(labels_test, predictions)
precision = metrics.precision_score(labels_test, predictions)
recall = metrics.recall_score(labels_test, predictions)
f1 = metrics.f1_score(labels_test, predictions)
# build results object
results = { 'classifier': str(classifier.__class__.__name__),
'data': { 'source': str(data_source),
'testsize': str(data_size),
'positive': str(data_positive),
'negative': str(data_negative),
'time_in_seconds_loading': str(seconds_loading)
},
'embedding': { 'model': str(embedder_model),
'subset': str(embedder.model_subset)
},
'data_args': data_args,
'metrics': { 'TP': str(TP),
'FP': str(FP),
'TN': str(TN),
'FN': str(FN),
'accuracy': str(accuracy),
'precision': str(precision),
'recall': str(recall),
'f1': str(f1),
'time_in_seconds_training': str(seconds_training),
'time_in_seconds_testing': str(seconds_testing)
}
}
# ensure output directory exists
if not os.path.isdir(dir_results):
data_utils.mkdir_p(dir_results)
# save json file
filename_results = "{}_{}_{}.json".format(data_source, embedder_model, classifier.__class__.__name__)
logger.info("Saving results to {}...".format(filename_results))
with open(os.path.join(dir_results,filename_results), 'a') as outfile:
json.dump(results, outfile, sort_keys=True, indent=4, separators=(',', ': '))
outfile.write('\n')
|
132371
|
import pcl
import numpy as np
def run_icp(data):
delta_theta_z, delta_x, delta_y, pc_in, pc_out, iter_t, iter_x, iter_y = data
# if do_exhaustive_serach:
transf_ini = np.eye(4)
transf_ini[0, 0] = np.cos(delta_theta_z[iter_t])
transf_ini[0, 1] = -np.sin(delta_theta_z[iter_t])
transf_ini[1, 0] = np.sin(delta_theta_z[iter_t])
transf_ini[1, 1] = np.cos(delta_theta_z[iter_t])
transf_ini[0, 3] = delta_x[iter_x]
transf_ini[1, 3] = delta_y[iter_y]
pc_in_try = (
np.matmul(transf_ini[0:3, 0:3], pc_in.transpose())
+ transf_ini[0:3, 3][:, np.newaxis]
)
pc_in_try = pc_in_try.transpose()
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(pc_in_try.astype(np.float32))
cloud_out.from_array(pc_out.astype(np.float32))
gicp = cloud_in.make_GeneralizedIterativeClosestPoint()
# tried open3d but found pcl version is more robust
converged, transf_iter, estimate, fitness = gicp.gicp(
cloud_in, cloud_out, max_iter=1000
)
if not converged:
fitness = 0
transf = np.eye(4)
transf[0:3, 0:3] = np.matmul(transf_iter[0:3, 0:3], transf_ini[0:3, 0:3])
transf[0:3, 3] = (
np.matmul(transf_iter[0:3, 0:3], transf_ini[0:3, 3]) + transf_iter[0:3, 3]
)
# import pdb; pdb.set_trace()
# pc_in_vec = PointCloud()
# pc_in_vec.points = Vector3dVector(pc_in)
# pc_out_vec = PointCloud()
# pc_out_vec.points = Vector3dVector(pc_out)
# draw_registration_result(pc_in_vec, pc_out_vec, transf)
return transf, fitness
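# Hedged usage sketch (assumes python-pcl is installed; the search grids and
# the Nx3 float arrays pc_in/pc_out below are hypothetical):
#
#     delta_theta_z = np.linspace(-0.1, 0.1, 5)
#     delta_x = delta_y = np.linspace(-0.5, 0.5, 5)
#     data = (delta_theta_z, delta_x, delta_y, pc_in, pc_out, 2, 2, 2)
#     transf, fitness = run_icp(data)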
|
132374
|
from setuptools import setup, find_packages
dependencies = []
try:
import json
except ImportError:
dependencies.append('simplejson')
setup(
name="Djangy",
version="0.14",
packages = find_packages(),
author="<NAME>",
author_email="<EMAIL>",
description="Djangy.com client application",
keywords="djangy django",
url="http://www.djangy.com",
install_requires = dependencies,
entry_points = {
'console_scripts': [
'djangy = djangy:main'
]
},
license="University of Illinois/NCSA Open Source License"
)
|
132452
|
from model import *
from data_import import *
import sys, getopt
from tqdm import tqdm
# SPEECH ENHANCEMENT NETWORK
SE_LAYERS = 13 # NUMBER OF INTERNAL LAYERS
SE_CHANNELS = 64 # NUMBER OF FEATURE CHANNELS PER LAYER
SE_LOSS_LAYERS = 6 # NUMBER OF FEATURE LOSS LAYERS
SE_NORM = "NM" # TYPE OF LAYER NORMALIZATION (NM, SBN or None)
SE_LOSS_TYPE = "FL" # TYPE OF TRAINING LOSS (L1, L2 or FL)
# FEATURE LOSS NETWORK
LOSS_LAYERS = 14 # NUMBER OF INTERNAL LAYERS
LOSS_BASE_CHANNELS = 32 # NUMBER OF FEATURE CHANNELS PER LAYER IN FIRST LAYER
LOSS_BLK_CHANNELS = 5 # NUMBER OF LAYERS BETWEEN CHANNEL NUMBER UPDATES
LOSS_NORM = "SBN" # TYPE OF LAYER NORMALIZATION (NM, SBN or None)
SET_WEIGHT_EPOCH = 10 # NUMBER OF EPOCHS BEFORE FEATURE LOSS BALANCE
SAVE_EPOCHS = 10 # NUMBER OF EPOCHS BETWEEN MODEL SAVES
log_file = open("logfile.txt", 'w+')
# COMMAND LINE OPTIONS
datafolder = "dataset"
modfolder = "models"
outfolder = "."
try:
    opts, args = getopt.getopt(sys.argv[1:],"hd:l:o:",["datafolder=","lossfolder=","outfolder="])
except getopt.GetoptError:
    print('Usage: python senet_infer.py -d <datafolder> -l <lossfolder> -o <outfolder>')
    sys.exit(2)
for opt, arg in opts:
if opt == '-h':
        print('Usage: python senet_infer.py -d <datafolder> -l <lossfolder> -o <outfolder>')
sys.exit()
elif opt in ("-d", "--datafolder"):
datafolder = arg
elif opt in ("-l", "--lossfolder"):
modfolder = arg
elif opt in ("-o", "--outfolder"):
outfolder = arg
print('Data folder is "' + datafolder + '/"')
print('Loss model folder is "' + modfolder + '/"')
print('Output model folder is "' + outfolder + '/"')
# SET LOSS FUNCTIONS AND PLACEHOLDERS
with tf.variable_scope(tf.get_variable_scope()):
input=tf.placeholder(tf.float32,shape=[None,1,None,1])
clean=tf.placeholder(tf.float32,shape=[None,1,None,1])
enhanced=senet(input, n_layers=SE_LAYERS, norm_type=SE_NORM, n_channels=SE_CHANNELS)
if SE_LOSS_TYPE == "L1": # L1 LOSS
loss_weights = tf.placeholder(tf.float32, shape=[])
loss_fn = l1_loss(clean, enhanced)
elif SE_LOSS_TYPE == "L2": # L2 LOSS
loss_weights = tf.placeholder(tf.float32, shape=[])
loss_fn = l2_loss(clean, enhanced)
else: # FEATURE LOSS
loss_weights = tf.placeholder(tf.float32, shape=[SE_LOSS_LAYERS])
loss_fn = featureloss(clean, enhanced, loss_weights, loss_layers=SE_LOSS_LAYERS, n_layers=LOSS_LAYERS, norm_type=LOSS_NORM,
base_channels=LOSS_BASE_CHANNELS, blk_channels=LOSS_BLK_CHANNELS)
# LOAD DATA
trainset, valset = load_full_data_list(datafolder = datafolder)
trainset, valset = load_full_data(trainset, valset)
# TRAINING OPTIMIZER
opt=tf.train.AdamOptimizer(learning_rate=1e-4).\
minimize(loss_fn[0],var_list=[var for var in tf.trainable_variables() if var.name.startswith("se_")])
# BEGIN SCRIPT #########################################################################################################
# INITIALIZE GPU CONFIG
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
sess=tf.Session(config=config)
print "Config ready"
sess.run(tf.global_variables_initializer())
print "Session initialized"
# LOAD FEATURE LOSS
if SE_LOSS_TYPE == "FL":
loss_saver = tf.train.Saver([var for var in tf.trainable_variables() if var.name.startswith("loss_")])
loss_saver.restore(sess, "./%s/loss_model.ckpt" % modfolder)
Nepochs = 320
saver = tf.train.Saver([var for var in tf.trainable_variables() if var.name.startswith("se_")])
########################################################################################################################
if SE_LOSS_TYPE == "FL":
loss_train = np.zeros((len(trainset["innames"]),SE_LOSS_LAYERS+1))
loss_val = np.zeros((len(valset["innames"]),SE_LOSS_LAYERS+1))
else:
loss_train = np.zeros((len(trainset["innames"]),1))
loss_val = np.zeros((len(valset["innames"]),1))
if SE_LOSS_TYPE == "FL":
loss_w = np.ones(SE_LOSS_LAYERS)
else:
loss_w = []
#####################################################################################
for epoch in range(1,Nepochs+1):
print("Epoch no.%d"%epoch)
# TRAINING EPOCH ################################################################
ids = np.random.permutation(len(trainset["innames"])) # RANDOM FILE ORDER
for id in tqdm(range(0, len(ids)), file=sys.stdout):
i = ids[id] # RANDOMIZED ITERATION INDEX
inputData = trainset["inaudio"][i] # LOAD DEGRADED INPUT
outputData = trainset["outaudio"][i] # LOAD GROUND TRUTH
# TRAINING ITERATION
_, loss_vec = sess.run([opt, loss_fn],
feed_dict={input: inputData, clean: outputData, loss_weights: loss_w})
# SAVE ITERATION LOSS
loss_train[id,0] = loss_vec[0]
if SE_LOSS_TYPE == "FL":
for j in range(SE_LOSS_LAYERS):
loss_train[id,j+1] = loss_vec[j+1]
# PRINT EPOCH TRAINING LOSS AVERAGE
str = "T: %d\t " % (epoch)
if SE_LOSS_TYPE == "FL":
for j in range(SE_LOSS_LAYERS+1):
str += ", %10.6e"%(np.mean(loss_train, axis=0)[j])
else:
str += ", %10.6e"%(np.mean(loss_train, axis=0)[0])
log_file.write(str + "\n")
log_file.flush()
# SET WEIGHTS AFTER M EPOCHS
if SE_LOSS_TYPE == "FL" and epoch == SET_WEIGHT_EPOCH:
loss_w = np.mean(loss_train, axis=0)[1:]
# SAVE MODEL EVERY N EPOCHS
if epoch % SAVE_EPOCHS != 0:
continue
saver.save(sess, outfolder + "/se_model.ckpt")
# VALIDATION EPOCH ##############################################################
print("Validation epoch")
for id in tqdm(range(0, len(valset["innames"])), file=sys.stdout):
i = id # NON-RANDOMIZED ITERATION INDEX
inputData = valset["inaudio"][i] # LOAD DEGRADED INPUT
outputData = valset["outaudio"][i] # LOAD GROUND TRUTH
# VALIDATION ITERATION
output, loss_vec = sess.run([enhanced, loss_fn],
feed_dict={input: inputData, clean: outputData, loss_weights: loss_w})
# SAVE ITERATION LOSS
loss_val[id,0] = loss_vec[0]
if SE_LOSS_TYPE == "FL":
for j in range(SE_LOSS_LAYERS):
loss_val[id,j+1] = loss_vec[j+1]
# PRINT VALIDATION EPOCH LOSS AVERAGE
str = "V: %d " % (epoch)
if SE_LOSS_TYPE == "FL":
for j in range(SE_LOSS_LAYERS+1):
str += ", %10.6e"%(np.mean(loss_val, axis=0)[j]*1e9)
else:
str += ", %10.6e"%(np.mean(loss_val, axis=0)[0]*1e9)
log_file.write(str + "\n")
log_file.flush()
log_file.close()
|
132459
|
import logging
from typing import List
from omegaconf import DictConfig
from nuplan.planning.script.builders.metric_builder import build_metrics_engines
from nuplan.planning.simulation.callback.metric_callback import MetricCallback
from nuplan.planning.simulation.runner.metric_runner import MetricRunner
from nuplan.planning.simulation.simulation_log import SimulationLog
logger = logging.getLogger(__name__)
def build_metric_runners(cfg: DictConfig, simulation_logs: List[SimulationLog]) -> List[MetricRunner]:
"""
Build metric runners.
:param cfg: DictConfig. Configuration that is used to run the experiment.
:param simulation_logs: A list of simulation logs.
:return A list of metric runners.
"""
logger.info('Building metric runners...')
# Create a list of metric runners
metric_runners = list()
# Build a list of scenarios
logger.info('Extracting scenarios...')
scenarios = [simulation_log.scenario for simulation_log in simulation_logs]
logger.info('Extracting scenarios...DONE!')
logger.info('Building metric engines...')
metric_engines_map = build_metrics_engines(cfg=cfg, scenarios=scenarios)
logger.info('Building metric engines...DONE')
logger.info(f'Building metric_runner from {len(scenarios)} scenarios...')
for simulation_log in simulation_logs:
scenario = simulation_log.scenario
metric_engine = metric_engines_map.get(scenario.scenario_type, None)
if not metric_engine:
            raise ValueError(f'No metric engine found for scenario type {scenario.scenario_type}.')
if not simulation_log:
raise ValueError(f'{scenario.scenario_name} not found in simulation logs.')
metric_callback = MetricCallback(metric_engine=metric_engine)
metric_runner = MetricRunner(simulation_log=simulation_log, metric_callback=metric_callback)
metric_runners.append(metric_runner)
logger.info('Building metric runners...DONE!')
return metric_runners
|
132466
|
from django import forms
from django.core.exceptions import ValidationError
from core.site.models import Site
class SiteForm(forms.ModelForm):
full_name = forms.CharField()
class Meta:
model = Site
        fields = ('full_name',)
exclude = ('parent', 'name')
def validate_unique(self):
try:
self.instance.validate_unique()
        except ValidationError as e:
if 'full_name' in e.message_dict:
e.message_dict['__all__'] = e.message_dict['full_name']
self._update_errors(e.message_dict)
|
132511
|
from ...connection_cursor import cur
def position(sid, pid):
cur.execute("""DELETE FROM positions WHERE symbol_id={} AND portfolio_id={}
""".format(sid, pid))
|
132518
|
import pytest
import concepts
def test_empty_objects():
with pytest.raises(ValueError, match=r'empty objects'):
concepts.Context((), ('spam',), [(False,)])
def test_empty_properties():
with pytest.raises(ValueError, match=r'empty properties'):
concepts.Context(('spam',), (), [(False,)])
def test_duplicate_object():
with pytest.raises(ValueError, match=r'duplicate objects'):
concepts.Context(('spam', 'spam'),
('ham', 'eggs'),
[(True, False), (False, True)])
def test_duplicate_property():
with pytest.raises(ValueError, match=r'duplicate properties'):
concepts.Context(('spam', 'eggs'),
('ham', 'ham'),
[(True, False), (False, True)])
def test_object_property_overlap():
with pytest.raises(ValueError, match=r'overlap'):
concepts.Context(('spam', 'eggs'),
('eggs', 'ham'),
[(True, False), (False, True)])
def test_invalid_bools_1():
with pytest.raises(ValueError, match=r'bools is not 2 items of length 2'):
concepts.Context(('spam', 'eggs'),
('camelot', 'launcelot'),
[(True, False)])
def test_invalid_bools_2():
with pytest.raises(ValueError, match=r'bools is not 2 items of length 2'):
concepts.Context(('spam', 'eggs'),
('camelot', 'launcelot'),
[(True, False, False), (False, True)])
def test_init():
c = concepts.Context(('spam', 'eggs'),
('camelot', 'launcelot'),
[(True, False), (False, True)])
assert c.objects == ('spam', 'eggs')
assert c.properties == ('camelot', 'launcelot')
assert c.bools == [(True, False), (False, True)]
def test_copy(context):
context = concepts.Context(context.objects,
context.properties,
context.bools)
assert context.lattice is not None
copy = context.copy()
assert copy == context
assert 'lattice' not in copy.__dict__
def test_eq_noncontext(context):
assert not (context == object())
def test_eq_true(context):
assert context == concepts.Context(context.objects,
context.properties,
context.bools)
def test_eq_false(context):
d = context.definition()
d.move_object('3pl', 0)
assert not context == concepts.Context(*d)
def test_ne_noncontext(context):
assert context != object()
def test_ne_true(context):
d = context.definition()
d.move_object('3pl', 0)
assert context != concepts.Context(*d)
def test_ne_false(context):
assert not context != concepts.Context(context.objects,
context.properties,
context.bools)
def test_crc32(context):
assert context.crc32() == 'b9d20179' == context.definition().crc32()
def test_minimize_infimum(context):
assert list(context._minimize((), context.properties)) == [context.properties]
def test_raw(context):
Objects = context._Objects # noqa: N806
Properties = context._Properties # noqa: N806
assert context.intension(['1sg', '1pl'], raw=True) == Properties('1001010000')
assert context.extension(['+1', '+sg'], raw=True) == Objects('100000')
assert context.neighbors(['1sg'], raw=True) == \
[(Objects('110000'), Properties('1001010000')),
(Objects('101000'), Properties('0000011001')),
(Objects('100010'), Properties('0001001001'))]
def test_tofile(tmp_path, context, filename='context.cxt', encoding='utf-8'):
filepath = tmp_path / filename
context.tofile(str(filepath), encoding=encoding)
assert filepath.read_text(encoding=encoding) == '''\
B
6
10
1sg
1pl
2sg
2pl
3sg
3pl
+1
-1
+2
-2
+3
-3
+sg
+pl
-sg
-pl
X..X.XX..X
X..X.X.XX.
.XX..XX..X
.XX..X.XX.
.X.XX.X..X
.X.XX..XX.
'''
def test_definition(context):
assert context.definition() == (context.objects,
context.properties,
context.bools)
|
132557
|
import sonnet as snt
import tensorflow as tf
from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order
class SSDProposal(snt.AbstractModule):
"""Transforms anchors and SSD predictions into object proposals.
Using the fixed anchors and the SSD predictions for both classification
and regression (adjusting the bounding box), we return a list of proposals
with assigned class.
    In the process it tries to remove duplicated suggestions by applying
    non-maximum suppression (NMS).
    We apply NMS because object detectors are usually scored by treating
    duplicated detections (multiple detections that overlap the same ground
    truth value) as false positives. It is reasonable to assume there are
    cases where applying NMS is completely unnecessary.
Besides applying NMS it also filters the top N results, both for classes
and in general. These values are easily modifiable in the configuration
files.
"""
def __init__(self, num_classes, config, variances, name='proposal_layer'):
super(SSDProposal, self).__init__(name=name)
self._num_classes = num_classes
# Threshold to use for NMS.
self._class_nms_threshold = config.class_nms_threshold
# Max number of proposals detections per class.
self._class_max_detections = config.class_max_detections
# Maximum number of detections to return.
self._total_max_detections = config.total_max_detections
self._min_prob_threshold = config.min_prob_threshold or 0.0
self._filter_outside_anchors = config.filter_outside_anchors
self._variances = variances
def _build(self, cls_prob, loc_pred, all_anchors, im_shape):
"""
Args:
cls_prob: A softmax probability for each anchor where the idx = 0
is the background class (which we should ignore).
Shape (total_anchors, num_classes + 1)
loc_pred: A Tensor with the regression output for each anchor.
Its shape should be (total_anchors, 4).
all_anchors: A Tensor with the anchors bounding boxes of shape
(total_anchors, 4), having (x_min, y_min, x_max, y_max) for
each anchor.
im_shape: A Tensor with the image shape in format (height, width).
Returns:
prediction_dict with the following keys:
raw_proposals: The raw proposals i.e. the anchors adjusted
using loc_pred.
                proposals: The proposals of the network after applying some
                    filters like negative area; and NMS. Its shape is
                    (final_num_proposals, 4), where final_num_proposals is
                    unknown before-hand (it depends on NMS).
                    The 4-length Tensor for each corresponds to:
                    (x_min, y_min, x_max, y_max).
                proposal_label: Its shape is (final_num_proposals,)
                proposal_label_prob: Its shape is (final_num_proposals,)
"""
selected_boxes = []
selected_probs = []
selected_labels = []
selected_anchors = [] # For debugging
for class_id in range(self._num_classes):
# Get the confidences for this class (+ 1 is to ignore background)
class_cls_prob = cls_prob[:, class_id + 1]
# Filter by min_prob_threshold
min_prob_filter = tf.greater_equal(
class_cls_prob, self._min_prob_threshold)
class_cls_prob = tf.boolean_mask(class_cls_prob, min_prob_filter)
class_loc_pred = tf.boolean_mask(loc_pred, min_prob_filter)
anchors = tf.boolean_mask(all_anchors, min_prob_filter)
# Using the loc_pred and the anchors, we generate the proposals.
raw_proposals = decode(anchors, class_loc_pred, self._variances)
# Clip boxes to image.
clipped_proposals = clip_boxes(raw_proposals, im_shape)
# Filter proposals that have an non-valid area.
(x_min, y_min, x_max, y_max) = tf.unstack(
clipped_proposals, axis=1)
proposal_filter = tf.greater(
tf.maximum(x_max - x_min, 0.) * tf.maximum(y_max - y_min, 0.),
0.
)
class_proposals = tf.boolean_mask(
clipped_proposals, proposal_filter)
class_loc_pred = tf.boolean_mask(
class_loc_pred, proposal_filter)
class_cls_prob = tf.boolean_mask(
class_cls_prob, proposal_filter)
proposal_anchors = tf.boolean_mask(
anchors, proposal_filter)
# Log results of filtering non-valid area proposals
total_anchors = tf.shape(all_anchors)[0]
total_proposals = tf.shape(class_proposals)[0]
total_raw_proposals = tf.shape(raw_proposals)[0]
tf.summary.scalar(
'invalid_proposals',
total_proposals - total_raw_proposals, ['ssd']
)
tf.summary.scalar(
'valid_proposals_ratio',
tf.cast(total_anchors, tf.float32) /
tf.cast(total_proposals, tf.float32), ['ssd']
)
# We have to use the TensorFlow's bounding box convention to use
# the included function for NMS.
# After gathering results we should normalize it back.
class_proposal_tf = change_order(class_proposals)
# Apply class NMS.
class_selected_idx = tf.image.non_max_suppression(
class_proposal_tf, class_cls_prob, self._class_max_detections,
iou_threshold=self._class_nms_threshold
)
# Using NMS resulting indices, gather values from Tensors.
class_proposal_tf = tf.gather(
class_proposal_tf, class_selected_idx)
class_cls_prob = tf.gather(class_cls_prob, class_selected_idx)
# We append values to a regular list which will later be
# transformed to a proper Tensor.
selected_boxes.append(class_proposal_tf)
selected_probs.append(class_cls_prob)
# In the case of the class_id, since it is a loop on classes, we
# already have a fixed class_id. We use `tf.tile` to create that
# Tensor with the total number of indices returned by the NMS.
selected_labels.append(
tf.tile([class_id], [tf.shape(class_selected_idx)[0]])
)
selected_anchors.append(proposal_anchors)
# We use concat (axis=0) to generate a Tensor where the rows are
# stacked on top of each other
proposals_tf = tf.concat(selected_boxes, axis=0)
# Return to the original convention.
proposals = change_order(proposals_tf)
proposal_label = tf.concat(selected_labels, axis=0)
proposal_label_prob = tf.concat(selected_probs, axis=0)
proposal_anchors = tf.concat(selected_anchors, axis=0)
# Get topK detections of all classes.
k = tf.minimum(
self._total_max_detections,
tf.shape(proposal_label_prob)[0]
)
top_k = tf.nn.top_k(proposal_label_prob, k=k)
top_k_proposal_label_prob = top_k.values
top_k_proposals = tf.gather(proposals, top_k.indices)
top_k_proposal_label = tf.gather(proposal_label, top_k.indices)
top_k_proposal_anchors = tf.gather(proposal_anchors, top_k.indices)
return {
'objects': top_k_proposals,
'labels': top_k_proposal_label,
'probs': top_k_proposal_label_prob,
'raw_proposals': raw_proposals,
'anchors': top_k_proposal_anchors,
}
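# Standalone sketch of the per-class NMS step used above (TF1-style graph
# code; the toy boxes and scores are purely illustrative). Note the
# (y_min, x_min, y_max, x_max) ordering TensorFlow expects, which is why the
# class converts with change_order() first:
#
#     boxes = tf.constant([[0., 0., 1., 1.], [0., 0.1, 1., 1.1]])
#     scores = tf.constant([0.9, 0.8])
#     keep = tf.image.non_max_suppression(
#         boxes, scores, max_output_size=1, iou_threshold=0.5)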
|
132610
|
import asyncio
# import numpy as np
# import importlib
# import pkgutil
import logging
import aiohttp
import voluptuous as vol
# from ledfx.events import Event
from ledfx.integrations import Integration
from ledfx.utils import async_fire_and_forget, resolve_destination
# import time
# import os
# import re
_LOGGER = logging.getLogger(__name__)
class QLC(Integration):
"""QLC+ Integration"""
_widget_types = ["Button", "Slider", "Audio Triggers"]
NAME = "QLC+"
DESCRIPTION = "Web Api Integration for Q Light Controller Plus"
CONFIG_SCHEMA = vol.Schema(
{
vol.Required(
"name",
description="Name of this integration instance and associated settings",
default="QLC+",
): str,
vol.Required(
"description",
description="Description of this integration",
default="Web Api Integration for Q Light Controller Plus",
): str,
vol.Required(
"ip_address",
description="QLC+ ip address",
default="127.0.0.1",
): str,
vol.Required(
"port", description="QLC+ port", default=9999
): vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
}
)
def __init__(self, ledfx, config, active, data):
super().__init__(ledfx, config, active, data)
self._ledfx = ledfx
self._config = config
self._client = None
self._data = []
self._listeners = []
self._connect_task = None
self.restore_from_data(data)
def restore_from_data(self, data):
"""Creates the event listeners from saved data"""
if data is not None:
try:
for entry in data:
event_type, event_filter, active, qlc_payload = entry
self.create_event(
event_type, event_filter, active, qlc_payload
)
except ValueError:
_LOGGER.error("Failed to restore QLC+ settings")
def get_events(self):
"""Get all events in data:
[(event_type, event_filter, active, qlc_payload), ...]
event_type : type of event, str
event_filter : filter for event, dict eg. {"effect_name": "Scroll"}
active : whether there is an active listener for this event
qlc_payload : the payload that is sent when this event is triggered
"""
return self._data
def create_event(self, event_type, event_filter, active, qlc_payload):
"""Create or update event listener that sends a qlc payload on a specific event"""
# If it exists, remove the existing listener and update data
for idx, entry in enumerate(self._data):
_event_type, _event_filter, _active, _qlc_payload = entry
if (_event_type == event_type) and (_event_filter == event_filter):
self._data[idx] = [
event_type,
event_filter,
active,
qlc_payload,
]
# if it was active, remove existing listener
if _active:
self._remove_listener(_event_type, event_filter)
break
# If it doesn't already exist, add it as a new entry to data
else:
self.data.append([event_type, event_filter, active, qlc_payload])
# Finally, subscribe to the ledfx event if the listener is now active
if active:
self._add_listener(event_type, event_filter, qlc_payload)
_LOGGER.info(
f"QLC+ payload linked to event '{event_type}' with filter {event_filter}"
)
def delete_event(self, event_type, event_filter):
"""Completely delete event listener and saved payload from data"""
# remove listener if it exists
self._remove_listener(event_type, event_filter)
# remove event and payload from data
for idx, entry in enumerate(self._data):
_event_type, _event_filter, _active, _qlc_payload = entry
if (_event_type == event_type) and (_event_filter == event_filter):
del self._data[idx]
_LOGGER.info(
f"QLC+ payload deleted for event '{event_type}' with filter {event_filter}"
)
def toggle_event(self, event_type, event_filter):
"""Toggle a payload linked to event on or off"""
# Update "active" flag in data
for idx, entry in enumerate(self._data):
_event_type, _event_filter, _active, _qlc_payload = entry
if (_event_type == event_type) and (_event_filter == event_filter):
# toggle active flag in data
self._data[idx] = [
event_type,
event_filter,
not _active,
_qlc_payload,
]
# Enable/disable listener
if _active:
self._remove_listener(_event_type, event_filter)
else:
# no listener exists, so create it
self._add_listener(event_type, event_filter, _qlc_payload)
# log action
_LOGGER.info(
f"QLC+ payload {'disabled' if _active else 'enabled'} for event '{event_type}' with filter {event_filter}"
)
return True # success
return False # failed to find event_type with this event_filter
def _remove_listener(self, event_type, event_filter):
"""Internal function to remove ledfx events listener if it exists"""
for idx, entry in enumerate(self._listeners):
_event_type, _event_filter, listener = entry
if (_event_type == event_type) and (_event_filter == event_filter):
# Call the listener function that removes the listener
listener()
del self._listeners[idx]
break
def _add_listener(self, event_type, event_filter, qlc_payload):
"""Internal function that links payload to send on the specified event"""
def make_callback(qlc_payload):
def callback(_):
_LOGGER.info(
f"QLC+ sent payload, triggered by event '{event_type}' with filter {event_filter}"
)
async_fire_and_forget(
self._send_payload(qlc_payload), loop=self._ledfx.loop
)
return callback
callback = make_callback(qlc_payload)
listener = self._ledfx.events.add_listener(
callback, event_type, event_filter
)
# store "listener", a function to remove the listener later if needed
self._listeners.append((event_type, event_filter, listener))
async def get_widgets(self):
"""Returns a list of widgets as tuples: [(ID, Type, Name),...]"""
# First get list of widgets (ID, Name)
widgets = []
message = "QLC+API|getWidgetsList"
response = await self._client.query(message)
        # Drop the echoed API name explicitly (str.lstrip would strip a
        # character set, not a prefix).
        widgets_list = response.split("|")[1:]
# Then get the type for each widget (in individual requests bc QLC api be like that)
for widget_id, widget_name in zip(
widgets_list[::2], widgets_list[1::2]
):
message = "QLC+API|getWidgetType"
response = await self._client.query(f"{message}|{widget_id}")
            widget_type = response.split("|")[-1]  # avoid lstrip's character-set semantics
if widget_type in self._widget_types:
widgets.append((widget_id, widget_type, widget_name))
return widgets
async def _send_payload(self, qlc_payload):
"""Sends payload of {id:value, ...} pairs to QLC"""
for widget_id, value in qlc_payload.items():
await self._client.send(f"{int(widget_id)}|{value}")
async def connect(self):
resolved_ip = resolve_destination(self._config["ip_address"])
domain = f"{resolved_ip }:{self._config['port']}"
url = f"http://{domain}/qlcplusWS"
if self._client is None:
self._client = QLCWebsocketClient(url, domain)
self._cancel_connect()
self._connect_task = asyncio.create_task(self._client.connect())
if await self._connect_task:
await super().connect(f"Connected to QLC+ websocket at {domain}")
async def disconnect(self):
self._cancel_connect()
if self._client is not None:
# fire and forget bc for some reason close() never returns... -o-
async_fire_and_forget(
self._client.disconnect(), loop=self._ledfx.loop
)
await super().disconnect("Disconnected from QLC+ websocket")
else:
await super().disconnect()
def _cancel_connect(self):
if self._connect_task is not None:
self._connect_task.cancel()
self._connect_task = None
class QLCWebsocketClient(aiohttp.ClientSession):
def __init__(self, url, domain):
super().__init__()
self.websocket = None
self.url = url
self.domain = domain
async def connect(self):
"""Connect to the WebSocket."""
while True:
try:
self.websocket = await self.ws_connect(self.url)
return True
except aiohttp.client_exceptions.ClientConnectorError:
_LOGGER.info(
f"Connection to {self.domain} failed. Retrying in 5s..."
)
await asyncio.sleep(5)
except asyncio.CancelledError:
return False
async def disconnect(self):
if self.websocket is not None:
await self.websocket.close()
async def begin(self, callback):
"""Connect and indefinitely read from websocket, returning messages to callback func"""
await self.connect()
await self.read(callback)
    async def query(self, message):
        """Send a message, and return the response"""
        await self.send(message)
        result = await self.receive()
        # Strip the "QLC+API|" prefix explicitly (str.lstrip treats its
        # argument as a set of characters, not a prefix).
        prefix = "QLC+API|"
        return result[len(prefix):] if result.startswith(prefix) else result
async def send(self, message):
"""Send a message to the WebSocket."""
if self.websocket is None:
_LOGGER.error("Websocket not yet established")
return
await self.websocket.send_str(message)
_LOGGER.debug(f"Sent message {message} to {self.domain}")
async def receive(self):
"""Receive one message from the WebSocket."""
if self.websocket is None:
_LOGGER.error("Websocket not yet established")
return
return (await self.websocket.receive()).data
    async def read(self, callback):
        """Read messages from the WebSocket, dispatching each one to callback."""
        if self.websocket is None:
            _LOGGER.error("Websocket not yet established")
            return
        while True:
            # Pull each frame exactly once and branch on its type
            message = await self.websocket.receive()
            if message.type == aiohttp.WSMsgType.TEXT:
                callback(message)
            elif message.type == aiohttp.WSMsgType.CLOSED:
                break
            elif message.type == aiohttp.WSMsgType.ERROR:
                break
|
132645
|
from esphome.components import light
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import (
CONF_OUTPUT_ID,
CONF_MIN_VALUE,
CONF_MAX_VALUE,
CONF_GAMMA_CORRECT,
CONF_DEFAULT_TRANSITION_LENGTH,
CONF_SWITCH_DATAPOINT,
CONF_COLD_WHITE_COLOR_TEMPERATURE,
CONF_WARM_WHITE_COLOR_TEMPERATURE,
CONF_COLOR_INTERLOCK,
)
from .. import tuya_ns, CONF_TUYA_ID, Tuya
DEPENDENCIES = ["tuya"]
CONF_DIMMER_DATAPOINT = "dimmer_datapoint"
CONF_MIN_VALUE_DATAPOINT = "min_value_datapoint"
CONF_COLOR_TEMPERATURE_DATAPOINT = "color_temperature_datapoint"
CONF_COLOR_TEMPERATURE_INVERT = "color_temperature_invert"
CONF_COLOR_TEMPERATURE_MAX_VALUE = "color_temperature_max_value"
CONF_RGB_DATAPOINT = "rgb_datapoint"
CONF_HSV_DATAPOINT = "hsv_datapoint"
TuyaLight = tuya_ns.class_("TuyaLight", light.LightOutput, cg.Component)
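# cv.Exclusive keys in the same group ("color") are mutually exclusive, while
# cv.Inclusive keys ("color_temperature") must all be configured together.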
CONFIG_SCHEMA = cv.All(
light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend(
{
cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(TuyaLight),
cv.GenerateID(CONF_TUYA_ID): cv.use_id(Tuya),
cv.Optional(CONF_DIMMER_DATAPOINT): cv.uint8_t,
cv.Optional(CONF_MIN_VALUE_DATAPOINT): cv.uint8_t,
cv.Optional(CONF_SWITCH_DATAPOINT): cv.uint8_t,
cv.Exclusive(CONF_RGB_DATAPOINT, "color"): cv.uint8_t,
cv.Exclusive(CONF_HSV_DATAPOINT, "color"): cv.uint8_t,
cv.Optional(CONF_COLOR_INTERLOCK, default=False): cv.boolean,
cv.Inclusive(
CONF_COLOR_TEMPERATURE_DATAPOINT, "color_temperature"
): cv.uint8_t,
cv.Optional(CONF_COLOR_TEMPERATURE_INVERT, default=False): cv.boolean,
cv.Optional(CONF_MIN_VALUE): cv.int_,
cv.Optional(CONF_MAX_VALUE): cv.int_,
cv.Optional(CONF_COLOR_TEMPERATURE_MAX_VALUE): cv.int_,
cv.Inclusive(
CONF_COLD_WHITE_COLOR_TEMPERATURE, "color_temperature"
): cv.color_temperature,
cv.Inclusive(
CONF_WARM_WHITE_COLOR_TEMPERATURE, "color_temperature"
): cv.color_temperature,
# Change the default gamma_correct and default transition length settings.
# The Tuya MCU handles transitions and gamma correction on its own.
cv.Optional(CONF_GAMMA_CORRECT, default=1.0): cv.positive_float,
cv.Optional(
CONF_DEFAULT_TRANSITION_LENGTH, default="0s"
): cv.positive_time_period_milliseconds,
}
).extend(cv.COMPONENT_SCHEMA),
cv.has_at_least_one_key(
CONF_DIMMER_DATAPOINT,
CONF_SWITCH_DATAPOINT,
CONF_RGB_DATAPOINT,
CONF_HSV_DATAPOINT,
),
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_OUTPUT_ID])
await cg.register_component(var, config)
await light.register_light(var, config)
if CONF_DIMMER_DATAPOINT in config:
cg.add(var.set_dimmer_id(config[CONF_DIMMER_DATAPOINT]))
if CONF_MIN_VALUE_DATAPOINT in config:
cg.add(var.set_min_value_datapoint_id(config[CONF_MIN_VALUE_DATAPOINT]))
if CONF_SWITCH_DATAPOINT in config:
cg.add(var.set_switch_id(config[CONF_SWITCH_DATAPOINT]))
if CONF_RGB_DATAPOINT in config:
cg.add(var.set_rgb_id(config[CONF_RGB_DATAPOINT]))
elif CONF_HSV_DATAPOINT in config:
cg.add(var.set_hsv_id(config[CONF_HSV_DATAPOINT]))
if CONF_COLOR_TEMPERATURE_DATAPOINT in config:
cg.add(var.set_color_temperature_id(config[CONF_COLOR_TEMPERATURE_DATAPOINT]))
cg.add(var.set_color_temperature_invert(config[CONF_COLOR_TEMPERATURE_INVERT]))
cg.add(
var.set_cold_white_temperature(config[CONF_COLD_WHITE_COLOR_TEMPERATURE])
)
cg.add(
var.set_warm_white_temperature(config[CONF_WARM_WHITE_COLOR_TEMPERATURE])
)
if CONF_MIN_VALUE in config:
cg.add(var.set_min_value(config[CONF_MIN_VALUE]))
if CONF_MAX_VALUE in config:
cg.add(var.set_max_value(config[CONF_MAX_VALUE]))
if CONF_COLOR_TEMPERATURE_MAX_VALUE in config:
cg.add(
var.set_color_temperature_max_value(
config[CONF_COLOR_TEMPERATURE_MAX_VALUE]
)
)
cg.add(var.set_color_interlock(config[CONF_COLOR_INTERLOCK]))
paren = await cg.get_variable(config[CONF_TUYA_ID])
cg.add(var.set_tuya_parent(paren))
|
132668
|
import asyncio
from collections import defaultdict
from ipaddress import ip_network
from typing import Dict, List
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from irrexplorer.settings import config
from irrexplorer.state import DataSource, IPNetwork, RouteInfo, RPKIStatus
IRRD_TIMEOUT = 600
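# Fields fetched for every RPSL object; the inline fragments add the
# route/route6-specific prefix, ASN and RPKI attributes.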
COMMON_GRAPHQL_FIELDS = """
rpslPk
objectClass
source
objectText
... on RPSLRoute {
prefix
asn
rpkiStatus
rpkiMaxLength
}
... on RPSLRoute6 {
prefix
asn
rpkiStatus
rpkiMaxLength
}
"""
GQL_QUERY_ASN = gql(
f"""
query getRoutes ($asn: [ASN!]!) {{
rpslObjects(
asn: $asn
objectClass: ["route", "route6"],
rpkiStatus: [valid,invalid,not_found]
) {{
{COMMON_GRAPHQL_FIELDS}
}}
}}
"""
)
GQL_QUERY_PREFIX = gql(
f"""
query getRoutes ($prefix: IP!, $object_class: [String!]!) {{
rpslObjects(
ipAny: $prefix
objectClass: $object_class,
rpkiStatus: [valid,invalid,not_found]
) {{
{COMMON_GRAPHQL_FIELDS}
}}
}}
"""
)
GQL_QUERY_AS_MEMBER_OF = gql(
"""
query getMemberOf($target: String!) {
asSet: rpslObjects(
members: [$target]
objectClass: ["as-set"]
rpkiStatus: [valid, invalid, not_found]
) {
rpslPk
source
}
autNum: rpslObjects(
rpslPk: [$target]
objectClass: ["aut-num"]
rpkiStatus: [valid, invalid, not_found]
) {
rpslPk
source
mntBy
... on RPSLAutNum {
memberOfObjs {
rpslPk
source
mbrsByRef
}
}
}
}
"""
)
GQL_QUERY_SET_MEMBERS = gql(
"""
query setMembers($names: [String!]!) {
recursiveSetMembers(setNames:$names, depth:1) {
rpslPk
rootSource
members
}
}
"""
)
class IRRDQuery:
def __init__(self):
# Read at this point to allow tests to change the endpoint
endpoint = config("IRRD_ENDPOINT")
self.transport = AIOHTTPTransport(url=endpoint, timeout=IRRD_TIMEOUT)
async def query_set_members(self, names: List[str]) -> Dict[str, Dict[str, List[str]]]:
async with Client(transport=self.transport, execute_timeout=IRRD_TIMEOUT) as session:
response = await session.execute(GQL_QUERY_SET_MEMBERS, {"names": names})
members_per_set: Dict[str, Dict[str, List[str]]] = defaultdict(dict)
for item in response["recursiveSetMembers"]:
members_per_set[item["rpslPk"]][item["rootSource"]] = item["members"]
return dict(members_per_set)
async def query_member_of(self, target: str):
if target.isnumeric():
target = "AS" + target
async with Client(transport=self.transport, execute_timeout=IRRD_TIMEOUT) as session:
return await session.execute(GQL_QUERY_AS_MEMBER_OF, {"target": target})
async def query_asn(self, asn: int):
async with Client(transport=self.transport, execute_timeout=IRRD_TIMEOUT) as session:
result = await session.execute(GQL_QUERY_ASN, {"asn": asn})
return self._graphql_to_route_info(result)
async def query_prefixes_any(self, prefixes: List[IPNetwork]) -> List[RouteInfo]:
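        # Issue one rpslObjects query per prefix, run them concurrently on a
        # single session, then flatten the per-prefix results into one list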
tasks = []
async with Client(transport=self.transport, execute_timeout=IRRD_TIMEOUT) as session:
for prefix in prefixes:
object_class = ["route"] if prefix.version == 4 else ["route6"]
task = session.execute(
GQL_QUERY_PREFIX,
{
"prefix": str(prefix),
"object_class": object_class,
},
)
tasks.append(task)
results_lists = await asyncio.gather(*tasks)
objects_lists = [
self._graphql_to_route_info(results_list) for results_list in results_lists
]
return [obj for objects_list in objects_lists for obj in objects_list]
def _graphql_to_route_info(self, graphql_result) -> List[RouteInfo]:
"""
Convert the response to an IRRd rpslObjects query
to a list of RouteInfo objects.
"""
results = []
for rpsl_obj in graphql_result["rpslObjects"]:
results.append(
RouteInfo(
source=DataSource.IRR,
prefix=ip_network(rpsl_obj["prefix"]),
asn=rpsl_obj["asn"] if rpsl_obj["asn"] else 0, # TODO: fix in irrd
rpsl_pk=rpsl_obj["rpslPk"],
irr_source=rpsl_obj["source"],
rpki_status=RPKIStatus[rpsl_obj["rpkiStatus"]],
rpki_max_length=rpsl_obj["rpkiMaxLength"],
rpsl_text=rpsl_obj["objectText"],
)
)
return results
|
132672
|
from typing import List
from infrastructure.cqrs.decorators.responseclass import responseclass
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListDto import GetDataOperationJobExecutionLogListDto
@responseclass
class GetDataOperationJobExecutionLogListResponse:
Data: List[GetDataOperationJobExecutionLogListDto] = None
|
132674
|
import logging
import os
from functools import wraps
from rcon.commands import CommandFailedError
from discord_webhook import DiscordEmbed
from rcon.recorded_commands import RecordedRcon
from rcon.player_history import (
save_player,
save_start_player_session,
save_end_player_session,
safe_save_player_action,
get_player,
_get_set_player,
)
from rcon.game_logs import on_connected, on_disconnected, on_camera, on_chat
from rcon.models import enter_session, PlayerSteamID, SteamInfo
from rcon.discord import (
    dict_to_discord,
    get_prepared_discord_hooks,
    send_to_discord_audit,
)
from rcon.steam_utils import (
    get_player_bans,
    STEAM_KEY,
    get_steam_profile,
    update_db_player_info,
)
from rcon.user_config import CameraConfig, RealVipConfig, VoteMapConfig
from rcon.map_recorder import VoteMap
from rcon.workers import temporary_broadcast, temporary_welcome
logger = logging.getLogger(__name__)
@on_chat
def count_vote(rcon: RecordedRcon, struct_log):
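    # Parse chat for a map vote; when one is found, register it, thank the
    # player via a temporary broadcast, and re-apply the vote outcome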
config = VoteMapConfig()
if not config.get_vote_enabled():
return
v = VoteMap()
if vote := v.is_vote(struct_log.get("sub_content")):
logger.debug("Vote chat detected: %s", struct_log["message"])
map_name = v.register_vote(
struct_log["player"], struct_log["timestamp_ms"] / 1000, vote
)
try:
temporary_broadcast(
rcon,
config.get_votemap_thank_you_text().format(
player_name=struct_log["player"], map_name=map_name
),
5,
)
except Exception:
logger.warning("Unable to output thank you message")
v.apply_with_retry(nb_retry=2)
MAX_DAYS_SINCE_BAN = os.getenv("BAN_ON_VAC_HISTORY_DAYS", 0)
AUTO_BAN_REASON = os.getenv(
"BAN_ON_VAC_HISTORY_REASON", "VAC ban history ({DAYS_SINCE_LAST_BAN} days ago)"
)
MAX_GAME_BAN_THRESHOLD = os.getenv("MAX_GAME_BAN_THRESHOLD", 0)
def ban_if_blacklisted(rcon: RecordedRcon, steam_id_64, name):
with enter_session() as sess:
player = get_player(sess, steam_id_64)
if not player:
logger.error("Can't check blacklist, player not found %s", steam_id_64)
return
if player.blacklist and player.blacklist.is_blacklisted:
try:
logger.info(
"Player %s was banned due blacklist, reason: %s",
str(name),
player.blacklist.reason,
)
rcon.do_perma_ban(
player=name,
reason=player.blacklist.reason,
by=f"BLACKLIST: {player.blacklist.by}",
)
safe_save_player_action(
rcon=rcon,
player_name=name,
action_type="PERMABAN",
reason=player.blacklist.reason,
by=f"BLACKLIST: {player.blacklist.by}",
steam_id_64=steam_id_64,
)
try:
send_to_discord_audit(
f"`BLACKLIST` -> {dict_to_discord(dict(player=name, reason=player.blacklist.reason))}",
"BLACKLIST",
)
                except Exception:
                    logger.error("Unable to send blacklist to audit log")
            except Exception:
send_to_discord_audit(
"Failed to apply ban on blacklisted players, please check the logs and report the error",
"ERROR",
)
def should_ban(bans, max_game_bans, max_days_since_ban):
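    # A player qualifies for a ban when the latest ban is recent enough and
    # the account is VAC banned or exceeds the game-ban threshold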
try:
days_since_last_ban = int(bans["DaysSinceLastBan"])
number_of_game_bans = int(bans.get("NumberOfGameBans", 0))
except ValueError: # In case DaysSinceLastBan can be null
return
has_a_ban = bans.get("VACBanned") == True or number_of_game_bans >= max_game_bans
if days_since_last_ban <= 0:
return False
if days_since_last_ban <= max_days_since_ban and has_a_ban:
return True
return False
def ban_if_has_vac_bans(rcon: RecordedRcon, steam_id_64, name):
try:
max_days_since_ban = int(MAX_DAYS_SINCE_BAN)
max_game_bans = (
float("inf")
if int(MAX_GAME_BAN_THRESHOLD) <= 0
else int(MAX_GAME_BAN_THRESHOLD)
)
except ValueError: # No proper value is given
logger.error(
"Invalid value given for environment variable BAN_ON_VAC_HISTORY_DAYS or MAX_GAME_BAN_THRESHOLD"
)
return
if max_days_since_ban <= 0:
return # Feature is disabled
with enter_session() as sess:
player = get_player(sess, steam_id_64)
if not player:
logger.error("Can't check VAC history, player not found %s", steam_id_64)
return
bans = get_player_bans(steam_id_64)
if not bans or not isinstance(bans, dict):
logger.warning(
"Can't fetch Bans for player %s, received %s", steam_id_64, bans
)
# Player couldn't be fetched properly (logged by get_player_bans)
return
if should_ban(bans, max_game_bans, max_days_since_ban):
reason = AUTO_BAN_REASON.format(
DAYS_SINCE_LAST_BAN=bans.get("DaysSinceLastBan"),
MAX_DAYS_SINCE_BAN=str(max_days_since_ban),
)
logger.info(
"Player %s was banned due VAC history, last ban: %s days ago",
str(player),
bans.get("DaysSinceLastBan"),
)
rcon.do_perma_ban(player=name, reason=reason, by="VAC BOT")
try:
audit_params = dict(
player=name,
steam_id_64=player.steam_id_64,
reason=reason,
days_since_last_ban=bans.get("DaysSinceLastBan"),
vac_banned=bans.get("VACBanned"),
number_of_game_bans=bans.get("NumberOfGameBans"),
)
send_to_discord_audit(
f"`VAC/GAME BAN` -> {dict_to_discord(audit_params)}", "AUTOBAN"
)
            except Exception:
logger.exception("Unable to send vac ban to audit log")
def inject_steam_id_64(func):
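    # Decorator that resolves the player's steam_id_64 from the log line and
    # passes it to the wrapped handler; the handler is skipped when it is missing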
@wraps(func)
def wrapper(rcon, struct_log):
try:
name = struct_log["player"]
info = rcon.get_player_info(name)
steam_id_64 = info.get("steam_id_64")
except KeyError:
logger.exception("Unable to inject steamid %s", struct_log)
raise
if not steam_id_64:
logger.warning("Can't get player steam_id for %s", name)
return
return func(rcon, struct_log, steam_id_64)
return wrapper
@on_connected
def handle_on_connect(rcon, struct_log):
steam_id_64 = rcon.get_player_info.get_cached_value_for(struct_log["player"])
try:
if type(rcon) == RecordedRcon:
rcon.invalidate_player_list_cache()
else:
rcon.get_player.cache_clear()
rcon.get_player_info.clear_for(struct_log["player"])
rcon.get_player_info.clear_for(player=struct_log["player"])
except Exception:
logger.exception("Unable to clear cache for %s", steam_id_64)
try:
info = rcon.get_player_info(struct_log["player"])
steam_id_64 = info.get("steam_id_64")
except (CommandFailedError, KeyError):
if not steam_id_64:
logger.exception("Unable to get player steam ID for %s", struct_log)
raise
else:
logger.error(
"Unable to get player steam ID for %s, falling back to cached value %s",
struct_log,
steam_id_64,
)
timestamp = int(struct_log["timestamp_ms"]) / 1000
save_player(
struct_log["player"],
steam_id_64,
timestamp=int(struct_log["timestamp_ms"]) / 1000,
)
save_start_player_session(steam_id_64, timestamp=timestamp)
ban_if_blacklisted(rcon, steam_id_64, struct_log["player"])
ban_if_has_vac_bans(rcon, steam_id_64, struct_log["player"])
@on_disconnected
@inject_steam_id_64
def handle_on_disconnect(rcon, struct_log, steam_id_64):
save_end_player_session(steam_id_64, struct_log["timestamp_ms"] / 1000)
@on_connected
@inject_steam_id_64
def update_player_steaminfo_on_connect(rcon, struct_log, steam_id_64):
if not steam_id_64:
logger.error(
"Can't update steam info, no steam id available for %s",
struct_log.get("player"),
)
return
profile = get_steam_profile(steam_id_64)
if not profile:
logger.error(
"Can't update steam info, no steam profile returned for %s",
struct_log.get("player"),
)
return
logger.info("Updating steam profile for player %s", struct_log["player"])
with enter_session() as sess:
player = _get_set_player(
sess, player_name=struct_log["player"], steam_id_64=steam_id_64
)
update_db_player_info(player, profile)
sess.commit()
def _set_real_vips(rcon: RecordedRcon, struct_log):
config = RealVipConfig()
if not config.get_enabled():
logger.debug("Real VIP is disabled")
return
desired_nb_vips = config.get_desired_total_number_vips()
min_vip_slot = config.get_minimum_number_vip_slot()
vip_count = rcon.get_vips_count()
remaining_vip_slots = max(desired_nb_vips - vip_count, max(min_vip_slot, 0))
rcon.set_vip_slots_num(remaining_vip_slots)
logger.info("Real VIP set slots to %s", remaining_vip_slots)
@on_connected
def do_real_vips(rcon: RecordedRcon, struct_log):
_set_real_vips(rcon, struct_log)
@on_disconnected
def undo_real_vips(rcon: RecordedRcon, struct_log):
_set_real_vips(rcon, struct_log)
@on_camera
def notify_camera(rcon: RecordedRcon, struct_log):
send_to_discord_audit(message=struct_log["message"], by=struct_log["player"])
try:
if hooks := get_prepared_discord_hooks("camera"):
embeded = DiscordEmbed(
title=f'{struct_log["player"]} - {struct_log["steam_id_64_1"]}',
description=struct_log["sub_content"],
color=242424,
)
for h in hooks:
h.add_embed(embeded)
h.execute()
except Exception:
logger.exception("Unable to forward to hooks")
config = CameraConfig()
if config.is_broadcast():
temporary_broadcast(rcon, struct_log["message"], 60)
if config.is_welcome():
temporary_welcome(rcon, struct_log["message"], 60)
if __name__ == "__main__":
from rcon.settings import SERVER_INFO
    log = {
        'version': 1,
        'timestamp_ms': 1627734269000,
        'relative_time_ms': 221.212,
        'raw': '[543 ms (1627734269)] CONNECTED Dr.WeeD',
        'line_without_time': 'CONNECTED Dr.WeeD',
        'action': 'CONNECTED',
        'player': 'Dr.WeeD',
        'steam_id_64_1': None,
        'player2': None,
        'steam_id_64_2': None,
        'weapon': None,
        'message': 'Dr.WeeD',
        'sub_content': None,
    }
    _set_real_vips(RecordedRcon(SERVER_INFO), struct_log=log)
|
132680
|
import socket
import sys
import threading
import time
import uuid
import unittest
from mock import patch
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
from kazoo.testing import KazooTestCase
from kazoo.exceptions import (
AuthFailedError,
BadArgumentsError,
ConfigurationError,
ConnectionClosedError,
ConnectionLoss,
InvalidACLError,
NoAuthError,
NoNodeError,
NodeExistsError,
SessionExpiredError,
)
from kazoo.protocol.connection import _CONNECTION_DROP
from kazoo.protocol.states import KeeperState, KazooState
from kazoo.tests.util import TRAVIS_ZK_VERSION
if sys.version_info > (3, ): # pragma: nocover
def u(s):
return s
else: # pragma: nocover
def u(s):
return unicode(s, "unicode_escape")
class TestClientTransitions(KazooTestCase):
def test_connection_and_disconnection(self):
states = []
rc = threading.Event()
@self.client.add_listener
def listener(state):
states.append(state)
if state == KazooState.CONNECTED:
rc.set()
self.client.stop()
eq_(states, [KazooState.LOST])
states.pop()
self.client.start()
rc.wait(2)
eq_(states, [KazooState.CONNECTED])
rc.clear()
states.pop()
self.expire_session()
rc.wait(2)
req_states = [KazooState.LOST, KazooState.CONNECTED]
eq_(states, req_states)
class TestClientConstructor(unittest.TestCase):
def _makeOne(self, *args, **kw):
from kazoo.client import KazooClient
return KazooClient(*args, **kw)
def test_invalid_handler(self):
from kazoo.handlers.threading import SequentialThreadingHandler
self.assertRaises(ConfigurationError,
self._makeOne, handler=SequentialThreadingHandler)
def test_chroot(self):
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/').chroot, '')
self.assertEqual(self._makeOne(hosts='127.0.0.1:2181/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a').chroot, '/a')
self.assertEqual(self._makeOne(hosts='127.0.0.1/a/b').chroot, '/a/b')
self.assertEqual(self._makeOne(
hosts='127.0.0.1:2181,127.0.0.1:2182/a/b').chroot, '/a/b')
def test_connection_timeout(self):
from kazoo.handlers.threading import TimeoutError
client = self._makeOne(hosts='127.0.0.1:9')
self.assertTrue(client.handler.timeout_exception is TimeoutError)
self.assertRaises(TimeoutError, client.start, 0.1)
def test_ordered_host_selection(self):
client = self._makeOne(hosts='127.0.0.1:9,127.0.0.2:9/a',
randomize_hosts=False)
hosts = [h for h in client.hosts]
eq_(hosts, [('127.0.0.1', 9), ('127.0.0.2', 9)])
def test_invalid_hostname(self):
client = self._makeOne(hosts='nosuchhost/a')
timeout = client.handler.timeout_exception
self.assertRaises(timeout, client.start, 0.1)
def test_retry_options_dict(self):
from kazoo.retry import KazooRetry
client = self._makeOne(command_retry=dict(max_tries=99),
connection_retry=dict(delay=99))
self.assertTrue(type(client._conn_retry) is KazooRetry)
self.assertTrue(type(client._retry) is KazooRetry)
eq_(client._retry.max_tries, 99)
eq_(client._conn_retry.delay, 99)
class TestAuthentication(KazooTestCase):
def _makeAuth(self, *args, **kwargs):
from kazoo.security import make_digest_acl
return make_digest_acl(*args, **kwargs)
def test_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.create("/1/2")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_connect_auth(self):
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client(auth_data=[('digest', digest_auth)])
client.start()
try:
client.create('/1', acl=(acl,))
# give ZK a chance to copy data to other node
time.sleep(0.1)
self.assertRaises(NoAuthError, self.client.get, "/1")
finally:
client.delete('/1')
client.stop()
client.close()
def test_unicode_auth(self):
username = u("xe4/\hm")
password = u("/\<PASSWORD>")
digest_auth = "%s:%s" % (username, password)
acl = self._makeAuth(username, password, all=True)
client = self._get_client()
client.start()
client.add_auth("digest", digest_auth)
client.default_acl = (acl,)
try:
client.create("/1")
client.ensure_path("/1/2/3")
eve = self._get_client()
eve.start()
self.assertRaises(NoAuthError, eve.get, "/1/2")
# try again with the wrong auth token
eve.add_auth("digest", "badbad:bad")
self.assertRaises(NoAuthError, eve.get, "/1/2")
finally:
# Ensure we remove the ACL protected nodes
client.delete("/1", recursive=True)
eve.stop()
eve.close()
def test_invalid_auth(self):
client = self._get_client()
client.start()
self.assertRaises(TypeError, client.add_auth,
'digest', ('user', 'pass'))
self.assertRaises(TypeError, client.add_auth,
None, ('user', 'pass'))
def test_async_auth(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
password = <PASSWORD>
digest_auth = "%s:%s" % (username, password)
result = client.add_auth_async("digest", digest_auth)
self.assertTrue(result.get())
def test_async_auth_failure(self):
client = self._get_client()
client.start()
username = uuid.uuid4().hex
password = <PASSWORD>
digest_auth = "%s:%s" % (username, password)
self.assertRaises(AuthFailedError, client.add_auth,
"unknown-scheme", digest_auth)
def test_add_auth_on_reconnect(self):
client = self._get_client()
client.start()
client.add_auth("digest", "jsmith:jsmith")
client._connection._socket.shutdown(socket.SHUT_RDWR)
while not client.connected:
time.sleep(0.1)
self.assertTrue(("digest", "jsmith:jsmith") in client.auth_data)
class TestConnection(KazooTestCase):
def test_chroot_warning(self):
k = self._get_nonchroot_client()
k.chroot = 'abba'
try:
with patch('warnings.warn') as mock_func:
k.start()
assert mock_func.called
finally:
k.stop()
def test_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
cv.wait(3)
assert cv.is_set()
def test_bad_session_expire(self):
from kazoo.protocol.states import KazooState
cv = threading.Event()
ab = threading.Event()
def watch_events(event):
if event == KazooState.LOST:
ab.set()
raise Exception("oops")
cv.set()
self.client.add_listener(watch_events)
self.expire_session()
ab.wait(0.5)
assert ab.is_set()
cv.wait(0.5)
assert not cv.is_set()
def test_state_listener(self):
from kazoo.protocol.states import KazooState
states = []
condition = threading.Condition()
def listener(state):
with condition:
states.append(state)
condition.notify_all()
self.client.stop()
eq_(self.client.state, KazooState.LOST)
self.client.add_listener(listener)
self.client.start(5)
with condition:
if not states:
condition.wait(5)
eq_(len(states), 1)
eq_(states[0], KazooState.CONNECTED)
def test_invalid_listener(self):
self.assertRaises(ConfigurationError, self.client.add_listener, 15)
def test_listener_only_called_on_real_state_change(self):
from kazoo.protocol.states import KazooState
self.assertTrue(self.client.state, KazooState.CONNECTED)
called = [False]
condition = threading.Event()
def listener(state):
called[0] = True
condition.set()
self.client.add_listener(listener)
self.client._make_state_change(KazooState.CONNECTED)
condition.wait(3)
self.assertFalse(called[0])
def test_no_connection(self):
client = self.client
client.stop()
self.assertFalse(client.connected)
self.assertTrue(client.client_id is None)
self.assertRaises(ConnectionClosedError, client.exists, '/')
def test_close_connecting_connection(self):
client = self.client
client.stop()
ev = threading.Event()
def close_on_connecting(state):
if state in (KazooState.CONNECTED, KazooState.LOST):
ev.set()
client.add_listener(close_on_connecting)
client.start()
# Wait until we connect
ev.wait(5)
ev.clear()
self.client._call(_CONNECTION_DROP, client.handler.async_result())
client.stop()
# ...and then wait until the connection is lost
ev.wait(5)
self.assertRaises(ConnectionClosedError,
self.client.create, '/foobar')
def test_double_start(self):
self.assertTrue(self.client.connected)
self.client.start()
self.assertTrue(self.client.connected)
def test_double_stop(self):
self.client.stop()
self.assertFalse(self.client.connected)
self.client.stop()
self.assertFalse(self.client.connected)
def test_restart(self):
self.assertTrue(self.client.connected)
self.client.restart()
self.assertTrue(self.client.connected)
def test_closed(self):
client = self.client
client.stop()
write_pipe = client._connection._write_pipe
# close the connection to free the pipe
client.close()
eq_(client._connection._write_pipe, None)
# sneak in and patch client to simulate race between a thread
# calling stop(); close() and one running a command
oldstate = client._state
client._state = KeeperState.CONNECTED
client._connection._write_pipe = write_pipe
try:
# simulate call made after write pipe is closed
self.assertRaises(ConnectionClosedError, client.exists, '/')
# simualte call made after write pipe is set to None
client._connection._write_pipe = None
self.assertRaises(ConnectionClosedError, client.exists, '/')
finally:
# reset for teardown
client._state = oldstate
client._connection._write_pipe = None
class TestClient(KazooTestCase):
def _getKazooState(self):
from kazoo.protocol.states import KazooState
return KazooState
def test_client_id(self):
client_id = self.client.client_id
self.assertEqual(type(client_id), tuple)
# make sure password is of correct length
self.assertEqual(len(client_id[1]), 16)
def test_connected(self):
client = self.client
self.assertTrue(client.connected)
def test_create(self):
client = self.client
path = client.create("/1")
eq_(path, "/1")
self.assertTrue(client.exists("/1"))
def test_create_on_broken_connection(self):
client = self.client
client.start()
client._state = KeeperState.EXPIRED_SESSION
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.AUTH_FAILED
self.assertRaises(AuthFailedError, client.create,
'/closedpath', b'bar')
client._state = KeeperState.CONNECTING
self.assertRaises(SessionExpiredError, client.create,
'/closedpath', b'bar')
client.stop()
client.close()
self.assertRaises(ConnectionClosedError, client.create,
'/closedpath', b'bar')
def test_create_null_data(self):
client = self.client
client.create("/nulldata", None)
value, _ = client.get("/nulldata")
self.assertEqual(value, None)
def test_create_empty_string(self):
client = self.client
client.create("/empty", b"")
value, _ = client.get("/empty")
eq_(value, b"")
def test_create_unicode_path(self):
client = self.client
path = client.create(u("/ascii"))
eq_(path, u("/ascii"))
path = client.create(u("/\xe4hm"))
eq_(path, u("/\xe4hm"))
def test_create_async_returns_unchrooted_path(self):
client = self.client
path = client.create_async('/1').get()
eq_(path, "/1")
def test_create_invalid_path(self):
client = self.client
self.assertRaises(TypeError, client.create, ('a', ))
self.assertRaises(ValueError, client.create, ".")
self.assertRaises(ValueError, client.create, "/a/../b")
self.assertRaises(BadArgumentsError, client.create, "/b\x00")
self.assertRaises(BadArgumentsError, client.create, "/b\x1e")
def test_create_invalid_arguments(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
self.assertRaises(TypeError, client.create, 'a', acl='all')
self.assertRaises(TypeError, client.create, 'a', acl=single_acl)
self.assertRaises(TypeError, client.create, 'a', value=['a'])
self.assertRaises(TypeError, client.create, 'a', ephemeral='yes')
self.assertRaises(TypeError, client.create, 'a', sequence='yes')
self.assertRaises(TypeError, client.create, 'a', makepath='yes')
def test_create_value(self):
client = self.client
client.create("/1", b"bytes")
data, stat = client.get("/1")
eq_(data, b"bytes")
def test_create_unicode_value(self):
client = self.client
self.assertRaises(TypeError, client.create, "/1", u("\xe4hm"))
def test_create_large_value(self):
client = self.client
kb_512 = b"a" * (512 * 1024)
client.create("/1", kb_512)
self.assertTrue(client.exists("/1"))
mb_2 = b"a" * (2 * 1024 * 1024)
self.assertRaises(ConnectionLoss, client.create, "/2", mb_2)
def test_create_acl_duplicate(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
client.create("/1", acl=[single_acl, single_acl])
acls, stat = client.get_acls("/1")
# ZK >3.4 removes duplicate ACL entries
if TRAVIS_ZK_VERSION:
version = TRAVIS_ZK_VERSION
else:
version = client.server_version()
self.assertEqual(len(acls), 1 if version > (3, 4) else 2)
def test_create_acl_empty_list(self):
from kazoo.security import OPEN_ACL_UNSAFE
client = self.client
client.create("/1", acl=[])
acls, stat = client.get_acls("/1")
self.assertEqual(acls, OPEN_ACL_UNSAFE)
def test_version_no_connection(self):
@raises(ConnectionLoss)
def testit():
self.client.server_version()
self.client.stop()
testit()
def test_create_ephemeral(self):
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
data, stat = client.get("/1")
eq_(data, b"ephemeral")
eq_(stat.ephemeralOwner, client.client_id[0])
def test_create_no_ephemeral(self):
client = self.client
client.create("/1", b"val1")
data, stat = client.get("/1")
self.assertFalse(stat.ephemeralOwner)
def test_create_ephemeral_no_children(self):
from kazoo.exceptions import NoChildrenForEphemeralsError
client = self.client
client.create("/1", b"ephemeral", ephemeral=True)
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1")
self.assertRaises(NoChildrenForEphemeralsError,
client.create, "/1/2", b"val1", ephemeral=True)
def test_create_sequence(self):
client = self.client
client.create("/folder")
path = client.create("/folder/a", b"sequence", sequence=True)
eq_(path, "/folder/a0000000000")
path2 = client.create("/folder/a", b"sequence", sequence=True)
eq_(path2, "/folder/a0000000001")
path3 = client.create("/folder/", b"sequence", sequence=True)
eq_(path3, "/folder/0000000002")
def test_create_ephemeral_sequence(self):
basepath = "/" + uuid.uuid4().hex
realpath = self.client.create(basepath, b"sandwich", sequence=True,
ephemeral=True)
self.assertTrue(basepath != realpath and realpath.startswith(basepath))
data, stat = self.client.get(realpath)
eq_(data, b"sandwich")
def test_create_makepath(self):
self.client.create("/1/2", b"val1", makepath=True)
data, stat = self.client.get("/1/2")
eq_(data, b"val1")
self.client.create("/1/2/3/4/5", b"val2", makepath=True)
data, stat = self.client.get("/1/2/3/4/5")
eq_(data, b"val2")
self.assertRaises(NodeExistsError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
def test_create_makepath_incompatible_acls(self):
from kazoo.client import KazooClient
from kazoo.security import make_digest_acl_credential, CREATOR_ALL_ACL
credential = make_digest_acl_credential("username", "password")
alt_client = KazooClient(self.cluster[0].address + self.client.chroot,
max_retries=5, auth_data=[("digest", credential)])
alt_client.start()
alt_client.create("/1/2", b"val2", makepath=True, acl=CREATOR_ALL_ACL)
try:
self.assertRaises(NoAuthError, self.client.create, "/1/2/3/4/5",
b"val2", makepath=True)
finally:
alt_client.delete('/', recursive=True)
alt_client.stop()
def test_create_no_makepath(self):
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1")
self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1",
makepath=False)
self.client.create("/1/2", b"val1", makepath=True)
self.assertRaises(NoNodeError, self.client.create, "/1/2/3/4", b"val1",
makepath=False)
def test_create_exists(self):
from kazoo.exceptions import NodeExistsError
client = self.client
path = client.create("/1")
self.assertRaises(NodeExistsError, client.create, path)
def test_create_get_set(self):
nodepath = "/" + uuid.uuid4().hex
self.client.create(nodepath, b"sandwich", ephemeral=True)
data, stat = self.client.get(nodepath)
eq_(data, b"sandwich")
newstat = self.client.set(nodepath, b"hats", stat.version)
self.assertTrue(newstat)
assert newstat.version > stat.version
# Some other checks of the ZnodeStat object we got
eq_(newstat.acl_version, stat.acl_version)
eq_(newstat.created, stat.ctime / 1000.0)
eq_(newstat.last_modified, newstat.mtime / 1000.0)
eq_(newstat.owner_session_id, stat.ephemeralOwner)
eq_(newstat.creation_transaction_id, stat.czxid)
eq_(newstat.last_modified_transaction_id, newstat.mzxid)
eq_(newstat.data_length, newstat.dataLength)
eq_(newstat.children_count, stat.numChildren)
eq_(newstat.children_version, stat.cversion)
def test_get_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get, ('a', 'b'))
self.assertRaises(TypeError, client.get, 'a', watch=True)
def test_bad_argument(self):
client = self.client
client.ensure_path("/1")
self.assertRaises(TypeError, self.client.set, "/1", 1)
def test_ensure_path(self):
client = self.client
client.ensure_path("/1/2")
self.assertTrue(client.exists("/1/2"))
client.ensure_path("/1/2/3/4")
self.assertTrue(client.exists("/1/2/3/4"))
def test_sync(self):
client = self.client
self.assertTrue(client.sync('/'), '/')
def test_exists(self):
nodepath = "/" + uuid.uuid4().hex
exists = self.client.exists(nodepath)
eq_(exists, None)
self.client.create(nodepath, b"sandwich", ephemeral=True)
exists = self.client.exists(nodepath)
self.assertTrue(exists)
assert isinstance(exists.version, int)
multi_node_nonexistent = "/" + uuid.uuid4().hex + "/hats"
exists = self.client.exists(multi_node_nonexistent)
eq_(exists, None)
def test_exists_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.exists, ('a', 'b'))
self.assertRaises(TypeError, client.exists, 'a', watch=True)
def test_exists_watch(self):
nodepath = "/" + uuid.uuid4().hex
event = self.client.handler.event_object()
def w(watch_event):
eq_(watch_event.path, nodepath)
event.set()
exists = self.client.exists(nodepath, watch=w)
eq_(exists, None)
self.client.create(nodepath, ephemeral=True)
event.wait(1)
self.assertTrue(event.is_set())
def test_exists_watcher_exception(self):
nodepath = "/" + uuid.uuid4().hex
event = self.client.handler.event_object()
# if the watcher throws an exception, all we can really do is log it
def w(watch_event):
eq_(watch_event.path, nodepath)
event.set()
raise Exception("test exception in callback")
exists = self.client.exists(nodepath, watch=w)
eq_(exists, None)
self.client.create(nodepath, ephemeral=True)
event.wait(1)
self.assertTrue(event.is_set())
def test_create_delete(self):
nodepath = "/" + uuid.uuid4().hex
self.client.create(nodepath, b"zzz")
self.client.delete(nodepath)
exists = self.client.exists(nodepath)
eq_(exists, None)
def test_get_acls(self):
from kazoo.security import make_digest_acl
acl = make_digest_acl('user', 'pass', all=True)
client = self.client
try:
client.create('/a', acl=[acl])
self.assertTrue(acl in client.get_acls('/a')[0])
finally:
client.delete('/a')
def test_get_acls_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get_acls, ('a', 'b'))
def test_set_acls(self):
from kazoo.security import make_digest_acl
acl = make_digest_acl('user', 'pass', all=True)
client = self.client
client.create('/a')
try:
client.set_acls('/a', [acl])
self.assertTrue(acl in client.get_acls('/a')[0])
finally:
client.delete('/a')
def test_set_acls_empty(self):
client = self.client
client.create('/a')
self.assertRaises(InvalidACLError, client.set_acls, '/a', [])
def test_set_acls_no_node(self):
from kazoo.security import OPEN_ACL_UNSAFE
client = self.client
self.assertRaises(NoNodeError, client.set_acls, '/a', OPEN_ACL_UNSAFE)
def test_set_acls_invalid_arguments(self):
from kazoo.security import OPEN_ACL_UNSAFE
single_acl = OPEN_ACL_UNSAFE[0]
client = self.client
self.assertRaises(TypeError, client.set_acls, ('a', 'b'), ())
self.assertRaises(TypeError, client.set_acls, 'a', single_acl)
self.assertRaises(TypeError, client.set_acls, 'a', 'all')
self.assertRaises(TypeError, client.set_acls, 'a', [single_acl], 'V1')
def test_set(self):
client = self.client
client.create('a', b'first')
stat = client.set('a', b'second')
data, stat2 = client.get('a')
self.assertEqual(data, b'second')
self.assertEqual(stat, stat2)
def test_set_null_data(self):
client = self.client
client.create("/nulldata", b"not none")
client.set("/nulldata", None)
value, _ = client.get("/nulldata")
self.assertEqual(value, None)
def test_set_empty_string(self):
client = self.client
client.create("/empty", b"not empty")
client.set("/empty", b"")
value, _ = client.get("/empty")
eq_(value, b"")
def test_set_invalid_arguments(self):
client = self.client
client.create('a', b'first')
self.assertRaises(TypeError, client.set, ('a', 'b'), b'value')
self.assertRaises(TypeError, client.set, 'a', ['v', 'w'])
self.assertRaises(TypeError, client.set, 'a', b'value', 'V1')
def test_delete(self):
client = self.client
client.ensure_path('/a/b')
self.assertTrue('b' in client.get_children('a'))
client.delete('/a/b')
self.assertFalse('b' in client.get_children('a'))
def test_delete_recursive(self):
client = self.client
client.ensure_path('/a/b/c')
client.ensure_path('/a/b/d')
client.delete('/a/b', recursive=True)
client.delete('/a/b/c', recursive=True)
self.assertFalse('b' in client.get_children('a'))
def test_delete_invalid_arguments(self):
client = self.client
client.ensure_path('/a/b')
self.assertRaises(TypeError, client.delete, '/a/b', recursive='all')
self.assertRaises(TypeError, client.delete, ('a', 'b'))
self.assertRaises(TypeError, client.delete, '/a/b', version='V1')
def test_get_children(self):
client = self.client
client.ensure_path('/a/b/c')
client.ensure_path('/a/b/d')
self.assertEqual(client.get_children('/a'), ['b'])
self.assertEqual(set(client.get_children('/a/b')), set(['c', 'd']))
self.assertEqual(client.get_children('/a/b/c'), [])
def test_get_children2(self):
client = self.client
client.ensure_path('/a/b')
children, stat = client.get_children('/a', include_data=True)
value, stat2 = client.get('/a')
self.assertEqual(children, ['b'])
self.assertEqual(stat2.version, stat.version)
def test_get_children2_many_nodes(self):
client = self.client
client.ensure_path('/a/b')
client.ensure_path('/a/c')
client.ensure_path('/a/d')
children, stat = client.get_children('/a', include_data=True)
value, stat2 = client.get('/a')
self.assertEqual(set(children), set(['b', 'c', 'd']))
self.assertEqual(stat2.version, stat.version)
def test_get_children_no_node(self):
client = self.client
self.assertRaises(NoNodeError, client.get_children, '/none')
self.assertRaises(NoNodeError, client.get_children,
'/none', include_data=True)
def test_get_children_invalid_path(self):
client = self.client
self.assertRaises(ValueError, client.get_children, '../a')
def test_get_children_invalid_arguments(self):
client = self.client
self.assertRaises(TypeError, client.get_children, ('a', 'b'))
self.assertRaises(TypeError, client.get_children, 'a', watch=True)
self.assertRaises(TypeError, client.get_children,
'a', include_data='yes')
def test_invalid_auth(self):
from kazoo.exceptions import AuthFailedError
from kazoo.protocol.states import KeeperState
client = self.client
client.stop()
client._state = KeeperState.AUTH_FAILED
@raises(AuthFailedError)
def testit():
client.get('/')
testit()
def test_client_state(self):
from kazoo.protocol.states import KeeperState
eq_(self.client.client_state, KeeperState.CONNECTED)
def test_update_host_list(self):
from kazoo.client import KazooClient
from kazoo.protocol.states import KeeperState
hosts = self.cluster[0].address
# create a client with only one server in its list
client = KazooClient(hosts=hosts)
client.start()
# try to change the chroot, not currently allowed
self.assertRaises(ConfigurationError,
client.set_hosts, hosts + '/new_chroot')
# grow the cluster to 3
client.set_hosts(self.servers)
# shut down the first host
try:
self.cluster[0].stop()
time.sleep(5)
eq_(client.client_state, KeeperState.CONNECTED)
finally:
self.cluster[0].run()
dummy_dict = {
'aversion': 1, 'ctime': 0, 'cversion': 1,
'czxid': 110, 'dataLength': 1, 'ephemeralOwner': 'ben',
'mtime': 1, 'mzxid': 1, 'numChildren': 0, 'pzxid': 1, 'version': 1
}
class TestClientTransactions(KazooTestCase):
def setUp(self):
KazooTestCase.setUp(self)
skip = False
if TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION < (3, 4):
skip = True
elif TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION >= (3, 4):
skip = False
else:
ver = self.client.server_version()
if ver[1] < 4:
skip = True
if skip:
raise SkipTest("Must use Zookeeper 3.4 or above")
def test_basic_create(self):
t = self.client.transaction()
t.create('/freddy')
t.create('/fred', ephemeral=True)
t.create('/smith', sequence=True)
results = t.commit()
eq_(results[0], '/freddy')
eq_(len(results), 3)
self.assertTrue(results[2].startswith('/smith0'))
def test_bad_creates(self):
args_list = [(True,), ('/smith', 0), ('/smith', b'', 'bleh'),
('/smith', b'', None, 'fred'),
('/smith', b'', None, True, 'fred')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.create(*args)
for args in args_list:
testit(args)
def test_default_acl(self):
from kazoo.security import make_digest_acl
username = uuid.uuid4().hex
password = uuid.uuid4().hex
digest_auth = "%s:%s" % (username, password)
acl = make_digest_acl(username, password, all=True)
self.client.add_auth("digest", digest_auth)
self.client.default_acl = (acl,)
t = self.client.transaction()
t.create('/freddy')
results = t.commit()
eq_(results[0], '/freddy')
def test_basic_delete(self):
self.client.create('/fred')
t = self.client.transaction()
t.delete('/fred')
results = t.commit()
eq_(results[0], True)
def test_bad_deletes(self):
args_list = [(True,), ('/smith', 'woops'), ]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.delete(*args)
for args in args_list:
testit(args)
def test_set(self):
self.client.create('/fred', b'01')
t = self.client.transaction()
t.set_data('/fred', b'oops')
t.commit()
res = self.client.get('/fred')
eq_(res[0], b'oops')
def test_bad_sets(self):
args_list = [(42, 52), ('/smith', False), ('/smith', b'', 'oops')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.set_data(*args)
for args in args_list:
testit(args)
def test_check(self):
self.client.create('/fred')
version = self.client.get('/fred')[1].version
t = self.client.transaction()
t.check('/fred', version)
t.create('/blah')
results = t.commit()
eq_(results[0], True)
eq_(results[1], '/blah')
def test_bad_checks(self):
args_list = [(42, 52), ('/smith', 'oops')]
@raises(TypeError)
def testit(args):
t = self.client.transaction()
t.check(*args)
for args in args_list:
testit(args)
def test_bad_transaction(self):
from kazoo.exceptions import RolledBackError, NoNodeError
t = self.client.transaction()
t.create('/fred')
t.delete('/smith')
results = t.commit()
eq_(results[0].__class__, RolledBackError)
eq_(results[1].__class__, NoNodeError)
def test_bad_commit(self):
t = self.client.transaction()
@raises(ValueError)
def testit():
t.commit()
t.committed = True
testit()
def test_bad_context(self):
@raises(TypeError)
def testit():
with self.client.transaction() as t:
t.check(4232)
testit()
def test_context(self):
with self.client.transaction() as t:
t.create('/smith', b'32')
eq_(self.client.get('/smith')[0], b'32')
class TestCallbacks(unittest.TestCase):
def test_session_callback_states(self):
from kazoo.protocol.states import KazooState, KeeperState
from kazoo.client import KazooClient
client = KazooClient()
client._handle = 1
client._live.set()
result = client._session_callback(KeeperState.CONNECTED)
eq_(result, None)
# Now with stopped
client._stopped.set()
result = client._session_callback(KeeperState.CONNECTED)
eq_(result, None)
# Test several state transitions
client._stopped.clear()
client.start_async = lambda: True
client._session_callback(KeeperState.CONNECTED)
eq_(client.state, KazooState.CONNECTED)
client._session_callback(KeeperState.AUTH_FAILED)
eq_(client.state, KazooState.LOST)
client._handle = 1
client._session_callback(-250)
eq_(client.state, KazooState.SUSPENDED)
class TestNonChrootClient(KazooTestCase):
def test_create(self):
client = self._get_nonchroot_client()
self.assertEqual(client.chroot, '')
client.start()
node = uuid.uuid4().hex
path = client.create(node, ephemeral=True)
client.delete(path)
client.stop()
def test_unchroot(self):
client = self._get_nonchroot_client()
client.chroot = '/a'
        self.assertEqual(client.unchroot('/a/b'), '/b')
        self.assertEqual(client.unchroot('/b/c'), '/b/c')
|
132683
|
from typing import List


class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
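        # Sorting lets a two-pointer scan cover every pair to the right of i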
opt = float("inf")
for i in range(len(nums)):
# Fix one integer
fixed = nums[i]
            newTarget = target - fixed
            l, r = i + 1, len(nums) - 1
            while l < r:
                now = nums[l] + nums[r]
                # If the sum of the other two integers is below newTarget,
                # the left pointer must move right to enlarge the sum.
                if now < newTarget:
                    l = l + 1
                elif now == newTarget:
                    return target
                else:
                    r = r - 1
                if abs(opt - target) > abs(now + fixed - target):
                    opt = now + fixed
return opt
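# Example: Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2  (-1 + 2 + 1)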
|
132702
|
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.test.test_api import BaseApiTest
class TestBoolObject(BaseApiTest):
def test_fromlong(self, space, api):
for i in range(-3, 3):
obj = api.PyBool_FromLong(i)
if i:
assert obj is space.w_True
else:
assert obj is space.w_False
def test_check(self, space, api):
assert api.PyBool_Check(space.w_True)
assert api.PyBool_Check(space.w_False)
assert not api.PyBool_Check(space.w_None)
assert not api.PyBool_Check(api.PyFloat_FromDouble(1.0))
class AppTestBoolMacros(AppTestCpythonExtensionBase):
def test_macros(self):
module = self.import_extension('foo', [
("get_true", "METH_NOARGS", "Py_RETURN_TRUE;"),
("get_false", "METH_NOARGS", "Py_RETURN_FALSE;"),
])
assert module.get_true() == True
assert module.get_false() == False
|
132722
|
import six
import pytest
from mock import Mock, patch
from nefertari import tweens
def mock_timer():
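    # Stand-in for time.time that advances one second per call, making the
    # measured request duration deterministic in the tests below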
mock_timer.time = 0
def time_func():
mock_timer.time += 1
return mock_timer.time
return time_func
class DummyConfigurator(object):
def __init__(self):
self.subscribed = []
def add_subscriber(self, wrapped, ifaces):
self.subscribed.append((wrapped, ifaces))
class TestTweens(object):
@patch('nefertari.tweens.time')
@patch('nefertari.tweens.log')
def test_request_timing(self, mock_log, mock_time):
mock_time.time = mock_timer()
request = Mock(method='GET', url='http://example.com')
registry = Mock()
registry.settings = {'request_timing.slow_request_threshold': 1000}
handler = lambda request: request
timing = tweens.request_timing(handler, registry)
timing(request)
mock_log.debug.assert_called_once_with(
'GET (http://example.com) request took 1 seconds')
assert not mock_log.warning.called
@patch('nefertari.tweens.time')
@patch('nefertari.tweens.log')
def test_request_timing_slow_request(self, mock_log, mock_time):
mock_time.time = mock_timer()
request = Mock(method='GET', url='http://example.com')
registry = Mock()
registry.settings = {'request_timing.slow_request_threshold': 0}
handler = lambda request: request
timing = tweens.request_timing(handler, registry)
timing(request)
mock_log.warning.assert_called_once_with(
'GET (http://example.com) request took 1 seconds')
assert not mock_log.debug.called
def test_get_tunneling(self):
class GET(dict):
def mixed(self):
return self
request = Mock(GET=GET({'_m': 'POST', 'foo': 'bar'}), method='GET')
get_tunneling = tweens.get_tunneling(lambda x: x, None)
get_tunneling(request)
assert request.GET == {"foo": "bar"}
assert request.method == 'POST'
assert request.content_type == 'application/json'
assert request.body == six.b('{"foo": "bar"}')
def test_get_tunneling_reserved_params_dropped(self):
from nefertari import RESERVED_PARAMS
class GET(dict):
def mixed(self):
return self
reserved = RESERVED_PARAMS[0]
get_data = GET({
'_m': 'POST',
'foo': 'bar',
reserved: 'boo',
})
request = Mock(GET=get_data, method='GET')
get_tunneling = tweens.get_tunneling(lambda x: x, None)
get_tunneling(request)
assert request.GET == {'foo': 'bar', reserved: 'boo'}
assert request.method == 'POST'
assert request.content_type == 'application/json'
assert request.body == six.b('{"foo": "bar"}')
assert request._tunneled_get
def test_get_tunneling_not_allowed_method(self):
class GET(dict):
def mixed(self):
return self
request = Mock(
GET=GET({'_m': 'DELETE', 'foo': 'bar'}), method='GET',
body=None, content_type=None)
get_tunneling = tweens.get_tunneling(lambda x: x, None)
get_tunneling(request)
assert request.GET == {"foo": "bar"}
assert request.method == 'DELETE'
assert request.content_type is None
assert request.body is None
def test_cors_no_origins_no_creds(self):
registry = Mock(settings={
'cors.allow_origins': '',
'cors.allow_credentials': None,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={'Origin': '127.0.0.1:8080'},
host_url='127.0.0.1:8080')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == []
def test_cors_disallow_creds(self):
registry = Mock(settings={
'cors.allow_origins': '',
'cors.allow_credentials': False,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={'Origin': '127.0.0.1:8080'},
host_url='127.0.0.1:8080')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == [
('Access-Control-Allow-Credentials', False)]
def test_cors_allow_creds_and_origin(self):
registry = Mock(settings={
'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
'cors.allow_credentials': True,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={'Origin': '127.0.0.1:8080'},
host_url='127.0.0.1:8080')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == [
('Access-Control-Allow-Origin', '127.0.0.1:8080'),
('Access-Control-Allow-Credentials', True)]
def test_cors_wrong_origin(self):
registry = Mock(settings={
'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
'cors.allow_credentials': None,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={'Origin': '127.0.0.1:8000'},
host_url='127.0.0.1:8000')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == []
def test_cors_source_or_host_url(self):
registry = Mock(settings={
'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
'cors.allow_credentials': None,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={'Origin': '127.0.0.1:8080'},
host_url='')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == [
('Access-Control-Allow-Origin', '127.0.0.1:8080')]
request = Mock(
headers={},
host_url='127.0.0.1:8080')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == [
('Access-Control-Allow-Origin', '127.0.0.1:8080')]
def test_cors_allow_origins_star_credentials_true(self):
registry = Mock(settings={
'cors.allow_origins': '*',
'cors.allow_credentials': True,
})
handler = lambda x: Mock(headerlist=[])
with pytest.raises(Exception) as ex:
tweens.cors(handler, registry)
expected = ('Not allowed Access-Control-Allow-Credentials '
'to set to TRUE if origin is *')
assert str(ex.value) == expected
def test_cors_allow_origins_star_credentials_false(self):
registry = Mock(settings={
'cors.allow_origins': '*',
'cors.allow_credentials': None,
})
handler = lambda x: Mock(headerlist=[])
request = Mock(
headers={},
host_url='127.1.2.3:1234')
response = tweens.cors(handler, registry)(request)
assert response.headerlist == [
('Access-Control-Allow-Origin', '127.1.2.3:1234')]
def test_cache_control_header_not_set(self):
handler = lambda x: Mock(headerlist=[('Cache-Control', '')])
response = tweens.cache_control(handler, None)(None)
assert not response.cache_expires.called
def test_cache_control_header_set(self):
handler = lambda x: Mock(headerlist=[])
response = tweens.cache_control(handler, None)(None)
response.cache_expires.assert_called_once_with(0)
def test_ssl_url_scheme(self):
request = Mock(
scheme=None,
environ={'HTTP_X_URL_SCHEME': 'Foo'}
)
tweens.ssl(lambda x: x, None)(request)
assert request.environ['wsgi.url_scheme'] == 'foo'
assert request.scheme == 'foo'
def test_ssl_forwarded_proto(self):
request = Mock(
scheme=None,
environ={'HTTP_X_FORWARDED_PROTO': 'Foo'}
)
tweens.ssl(lambda x: x, None)(request)
assert request.environ['wsgi.url_scheme'] == 'foo'
assert request.scheme == 'foo'
def test_ssl_no_scheme(self):
request = Mock(scheme=None, environ={})
tweens.ssl(lambda x: x, None)(request)
assert request.environ == {}
assert request.scheme is None
def test_enable_selfalias(self):
from pyramid.events import ContextFound
config = DummyConfigurator()
assert config.subscribed == []
tweens.enable_selfalias(config, 'foo')
assert len(config.subscribed) == 1
assert six.callable(config.subscribed[0][0])
assert config.subscribed[0][1] is ContextFound
def test_context_found_subscriber_alias_enabled(self):
config = DummyConfigurator()
tweens.enable_selfalias(config, 'foo')
context_found_subscriber = config.subscribed[0][0]
request = Mock(
user=Mock(username='user12'),
matchdict={'foo': 'self'})
context_found_subscriber(Mock(request=request))
assert request.matchdict['foo'] == 'user12'
def test_context_found_subscriber_no_matchdict(self):
config = DummyConfigurator()
tweens.enable_selfalias(config, 'foo')
context_found_subscriber = config.subscribed[0][0]
request = Mock(
user=Mock(username='user12'),
matchdict=None)
context_found_subscriber(Mock(request=request))
assert request.matchdict is None
def test_context_found_subscriber_not_self(self):
config = DummyConfigurator()
tweens.enable_selfalias(config, 'foo')
context_found_subscriber = config.subscribed[0][0]
request = Mock(
user=Mock(username='user12'),
matchdict={'foo': '1'})
context_found_subscriber(Mock(request=request))
assert request.matchdict['foo'] == '1'
def test_context_found_subscriber_not_authenticated(self):
config = DummyConfigurator()
tweens.enable_selfalias(config, 'foo')
context_found_subscriber = config.subscribed[0][0]
request = Mock(
user=None,
matchdict={'foo': 'self'})
context_found_subscriber(Mock(request=request))
assert request.matchdict['foo'] == 'self'
def test_context_found_subscriber_wrong_id_name(self):
config = DummyConfigurator()
tweens.enable_selfalias(config, 'foo')
context_found_subscriber = config.subscribed[0][0]
request = Mock(
user=Mock(username='user12'),
matchdict={'qoo': 'self'})
context_found_subscriber(Mock(request=request))
assert request.matchdict['qoo'] == 'self'
|
132736
|
from django.conf.urls import url
from core.views import general, host, host_group, task_template, task, ansible_user, rest
urlpatterns = [
url(r'^$', general.index, name='index'),
url(r'^hosts/$', host.search, name='host_search'),
url(r'^hosts/create/$', host.edit, name='host_create'),
url(r'^hosts/(?P<pk>\d+)/$', host.edit, name='host_update'),
url(r'^hosts/(?P<pk>\d+)/delete/$', host.delete, name='host_delete'),
url(r'^host_groups/$', host_group.search, name='host_group_search'),
url(r'^host_groups/create/$', host_group.edit, name='host_group_create'),
url(r'^host_groups/(?P<pk>\d+)/$', host_group.edit, name='host_group_update'),
url(r'^host_groups/(?P<pk>\d+)/delete/$', host_group.delete, name='host_group_delete'),
url(r'^task_templates/$', task_template.search, name='task_template_search'),
url(r'^task_templates/create/$', task_template.edit, name='task_template_create'),
url(r'^task_templates/(?P<pk>\d+)/$', task_template.edit, name='task_template_update'),
url(r'^task_templates/(?P<pk>\d+)/copy/$', task_template.copy, name='task_template_copy'),
url(r'^task_templates/(?P<pk>\d+)/run/$', task_template.run, name='task_template_run'),
url(r'^task_templates/(?P<pk>\d+)/delete/$', task_template.delete, name='task_template_delete'),
url(r'^task_templates/(?P<pk>\d+)/inventory/$', task_template.inventory, name='task_template_inventory'),
url(r'^task_templates/(?P<pk>\d+)/repeat_settings/$', task_template.repeat_settings, name='task_template_repeat_settings'),
url(r'^tasks/$', task.search, name='task_search'),
url(r'^tasks/create/$', task.create, name='task_create'),
url(r'^tasks/(?P<pk>\d+)/stop/$', task.stop, name='task_stop'),
url(r'^tasks/(?P<pk>\d+)/replay/$', task.replay, name='task_replay'),
url(r'^tasks/(?P<pk>\d+)/log/$', task.log, name='task_log'),
url(r'^tasks/(?P<pk>\d+)/inventory/$', task.inventory, name='task_inventory'),
url(r'^ansible_users/$', ansible_user.search, name='ansible_user_search'),
url(r'^ansible_users/create/$', ansible_user.edit, name='ansible_user_create'),
url(r'^ansible_users/(?P<pk>\d+)/$', ansible_user.edit, name='ansible_user_update'),
url(r'^ansible_users/(?P<pk>\d+)/delete/$', ansible_user.delete, name='ansible_user_delete'),
url(r'^api/task/(?P<task_id>\d+)/logs/$', rest.task_logs, name='rest_task_logs')
]
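# illustration (assuming this urlconf is mounted at the site root):
# reverse('host_update', kwargs={'pk': 1}) -> '/hosts/1/'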
|
132740
|
from pyecharts import options as opts
from pyecharts.charts import Tree
data = [
{
"children": [
{"name": "B"},
{
"children": [{"children": [{"name": "I"}], "name": "E"}, {"name": "F"}],
"name": "C",
},
{
"children": [
{"children": [{"name": "J"}, {"name": "K"}], "name": "G"},
{"name": "H"},
],
"name": "D",
},
],
"name": "A",
}
]
c = (
Tree()
.add("", data)
    .set_global_opts(title_opts=opts.TitleOpts(title="Tree - Basic Example"))
.render("tree_base.html")
)
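# note: render() returns the rendered file's path, so c above is the
# output path string rather than the Tree object itself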
|
132751
|
import sys
import time
import numpy as np
from gym.spaces.discrete import Discrete
from tianshou.data import Batch
from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv
if __name__ == '__main__':
from env import MyTestEnv, NXEnv
else: # pytest
from test.base.env import MyTestEnv, NXEnv
def has_ray():
try:
import ray # noqa: F401
return True
except ImportError:
return False
def recurse_comp(a, b):
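    # recursively compares nested ndarray / list / tuple / dict
    # structures; any exception raised during comparison counts as a
    # mismatch. Note that scalar leaves match no branch and fall
    # through to an implicit (falsy) None, so leaves should be arrays.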
try:
if isinstance(a, np.ndarray):
if a.dtype == object:
return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all()
else:
return np.allclose(a, b)
elif isinstance(a, (list, tuple)):
return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all()
elif isinstance(a, dict):
return np.array([recurse_comp(a[k], b[k]) for k in a.keys()]).all()
    except Exception:
return False
def test_async_env(size=10000, num=8, sleep=0.1):
# simplify the test case, just keep stepping
env_fns = [
lambda i=i: MyTestEnv(size=i, sleep=sleep, random_sleep=True)
for i in range(size, size + num)
]
test_cls = [SubprocVectorEnv, ShmemVectorEnv]
if has_ray():
test_cls += [RayVectorEnv]
for cls in test_cls:
v = cls(env_fns, wait_num=num // 2, timeout=1e-3)
v.seed(None)
v.reset()
# for a random variable u ~ U[0, 1], let v = max{u1, u2, ..., un}
# P(v <= x) = x^n (0 <= x <= 1), pdf of v is nx^{n-1}
# expectation of v is n / (n + 1)
# for a synchronous environment, the following actions should take
# about 7 * sleep * num / (num + 1) seconds
# for async simulation, the analysis is complicated, but the time cost
# should be smaller
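        # concrete illustration: the action list below holds 7 * num
        # actions stepped in batches of num, so with num = 8 and
        # sleep = 0.1 a synchronous run needs about
        # 7 * 0.1 * 8 / 9 ~= 0.62s, while the assertion at the end
        # budgets 6 * 0.1 * 8 / 9 ~= 0.53s for the async run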
action_list = [1] * num + [0] * (num * 2) + [1] * (num * 4)
current_idx_start = 0
action = action_list[:num]
env_ids = list(range(num))
o = []
spent_time = time.time()
while current_idx_start < len(action_list):
A, B, C, D = v.step(action=action, id=env_ids)
b = Batch({'obs': A, 'rew': B, 'done': C, 'info': D})
env_ids = b.info.env_id
o.append(b)
current_idx_start += len(action)
# len of action may be smaller than len(A) in the end
action = action_list[current_idx_start:current_idx_start + len(A)]
# truncate env_ids with the first terms
# typically len(env_ids) == len(A) == len(action), except for the
# last batch when actions are not enough
env_ids = env_ids[:len(action)]
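            # e.g. if only two actions remain but four envs reported
            # back, keep just the ids of the first two reporting envs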
spent_time = time.time() - spent_time
Batch.cat(o)
v.close()
        # require at least a 1/7 speedup over the ~7 * sleep * num / (num + 1) synchronous estimate
if sys.platform != "darwin": # macOS cannot pass this check
assert spent_time < 6.0 * sleep * num / (num + 1)
def test_async_check_id(size=100, num=4, sleep=.2, timeout=.7):
env_fns = [
lambda: MyTestEnv(size=size, sleep=sleep * 2),
lambda: MyTestEnv(size=size, sleep=sleep * 3),
lambda: MyTestEnv(size=size, sleep=sleep * 5),
lambda: MyTestEnv(size=size, sleep=sleep * 7)
]
test_cls = [SubprocVectorEnv, ShmemVectorEnv]
if has_ray():
test_cls += [RayVectorEnv]
total_pass = 0
for cls in test_cls:
pass_check = 1
v = cls(env_fns, wait_num=num - 1, timeout=timeout)
v.reset()
expect_result = [
[0, 1],
[0, 1, 2],
[0, 1, 3],
[0, 1, 2],
[0, 1],
[0, 2, 3],
[0, 1],
]
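        # sanity check of the first rows (my derivation): with
        # sleep = .2 the envs take .4, .6, 1.0 and 1.4s per step; at
        # the .7s timeout only envs 0 and 1 have finished -> [0, 1],
        # and on the next poll envs 2, 0 and 1 finish within
        # wait_num = 3 before timing out -> [0, 1, 2]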
ids = np.arange(num)
for res in expect_result:
t = time.time()
_, _, _, info = v.step([1] * len(ids), ids)
t = time.time() - t
ids = Batch(info).env_id
print(ids, t)
if not (
len(ids) == len(res) and np.allclose(sorted(ids), res) and
(t < timeout) == (len(res) == num - 1)
):
pass_check = 0
break
total_pass += pass_check
if sys.platform == "linux": # Windows/macOS may not pass this check
assert total_pass >= 2
def test_vecenv(size=10, num=8, sleep=0.001):
env_fns = [
lambda i=i: MyTestEnv(size=i, sleep=sleep, recurse_state=True)
for i in range(size, size + num)
]
venv = [
DummyVectorEnv(env_fns),
SubprocVectorEnv(env_fns),
ShmemVectorEnv(env_fns),
]
if has_ray():
venv += [RayVectorEnv(env_fns)]
for v in venv:
v.seed(0)
action_list = [1] * 5 + [0] * 10 + [1] * 20
o = [v.reset() for v in venv]
for a in action_list:
o = []
for v in venv:
A, B, C, D = v.step([a] * num)
if sum(C):
A = v.reset(np.where(C)[0])
o.append([A, B, C, D])
for index, infos in enumerate(zip(*o)):
if index == 3: # do not check info here
continue
for info in infos:
assert recurse_comp(infos[0], info)
if __name__ == '__main__':
t = [0] * len(venv)
for i, e in enumerate(venv):
t[i] = time.time()
e.reset()
for a in action_list:
done = e.step([a] * num)[2]
if sum(done) > 0:
e.reset(np.where(done)[0])
t[i] = time.time() - t[i]
for i, v in enumerate(venv):
print(f'{type(v)}: {t[i]:.6f}s')
for v in venv:
assert v.size == list(range(size, size + num))
assert v.env_num == num
assert v.action_space == [Discrete(2)] * num
for v in venv:
v.close()
def test_env_obs():
for obs_type in ["array", "object"]:
envs = SubprocVectorEnv(
[lambda i=x: NXEnv(i, obs_type) for x in [5, 10, 15, 20]]
)
obs = envs.reset()
assert obs.dtype == object
obs = envs.step([1, 1, 1, 1])[0]
assert obs.dtype == object
if __name__ == '__main__':
test_env_obs()
test_vecenv()
test_async_env()
test_async_check_id()
|
132756
|
import pytest
import numpy as np
from sklearn.datasets import load_iris, load_boston
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVR
from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure
from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn
from functools import reduce
@pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1)))
@pytest.mark.parametrize('nb_instances', (1, 10))
def test_linear_superposition(input_shape, nb_instances):
alphas = np.array([0.5, 0.5])
vecs_list = []
for i in range(nb_instances):
v0 = np.zeros((1,) + input_shape)
v1 = np.ones((1,) + input_shape)
vec = np.stack((v0, v1), axis=1)
vecs_list.append(vec)
vecs = reduce(lambda x, y: np.vstack((x, y)), vecs_list)
summ = _linear_superposition(alphas, vecs, input_shape)
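    # with alphas (0.5, 0.5) over an all-zeros and an all-ones stack,
    # every element of the superposition is 0.5 * 0 + 0.5 * 1 = 0.5,
    # which is exactly what the final assert checks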
assert summ.shape[0] == nb_instances
assert summ.shape[1:] == input_shape
assert (summ == 0.5).all()
@pytest.mark.parametrize('nb_instances', (1, 5))
@pytest.mark.parametrize('nb_samples', (2, 10))
def test_sample_knn(nb_instances, nb_samples):
iris = load_iris()
X_train = iris.data
input_shape = X_train.shape[1:]
x = np.ones((nb_instances, ) + input_shape)
X_samples = _sample_knn(x=x, X_train=X_train, nb_samples=nb_samples)
assert X_samples.shape[0] == nb_instances
assert X_samples.shape[1] == nb_samples
@pytest.mark.parametrize('nb_instances', (5, ))
@pytest.mark.parametrize('nb_samples', (3, ))
@pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1)))
def test_sample_grid(nb_instances, nb_samples, input_shape):
x = np.ones((nb_instances, ) + input_shape)
nb_features = x.reshape(x.shape[0], -1).shape[1]
feature_range = np.array([[0, 1] for _ in range(nb_features)])
X_samples = _sample_grid(x, feature_range, nb_samples=nb_samples)
assert X_samples.shape[0] == nb_instances
assert X_samples.shape[1] == nb_samples
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_linearity_measure_class(method, epsilon, res, nb_instances, agg):
iris = load_iris()
X_train = iris.data
y_train = iris.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LogisticRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict_proba(x)
lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res,
model_type='classifier', agg=agg)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
feature_range = [[0, 1] for _ in range(X_train.shape[1])]
lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range,
res=res, model_type='classifier', agg=agg)
assert lin_2.shape[0] == nb_instances, 'Nb of linearity values returned different from number of instances'
assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0'
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_linearity_measure_reg(method, epsilon, res, nb_instances, agg):
boston = load_boston()
X_train, y_train = boston.data, boston.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LinearRegression()
lg.fit(X_train, y_train)
svr = SVR(kernel='linear')
svr.fit(X_train, y_train)
def predict_fn_svr(x):
return svr.predict(x)
def predict_fn(x):
return lg.predict(x)
lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res,
model_type='regressor', agg=agg)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
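    # a perfectly linear model (LinearRegression) is reproduced exactly
    # by linear superposition, so its deviation score should be ~0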
assert np.allclose(lin, np.zeros(lin.shape))
lin_svr = linearity_measure(predict_fn_svr, x, method=method, epsilon=epsilon, X_train=X_train,
res=res, model_type='regressor', agg=agg)
assert lin_svr.shape[0] == nb_instances, 'Checking shapes'
assert (lin_svr >= 0).all(), 'Linearity measure must be >= 0'
feature_range = [[0, 1] for _ in range(X_train.shape[1])]
lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range,
res=res, model_type='regressor', agg=agg)
assert lin_2.shape[0] == nb_instances, 'Checking shapes'
assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_2, np.zeros(lin_2.shape))
lin_2_svr = linearity_measure(predict_fn_svr, x, method='grid', epsilon=epsilon,
feature_range=feature_range, res=res, model_type='regressor', agg=agg)
assert lin_2_svr.shape[0] == nb_instances, 'Checking shapes'
assert (lin_2_svr >= 0).all(), 'Linearity measure must be >= 0'
y_train_multi = np.stack((y_train, y_train), axis=1)
lg_multi = LinearRegression()
lg_multi.fit(X_train, y_train_multi)
def predict_fn_multi(x):
return lg_multi.predict(x)
lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm_multi.fit(X_train)
lin_multi = lm_multi.score(predict_fn_multi, x)
assert lin_multi.shape[0] == nb_instances, 'Checking shapes'
assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_multi, np.zeros(lin_multi.shape))
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_LinearityMeasure_class(method, epsilon, res, nb_instances, agg):
iris = load_iris()
X_train = iris.data
y_train = iris.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LogisticRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict_proba(x)
lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='classifier', agg=agg)
lm.fit(X_train)
lin = lm.score(predict_fn, x)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_LinearityMeasure_reg(method, epsilon, res, nb_instances, agg):
boston = load_boston()
X_train, y_train = boston.data, boston.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LinearRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict(x)
y_train_multi = np.stack((y_train, y_train), axis=1)
lg_multi = LinearRegression()
lg_multi.fit(X_train, y_train_multi)
def predict_fn_multi(x):
return lg_multi.predict(x)
lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm.fit(X_train)
lin = lm.score(predict_fn, x)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin, np.zeros(lin.shape))
lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm_multi.fit(X_train)
lin_multi = lm_multi.score(predict_fn_multi, x)
assert lin_multi.shape[0] == nb_instances, 'Checking shapes'
assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_multi, np.zeros(lin_multi.shape))
|
132790
|
import time
import telepot
from telepot.utils import clean_data
import telepot.test_settings as st
class AdminBot(telepot.Bot):
def on_chat_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print(chat_id)
        self.sendAnimation(chat_id, open('TestFiles/tenor.gif', 'rb'), thumb=open('TestFiles/tenor.gif', 'rb'))
def on_passport_data(self, msg):
chat_id, passport_data = telepot.glance(msg, flavor='all_passport_data')
        output = clean_data(self, passport_data, 'TestFiles/private.key')
        print(output)
def on_poll_data(self, msg):
poll_id, extra_data, chat_id = telepot.glance(msg, flavor='poll_data')
print(poll_id, extra_data, chat_id)
TOKEN = st.TOKEN
bot = AdminBot(TOKEN)
bot.message_loop()
print('Send me a text message ...')
while True:
time.sleep(1)
|