| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
413996
|
from unittest.mock import Mock
import pytest
from weaverbird.backends.sql_translator.steps import translate_table
from weaverbird.pipeline.steps import TableStep
@pytest.fixture
def sql_query_describer():
def f(domain):
return {'toto': 'integer', 'raichu': 'integer'}
return f
def test_translate_table(sql_query_describer):
sql_table_retriever_mock = Mock(
return_value='SELECT * FROM products'
    )  # TODO: update when retrieve_query is updated
step = TableStep(name='domain', domain='kalimdor')
query = translate_table(
step,
None,
sql_query_retriever=sql_table_retriever_mock,
sql_query_describer=sql_query_describer,
index=0,
)
sql_table_retriever_mock.assert_called_once_with('kalimdor')
assert query.transformed_query == 'WITH SELECT_STEP_0 AS (SELECT * FROM products)'
assert query.selection_query == 'SELECT TOTO, RAICHU FROM SELECT_STEP_0'
assert query.query_name == 'SELECT_STEP_0'
def test_translate_table_error_retrieve_table(sql_query_describer):
sql_table_retriever_mock = Mock(
side_effect=Exception
    )  # TODO: update when retrieve_query is updated
step = TableStep(name='domain', domain='kalimdor')
with pytest.raises(Exception):
translate_table(
step,
None,
            sql_query_retriever=sql_table_retriever_mock,
sql_query_describer=sql_query_describer,
index=0,
)
|
413998
|
import numpy as np
from math import log, ceil
from reduce_tree import ReduceTree, SumTree
# A simple memory to store and sample experiences.
class Memory(object):
def __init__(self, size):
self.size = size
self.idx = 0
self.memory = np.zeros(size, dtype=object)
def store(self, exp):
self.memory[self.idx % self.size] = exp
self.idx += 1
def sample(self, batch_size):
indices = np.random.choice(min(self.idx, self.size), batch_size)
return self.memory[indices]
# Prioritized Replay: https://arxiv.org/abs/1511.05952
class PrioritizedMemory(object):
def __init__(self, size, alpha=0.6):
self.size = int(2 ** ceil(log(size, 2)))
self.memory = np.zeros(self.size, dtype=object)
self.sum_tree = SumTree(self.size)
self.min_tree = ReduceTree(self.size, min)
self.idx = 0
self.max_value = 1.
self.max_value_upper = 1000.
self.alpha = alpha
def store(self, exp):
idx = self.idx % self.size
self.memory[idx] = exp
self.sum_tree[idx] = self.max_value
self.min_tree[idx] = self.max_value
self.idx += 1
def sample(self, batch_size, beta):
indices = []
max_value = self.sum_tree.root
for _ in range(batch_size):
value = np.random.uniform(0, max_value)
idx = self.sum_tree.sample(value)
indices.append(idx)
min_value = self.min_tree.root
return indices, self.memory[indices], (self.sum_tree[indices] / (min_value + 1e-4)) ** (-beta)
def update(self, indices, values):
values = np.array(values)
values_modified = values ** self.alpha
self.sum_tree[indices] = values_modified
self.min_tree[indices] = values_modified
self.max_value = max(self.max_value, np.max(values))
self.max_value = min(self.max_value, self.max_value_upper)
def is_full(self):
return self.idx >= self.size
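# Hedged usage sketch (assumes the SumTree/ReduceTree classes imported above
# support integer/list indexing and that SumTree.sample maps a prefix-sum
# value to a leaf index, as the class body already relies on):
if __name__ == "__main__":
    mem = PrioritizedMemory(size=1024, alpha=0.6)
    for t in range(2000):
        mem.store((t, "action", 0.0, t + 1))  # hypothetical transition tuple
    indices, batch, weights = mem.sample(batch_size=32, beta=0.4)
    # after computing fresh TD errors for the batch, refresh the priorities:
    mem.update(indices, np.abs(np.random.randn(32)) + 1e-6)
    print(len(batch), "transitions sampled; buffer full:", mem.is_full())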
|
414015
|
import asyncio
import robloxapi
client = robloxapi.Client("COOKIE")
"""
Gets all trades
"""
async def main():
trades = await client.get_trades()
for trade in trades:
await trade.decline()
asyncio.run(main())
|
414101
|
names = ['Christopher', 'Susan']
scores = []
scores.append(98)
scores.append(99)
print(names)
print(scores)
|
414151
|
from ztag.annotation import *
class MiniHTTPD(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
def process(self, obj, meta):
server = obj["headers"]["server"]
meta = self.simple_banner_version(server.split(" ", 1)[0], "mini_httpd", meta)
if meta and " " in server:
meta.local_metadata.revision = server.split(" ", 1)[1]
return meta
|
414165
|
class TreeNode:
def __init__(self, val):
self.value_ = val
self.parent_ = self
self.rank_ = 0
        # the children list is only meaningful on the root; used to traverse elements
self.children = [self]
def append(self, another_root):
"""
:type another_root: TreeNode
"""
another_root.parent_ = self
# since tree height increment by one
if self.rank_ == another_root.rank_:
self.rank_ += 1
# update children
self.children.extend(another_root.children)
another_root.children = None
@staticmethod
def link(node_x, node_y):
"""
:type node_x: TreeNode
:type node_y: TreeNode
"""
if node_x.rank_ > node_y.rank_:
node_x.append(node_y)
return node_x
else:
node_y.append(node_x)
return node_y
# note: callers must keep the sets disjoint
# self.node_dict_ maps each element to its TreeNode
class ForestDisjointSet:
def __init__(self):
self.set_list_ = set()
self.node_dict_ = {}
def make_set(self, x):
new_node = TreeNode(x)
self.set_list_.add(new_node)
self.node_dict_[x] = new_node
def union(self, x, y):
node_x = self.find_set(x)
node_y = self.find_set(y)
# heuristic: union by rank
union_node = TreeNode.link(node_x, node_y)
if union_node is node_y:
self.set_list_.remove(node_x)
else:
self.set_list_.remove(node_y)
def find_set_detail(self, node_x):
# bottom condition
if node_x.parent_ == node_x:
return node_x.parent_
else:
# heuristic: path compression
# two pass method: 1) recursively find the root, 2) update parent in the path to root
node_x.parent_ = self.find_set_detail(node_x.parent_)
return node_x.parent_
def find_set(self, x):
"""
:rtype: TreeNode
"""
node_x = self.node_dict_[x]
assert isinstance(node_x, TreeNode)
return self.find_set_detail(node_x)
    def __str__(self):
        return str([sorted(node.value_ for node in my_set.children) for my_set in self.set_list_])
def connected_component(vertex_collection, edge_list):
naive_disjoint_set = ForestDisjointSet()
for vertex in vertex_collection:
naive_disjoint_set.make_set(vertex)
for src, dst in edge_list:
if naive_disjoint_set.find_set(src) != naive_disjoint_set.find_set(dst):
naive_disjoint_set.union(src, dst)
return naive_disjoint_set
if __name__ == '__main__':
edge_list = [(1, 2), (2, 3), (4, 5), (4, 7), (5, 7)]
vertex_set = set()
for src, dst in edge_list:
vertex_set.add(src)
vertex_set.add(dst)
vertex_set.add(9)
# find connected component
    print('vertices:', vertex_set)
    print('edges:', edge_list)
    print('connected component:', connected_component(vertex_set, edge_list))
|
414193
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from io import open
from builtins import map
from builtins import range
from builtins import chr
from builtins import str
from future import standard_library
standard_library.install_aliases()
from .file import File, WrongFormatError, BrokenFormatError
import numpy as np
import pandas as pd
class HAWCStab2PwrFile(File):
@staticmethod
def defaultExtensions():
return ['.pwr', '.txt']
@staticmethod
def formatName():
return 'HAWCStab2 power file'
def _read(self):
# Reading header line
with open(self.filename,'r',encoding=self.encoding) as f:
header = f.readline().strip()
            if len(header)<=0 or header[0]!='#':
                raise WrongFormatError('Pwr File {}: header line does not start with `#`'.format(self.filename))
# Extracting column names
header = '0 '+header[1:].strip()
num_and_cols = [s.strip()+']' for s in header.split(']')[:-1]]
cols = [(' '.join(col.split(' ')[1:])).strip().replace(' ','_') for col in num_and_cols]
# Determining type based on number of columns (NOTE: could use col names as well maybe)
        if len(cols)!=15:
            raise WrongFormatError('Pwr File {}: expected 15 columns, found {}'.format(self.filename, len(cols)))
self.colNames=cols
# Reading numerical data
try:
self.data = np.loadtxt(self.filename, skiprows=1)
except Exception as e:
raise BrokenFormatError('Pwr File {}: '.format(self.filename)+e.args[0])
        if self.data.shape[1]!=len(cols):
            raise BrokenFormatError('Pwr File {}: inconsistent number of header columns and data columns.'.format(self.filename))
#def _write(self):
#self.data.to_csv(self.filename,sep=self.false,index=False)
def _toDataFrame(self):
return pd.DataFrame(data=self.data, columns=self.colNames)
|
414216
|
from typing import Any, List, TypeVar, Union
from statham.schema.constants import Maybe, NotPassed
from statham.schema.elements.base import Element
from statham.schema.helpers import remove_duplicates
from statham.schema.validation import InstanceOf
Item = TypeVar("Item")
# pylint: disable=too-many-instance-attributes
class Array(Element[List[Item]]): # pylint: disable=missing-param-doc
"""JSON Schema ``"array"`` element.
:param items:
As in :class:`statham.schema.elements.Element`, but as a required
positional argument.
"""
items: Union[Element[Item], List[Element]]
def __init__(
self,
items: Union[Element[Item], List[Element]],
*,
default: Maybe[List] = NotPassed(),
const: Maybe[Any] = NotPassed(),
enum: Maybe[List[Any]] = NotPassed(),
additionalItems: Union[Element, bool] = True,
minItems: Maybe[int] = NotPassed(),
maxItems: Maybe[int] = NotPassed(),
uniqueItems: bool = False,
contains: Maybe[Element] = NotPassed(),
):
self.items = items
self.default = default
self.const = const
self.enum = enum
self.additionalItems = additionalItems
self.minItems = minItems
self.maxItems = maxItems
self.uniqueItems = uniqueItems
self.contains = contains
@property
def annotation(self) -> str:
if not self.item_annotations:
return "List"
if len(self.item_annotations) == 1:
return f"List[{self.item_annotations[0]}]"
return f"List[Union[{', '.join(self.item_annotations)}]]"
@property
def item_annotations(self) -> List[str]:
"""Get a list of possible type annotations."""
if isinstance(self.items, Element):
return [self.items.annotation]
annotations: List[str] = [item.annotation for item in self.items]
if self.additionalItems is True:
return ["Any"]
if isinstance(self.additionalItems, Element):
annotations.append(self.additionalItems.annotation)
if "Any" in annotations:
return ["Any"]
return remove_duplicates(annotations)
@property
def type_validator(self):
return InstanceOf(list)
|
414262
|
import os
import sys
sys.path.append('../')
from settings import *
import pandas as pd
import numpy as np
from utils.utils import create_folder
import librosa
import mir_eval
SAMPLING_RATE = TARGET_SAMPLING_RATE
if ISOLATED:
TEST_UNET_CONFIG = TYPE+'_baseline'
output_path = os.path.join(DUMPS_FOLDER, 'stitched', TEST_UNET_CONFIG, 'test')
folders = os.listdir(output_path)
metadata = ['filename', *[y + x for y in ['SDR_', 'SIR_', 'SAR_'] for x in SOURCES_SUBSET]]
df = pd.DataFrame(columns=metadata)
category = 'test'
setting = TEST_UNET_CONFIG
sr = TARGET_SAMPLING_RATE
GT = os.path.join(MUSDB_WAVS_FOLDER_PATH + '_' + str(TARGET_SAMPLING_RATE))
COMPARISON = os.path.join(DUMPS_FOLDER, 'stitched',
                          TEST_UNET_CONFIG)  # [resampled_output_path, phasemixed_output_path, resampled_phasemixed_output_path]
results_folder = os.path.join(DUMPS_FOLDER, 'results',
setting) # {1.ori:mixphased,resampled,resampled_mixphased; 2.down:mixphased_10800} ##TODO stereo
create_folder(results_folder)
folders = sorted(os.listdir(os.path.join(GT, category)))
for i, folder in enumerate(folders):
print('[{0}/{1}] [TRACK NAME]: {2}'.format(i, len(folders), folder))
for idx, source in enumerate(SOURCES_SUBSET):
gt_i, _ = librosa.load(os.path.join(GT, category, folder, source + '.wav'), sr=sr)
y_i, _ = librosa.load(os.path.join(COMPARISON, category, folder, source + '.wav'), sr=sr)
if idx == 0:
L = len(gt_i)
gt = np.zeros([len(SOURCES_SUBSET), L])
y = np.zeros([len(SOURCES_SUBSET), L])
gt[idx] = gt_i[:L]
del gt_i
y[idx] = y_i[:L]
del y_i
# gt = gt[:,:y.shape[0]] ##Also measure the impact of this. notice that min and max value indices of gt and estimates do not always coincide (but are closeby)
(sdr, sir, sar, perm) = mir_eval.separation.bss_eval_sources(gt, y, compute_permutation=False)
row = [*sdr, *sir, *sar]
print('Perm : ' + str(perm))
del sdr, sar, sir, perm
df.loc[i] = [folder, *row]
del row
df.to_csv(os.path.join(results_folder, category + '_metrics.csv'), index=False)
|
414263
|
import os
import requests
import json
from src.Platform import pt
from bypy import ByPy
class Baidu(object):
def __init__(self):
bp = ByPy()
#print(bp.info()) # or whatever instance methods of ByPy class
    # Before using the upload API, apply for access first at: https://pan.baidu.com/union/apply/
    # def get_token(self):
    #     # not needed
    #     url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id={0}&client_secret={1}".format(self.AppKey, self.SecretKey)
    #     # fetch the token
    #     res = requests.get(url).text
    #     data = json.loads(res)  # convert the JSON response into a dict
    #     self.access_token = data['access_token']
    #     self.filename = pt.filename()
    def upload(self):
        path = pt.getpath()
        print("Syncing backup files; if there are many files, please be patient")
        # re: https://www.jianshu.com/p/19ddb60e2b22
        cmd = 'bypy syncup ' + path + " /"
        print("Upload finished: ", os.system(cmd))
    def main(self):
        self.upload()  # bypy syncs the current directory to the cloud drive
|
414312
|
from .data import (TxtTokLmdb, DetectFeatLmdb,
ConcatDatasetWithLens, ImageLmdbGroup)
from .mlm import (MlmDataset, MlmEvalDataset,
BlindMlmDataset, BlindMlmEvalDataset,
mlm_collate, mlm_eval_collate,
mlm_blind_collate, mlm_blind_eval_collate)
from .mrm import (MrfrDataset, OnlyImgMrfrDataset,
MrcDataset, OnlyImgMrcDataset,
mrfr_collate, mrfr_only_img_collate,
mrc_collate, mrc_only_img_collate)
from .itm import (TokenBucketSamplerForItm,
ItmDataset, itm_collate, itm_ot_collate,
ItmRankDataset, ItmRankDatasetHardNeg, itm_rank_collate,
ItmRankDatasetHardNegFromText,
ItmRankDatasetHardNegFromImage, itm_rank_hnv2_collate,
ItmHardNegDataset, itm_hn_collate,
ItmValDataset, itm_val_collate,
ItmEvalDataset, itm_eval_collate)
from .sampler import TokenBucketSampler, DistributedSampler
from .loader import MetaLoader, PrefetchLoader
from .vqa import VqaDataset, vqa_collate, VqaEvalDataset, vqa_eval_collate
from .nlvr2 import (Nlvr2PairedDataset, nlvr2_paired_collate,
Nlvr2PairedEvalDataset, nlvr2_paired_eval_collate,
Nlvr2TripletDataset, nlvr2_triplet_collate,
Nlvr2TripletEvalDataset, nlvr2_triplet_eval_collate)
from .ve import VeDataset, ve_collate, VeEvalDataset, ve_eval_collate
|
414315
|
val = []
p = input('Enter a sequence of numbers ').split()
for i in range(0, 4):
val.append(float(p[i]))
#https://pt.stackoverflow.com/q/149149/101
|
414349
|
import torch
from nanodet.model.arch import build_model
from nanodet.util import cfg, load_config
from nni.compression.pytorch import ModelSpeedup
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
"""
NanoDet model can be installed from https://github.com/RangiLyu/nanodet.git
"""
cfg_path = r"nanodet/config/nanodet-RepVGG-A0_416.yml"
load_config(cfg, cfg_path)
model = build_model(cfg.model).cpu()
dummy_input = torch.rand(8, 3, 416, 416)
op_names = []
# these three conv layers are followed by reshape-like functions
# that cannot be replaced, so we skip these three conv layers,
# you can also get such layers by `not_safe_to_prune` function
excludes = ['head.gfl_cls.0', 'head.gfl_cls.1', 'head.gfl_cls.2']
for name, module in model.named_modules():
if isinstance(module, torch.nn.Conv2d):
if name not in excludes:
op_names.append(name)
cfg_list = [{'op_types':['Conv2d'], 'sparsity':0.5, 'op_names':op_names}]
pruner = L1FilterPruner(model, cfg_list)
pruner.compress()
pruner.export_model('./model', './mask')
# need call _unwrap_model if you want run the speedup on the same model
pruner._unwrap_model()
# Speedup the nanodet
ms = ModelSpeedup(model, dummy_input, './mask')
ms.speedup_model()
model(dummy_input)
|
414373
|
import re
class Main:
def __init__(self):
self.t = int(input())
for i in range(0, self.t):
try:
self.s = re.compile(input())
except re.error:
print("False")
else:
print("True")
if __name__ == '__main__':
obj = Main()
|
414376
|
import torch
import torchtestcase
import unittest
from survae.tests.nn import ModuleTest
from survae.nn.nets import MultiscaleDenseNet
class DenseNetTest(ModuleTest):
def test_layer_is_well_behaved(self):
for gated_conv in [False, True]:
with self.subTest(gated_conv=gated_conv):
x = torch.randn(10, 3, 8, 8)
module = MultiscaleDenseNet(in_channels=3, out_channels=6, num_scales=2, num_blocks=1, mid_channels=12,
depth=2, growth=4, dropout=0.0, gated_conv=gated_conv, zero_init=False)
self.assert_layer_is_well_behaved(module, x)
def test_zero_init(self):
x = torch.randn(10, 3, 8, 8)
module = MultiscaleDenseNet(in_channels=3, out_channels=6, num_scales=2, num_blocks=1, mid_channels=12,
depth=2, growth=4, dropout=0.0, gated_conv=False, zero_init=True)
y = module(x)
self.assertEqual(y, torch.zeros(10, 6, 8, 8))
if __name__ == '__main__':
unittest.main()
|
414410
|
import tempfile
import shutil
import os.path
def pytest_funcarg__temp_dir(request):
dir = tempfile.mkdtemp()
print(dir)
def cleanup():
shutil.rmtree(dir)
request.addfinalizer(cleanup)
return dir
def test_osfiles(temp_dir):
os.mkdir(os.path.join(temp_dir, 'a'))
os.mkdir(os.path.join(temp_dir, 'b'))
dir_contents = os.listdir(temp_dir)
assert len(dir_contents) == 2
assert 'a' in dir_contents
assert 'b' in dir_contents
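# Hedged sketch: pytest_funcarg__* above is the legacy funcarg protocol
# (removed in pytest 4). The same resource reads as follows with the current
# fixture API, where `yield` replaces addfinalizer for tear-down
# (temp_dir_fixture is a hypothetical name chosen to avoid clashing):
import pytest

@pytest.fixture
def temp_dir_fixture():
    d = tempfile.mkdtemp()
    yield d
    shutil.rmtree(d)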
|
414424
|
from crawl import *
from settings import END_DATE, START_DATE, KEYWORD, XLSX_PATH
start = START_DATE
end = END_DATE
keyword = KEYWORD
driver = webdriver.Chrome(WEB_DRIVER_PATH)
dates = []
titles = []
texts = []
# Fetch the posting URLs for the keyword between the search start and end dates
blog_posting_urls = get_blog_posting_urls(keyword, start, end, driver)
# Fetch the date, text, and title of each blog posting
for posting_addr in blog_posting_urls:
date = get_element(DATE, posting_addr, driver)
dates.append(date)
text = get_element(TEXT, posting_addr, driver)
texts.append(text)
title = get_element(TITLE, posting_addr, driver)
titles.append(title)
# Save to XLSX_PATH
save_xlsx(XLSX_PATH, KEYWORD, KEYWORD, dates, titles, texts)
|
414426
|
import numpy as np
import scipy.sparse as sp
import hetu as ht
from hetu.communicator.mpi_nccl_comm import ncclDataType_t, ncclRedOp_t
import math
import time
import argparse
'''
Usage example: (in Dir Hetu/)
Original graph data:
Single GPU:
mpirun -quiet --allow-run-as-root -np 1 python tests/test_DistGCN/test_model_distGCN15d.py --replication 1 --dataset Reddit
Multiple GPU:
mpirun -quiet --allow-run-as-root -np 8 python tests/test_DistGCN/test_model_distGCN15d.py --replication 2 --dataset Reddit
Reordered graph data:
Single GPU:
mpirun -quiet --allow-run-as-root -np 1 python tests/test_DistGCN/test_model_distGCN15d.py --replication 1 --dataset Reddit --reorder 1 --reorder_alg metis
Multiple GPU:
mpirun -quiet --allow-run-as-root -np 8 python tests/test_DistGCN/test_model_distGCN15d.py --replication 2 --dataset Reddit --reorder 1 --reorder_alg metis
'''
def row_num(node_count, rank, size):
n_per_proc = math.ceil(float(node_count) / size)
if(node_count % size == 0):
return int(node_count/size)
if(rank < size-1):
return int(n_per_proc)
else:
return int(node_count % n_per_proc)
def col_num(node_count, replication, rank):
rank_col = rank % replication # j
col_block = math.ceil(float(node_count) / replication)
col_start = int(col_block*rank_col)
col_end = int(col_block*(rank_col+1))
if col_end > node_count:
col_end = node_count
return col_end-col_start
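# Worked example (hypothetical sizes): with node_count=10, size=4 and
# replication=2 there are size // replication = 2 process rows and 2 column
# blocks, so row_num(10, rank // replication, size // replication) == 5 rows
# per process row and col_num(10, 2, rank=3) == 5 columns (block 5..10).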
def convert_to_one_hot(vals, max_val=0):
"""Helper method to convert label array to one-hot array."""
if max_val == 0:
max_val = vals.max() + 1
one_hot_vals = np.zeros((vals.size, max_val))
one_hot_vals[np.arange(vals.size), vals] = 1
return one_hot_vals
def get_proc_groups(size, replication):
if replication == 1:
return None, None, None, None
row_procs = []
for i in range(0, size, replication):
row_procs.append(list(range(i, i + replication)))
col_procs = []
for i in range(replication):
col_procs.append(list(range(i, size, replication)))
row_groups = []
for i in range(len(row_procs)):
row_groups.append(ht.new_group_comm(row_procs[i]))
col_groups = []
for i in range(len(col_procs)):
col_groups.append(ht.new_group_comm(col_procs[i]))
return row_procs, col_procs, row_groups, col_groups
def load_data(args, size, replication, rank):
print("Loading data for rank %d..." % rank)
dataset = args.dataset
reorder_alg = args.reorder_alg
dir_name = "./tests/test_DistGCN/data_GCN15d/%s_size_%d_rep_%d/" % (
dataset, size, replication)
if args.reorder:
dir_name = "./tests/test_DistGCN/data_GCN15d_reorder/%s/%s_size_%d_rep_%d/" % (
reorder_alg, dataset, size, replication)
adj_part = sp.load_npz(dir_name+"adj_part"+str(rank)+".npz")
data_part, row_part, col_part = adj_part.data, adj_part.row, adj_part.col
input_part = np.load(dir_name+"input"+str(rank)+".npy")
label_part = np.load(dir_name+"label"+str(rank)+".npy")
print("Data loading done for rank %d." % rank)
return adj_part, data_part, row_part, col_part, input_part, label_part
def load_data_whole(args):
dataset = args.dataset
reorder_alg = args.reorder_alg
print("Loading dataset %s ..." % dataset)
dir_name = "./tests/test_DistGCN/data_GCN15d/%s_whole_graph/" % (dataset)
if args.reorder:
dir_name = "./tests/test_DistGCN/data_GCN15d_reorder/%s/%s_whole_graph/" % (
reorder_alg, dataset)
adj_whole = sp.load_npz(dir_name+"adj_whole.npz")
adj_whole = adj_whole.tocoo()
data_whole, row_whole, col_whole = adj_whole.data, adj_whole.row, adj_whole.col
input_whole = np.load(dir_name+"input_whole.npy")
label_whole = np.load(dir_name+"label_whole.npy")
print("Data loading done for dataset %s." % dataset)
return adj_whole, data_whole, row_whole, col_whole, input_whole, label_whole
def test(args):
comm = ht.wrapped_mpi_nccl_init()
device_id = comm.dev_id
rank = comm.rank
size = comm.nrank
dataset_info = {'Reddit': [232965, 602, 41], 'Proteins': [
132534, 602, 8], 'Arch': [1644228, 602, 10], 'Products': [2449029, 100, 47]}
node_count, num_features, num_classes = dataset_info[args.dataset]
hidden_layer_size = 128
if num_features < 128:
hidden_layer_size = 64
replication = args.replication
node_Count_Self = row_num(
node_count, rank//replication, size // replication)
node_Count_All = node_count
_, _, row_groups, col_groups = get_proc_groups(size, replication)
executor_ctx = ht.gpu(device_id)
if size > 1:
adj_part, data_part, row_part, col_part, input_part, label_part = load_data(
args, size, replication, rank)
else:
adj_part, data_part, row_part, col_part, input_part, label_part = load_data_whole(
args)
adj_matrix = ht.sparse_array(
data_part, (row_part, col_part), shape=adj_part.shape, ctx=executor_ctx)
# train:val:test=6:2:2
    # Our optimization of the distributed GNN algorithm does NOT affect correctness!
# Here due to the limitation of current slice_op, data is split continuously.
# Continuous split is unfriendly for reordered graph data where nodes are already clustered.
# Specifically, training on some node clusters and testing on other clusters may cause poor test accuracy.
# The better way is to split data randomly!
train_split, test_split = 0.6, 0.8
train_node = int(train_split*node_Count_Self)
test_node = int(test_split*node_Count_Self)
A = ht.Variable(name="A", trainable=False)
H = ht.Variable(name="H")
np.random.seed(123)
bounds = np.sqrt(6.0 / (num_features + hidden_layer_size))
W1_val = np.random.uniform(
low=-bounds, high=bounds, size=[num_features, hidden_layer_size]).astype(np.float32)
W1 = ht.Variable(name="W1", value=W1_val)
bounds = np.sqrt(6.0 / (num_classes + hidden_layer_size))
np.random.seed(123)
W2_val = np.random.uniform(
low=-bounds, high=bounds, size=[hidden_layer_size, num_classes]).astype(np.float32)
W2 = ht.Variable(name="W2", value=W2_val)
y_ = ht.Variable(name="y_")
z = ht.distgcn_15d_op(A, H, W1, node_Count_Self, node_Count_All,
size, replication, device_id, comm, [row_groups, col_groups], True)
H1 = ht.relu_op(z)
y = ht.distgcn_15d_op(A, H1, W2, node_Count_Self, node_Count_All,
size, replication, device_id, comm, [row_groups, col_groups], True)
y_train = ht.slice_op(y, (0, 0), (train_node, num_classes))
label_train = ht.slice_op(y_, (0, 0), (train_node, num_classes))
y_test = ht.slice_op(
y, (test_node, 0), (node_Count_Self-test_node, num_classes))
label_test = ht.slice_op(
y_, (test_node, 0), (node_Count_Self-test_node, num_classes))
loss = ht.softmaxcrossentropy_op(y_train, label_train)
loss_test = ht.softmaxcrossentropy_op(y_test, label_test)
opt = ht.optim.AdamOptimizer()
train_op = opt.minimize(loss)
executor = ht.Executor([loss, y, loss_test, train_op], ctx=executor_ctx)
feed_dict = {
A: adj_matrix,
H: ht.array(input_part, ctx=executor_ctx),
y_: ht.array(convert_to_one_hot(label_part, max_val=num_classes), ctx=executor_ctx),
}
epoch_num = 100
epoch_all, epoch_0 = 0, 0
for i in range(epoch_num):
epoch_start_time = time.time()
results = executor.run(feed_dict=feed_dict)
loss = results[0].asnumpy().sum()
y_out = results[1]
loss_test = results[2].asnumpy().sum()
epoch_end_time = time.time()
epoch_time = epoch_end_time-epoch_start_time
epoch_all += epoch_time
if i == 0:
epoch_0 = epoch_time
print("[Epoch: %d, Rank: %d] Epoch time: %.3f, Total time: %.3f" %
(i, rank, epoch_time, epoch_all))
y_out_train, y_predict = y_out.asnumpy().argmax(
axis=1)[:train_node], y_out.asnumpy().argmax(axis=1)[test_node:]
label_train, label_test = label_part[:
train_node], label_part[test_node:]
train_acc = ht.array(
np.array([(y_out_train == label_train).sum()]), ctx=executor_ctx)
test_acc = ht.array(
np.array([(y_predict == label_test).sum()]), ctx=executor_ctx)
train_loss = ht.array(np.array([loss]), ctx=executor_ctx)
test_loss = ht.array(np.array([loss_test]), ctx=executor_ctx)
if replication > 1:
col_groups[rank % replication].dlarrayNcclAllReduce(
test_acc, test_acc, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
col_groups[rank % replication].dlarrayNcclAllReduce(
test_loss, test_loss, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
col_groups[rank % replication].dlarrayNcclAllReduce(
train_acc, train_acc, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
col_groups[rank % replication].dlarrayNcclAllReduce(
train_loss, train_loss, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
else:
comm.dlarrayNcclAllReduce(
test_acc, test_acc, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
comm.dlarrayNcclAllReduce(
test_loss, test_loss, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
comm.dlarrayNcclAllReduce(
train_acc, train_acc, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
comm.dlarrayNcclAllReduce(
train_loss, train_loss, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum)
test_acc = float(test_acc.asnumpy()[0]) / \
(node_count-test_split*node_count)
test_loss = test_loss.asnumpy()[0]/(node_count-test_split*node_count)
train_acc = float(train_acc.asnumpy()[0])/(train_split*node_count)
train_loss = train_loss.asnumpy()[0]/(train_split*node_count)
if rank == 0:
print("[Epoch: %d] Train Loss: %.3f, Train Accuracy: %.3f, Test Loss: %.3f, Test Accuracy: %.3f"
% (i, train_loss, train_acc, test_loss, test_acc))
avg_epoch_time = (epoch_all-epoch_0)/(epoch_num-1)
results = ht.array(np.array([epoch_all, avg_epoch_time]), ctx=executor_ctx)
comm.dlarrayNcclAllReduce(
results, results, ncclDataType_t.ncclFloat32, reduceop=ncclRedOp_t.ncclSum)
results = results.asnumpy()/size
if rank == 0:
print("\nAverage Total Time: %.3f, Average Epoch Time: %.3f" %
(results[0], results[1]))
def get_dataset(args):
if args.dataset in ['Reddit', 'reddit']:
args.dataset = 'Reddit'
elif args.dataset in ['Proteins', 'proteins']:
args.dataset = 'Proteins'
elif args.dataset in ['Arch', 'arch']:
args.dataset = 'Arch'
elif args.dataset in ['Products', 'products']:
args.dataset = 'Products'
else:
print("Dataset should be in ['Reddit','Proteins','Arch','Products']")
assert False
parser = argparse.ArgumentParser()
parser.add_argument('--replication', type=int, default=1,
help='Replication of distGCN1.5D.')
parser.add_argument('--reorder', type=int, default=0,
help='Reorder graph or not.')
parser.add_argument('--reorder_alg', type=str, default="metis",
help='Graph reordering algorithm [rcm, metis, slashburn, deg].')
parser.add_argument('--dataset', type=str, default="Reddit",
help='Choose dataset [Reddit, Proteins, Arch, Products].')
args = parser.parse_args()
get_dataset(args)
test(args)
|
414486
|
from .if_else import if_else
from ramda.inc import inc
from ramda.dec import dec
from ramda.private.asserts import assert_equal
def positive(x):
return x > 0
def if_else_nocurry_test():
assert_equal(if_else(positive, inc, dec, 5), 6)
assert_equal(if_else(positive, inc, dec, -5), -6)
def if_else_curry_test():
inc_away_from_zero = if_else(positive, inc, dec)
assert_equal(inc_away_from_zero(5), 6)
assert_equal(inc_away_from_zero(-5), -6)
|
414498
|
from typing import Tuple, Union
import numpy as np
from PIL import Image
from scipy.linalg import solve
class RandomBetaAffine:
"""Apply a random affine transform on a PIL image
using a Beta distribution."""
def __init__(
self,
max_offset_ratio: float = 0.2,
alpha: float = 2,
beta: float = 2,
fillcolor: Union[None, int, Tuple[int, int, int]] = None,
) -> None:
assert max_offset_ratio > 0
assert alpha > 0
assert beta > 0
self.max_offset_ratio = max_offset_ratio
self.alpha = alpha
self.beta = beta
self.fillcolor = fillcolor
def __call__(self, img: Image.Image) -> Image.Image:
max_offset = min(img.size) * self.max_offset_ratio
z = np.random.beta(self.alpha, self.beta, size=(3, 2))
offset = ((2.0 * z - 1.0) * max_offset).astype(np.float32)
w, h = img.size
src = np.asarray([(0, 0), (0, h), (w, 0)], dtype=np.float32)
dst = src + offset
affine_mat = self.get_affine_transform(src, dst)
return img.transform(
img.size,
method=Image.AFFINE,
data=affine_mat,
resample=Image.BILINEAR,
fillcolor=self.fillcolor,
)
def __repr__(self) -> str:
return (
f"vision.{self.__class__.__name__}("
f"max_offset_ratio={self.max_offset_ratio}, "
f"alpha={self.alpha}, beta={self.beta}"
f"{f', fillcolor={self.fillcolor}' if self.fillcolor else ''})"
)
@staticmethod
def get_affine_transform(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
assert src.shape == (3, 2)
assert dst.shape == (3, 2)
coeffs = np.zeros((6, 6), dtype=np.float32)
for i in [0, 1, 2]:
coeffs[i, 0:2] = coeffs[i + 3, 3:5] = src[i]
coeffs[i, 2] = coeffs[i + 3, 5] = 1
return solve(coeffs, dst.transpose().flatten())
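# Sketch of the math behind get_affine_transform (not library documentation):
# the 6x6 system is block-diagonal, encoding for each point pair
# (sx, sy) -> (dx, dy) the two equations
#     a*sx + b*sy + c = dx
#     d*sx + e*sy + f = dy
# so the returned vector (a, b, c, d, e, f) is the 6-tuple affine form that
# PIL's Image.transform accepts as its `data` argument.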
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--alpha", type=float, default=2)
parser.add_argument("--beta", type=float, default=2)
parser.add_argument("--max_offset_ratio", type=float, default=0.2)
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = RandomBetaAffine(
alpha=args.alpha, beta=args.beta, max_offset_ratio=args.max_offset_ratio
)
print(transformer)
for f in args.images:
x = Image.open(f, "r").convert("L")
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
|
414502
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
from sklearn.metrics import f1_score
from params import NUM_CLASSES
def seed_everything(seed):
"""
    Seeds basic parameters for reproducibility of results
Arguments:
seed {int} -- Number of the seed
"""
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # must stay off for deterministic runs
def save_model_weights(model, filename, verbose=1, cp_folder=""):
"""
Saves the weights of a PyTorch model
Arguments:
model {torch module} -- Model to save the weights of
filename {str} -- Name of the checkpoint
Keyword Arguments:
        verbose {int} -- Whether to display info (default: {1})
cp_folder {str} -- Folder to save to (default: {''})
"""
if verbose:
print(f"\n -> Saving weights to {os.path.join(cp_folder, filename)}\n")
torch.save(model.state_dict(), os.path.join(cp_folder, filename))
def load_model_weights(model, filename, verbose=1, cp_folder=""):
"""
Loads the weights of a PyTorch model. The exception handles cpu/gpu incompatibilities
Arguments:
model {torch module} -- Model to load the weights to
filename {str} -- Name of the checkpoint
Keyword Arguments:
        verbose {int} -- Whether to display info (default: {1})
cp_folder {str} -- Folder to load from (default: {''})
Returns:
torch module -- Model with loaded weights
"""
if verbose:
print(f"\n -> Loading weights from {os.path.join(cp_folder,filename)}\n")
try:
        model.load_state_dict(torch.load(os.path.join(cp_folder, filename)), strict=True)
except BaseException:
model.load_state_dict(
torch.load(os.path.join(cp_folder, filename), map_location="cpu"),
strict=True,
)
return model
def count_parameters(model, all=False):
"""
Count the parameters of a model
Arguments:
model {torch module} -- Model to count the parameters of
Keyword Arguments:
all {bool} -- Whether to include not trainable parameters in the sum (default: {False})
Returns:
int -- Number of parameters
"""
if all:
return sum(p.numel() for p in model.parameters())
else:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
ONE_HOT = np.eye(NUM_CLASSES)
def f1(truth, pred, threshold=0.5, avg="samples"):
"""
The f1 metric for the problem
Arguments:
truth {np array [N] or [N x C]} -- Ground truths
        pred {np array [N x C]} -- Predicted probabilities
Keyword Arguments:
threshold {float} -- Threshold for classification (default: {0.5})
avg {str} -- How to perform average in the f1 score (default: {"samples"})
Returns:
float -- f1 score
"""
if len(truth.shape) == 1:
truth = ONE_HOT[truth]
pred = (pred > threshold).astype(int)
return f1_score(truth, pred, average=avg)
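# Hedged usage sketch (assumes NUM_CLASSES >= 3 so the labels below are valid;
# ONE_HOT comes from the module above):
#   truth = np.array([0, 2, 1])
#   pred = ONE_HOT[truth] * 0.9   # one confident class per row, above threshold
#   f1(truth, pred)               # -> 1.0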
|
414513
|
import os
import unittest
import random
import string
from deen.loader import DeenPluginLoader
class TestDeenPlugins(unittest.TestCase):
plugin_categories = ['codecs',
'compressions',
'assemblies',
'hashs',
'formatters',
'misc']
def setUp(self):
self.loader = DeenPluginLoader()
def _random_str(self, length=16):
return ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(length))
def _random_bytes(self, length=16):
return os.urandom(length)
def _get_list_of_all_plugins(self):
"""Return a list of tuples of all loaded
plugins. This function can be used to get
an iterable to call functions of all
plugins."""
output = []
for category in self.plugin_categories:
plugins = getattr(self.loader, category)
for plugin in plugins:
output.append(plugin)
return output
def _all_plugins_call_func(self, func_name, data):
"""Call the process() function on all loaded
plugins."""
for plugin_name, plugin_class in self._get_list_of_all_plugins():
            if func_name not in vars(plugin_class):
# Check if the class itself
# implements the func()
# function to ignore plugins
# that inherit it from DeenPlugin.
continue
pname = plugin_name + '.' + func_name + '()'
plugin = plugin_class()
func = getattr(plugin, func_name)
if not func:
# Could not get the function.
# This should never happen...
continue
try:
processed = func(data)
except AssertionError as e:
# AssertionErrors can be ignored
# as they are kind of supposed to
# happen.
print('AssertionError in ' + pname + ': ' + str(e))
continue
except Exception as e:
# Plugins should always handle exceptions.
self.fail('Unhandled exception in ' + pname + ': ' + str(e))
else:
if not processed:
# If the plugin did not return anything,
# check whether an expected error happened.
msg = pname + ' failed without setting plugin.error'
self.assertIsNotNone(plugin.error, msg)
def test_process_bytes(self):
data = self._random_bytes(256)
self._all_plugins_call_func('process', data)
def test_process_str(self):
data = self._random_str(256)
self._all_plugins_call_func('process', data)
def test_unprocess_bytes(self):
data = self._random_bytes(256)
self._all_plugins_call_func('unprocess', data)
def test_unprocess_str(self):
data = self._random_str(256)
self._all_plugins_call_func('unprocess', data)
def test_process_unprocess_bytes(self):
data = self._random_bytes(256)
for category in ['codecs', 'compressions']:
plugins = getattr(self.loader, category)
for plugin_name, plugin_class in plugins:
plugin = plugin_class()
try:
processed = plugin.process(data)
except AssertionError:
continue
unprocessed = plugin.unprocess(processed)
msg = plugin_name + ' process-unprocessed failed'
self.assertEqual(data, unprocessed, msg)
def test_process_unprocess_bytearray(self):
data = bytearray(self._random_bytes(256))
for category in ['codecs', 'compressions']:
plugins = getattr(self.loader, category)
for plugin_name, plugin_class in plugins:
plugin = plugin_class()
try:
processed = plugin.process(data)
except AssertionError:
continue
unprocessed = plugin.unprocess(processed)
msg = plugin_name + ' process-unprocessed failed'
self.assertEqual(data, unprocessed, msg)
if __name__ == '__main__':
unittest.main()
|
414521
|
from dingus import patch
def patches(patch_values):
patcher_collection = PatcherCollection()
    for object_path, new_object in patch_values.items():
patcher_collection.add_patcher(patch(object_path, new_object))
return patcher_collection
class PatcherCollection:
def __init__(self):
self.patchers = []
def __enter__(self):
for patcher in self.patchers:
patcher.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
for patcher in self.patchers:
patcher.__exit__(exc_type, exc_value, traceback)
def add_patcher(self, patcher):
self.patchers.append(patcher)
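# Hedged usage sketch (hypothetical dotted paths; Dingus is the stub class
# from the dingus package imported above):
#   from dingus import Dingus
#   with patches({'mymodule.func_a': Dingus(), 'mymodule.func_b': Dingus()}):
#       ...  # both names are patched for the duration of the block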
|
414541
|
import ssl
import threading
import pytest
from SimpleWebSocketServer import SimpleSSLWebSocketServer
from .mock_rt_server import MockRealtimeLogbook, MockRealtimeServer
from .utils import path_to_test_resource
def server_ssl_context():
"""
Returns an SSL context for the mock RT server to use, with a self signed
certificate.
"""
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
ssl_context.load_cert_chain(
path_to_test_resource("dummy_cert"),
keyfile=path_to_test_resource("dummy_key"),
password=lambda: "<PASSWORD>",
)
return ssl_context
@pytest.fixture()
def mock_server():
"""
Fixture for creating a mock RT server. The server is designed
to behave very similarly to the actual RT server, but returns
dummy responses to most messages.
The server runs in a background thread and is cleaned up as part of the
fixture's tear-down step.
Yields:
tests.mock_rt_server.MockRealtimeLogbook: An object used to record
information about the messages received and sent by the mock server.
"""
logbook = MockRealtimeLogbook()
logbook.url = "wss://127.0.0.1:8765/v2"
MockRealtimeServer.logbook = logbook
server = SimpleSSLWebSocketServer(
"127.0.0.1", 8765, MockRealtimeServer, ssl_context=server_ssl_context()
)
server_should_stop = False
def server_runner():
while not server_should_stop:
server.serveonce()
thread = threading.Thread(name="server_runner", target=server_runner)
thread.daemon = True
thread.start()
yield logbook
server_should_stop = True
thread.join(timeout=60.0)
assert (
not thread.is_alive()
) # check if the join timed out (this should never happen)
|
414560
|
import datetime
import pkg_resources
import sphinx_material
html_theme = 'sphinx_material'
html_theme_path = sphinx_material.html_theme_path()
html_context = sphinx_material.get_html_context()
html_sidebars = {
"**": ["globaltoc.html", "searchbox.html"]
}
html_theme_options = {
'base_url': 'http://aiorabbit.readthedocs.io',
'repo_url': 'https://github.com/gmr/aiorabbit/',
'repo_name': 'aiorabbit',
'html_minify': True,
'css_minify': True,
'nav_title': 'aiorabbit',
'logo_icon': '🐇',
'globaltoc_depth': 2,
'theme_color': 'fc6600',
'color_primary': 'grey',
'color_accent': 'orange',
'version_dropdown': False
}
html_static_path = ['_static']
html_css_files = [
'css/custom.css'
]
master_doc = 'index'
project = 'aiorabbit'
release = version = pkg_resources.get_distribution(project).version
copyright = '{}, <NAME>'.format(datetime.date.today().year)
extensions = [
'sphinx.ext.autodoc',
'sphinx_autodoc_typehints',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx_material'
]
set_type_checking_flag = True
typehints_fully_qualified = True
always_document_param_types = True
typehints_document_rtype = True
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
'pamqp': ('https://pamqp.readthedocs.io/en/latest',
None)}
autodoc_default_options = {'autodoc_typehints': 'description'}
|
414571
|
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
# Misc functions
from ts_emod.views.ScenarioBrowseView import ScenarioBrowseView
from ts_repr.utils.misc_functions import delete_scenario, determine_page, get_emod_snippet_ajax, get_om_snippet_ajax
# Entry points
from ts_repr.views.IndexView import IndexView
# Browsers
from ts_repr.views.BrowseScenarioView import BrowseScenarioView
# Creation process
from ts_repr.views.creation.NewScenarioView import NewScenarioView
from ts_repr.views.creation.WeatherView import WeatherView, save_weather_data, get_weather_data
from ts_repr.views.creation.DemographicsView import DemographicsView, save_demographics_data, get_demographics_data
from ts_repr.views.creation.SpeciesView import SpeciesView, get_species_data_ajax, save_species_data
from ts_repr.views.creation.ParasiteView import ParasiteView, save_parasite_data
from ts_repr.views.creation.DetailsView import DetailsView, save_scenario_name_ajax
# Managers
from ts_repr.views.managers.ManageWeatherView import ManageWeatherView
from ts_repr.views.managers.ManageDemographicsView import ManageDemographicsView
from ts_repr.views.managers.ManageSpeciesView import ManageSpeciesView
from ts_repr.views.managers.ManageSpeciesParameterView import ManageSpeciesParameterView
from ts_repr.views.managers.ManageEMODSnippetView import ManageEMODSnippetView
from ts_repr.views.managers.ManageOMSnippetView import ManageOMSnippetView
urlpatterns = patterns('ts_repr.views',
# Entry points
url(r'^$', IndexView.as_view(), name='ts_repr.index'),
url(r'^(?P<scenario_id>\d+)/$', login_required(determine_page), name='ts_repr.determine_view'),
# Browsers
url(r'^scenario/browse2/$', login_required(BrowseScenarioView.as_view()), name='ts_repr.browse_scenario2'),
url(r'^scenario/browse/$', login_required(ScenarioBrowseView.as_view()), name='ts_repr.browse_scenario'),
# Creation process
url(r'^new/$', login_required(NewScenarioView.as_view()), name='ts_repr.new_scenario'),
url(r'^weather/(?P<scenario_id>\d+)/$', login_required(WeatherView.as_view()), name='ts_repr.weather'),
url(r'^weather/data/(?P<option_id>\d+)/$', get_weather_data, name='ts_repr.get_weather_data'),
url(r'^weather/save_data/$', save_weather_data, name='ts_repr.save_weather_data'),
url(r'^demographics/(?P<scenario_id>\d+)/$', login_required(DemographicsView.as_view()), name='ts_repr.demographics'),
url(r'^demographics/data/(?P<option_id>\d+)/$', get_demographics_data, name='ts_repr.get_demographics_data'),
url(r'^demographics/save_data/$', save_demographics_data, name='ts_repr.save_demographics_data'),
url(r'^species/(?P<scenario_id>\d+)/$', login_required(SpeciesView.as_view()), name='ts_repr.species'),
url(r'^species/data/(?P<option_id>\d+)/$', get_species_data_ajax, name='ts_repr.get_species_data'),
url(r'^species/save_data/$', save_species_data, name='ts_repr.save_species_data'),
url(r'^parasite/(?P<scenario_id>\d+)/$', login_required(ParasiteView.as_view()), name='ts_repr.parasite'),
#url(r'^parasite/data/(?P<option_id>\d+)/$', get_parasite_data_ajax, name='ts_repr.get_parasite_data'),
url(r'^parasite/save_data/$', save_parasite_data, name='ts_repr.save_parasite_data'),
url(r'^details/(?P<scenario_id>\d+)/$', login_required(DetailsView.as_view()), name='ts_repr.details'),
url(r'^details/save_data/$', save_scenario_name_ajax, name='ts_repr.save_scenario_name'),
# Managers
url(r'^manage/weather/$', ManageWeatherView.as_view(), name='ts_repr.manage_weather'),
url(r'^manage/demographics/$', ManageDemographicsView.as_view(), name='ts_repr.manage_demographics'),
url(r'^manage/species/$', ManageSpeciesView.as_view(), name='ts_repr.manage_species'),
url(r'^manage/species_parameter/$', ManageSpeciesParameterView.as_view(), name='ts_repr.manage_species_parameter'),
url(r'^manage/emod_snippet/$', ManageEMODSnippetView.as_view(), name='ts_repr.manage_emod_snippet'),
url(r'^manage/om_snippet/$', ManageOMSnippetView.as_view(), name='ts_repr.manage_om_snippet'),
# Other
url(r'^scenario/delete/(?P<scenario_id>\d+)/$', delete_scenario, name='ts_repr.delete_scenario'),
url(r'^emod_snippet/data/(?P<option_id>\d+)/$', get_emod_snippet_ajax, name='ts_repr.emod_snippet_data'),
url(r'^om_snippet/data/(?P<option_id>\d+)/$', get_om_snippet_ajax, name='ts_repr.om_snippet_data'),
)
|
414598
|
import torch
from torch import nn
from ret_benchmark.modeling.registry import HEADS
from ret_benchmark.utils.init_methods import weights_init_kaiming
@HEADS.register("linear_norm")
class LinearNorm(nn.Module):
def __init__(self, cfg):
super(LinearNorm, self).__init__()
self.fc = nn.Linear(cfg.MODEL.HEAD.IN_CHANNELS, cfg.MODEL.HEAD.DIM)
self.fc.apply(weights_init_kaiming)
def forward(self, x):
x = self.fc(x)
x = nn.functional.normalize(x, p=2, dim=1)
return x
|
414600
|
class Track:
count = 0
def __init__(self, track_id, centroid, bbox=None, class_id=None):
"""
Track
Parameters
----------
track_id : int
Track id.
centroid : tuple
Centroid of the track pixel coordinate (x, y).
bbox : tuple, list, numpy.ndarray
Bounding box of the track.
class_id : int
Class label id.
"""
self.id = track_id
self.class_id = class_id
Track.count += 1
self.centroid = centroid
self.bbox = bbox
self.lost = 0
self.info = dict(
max_score=0.0,
lost=0,
score=0.0,
)
|
414618
|
import sys
import time
import Quartz
class Mouse():
down = [Quartz.kCGEventLeftMouseDown, Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
up = [Quartz.kCGEventLeftMouseUp, Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
[LEFT, RIGHT, OTHER] = [0, 1, 2]
def position(self):
point = Quartz.CGEventGetLocation( Quartz.CGEventCreate(None) )
return point.x, point.y
def __mouse_event(self, type, x, y):
mouse_event = Quartz.CGEventCreateMouseEvent(None, type, (x, y), Quartz.kCGMouseButtonLeft)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, mouse_event)
def move(self, x, y):
self.__mouse_event(Quartz.kCGEventMouseMoved, x, y)
Quartz.CGWarpMouseCursorPosition((x, y))
def press(self, x, y, button=0):
event = Quartz.CGEventCreateMouseEvent(None, Mouse.down[button], (x, y), button)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=0):
event = Quartz.CGEventCreateMouseEvent(None, Mouse.up[button], (x, y), button)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def doubleClick(self, x, y, clickCount, button=0):
print("Double click event")
theEvent = Quartz.CGEventCreateMouseEvent(None, Mouse.down[button], (x, y), button)
Quartz.CGEventSetIntegerValueField(theEvent, Quartz.kCGMouseEventClickState, clickCount)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseUp)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseDown)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseUp)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
print("Double click event ended")
def click(self, button=0):
x, y = self.position()
self.press(x, y, button)
self.release(x, y, button)
def click_pos(self, x, y, button=0):
self.move(x, y)
self.click(button)
def torelative(self, x, y):
curr_pos = Quartz.CGEventGetLocation( Quartz.CGEventCreate(None) )
        x += curr_pos.x
        y += curr_pos.y
return [x, y]
def move_rel(self, x, y):
[x, y] = self.torelative(x, y)
moveEvent = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, Quartz.CGPointMake(x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, moveEvent)
def mouseEvent(self, type, posx, posy):
theEvent = Quartz.CGEventCreateMouseEvent(None, type, (posx,posy), Quartz.kCGMouseButtonLeft)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
def mousedrag(self, posx, posy):
self.mouseEvent(Quartz.kCGEventLeftMouseDragged, posx,posy)
if __name__ == '__main__':
mouse = Mouse()
if sys.platform == "darwin":
print("Current mouse position: %d:%d" % mouse.position())
mouse.move_rel(25, 16)
print("Clickingthe right button...");
mouse.move(25, 26)
time.sleep(0.05)
mouse.move(35, 26)
time.sleep(0.05)
mouse.move(40, 26)
time.sleep(0.05)
mouse.move(44, 26)
time.sleep(0.05)
mouse.move(50, 26)
time.sleep(0.05)
mouse.move(55, 26)
time.sleep(0.05)
mouse.doubleClick(1264, 416, 2, 0)
time.sleep(0.05)
mouse.click_pos(1264, 416, 1)
mouse.doubleClick(1264, 46, 2, 0)
#mouse.doubleClick(25, 26, 2, 0)
elif sys.platform == "win32":
print("Error: Platform not supported!")
|
414619
|
from msl.loadlib import Server32
class UnexpectedError(Server32):
def __init__(self, host, port, **kwargs):
# any error would be fine
x = 1/0
|
414652
|
import sys
import socket
import logging
from waitress import serve
from modules.app import App
def serve_to_lan():
    host = '0.0.0.0'  # Ubuntu WSL would bind to localhost
    port = 5000  # Cannot bind to a socket below 1024 on Unix without sudo
if sys.platform == 'win32':
host = socket.gethostbyname(socket.gethostname())
port = 80
logging.info('Serving at %s', host)
serve(App, host=host, port=port)
if __name__ == '__main__':
serve_to_lan()
|
414655
|
from itertools import islice
def sieve(n):
yield 2
multiples = set()
for p in range(3,n, 2):
if p not in multiples:
yield p
multiples.update(range(2*p, n, p))
print(sum(sieve(2000000)))  # Project Euler 10: sum of primes below two million
|
414659
|
import json
import multiprocessing
import os
import signal
import sys
import time
from functools import partial
import matplotlib as mpl
import pandas as pd
import psutil
from matplotlib import pyplot as plt
from src.preprocessing import preprocess_sample
from src.utils import matrix2answer, show_sample
from tqdm.notebook import tqdm
def sigterm_handler(_signo, _stack_frame):
sys.exit(0)
def process_file(
file_path,
PATH,
predictors,
preprocess_params=None,
color_params=None,
show_results=True,
break_after_answer=False,
queue=None,
process_whole_ds=False,
):
with open(os.path.join(PATH, file_path), "r") as file:
sample = json.load(file)
submission_list = []
sample = preprocess_sample(
sample, params=preprocess_params, color_params=color_params, process_whole_ds=process_whole_ds
)
signal.signal(signal.SIGTERM, sigterm_handler)
for predictor in predictors:
try:
submission_list = []
result, answer = predictor(sample)
if result == 0:
if show_results:
show_sample(sample)
for j in range(len(answer)):
answers = set()
for k in range(len(answer[j])):
if answer[j][k] is None:
continue
str_answer = matrix2answer(answer[j][k])
if str_answer not in answers:
if show_results and k < 3:
plt.matshow(answer[j][k], cmap="Set3", norm=mpl.colors.Normalize(vmin=0, vmax=9))
plt.show()
print(file_path, str_answer)
answers.add(str_answer)
submission_list.append({"output_id": file_path[:-5] + "_" + str(j), "output": str_answer})
if queue is not None:
queue.put(json.dumps(submission_list))
if break_after_answer:
break
except SystemExit:
break
time.sleep(3)
return
def run_parallel(
files_list,
PATH,
predictors,
preprocess_params=None,
color_params=None,
show_results=True,
break_after_answer=False,
processes=20,
timeout=300,
max_memory_by_process=1.4e10,
process_whole_ds=False,
):
process_list = []
timing_list = []
queue = multiprocessing.Queue(10000)
func = partial(
process_file,
PATH=PATH,
predictors=predictors,
preprocess_params=preprocess_params,
color_params=color_params,
show_results=show_results,
break_after_answer=break_after_answer,
queue=queue,
process_whole_ds=process_whole_ds,
)
result = []
with tqdm(total=len(files_list)) as pbar:
num_finished_previous = 0
try:
while True:
num_finished = 0
for process, start_time in zip(process_list, timing_list):
if process.is_alive():
if time.time() - start_time > timeout:
process.terminate()
while not queue.empty():
result = result + json.loads(queue.get())
process.join(10)
print("Time out. The process is killed.")
num_finished += 1
else:
process_data = psutil.Process(process.pid)
if process_data.memory_info().rss > max_memory_by_process:
process.terminate()
while not queue.empty():
result = result + json.loads(queue.get())
process.join(10)
print("Memory limit is exceeded. The process is killed.")
num_finished += 1
else:
num_finished += 1
if num_finished == len(files_list):
pbar.update(len(files_list) - num_finished_previous)
time.sleep(0.1)
break
elif len(process_list) - num_finished < processes and len(process_list) < len(files_list):
p = multiprocessing.Process(target=func, args=(files_list[len(process_list)],))
process_list.append(p)
timing_list.append(time.time())
p.start()
pbar.update(num_finished - num_finished_previous)
num_finished_previous = num_finished
while not queue.empty():
result = result + json.loads(queue.get())
time.sleep(0.1)
except KeyboardInterrupt:
for process in process_list:
process.terminate()
process.join(5)
print("Got Ctrl+C")
except Exception as error:
for process in process_list:
process.terminate()
process.join(5)
print(f"Function raised {error}")
return result
def generate_submission(predictions_list, sample_submission_path="data/sample_submission.csv"):
submission = pd.read_csv(sample_submission_path).to_dict("records")
initial_ids = {data["output_id"] for data in submission}
new_submission = []
ids = {data["output_id"] for data in predictions_list}
for output_id in ids:
predicts = list({data["output"] for data in predictions_list if data["output_id"] == output_id})
output = " ".join(predicts[:3])
new_submission.append({"output_id": output_id, "output": output})
for output_id in initial_ids:
        if output_id not in ids:
new_submission.append({"output_id": output_id, "output": ""})
return pd.DataFrame(new_submission)
def combine_submission_files(list_of_dfs, sample_submission_path="data/sample_submission.csv"):
submission = pd.read_csv(sample_submission_path)
list_of_outputs = []
for df in list_of_dfs:
list_of_outputs.append(df.sort_values(by="output_id")["output"].astype(str).values)
merge_output = []
for i in range(len(list_of_outputs[0])):
list_of_answ = [
[x.strip() for x in output[i].strip().split(" ") if x.strip() != ""] for output in list_of_outputs
]
list_of_answ = [x for x in list_of_answ if len(x) != 0]
total_len = len(list({item for sublist in list_of_answ for item in sublist}))
print(total_len)
        # trim candidate answers until at most 3 unique strings remain,
        # dropping from the last submission first
        while total_len > 3:
for j in range(1, len(list_of_answ) + 1):
if len(list_of_answ[-j]) > (j > len(list_of_answ) - 3):
list_of_answ[-j] = list_of_answ[-j][:-1]
break
total_len = len(list({item for sublist in list_of_answ for item in sublist}))
o = list({item for sublist in list_of_answ for item in sublist})
answer = " ".join(o[:3]).strip()
        while answer.find("  ") > 0:
            answer = answer.replace("  ", " ")
merge_output.append(answer)
submission["output"] = merge_output
submission["output"] = submission["output"].astype(str)
return submission
|
414664
|
from numpy import arcsin, exp, pi, sqrt
from ....Classes.Arc1 import Arc1
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW28
A SlotW28 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha = float(arcsin(self.W0 / (2 * Rbo)))
slot_pitch = 2 * pi / self.Zs
# comp point coordinate (in complex)
Z0 = Rbo * exp(1j * 0)
Z8 = Z0 * exp(-1j * alpha)
if self.is_outwards():
Z7 = Z8 + self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
# Z7 = x7 + 1j*y7
# Z6 = x + 1j * W3/2
# C2,Z6 _|_ Z6,Z5 => Re(C2) = Re(Z6)
# ||Z6,zc2|| = R1 => Zc2 = x + 1j*(W3/2+R1)
# ||Z7,zc2||² = R1² => (x7-x)²+ (y7-(W3/2+R1))² = R1²
# x² - 2*x7 x + (x7²+(y7-(W3/2+R1))²-R1²) = 0
# D = 4*x7² - 4*(x7²+(y7-(W3/2+R1))²-R1²) = -4((y7-(W3/2+R1))²-R1²)
# x = x7 + sqrt(-4((y7-(W3/2+R1))²-R1²))/2
Z6 = (
Z7.real
+ sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 + self.H3
rot_sign = 1
else: # inward slot
Z7 = Z8 - self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
Z6 = (
Z7.real
- sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 - self.H3
rot_sign = -1
# Tooth ref to slot
Z1, Z2, Z3, Z4 = (
Z8 * exp(-1j * slot_pitch / 2),
Z7 * exp(-1j * slot_pitch / 2),
Z6 * exp(-1j * slot_pitch / 2),
Z5 * exp(-1j * slot_pitch / 2),
)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
# symetry
point_dict["Z5"] = Z4.conjugate()
point_dict["Z6"] = Z3.conjugate()
point_dict["Z7"] = Z2.conjugate()
point_dict["Z8"] = Z1.conjugate()
# Center
A = Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards())
point_dict["Zc1"] = A.get_center()
point_dict["Zc2"] = (point_dict["Z4"] + point_dict["Z5"]) / 2
point_dict["Zc3"] = point_dict["Zc1"].conjugate()
return point_dict
|
414676
|
import string
import random
from typing import List
def generate_id(length=8):
# helper function for generating an id
return ''.join(random.choices(string.ascii_uppercase, k=length))
class SupportTicket:
def __init__(self, customer, issue):
self.id = generate_id()
self.customer = customer
self.issue = issue
class CustomerSupport:
def __init__(self, processing_strategy: str = "fifo"):
self.tickets = []
self.processing_strategy = processing_strategy
def create_ticket(self, customer, issue):
self.tickets.append(SupportTicket(customer, issue))
def process_tickets(self):
# if it's empty, don't do anything
if len(self.tickets) == 0:
print("There are no tickets to process. Well done!")
return
if self.processing_strategy == "fifo":
for ticket in self.tickets:
self.process_ticket(ticket)
elif self.processing_strategy == "filo":
for ticket in reversed(self.tickets):
self.process_ticket(ticket)
elif self.processing_strategy == "random":
list_copy = self.tickets.copy()
random.shuffle(list_copy)
for ticket in list_copy:
self.process_ticket(ticket)
def process_ticket(self, ticket: SupportTicket):
print("==================================")
print(f"Processing ticket id: {ticket.id}")
print(f"Customer: {ticket.customer}")
print(f"Issue: {ticket.issue}")
print("==================================")
# create the application
app = CustomerSupport("filo")
# register a few tickets
app.create_ticket("<NAME>", "My computer makes strange sounds!")
app.create_ticket("<NAME>", "I can't upload any videos, please help.")
app.create_ticket("<NAME>", "VSCode doesn't automatically solve my bugs.")
# process the tickets
app.process_tickets()
|
414746
|
import numpy as np
import sys
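# Compare two saved .npy arrays given on the command line: print the summed
# absolute difference and the indices where the values differ by more than 0.01.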
a = np.load(sys.argv[1])
b = np.load(sys.argv[2])
print("Difference: ",np.sum(np.abs(a.squeeze()-b.squeeze())))
print(np.where(np.abs(a-b) > 0.01))
|
414771
|
from setuptools import setup
setup(name='jsonstreamer',
version='1.3.6',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/kashifrazzaqui/json-streamer',
description='Provides a SAX-like push parser which works with partial json fragments. '
'Also provides an ObjectStreamer that emits key-value pairs or array elements of the `root` json object/array.'
                ' Based on the fast C library yajl',
packages=['jsonstreamer', 'jsonstreamer.yajl'],
install_requires=['again']
)
|
414773
|
import os
from glob import glob
import h5py
import numpy as np
import torch_em
import torch_em.shallow2deep as shallow2deep
from torch_em.model import UNet2d
def prepare_shallow2deep_isbi(args, out_folder):
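    """Train the random forests required by shallow2deep on the ISBI data."""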
patch_shape_min = [1, 256, 256]
patch_shape_max = [1, 512, 512]
raw_transform = torch_em.transform.raw.normalize
label_transform = shallow2deep.BoundaryTransform(ndim=2)
shallow2deep.prepare_shallow2deep(
raw_paths=args.input, raw_key="volumes/raw",
label_paths=args.input, label_key="volumes/labels/neuron_ids_3d",
patch_shape_min=patch_shape_min, patch_shape_max=patch_shape_max,
n_forests=args.n_rfs, n_threads=args.n_threads,
output_folder=out_folder, ndim=2,
raw_transform=raw_transform, label_transform=label_transform,
)
def get_isbi_loader(args, split, rf_folder):
rf_paths = glob(os.path.join(rf_folder, "*.pkl"))
rf_paths.sort()
patch_shape = (1, 512, 512)
with h5py.File(args.input, "r") as f:
nz = f["volumes/raw"].shape[0]
if split == "train":
n_samples = 100
roi = np.s_[:nz-2, :, :]
elif split == "val":
n_samples = 5
roi = np.s_[nz-2:, :, :]
else:
raise ValueError(f"Wrong split: {split}")
raw_transform = torch_em.transform.raw.normalize
label_transform = torch_em.transform.BoundaryTransform(ndim=2)
loader = shallow2deep.get_shallow2deep_loader(
raw_paths=args.input, raw_key="volumes/raw",
label_paths=args.input, label_key="volumes/labels/neuron_ids_3d",
rf_paths=rf_paths,
batch_size=args.batch_size, patch_shape=patch_shape, rois=roi,
raw_transform=raw_transform, label_transform=label_transform,
n_samples=n_samples, ndim=2, is_seg_dataset=True, shuffle=True,
num_workers=8
)
return loader
def train_shallow2deep(args):
name = "isbi2d"
# check if we need to train the rfs for preparation
rf_folder = os.path.join("checkpoints", name, "rfs")
have_rfs = len(glob(os.path.join(rf_folder, "*.pkl"))) == args.n_rfs
if not have_rfs:
prepare_shallow2deep_isbi(args, rf_folder)
assert os.path.exists(rf_folder)
model = UNet2d(in_channels=1, out_channels=1, final_activation="Sigmoid")
train_loader = get_isbi_loader(args, "train", rf_folder)
val_loader = get_isbi_loader(args, "val", rf_folder)
dice_loss = torch_em.loss.DiceLoss()
trainer = torch_em.default_segmentation_trainer(
name, model, train_loader, val_loader, loss=dice_loss, metric=dice_loss, learning_rate=1.0e-4,
device=args.device, log_image_interval=50
)
trainer.fit(args.n_iterations)
def get_pseudolabel_loader(args, split, ckpt_name):
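    """Build a loader whose labels are pseudo-labels predicted from a trained checkpoint and a random forest."""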
patch_shape = (1, 512, 512)
with h5py.File(args.input, "r") as f:
nz = f["volumes/raw"].shape[0]
if split == "train":
n_samples = 100
roi = np.s_[:nz-2, :, :]
elif split == "val":
n_samples = 5
roi = np.s_[nz-2:, :, :]
else:
raise ValueError(f"Wrong split: {split}")
ckpt = os.path.join("./checkpoints", ckpt_name)
raw_transform = torch_em.transform.raw.normalize
    # rf trained on isbi
rf_path = "./test_data/rf_0.pkl"
ndim = 2
filter_config = None
loader = shallow2deep.get_pseudolabel_loader(
raw_paths=args.input, raw_key="volumes/raw", checkpoint=ckpt,
rf_config=(rf_path, ndim, filter_config),
batch_size=args.batch_size, patch_shape=patch_shape, rois=roi,
raw_transform=raw_transform, n_samples=n_samples,
ndim=2, is_raw_dataset=True, shuffle=True, num_workers=0
)
return loader
def train_pseudo_label(args):
name = "cremi2d-pseudo-label-isbi"
model = UNet2d(in_channels=1, out_channels=1, final_activation="Sigmoid")
train_loader = get_pseudolabel_loader(args, "train", "isbi2d")
val_loader = get_pseudolabel_loader(args, "val", "isbi2d")
dice_loss = torch_em.loss.DiceLoss()
trainer = torch_em.default_segmentation_trainer(
name, model, train_loader, val_loader, loss=dice_loss, metric=dice_loss, learning_rate=1.0e-4,
device=args.device, log_image_interval=50
)
trainer.fit(args.n_iterations)
def check_loader(args, n=4):
from torch_em.util.debug import check_loader
loader = get_isbi_loader(args, "train", "./checkpoints/isbi2d/rfs")
check_loader(loader, n)
if __name__ == "__main__":
parser = torch_em.util.parser_helper()
parser.add_argument("-p", "--pseudo_label", type=int, default=0)
parser.add_argument("--n_rfs", type=int, default=500)
parser.add_argument("--n_threads", type=int, default=32)
args = parser.parse_args()
if args.check:
check_loader(args)
elif args.pseudo_label:
train_pseudo_label(args)
else:
train_shallow2deep(args)
|
414777
|
import unittest
from unittest.mock import patch, PropertyMock
import time
import mt5_correlation.correlation as correlation
import pandas as pd
from datetime import datetime, timedelta
from test_mt5 import Symbol
import random
import os
class TestCorrelation(unittest.TestCase):
# Mock symbols. 4 Symbols, 3 visible.
mock_symbols = [Symbol(name='SYMBOL1', visible=True),
Symbol(name='SYMBOL2', visible=True),
Symbol(name='SYMBOL3', visible=False),
Symbol(name='SYMBOL4', visible=True),
Symbol(name='SYMBOL5', visible=True)]
# Start and end date for price data and mock prices: base; correlated; and uncorrelated.
start_date = None
end_date = None
price_columns = None
mock_base_prices = None
mock_correlated_prices = None
mock_uncorrelated_prices = None
def setUp(self):
"""
        Creates some price data for use in tests.
:return:
"""
# Start and end date for price data and mock price dataframes. One for: base; correlated; uncorrelated and
# different dates.
self.start_date = datetime(2021, 1, 1, 1, 5, 0)
self.end_date = datetime(2021, 1, 1, 11, 30, 0)
self.price_columns = ['time', 'close']
self.mock_base_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_uncorrelated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_different_dates = pd.DataFrame(columns=self.price_columns)
self.mock_inverse_correlated_prices = pd.DataFrame(columns=self.price_columns)
# Build the price data for the test. One price every 5 minutes for 500 rows. Base will use min for price,
# correlated will use min + 5 and uncorrelated will use random
for date in (self.start_date + timedelta(minutes=m) for m in range(0, 500*5, 5)):
self.mock_base_prices = self.mock_base_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute]]))
self.mock_correlated_prices = \
self.mock_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute + 5]]))
self.mock_uncorrelated_prices = \
self.mock_uncorrelated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, random.randint(0, 1000000)]]))
self.mock_correlated_different_dates = \
self.mock_correlated_different_dates.append(pd.DataFrame(columns=self.price_columns,
data=[[date + timedelta(minutes=100),
date.minute + 5]]))
self.mock_inverse_correlated_prices = \
self.mock_inverse_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, (date.minute + 5) * -1]]))
@patch('mt5_correlation.mt5.MetaTrader5')
def test_calculate(self, mock):
"""
Test the calculate method. Uses mock for MT5 symbols and prices.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Correlation class
cor = correlation.Correlation(monitoring_threshold=1, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as this is set as not visible. Correlations should be as follows:
# SYMBOL1:SYMBOL2 should be fully correlated (1)
# SYMBOL1:SYMBOL4 should be uncorrelated (0)
# SYMBOL1:SYMBOL5 should be negatively correlated
# SYMBOL2:SYMBOL5 should be negatively correlated
# We will not use p_value as the last set uses random numbers so p value will not be useful.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_uncorrelated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Test the output. We should have 6 rows. S1:S2 c=1, S1:S4 c<1, S1:S5 c=-1, S2:S5 c=-1. We are not checking
# S2:S4 or S4:S5
self.assertEqual(len(cor.coefficient_data.index), 6, "There should be six correlations rows calculated.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL2'), 1,
"The correlation for SYMBOL1:SYMBOL2 should be 1.")
self.assertTrue(cor.get_base_coefficient('SYMBOL1', 'SYMBOL4') < 1,
"The correlation for SYMBOL1:SYMBOL4 should be <1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL5'), -1,
"The correlation for SYMBOL1:SYMBOL5 should be -1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL2', 'SYMBOL5'), -1,
"The correlation for SYMBOL2:SYMBOL5 should be -1.")
# Monitoring threshold is 1 and we are monitoring inverse. Get filtered correlations. There should be 3 (S1:S2,
# S1:S5 and S2:S5)
self.assertEqual(len(cor.filtered_coefficient_data.index), 3,
"There should be 3 rows in filtered coefficient data when we are monitoring inverse "
"correlations.")
        # Now we aren't monitoring inverse correlations. There should only be one correlation when filtered
cor.monitor_inverse = False
self.assertEqual(len(cor.filtered_coefficient_data.index), 1,
"There should be only 1 rows in filtered coefficient data when we are not monitoring inverse "
"correlations.")
        # Now we're going to recalculate, but this time SYMBOL1:SYMBOL2 will have non overlapping dates and coefficient
# should be None. There shouldn't be a row. We should have correlations for S1:S4, S1:S5 and S4:S5
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_different_dates,
self.mock_correlated_prices, self.mock_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
self.assertEqual(len(cor.coefficient_data.index), 3, "There should be three correlations rows calculated.")
self.assertEqual(cor.coefficient_data.iloc[0, 2], 1, "The correlation for SYMBOL1:SYMBOL4 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[1, 2], 1, "The correlation for SYMBOL1:SYMBOL5 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[2, 2], 1, "The correlation for SYMBOL4:SYMBOL5 should be 1.")
# Get the price data used to calculate the coefficients for symbol 1. It should match mock_base_prices.
price_data = cor.get_price_data('SYMBOL1')
self.assertTrue(price_data.equals(self.mock_base_prices), "Price data returned post calculation should match "
"mock price data.")
def test_calculate_coefficient(self):
"""
Tests the coefficient calculation.
:return:
"""
# Correlation class
cor = correlation.Correlation()
# Test 2 correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_prices)
self.assertEqual(coefficient, 1, "Coefficient should be 1.")
# Test 2 uncorrelated sets. Set p value to 1 to force correlation to be returned.
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_uncorrelated_prices, max_p_value=1)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
        # Test 2 sets where prices don't overlap. No coefficient can be calculated.
        coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_different_dates)
        self.assertIsNone(coefficient, "Coefficient should be None.")
# Test 2 inversely correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_inverse_correlated_prices)
self.assertEqual(coefficient, -1, "Coefficient should be -1.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_get_ticks(self, mock):
"""
Test that caching works. For the purpose of this test, we can use price data rather than tick data.
        Mock 2 different sets of prices. Get three times: base, one within the cache threshold and one outside. Set 1
should match set 2 but differ from set 3.
:param mock:
:return:
"""
# Correlation class to test
cor = correlation.Correlation()
# Mock the tick data to contain 2 different sets. Then get twice. They should match as the data was cached.
mock.copy_ticks_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices]
# We need to start and stop the monitor as this will set the cache time
cor.start_monitor(interval=10, calculation_params={'from': 10, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1}, cache_time=3)
cor.stop_monitor()
# Get the ticks within cache time and check that they match
base_ticks = cor.get_ticks('SYMBOL1', None, None)
cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(base_ticks.equals(cached_ticks),
"Both sets of tick data should match as set 2 came from cache.")
# Wait 3 seconds
time.sleep(3)
# Retrieve again. This one should be different as the cache has expired.
non_cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(not base_ticks.equals(non_cached_ticks),
"Both sets of tick data should differ as cached data had expired.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_start_monitor(self, mock):
"""
Test that starting the monitor and running for 2 seconds produces two sets of coefficient history when using an
interval of 1 second.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Create correlation class. We will set a divergence threshold so that we can test status.
cor = correlation.Correlation(divergence_threshold=0.8, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
        # We don't have a SYMBOL3 as this is set as not visible. All pairs should be correlated for the purpose of this
# test.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# We will build some tick data for each symbol and patch it in. Tick data will be from 10 seconds ago to now.
# We only need to patch in one set of tick data for each symbol as it will be cached.
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s2 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
tick_data_s5 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
            tick_data_s2 = tick_data_s2.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
            tick_data_s4 = tick_data_s4.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.25]]))
            tick_data_s5 = tick_data_s5.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * -0.25]]))
starttime = starttime + timedelta(milliseconds=10*random.randint(0, 100))
price_base += 1
# Patch it in
mock.copy_ticks_range.side_effect = [tick_data_s1, tick_data_s2, tick_data_s4, tick_data_s5]
        # Start the monitor. Run every second. Use ~10 and ~5 seconds of data. We're not testing the overlap and price
# data quality metrics here as that is set elsewhere so these can be set to not take effect. Set cache level
# high and don't use autosave. Timer runs in a separate thread so test can continue after it has started.
cor.start_monitor(interval=1, calculation_params=[{'from': 0.66, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1},
{'from': 0.33, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1}], cache_time=100, autosave=False)
# Wait 2 seconds so timer runs twice
time.sleep(2)
# Stop the monitor
cor.stop_monitor()
# We should have 2 coefficients calculated for each symbol pair (6), for each date_from value (2),
# for each run (2) so 24 in total.
self.assertEqual(len(cor.coefficient_history.index), 24)
# We should have 2 coefficients calculated for a single symbol pair and timeframe
self.assertEqual(len(cor.get_coefficient_history({'Symbol 1': 'SYMBOL1', 'Symbol 2': 'SYMBOL2',
'Timeframe': 0.66})),
2, "We should have 2 history records for SYMBOL1:SYMBOL2 using the 0.66 min timeframe.")
# The status should be DIVERGED for SYMBOL1:SYMBOL2 and CORRELATED for SYMBOL1:SYMBOL4 and SYMBOL2:SYMBOL4.
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL2') == correlation.STATUS_DIVERGED)
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL4') == correlation.STATUS_CORRELATED)
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL4') == correlation.STATUS_CORRELATED)
# We are monitoring inverse correlations, status for SYMBOL1:SYMBOL5 should be DIVERGED
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL5') == correlation.STATUS_DIVERGED)
@patch('mt5_correlation.mt5.MetaTrader5')
def test_load_and_save(self, mock):
"""Calculate and run monitor for a few seconds. Store the data. Save it, load it then compare against stored
data."""
# Correlation class
cor = correlation.Correlation()
# Patch symbol and price data, then calculate
mock.symbols_get.return_value = self.mock_symbols
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Patch the tick data
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s3 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
            tick_data_s3 = tick_data_s3.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
            tick_data_s4 = tick_data_s4.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.25]]))
starttime = starttime + timedelta(milliseconds=10 * random.randint(0, 100))
price_base += 1
mock.copy_ticks_range.side_effect = [tick_data_s1, tick_data_s3, tick_data_s4]
        # Start monitor and run for a couple of seconds with a 1 second interval to produce some coefficient history. Then stop
# the monitor
cor.start_monitor(interval=1, calculation_params={'from': 0.66, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1},
cache_time=100, autosave=False)
time.sleep(2)
cor.stop_monitor()
# Get copies of data that will be saved.
cd_copy = cor.coefficient_data
pd_copy = cor.get_price_data('SYMBOL1')
mtd_copy = cor.get_ticks('SYMBOL1', cache_only=True)
ch_copy = cor.coefficient_history
# Save, reset data, then reload
cor.save("unittest.cpd")
cor.load("unittest.cpd")
# Test that the reloaded data matches the original
self.assertTrue(cd_copy.equals(cor.coefficient_data),
"Saved and reloaded coefficient data should match original.")
self.assertTrue(pd_copy.equals(cor.get_price_data('SYMBOL1')),
"Saved and reloaded price data should match original.")
self.assertTrue(mtd_copy.equals(cor.get_ticks('SYMBOL1', cache_only=True)),
"Saved and reloaded tick data should match original.")
self.assertTrue(ch_copy.equals(cor.coefficient_history),
"Saved and reloaded coefficient history should match original.")
# Cleanup. delete the file
os.remove("unittest.cpd")
@patch('mt5_correlation.correlation.Correlation.coefficient_data', new_callable=PropertyMock)
def test_diverged_symbols(self, mock):
"""
Test that diverged_symbols property correctly groups symbols and counts.
:param mock:
:return:
"""
# Correlation class
cor = correlation.Correlation()
        # Mock the correlation data. Symbol 1 has diverged 3 times; symbol 2 has diverged twice; symbol 3 has
        # diverged once; symbol 4 has diverged twice and symbol 5 has not diverged at all. Use all diverged statuses
        # (diverged, diverging & converging). Also add a row for a non diverged pair.
mock.return_value = pd.DataFrame(columns=['Symbol 1', 'Symbol 2', 'Status'], data=[
['SYMBOL1', 'SYMBOL2', correlation.STATUS_DIVERGED],
['SYMBOL1', 'SYMBOL3', correlation.STATUS_DIVERGING],
['SYMBOL1', 'SYMBOL4', correlation.STATUS_CONVERGING],
['SYMBOL2', 'SYMBOL4', correlation.STATUS_DIVERGED],
['SYMBOL2', 'SYMBOL3', correlation.STATUS_CORRELATED]])
# Get the diverged_symbols data and check the counts
diverged_symbols = cor.diverged_symbols
self.assertEqual(diverged_symbols.loc[(diverged_symbols['Symbol'] == 'SYMBOL1')]['Count'].iloc[0], 3,
"Symbol 1 has diverged three times.")
self.assertEqual(diverged_symbols.loc[(diverged_symbols['Symbol'] == 'SYMBOL2')]['Count'].iloc[0], 2,
"Symbol 2 has diverged twice.")
self.assertEqual(diverged_symbols.loc[(diverged_symbols['Symbol'] == 'SYMBOL3')]['Count'].iloc[0], 1,
"Symbol 3 has diverged once.")
self.assertEqual(diverged_symbols.loc[(diverged_symbols['Symbol'] == 'SYMBOL4')]['Count'].iloc[0], 2,
"Symbol 4 has diverged twice.")
self.assertFalse('SYMBOL5' in diverged_symbols['Symbol'])
if __name__ == '__main__':
unittest.main()
|
414784
|
import os
import numpy as np
import datetime as dt
import requests
import polyline
import folium
import time
"""
TO DO
"""
def next_best_date():
"""
Finds the next datetime which is a weekday, with a time of 9am
Returns:
(string): datetime from epoch in seconds as string
"""
date_today = dt.datetime.today()
if date_today.hour > 9:
date_today += dt.timedelta(days=1)
if date_today.weekday() == 5:
date_today += dt.timedelta(days=2)
elif date_today.weekday() == 6:
date_today += dt.timedelta(days=1)
departure = dt.datetime.combine(date_today, dt.time(9))
print('Departure date and time: ', departure.date(), departure.time())
# api takes in time in seconds from epoch
return str(int((departure-dt.datetime(1970,1,1)).total_seconds()))
def retrieve_travel_times(destination_lats, destination_lngs, \
API_key, travel_mode_, origin_lat, origin_lng, \
departure_time=next_best_date()):
"""
Retrieves time taken to travel to destination coordinates
Args:
destination_lats (list): destination latitudes
destination_lngs (list): destination longitudes
API_key (string): API key for googlemaps client
travel_mode_ (string): 'transit' or 'walking'
origin_lat (double): origin latitude
origin_lng (double): origin longitude
departure_time (string): datetime object from epoch converted to string, time to begin travelling
Returns:
destination_lats (numpy array): destination latitudes that can be travelled to
        destination_lngs (numpy array): destination longitudes that can be travelled to
travel_times (numpy array): travel times to each coordinate
"""
def _build_url(lats_subset, lngs_subset):
"""
Builds url used to retrieve data from google distancematrix api
Args:
lats_subset (list): chunk of 100 latitude coordinates
            lngs_subset (list): chunk of 100 longitude coordinates
        Returns:
            (dict): parsed JSON response containing the API data
"""
# base url for calling distance/time google api
url = "https://maps.googleapis.com/maps/api/distancematrix/json?"
# add in the origin
url += "origins=" + str(origin_lat) + "," + str(origin_lng)
# add in the destination with polyline encoding
url += "&destinations=enc:" + polyline.encode(
[(lat,lng) for lat,lng in zip(lats_subset, lngs_subset)],5) + ":"
url += "&key=" + API_key
url += "&mode=" + travel_mode_
url += "&departure_time=" + departure_time
return requests.get(url).json()
# this 100 value is used later to multiply the chunks -> use variable instead?
    assert len(destination_lats) == len(destination_lngs), \
        'Number of latitude coordinates must equal number of longitude coordinates'
    assert (len(destination_lats)**2/100).is_integer(), \
        'Total number of coordinates must be divisible by 100, number of coordinates is {}'.format(len(destination_lats)**2)
# we are limited to 100 requests per api so must split up our coordinates
lats_list = np.split(destination_lats, len(destination_lats)//100)
lngs_list = np.split(destination_lngs, len(destination_lngs)//100)
# loop over coordinates to retrieve all data
all_travel_times = []
all_bad_indices = []
for chunk in range(len(lats_list)):
data = _build_url(lats_list[chunk], lngs_list[chunk])
print('Chunk index: ', chunk, ' status: ', data['status'])
if chunk % 50 == 0 and chunk != 0:
            time.sleep(20)
            print('Sleeping for 20s to prevent API timeout')
travel_times = []
bad_indices = []
for index, element in enumerate(data['rows'][0]['elements']):
if element['status'] != 'OK': # some areas are inaccessible (e.g. parts of Heathrow airport, Area 51)
print(lats_list[chunk][index], lngs_list[chunk][index], element)
print('bad chunk index: {}'.format(index + chunk*100)) # return the string of this bad lat/lng with the api if we want
bad_indices.append(index + chunk*100) # must save bad indices to remove them later
else:
travel_times.append(element['duration']['value'])
all_bad_indices = np.concatenate((all_bad_indices, np.asarray(bad_indices)))
all_travel_times = np.concatenate((all_travel_times, np.asarray(travel_times)))
# delete elements with a bad status, as no information was returned for these
destination_lats = np.delete(destination_lats, np.asarray(all_bad_indices))
destination_lngs = np.delete(destination_lngs, np.asarray(all_bad_indices))
    assert len(all_travel_times) == len(destination_lats), \
        'Different number of travel times to destination coordinates'
return destination_lats, destination_lngs, all_travel_times
def generate_points(map_type_, N,
origin_coords, global_coords,
lat_multiplier=0.002, lng_multiplier=0.003):
"""
Generates destination coordinates to travel to for map
Args:
        map_type_ (string): 'local' or 'global' (see draw_local_grid and draw_global_grid subfuncs)
        N (int): N**2 is the number of destination coordinates desired
        origin_coords (dict): origin coordinates
        global_coords (dict): maximum and minimum coordinates if generating a global map
        lat_multiplier (double): distance separator for latitude for the local grid
        lng_multiplier (double): distance separator for longitude for the local grid
    Returns:
        (numpy array): latitudes of destinations to travel to
        (numpy array): longitudes of destinations to travel to
"""
def draw_local_grid(origin_lat, origin_lng):
"""
Return a lattice of N**2 points around origin coordinates
Args:
origin_lat (double): latitude of origin
origin_lng (double): longitude of origin
Returns:
(numpy array): x coordinates
(numpy array): y coordinates
"""
x = np.linspace(origin_lat - (int(N/2)*lat_multiplier), origin_lat + (int(N/2)*lat_multiplier), N)
y = np.linspace(origin_lng - (int(N/2)*lng_multiplier), origin_lng + (int(N/2)*lng_multiplier), N)
xv, yv = np.meshgrid(x, y)
return xv.flatten(), yv.flatten()
def draw_global_grid(max_lat, min_lat, max_lng, min_lng):
"""
Return a N**2 lattice for maximum/minimum coordinates given
Args:
max_lat (double): maximum latitude
min_lat (double): minimum latitude
max_lng (double): maximum longitude
min_lng (double): minimum longitude
Returns:
(numpy array): x coordinates
(numpy array): y coordinates
"""
# find a grid of points
x = np.linspace(min_lat, max_lat, N)
y = np.linspace(min_lng, max_lng, N)
xv, yv = np.meshgrid(x, y)
return xv.flatten(), yv.flatten()
if map_type_ == 'local':
destination_lats, destination_lngs = draw_local_grid(**origin_coords)
elif map_type_ == 'global':
destination_lats, destination_lngs = draw_global_grid(**global_coords)
else:
raise ValueError('Map type must be either "local" or "global"')
return destination_lats, destination_lngs
if __name__ == "__main__":
pass
""" Perform unit testing here
Need to add main folder to path otherwise will have issues import config
for unit test
"""
|
414799
|
from django.conf.urls import url
from . import views
app_name = "newsletter"
urlpatterns = [
url(r'^subscribe/(?P<user_pk>\d+)/$', views.subscribtion, name='subscribe'),
url(r'^unsubscribe/(?P<user_pk>\d+)/$', views.subscribtion, name='unsubscribe'),
]
|
414815
|
from multiprocessing.shared_memory import SharedMemory
import mmap
import os
import errno
import secrets
if os.name == "nt":
import _winapi
_USE_POSIX = False
else:
import _posixshmem
_USE_POSIX = True
_O_CREX = os.O_CREAT | os.O_EXCL
# FreeBSD (and perhaps other BSDs) limit names to 14 characters.
_SHM_SAFE_NAME_LENGTH = 14
# Shared memory block name prefix
if _USE_POSIX:
_SHM_NAME_PREFIX = '/psm_'
else:
_SHM_NAME_PREFIX = 'wnsm_'
def _make_filename():
"Create a random filename for the shared memory object."
# number of random bytes to use for name
nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
assert nbytes >= 2, '_SHM_NAME_PREFIX too long'
name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes)
assert len(name) <= _SHM_SAFE_NAME_LENGTH
return name
# This SharedMemory subclass does not free the underlying memory when it only attaches (create=False).
class ShM(SharedMemory):
def __init__(self, name=None, create=False, size=0):
if not size >= 0:
raise ValueError("'size' must be a positive integer")
if create:
self._flags = _O_CREX | os.O_RDWR
if name is None and not self._flags & os.O_EXCL:
raise ValueError("'name' can only be None if create=True")
if _USE_POSIX:
# POSIX Shared Memory
if name is None:
while True:
name = _make_filename()
try:
self._fd = _posixshmem.shm_open(
name,
self._flags,
mode=self._mode
)
except FileExistsError:
continue
self._name = name
break
else:
name = "/" + name if self._prepend_leading_slash else name
self._fd = _posixshmem.shm_open(
name,
self._flags,
mode=self._mode
)
self._name = name
try:
if create and size:
os.ftruncate(self._fd, size)
stats = os.fstat(self._fd)
size = stats.st_size
self._mmap = mmap.mmap(self._fd, size)
except OSError:
self.unlink()
raise
if create:
from multiprocessing.resource_tracker import register
register(self._name, "shared_memory")
else:
# Windows Named Shared Memory
if create:
while True:
temp_name = _make_filename() if name is None else name
# Create and reserve shared memory block with this name
# until it can be attached to by mmap.
h_map = _winapi.CreateFileMapping(
_winapi.INVALID_HANDLE_VALUE,
_winapi.NULL,
_winapi.PAGE_READWRITE,
(size >> 32) & 0xFFFFFFFF,
size & 0xFFFFFFFF,
temp_name
)
try:
last_error_code = _winapi.GetLastError()
if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
if name is not None:
raise FileExistsError(
errno.EEXIST,
os.strerror(errno.EEXIST),
name,
_winapi.ERROR_ALREADY_EXISTS
)
else:
continue
self._mmap = mmap.mmap(-1, size, tagname=temp_name)
finally:
_winapi.CloseHandle(h_map)
self._name = temp_name
break
else:
self._name = name
# Dynamically determine the existing named shared memory
# block's size which is likely a multiple of mmap.PAGESIZE.
h_map = _winapi.OpenFileMapping(
_winapi.FILE_MAP_READ,
False,
name
)
try:
p_buf = _winapi.MapViewOfFile(
h_map,
_winapi.FILE_MAP_READ,
0,
0,
0
)
finally:
_winapi.CloseHandle(h_map)
size = _winapi.VirtualQuerySize(p_buf)
self._mmap = mmap.mmap(-1, size, tagname=name)
self._size = size
self._buf = memoryview(self._mmap)
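# Minimal usage sketch (names illustrative): unlike the stdlib SharedMemory,
# attaching with create=False never registers the block with the resource
# tracker, so attach-only handles do not free the memory on cleanup.
# owner = ShM(create=True, size=1024)
# view = ShM(name=owner.name)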
|
414876
|
from .single_carla_evaluator import SingleCarlaEvaluator
from .carla_benchmark_evaluator import CarlaBenchmarkEvaluator
from .serial_evaluator import SerialEvaluator
|
414922
|
import argparse
import glob
import json
import logging
import os
from collections import ChainMap
from datetime import datetime
from typing import Optional
import pandas as pd
import torch
from allennlp.commands.train import train_model
from allennlp.common.params import Params, parse_overrides, with_fallback
from allennlp.common.util import import_module_and_submodules
import _jsonnet
from allentune.util.random_search import HyperparameterSearch
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class AllenNlpRunner(object):
name = "AllenNLP"
def get_run_func(
self,
args: argparse.Namespace,
):
if args is None:
raise ValueError("No run arguments found for AllenNLP runner.")
with open(args.base_config, "r") as parameter_f:
parameter_file_snippet = parameter_f.read()
def train_func(config, reporter):
logger.debug(f"CUDA_VISIBLE_DEVICES: {os.environ['CUDA_VISIBLE_DEVICES']}")
for package_name in getattr(args, "include_package", ()):
import_module_and_submodules(package_name)
search_space = HyperparameterSearch(**config)
sample = search_space.sample()
for k, v in sample.items():
config[k] = str(v)
params_dict = json.loads(
_jsonnet.evaluate_snippet(
"config", parameter_file_snippet, tla_codes={}, ext_vars=config
)
)
if args.num_gpus == 0:
logger.warning(f"No GPU specified, using CPU.")
params_dict["trainer"]["cuda_device"] = -1
if args.cpus_per_trial > 0:
torch.set_num_threads(args.cpus_per_trial)
params = Params(params_dict)
logger.debug(f"AllenNLP Configuration: {params.as_dict()}")
train_model(params=params, serialization_dir="trial")
reporter(done=True)
return train_func
|
414923
|
import pytest
import requests
from py42.services.storage.exfiltrateddata import ExfiltratedDataService
class TestExfiltratedDataService:
@pytest.fixture
def mock_request(self, mocker):
request = mocker.patch.object(requests, "get")
request.return_value = b"stream"
return request
def test_get_download_token_calls_get_with_valid_params(
self, mock_successful_connection
):
service = ExfiltratedDataService(
mock_successful_connection, mock_successful_connection
)
service.get_download_token("testeventid", "testdeviceid", "testfilepath", 1223)
qry = "deviceUid=testdeviceid&eventId=testeventid&filePath=testfilepath&versionTimestamp=1223"
expected = f"api/v1/file-download-token?{qry}"
mock_successful_connection.get.assert_called_once_with(
expected, headers={"Accept": "*/*"}
)
def test_get_file_calls_get_with_valid_params(
self, mock_successful_connection, mock_request
):
mock_successful_connection.host_address = "https://example.com"
service = ExfiltratedDataService(
mock_successful_connection, mock_successful_connection
)
service.get_file("testtoken")
mock_successful_connection.get.assert_called_once_with(
"https://example.com/api/v1/get-file",
headers={"Accept": "*/*"},
params={"token": "testtoken"},
stream=True,
)
|
414985
|
SPACE_TOKEN = "\u241F"
def replace_space(text: str) -> str:
return text.replace(" ", SPACE_TOKEN)
def revert_space(text: list) -> str:
clean = (
" ".join("".join(text).replace(SPACE_TOKEN, " ").split())
.strip()
)
return clean
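# Round-trip sketch: revert_space(list(replace_space("a b"))) == "a b"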
|
414990
|
from collections import defaultdict, deque, Counter
# adapted from http://pit-claudel.fr/clement/blog/an-experimental-estimation-of-the-entropy-of-english-in-50-lines-of
# -python-code/ which I used in my ECE 587 course hmwk
def markov_model(stream, model_order):
model, stats = defaultdict(Counter), Counter()
circular_buffer = deque(maxlen=model_order)
for token in stream:
prefix = tuple(circular_buffer)
circular_buffer.append(token)
if len(prefix) == model_order:
stats[prefix] += 1
model[prefix][token] += 1
return model, stats
def finish_sentence(sentence, n, corpus):
result_sentence = sentence
model, stats = markov_model(corpus, n)
curr_char = sentence[-1]
i = 0
if n > len(sentence):
return "" # just wont happen
while curr_char != '!' and curr_char != '.' and curr_char != '?':
        temp_search_str = result_sentence[-n:]  # use the last n tokens as the model prefix
        curr_char = model[tuple(temp_search_str)].most_common(1)[0][0]  # pick the most common continuation
# print(curr_char)
result_sentence.append(curr_char)
i = i + 1
        if i > 100:  # just ensure it is not an infinite loop!
break
# print(result_sentence)
# print(len(result_sentence))
return result_sentence
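# Hypothetical usage (character-level; the corpus below is illustrative, not from the source):
# corpus = list("she was not sorry. he was glad. it was fine.")
# print("".join(finish_sentence(list("she was"), 3, corpus)))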
|
415003
|
from typing import Dict
from mmdet.models import BaseDetector, TwoStageDetector
class MultiSteamDetector(BaseDetector):
def __init__(
self, model: Dict[str, TwoStageDetector], train_cfg=None, test_cfg=None
):
super(MultiSteamDetector, self).__init__()
self.submodules = list(model.keys())
for k, v in model.items():
setattr(self, k, v)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.inference_on = self.test_cfg.get("inference_on", self.submodules[0])
def model(self, **kwargs) -> TwoStageDetector:
if "submodule" in kwargs:
assert (
kwargs["submodule"] in self.submodules
), "Detector does not contain submodule {}".format(kwargs["submodule"])
model: TwoStageDetector = getattr(self, kwargs["submodule"])
else:
model: TwoStageDetector = getattr(self, self.inference_on)
return model
def freeze(self, model_ref: str):
assert model_ref in self.submodules
model = getattr(self, model_ref)
model.eval()
for param in model.parameters():
param.requires_grad = False
def forward_test(self, imgs, img_metas, **kwargs):
return self.model(**kwargs).forward_test(imgs, img_metas, **kwargs)
async def aforward_test(self, *, img, img_metas, **kwargs):
return self.model(**kwargs).aforward_test(img, img_metas, **kwargs)
def extract_feat(self, imgs):
return self.model().extract_feat(imgs)
def aug_test(self, imgs, img_metas, **kwargs):
return self.model(**kwargs).aug_test(imgs, img_metas, **kwargs)
def simple_test(self, img, img_metas, **kwargs):
return self.model(**kwargs).simple_test(img, img_metas, **kwargs)
async def async_simple_test(self, img, img_metas, **kwargs):
return self.model(**kwargs).async_simple_test(img, img_metas, **kwargs)
def show_result(self, *args, **kwargs):
self.model().CLASSES = self.CLASSES
return self.model().show_result(*args, **kwargs)
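# Usage sketch (assumes already-built teacher/student detectors; names illustrative):
# detector = MultiSteamDetector(dict(teacher=teacher, student=student),
#                               train_cfg=train_cfg, test_cfg=test_cfg)
# detector.freeze("teacher")  # e.g. keep the teacher fixed while the student trains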
|
415013
|
import os
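# Strip the leading character from every filename in the current directory,
# skipping this script itself.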
files = [x for x in os.listdir('.') if x != "rename.py"]
for src in files:
dst = src[1:]
os.rename(src, dst)
|
415050
|
import time
def elapsed_time(func):
def wrapper(*args, **kwargs):
beg_ts = time.time() * 1000
retval = func(*args, **kwargs)
end_ts = time.time() * 1000
print("Function %s - elapsed time: %.2f (ms)" % (func.__name__, end_ts - beg_ts))
return retval
return wrapper
@elapsed_time
def my_function():
time.sleep(5)
if __name__ == '__main__':
my_function()
|
415055
|
import abc
import copy
import logging
import typing as tp
import math
import re
from enum import Enum
import attr
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
RequirementSubType = tp.TypeVar("RequirementSubType", bound='Requirement')
@attr.s
class Requirement:
"""
Base class for requirements.
"""
@abc.abstractmethod
def to_option(self) -> str:
"""
        Converts the requirement to a script option.
"""
@abc.abstractmethod
def to_cli_option(self) -> str:
"""
        Converts the requirement to a command line option.
"""
@classmethod
@abc.abstractmethod
def merge_requirements(
cls: tp.Type[RequirementSubType], lhs_option: RequirementSubType,
rhs_option: RequirementSubType) -> RequirementSubType:
"""
Merge the requirements of the same type together.
"""
return type(lhs_option).merge_requirements(lhs_option, rhs_option)
################################################################################
# Slurm Requirements #
################################################################################
class SlurmRequirement(Requirement):
"""
Base class for slurm requirements.
"""
def to_option(self) -> str:
"""
        Converts the requirement to a script option.
"""
return self.to_slurm_opt()
def to_cli_option(self) -> str:
"""
Converts Requirement to a command line options.
"""
return self.to_slurm_cli_opt()
def to_slurm_opt(self) -> str:
"""
        Convert the slurm option into a script-usable option string, i.e., an
        #SBATCH option line.
"""
return f"#SBATCH {self.to_slurm_cli_opt()}"
@abc.abstractmethod
def to_slurm_cli_opt(self) -> str:
"""
Convert slurm option to command line string.
"""
@attr.s
class SlurmCoresPerSocket(SlurmRequirement):
"""
Restrict node selection to nodes with at least the specified number of
cores per socket. See additional information under -B option in the slurm
documentation. Only works when task/affinity plugin is enabled.
"""
cores: int = attr.ib()
def to_slurm_cli_opt(self) -> str:
return f"--cores-per-socket={self.cores}"
@classmethod
def merge_requirements(
cls, lhs_option: 'SlurmCoresPerSocket',
rhs_option: 'SlurmCoresPerSocket') -> 'SlurmCoresPerSocket':
"""
Merge the requirements of the same type together.
"""
return SlurmCoresPerSocket(max(lhs_option.cores, rhs_option.cores))
class SlurmExclusive(SlurmRequirement):
"""
The job allocation can not share nodes with other running jobs.
"""
def to_slurm_cli_opt(self) -> str:
return "--exclusive"
def __str__(self) -> str:
return "Run Exclusive"
def __repr__(self) -> str:
return "Exclusive"
@classmethod
def merge_requirements(cls, lhs_option: 'SlurmExclusive',
rhs_option: 'SlurmExclusive') -> 'SlurmExclusive':
"""
Merge the requirements of the same type together.
"""
return SlurmExclusive()
@attr.s
class SlurmNiceness(SlurmRequirement):
"""
Run the job with an adjusted scheduling priority within Slurm. With no
adjustment value the scheduling priority is decreased by 100. A negative
nice value increases the priority, otherwise decreases it. The adjustment
range is +/- 2147483645. Only privileged users can specify a negative
adjustment.
"""
niceness: int = attr.ib()
def to_slurm_cli_opt(self) -> str:
return f"--nice={self.niceness}"
@classmethod
def merge_requirements(cls, lhs_option: 'SlurmNiceness',
rhs_option: 'SlurmNiceness') -> 'SlurmNiceness':
"""
Merge the requirements of the same type together.
"""
if lhs_option.niceness != rhs_option.niceness:
LOG.info("Multiple different slurm niceness values specifcied, "
"choosing the smaller value.")
return SlurmNiceness(min(lhs_option.niceness, rhs_option.niceness))
@attr.s
class SlurmHint(SlurmRequirement):
"""
Bind tasks according to application hints.
* compute_bound
Select settings for compute bound applications: use all cores in
each socket, one thread per core.
* memory_bound
Select settings for memory bound applications: use only one core
in each socket, one thread per core.
* [no]multithread
[don't] use extra threads with in-core multi-threading which can
benefit communication intensive applications. Only supported with
the task/affinity plugin.
"""
class SlurmHints(Enum):
compute_bound = "compute_bound"
memory_bound = "memory_bound"
multithread = "multithread"
nomultithread = "nomultithread"
def __str__(self) -> str:
return str(self.value)
hints: tp.Set[SlurmHints] = attr.ib()
def to_slurm_cli_opt(self) -> str:
return f"--hint={','.join(map(str, self.hints))}"
def __str__(self) -> str:
return f"Hints: {','.join(map(str, self.hints))}"
def __repr__(self) -> str:
return f"Hint ({str(self)})"
@classmethod
def merge_requirements(cls, lhs_option: 'SlurmHint',
rhs_option: 'SlurmHint') -> 'SlurmHint':
"""
Merge the requirements of the same type together.
"""
combined_hints = set()
combined_hints |= lhs_option.hints | rhs_option.hints
if not cls.__hints_not_mutually_exclusive(combined_hints):
raise ValueError(
"Two mutally exclusive hints for slurm have be specified.")
return SlurmHint(combined_hints)
@staticmethod
def __hints_not_mutually_exclusive(hints: tp.Set[SlurmHints]) -> bool:
"""
        Checks that a list of `SlurmHints` does not include mutually exclusive
hints.
Returns:
            True, if no mutually exclusive hints are in the list
"""
if (SlurmHint.SlurmHints.compute_bound in hints and
SlurmHint.SlurmHints.memory_bound in hints):
return False
if (SlurmHint.SlurmHints.nomultithread in hints and
SlurmHint.SlurmHints.multithread in hints):
return False
return True
def _convert_to_time_tuple(time_specifier: str) -> tp.Tuple[int, int, int, int]:
"""
Convert slurm time specifier to tuple.
Returns:
time tuple with (days, hours, minutes, seconds)
Examples:
>>> _convert_to_time_tuple("4")
(0, 0, 4, 0)
>>> _convert_to_time_tuple("4:2")
(0, 0, 4, 2)
>>> _convert_to_time_tuple("8:4:2")
(0, 8, 4, 2)
>>> _convert_to_time_tuple("16-8")
(16, 8, 0, 0)
>>> _convert_to_time_tuple("16-8:4")
(16, 8, 4, 0)
>>> _convert_to_time_tuple("16-8:4:2")
(16, 8, 4, 2)
"""
days = 0
hours = 0
minutes = 0
seconds = 0
if time_specifier.count('-'):
with_days = True
days = int(time_specifier.split('-')[0])
time_specifier = time_specifier.split('-')[1]
else:
with_days = False
num_colon = time_specifier.count(':')
if num_colon == 0:
if with_days:
hours = int(time_specifier)
else:
minutes = int(time_specifier)
elif num_colon == 1:
if with_days:
hours = int(time_specifier.split(':')[0])
minutes = int(time_specifier.split(':')[1])
else:
minutes = int(time_specifier.split(':')[0])
seconds = int(time_specifier.split(':')[1])
elif num_colon == 2:
hours = int(time_specifier.split(':')[0])
minutes = int(time_specifier.split(':')[1])
seconds = int(time_specifier.split(':')[2])
return (days, hours, minutes, seconds)
@attr.s
class SlurmTime(SlurmRequirement):
"""
Set a limit on the total run time of the job allocation.
A time limit of zero requests that no time limit be imposed. Acceptable
time formats include "minutes", "minutes:seconds", "hours:minutes:seconds",
"days-hours", "days-hours:minutes" and "days-hours:minutes:seconds".
"""
timelimit: tp.Tuple[int, int, int,
int] = attr.ib(converter=_convert_to_time_tuple)
def to_slurm_time_format(self) -> str:
"""
        Converts the time option into slurm-compatible time format.
"""
days = self.timelimit[0]
hours = self.timelimit[1]
minutes = self.timelimit[2]
seconds = self.timelimit[3]
tmp_str = ""
if days > 0:
tmp_str += f"{days}-{hours:02d}"
if minutes > 0 or seconds > 0:
tmp_str += f":{minutes:02d}"
if seconds > 0:
tmp_str += f":{seconds:02d}"
else:
if hours > 0:
tmp_str += f"{hours}"
tmp_str += f":{minutes:02d}"
tmp_str += f":{seconds:02d}"
else:
tmp_str += f"{minutes}"
if seconds > 0:
tmp_str += f":{seconds:02d}"
return tmp_str
def to_slurm_cli_opt(self) -> str:
return f"--time={self.to_slurm_time_format()}"
@classmethod
def merge_requirements(cls, lhs_option: 'SlurmTime',
rhs_option: 'SlurmTime') -> 'SlurmTime':
"""
Merge the requirements of the same type together.
"""
if lhs_option < rhs_option:
return copy.deepcopy(lhs_option)
return copy.deepcopy(rhs_option)
def _get_byte_size_factor(byte_suffix: str) -> int:
"""
Returns the factor for a specific bytesize.
"""
byte_suffix = byte_suffix.lower()
if byte_suffix == "b":
return 1
if byte_suffix in ("k", "kb"):
return 1024
if byte_suffix in ("m", "mb"):
return 1024 * 1024
if byte_suffix in ("g", "gb"):
return 1024 * 1024 * 1024
if byte_suffix in ("t", "tb"):
return 1024 * 1024 * 1024 * 1024
raise ValueError("Unsupported byte suffix")
_BYTE_RGX = re.compile(r"(?P<size>\d*)(?P<byte_suffix>.*)")
def _to_bytes(byte_str: str) -> int:
"""
>>> _to_bytes("4B")
4
>>> _to_bytes("4MB")
4194304
>>> _to_bytes("10G")
10737418240
"""
match = _BYTE_RGX.search(byte_str)
if match:
size = int(match.group("size"))
byte_suffix = match.group("byte_suffix")
return size * _get_byte_size_factor(byte_suffix)
raise ValueError("Passed byte size was wrongly formatted")
def _to_biggests_byte_size(num_bytes: int) -> tp.Tuple[int, str]:
"""
>>> _to_biggests_byte_size(4)
(4, 'B')
>>> _to_biggests_byte_size(4194304)
(4, 'M')
>>> _to_biggests_byte_size(4194305)
(5, 'M')
>>> _to_biggests_byte_size(10737418240)
(10, 'G')
>>> _to_biggests_byte_size(1099511627776)
(1, 'T')
"""
if num_bytes >= _get_byte_size_factor("TB"):
return (math.ceil(num_bytes / _get_byte_size_factor("TB")), "T")
if num_bytes >= _get_byte_size_factor("GB"):
return (math.ceil(num_bytes / _get_byte_size_factor("GB")), "G")
if num_bytes >= _get_byte_size_factor("MB"):
return (math.ceil(num_bytes / _get_byte_size_factor("MB")), "M")
if num_bytes >= _get_byte_size_factor("KB"):
return (math.ceil(num_bytes / _get_byte_size_factor("KB")), "K")
return (num_bytes, "B")
@attr.s
class SlurmMem(SlurmRequirement):
"""
Set memory requirements that specify the maximal amount of memory needed.
Specify the real memory required per node. Different units can be specified
using the suffix [K|M|G|T].
"""
mem_req: int = attr.ib(converter=_to_bytes)
def to_slurm_cli_opt(self) -> str:
byte_size_tuple = _to_biggests_byte_size(self.mem_req)
return f"--mem={byte_size_tuple[0]}{byte_size_tuple[1]}"
@classmethod
def merge_requirements(cls, lhs_option: 'SlurmMem',
rhs_option: 'SlurmMem') -> 'SlurmMem':
"""
Merge the requirements of the same type together.
"""
return copy.deepcopy(max(lhs_option, rhs_option))
def merge_slurm_options(list_1: tp.List[Requirement],
list_2: tp.List[Requirement]) -> tp.List[Requirement]:
"""
    Merges two lists of requirements into one.
"""
merged_options: tp.Dict[tp.Type[Requirement], Requirement] = dict()
for opt in list_1 + list_2:
key = type(opt)
if key in merged_options:
current_opt = merged_options[key]
merged_options[key] = current_opt.merge_requirements(
current_opt, opt)
else:
merged_options[key] = opt
return list(merged_options.values())
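# For example (sketch): merging [SlurmTime("10"), SlurmExclusive()] with
# [SlurmTime("5")] yields one SlurmExclusive and the smaller SlurmTime("5"),
# since requirements of the same type are merged pairwise.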
def get_slurm_options_from_config() -> tp.List[Requirement]:
"""
Generates a list of `SlurmOptions` which are specified in the BenchBuild
config.
"""
slurm_options: tp.List[Requirement] = []
if CFG['slurm']['exclusive']:
slurm_options.append(SlurmExclusive())
if not CFG['slurm']['multithread']:
slurm_options.append(SlurmHint({SlurmHint.SlurmHints.nomultithread}))
slurm_options.append(SlurmTime(str(CFG['slurm']['timelimit'])))
slurm_options.append(SlurmNiceness(int(CFG['slurm']['nice'])))
return slurm_options
|
415058
|
import unittest
from nestedfetch import nested_get, nested_set, flatten_data
class TestNestedFetch(unittest.TestCase):
simple_get_data = {
"name": "<NAME>",
"details": {"address": {"city": "Albuquerque"}},
}
nested_get_data = {
"name": "<NAME>",
"details": {"address": [{"city": "Albuquerque"}, {"city": "El Paso"}]},
}
nested_ll_get_data = {
"name": "<NAME>",
"details": {
"address": [
{"city": "Albuquerque"},
{"city": "El Paso"},
{"state": "New Mexico"},
]
},
}
nested_list_get_data = {
"name": "<NAME>",
"details": {"address": [[{"city": "Albuquerque"}, {"city": "El Paso"}]]},
}
flatten_data = {
"league": "Champions League",
"matches": [
{
"match_id": "match_1",
"goals": [
{
"time": 13,
"scorrer": "<NAME>",
"assist": "<NAME>",
"details": [
{"position": "outside-box"},
{"position": "right-side"},
],
},
{
"time": 78,
"scorrer": "<NAME>",
"assist": "<NAME>",
"details": [
{"position": "inside-box"},
{"position": "left-side"},
],
},
],
},
{
"match_id": "match_2",
"goals": [
{
"time": 36,
"scorrer": "<NAME>",
"assist": "<NAME>",
"details": [{"position": "penalty"}, {"position": "d-box"}],
}
],
},
],
}
flatten_test_data = [["This", "is"], ["flattened", "data"]]
flatten_nested_data = [[["This", "is"], ["flattened", "data"]]]
def test_simple_get_success(self):
res = nested_get(self.simple_get_data, ["details", "address", "city"])
self.assertEqual(res, "Albuquerque")
def test_nested_get_all_success(self):
res = nested_get(self.nested_get_data, ["details", "address", "city"])
self.assertEqual(res, ["Albuquerque", "El Paso"])
def test_nested_ll_get_all_success(self):
res = nested_get(
self.nested_ll_get_data, ["details", "address", "city"], default=None
)
self.assertEqual(res, ["Albuquerque", "El Paso", None])
def test_nested_get_with_index_success(self):
res = nested_get(self.nested_get_data, ["details", "address", "city", 0])
self.assertEqual(res, "Albuquerque")
def test_nested_list_get_with_index_success(self):
res = nested_get(self.nested_list_get_data, ["details", "address", 0, 0])
self.assertEqual(res, {"city": "Albuquerque"})
def test_nested_get_with_index_error(self):
res = nested_get(
self.nested_get_data, ["details", "address", "city", 5], default=None
)
self.assertEqual(res, None)
def test_nested_get_flatten(self):
res = nested_get(
self.flatten_data,
["matches", "goals", "scorrer"],
default=None,
flatten=True,
)
self.assertEqual(res, ["<NAME>", "<NAME>", "<NAME>"])
def test_nested_get_ll_flatten(self):
res = nested_get(
self.flatten_data,
["matches", "goals", "details"],
default=None,
flatten=True,
)
self.assertEqual(
res,
[
{"position": "outside-box"},
{"position": "right-side"},
{"position": "inside-box"},
{"position": "left-side"},
{"position": "penalty"},
{"position": "d-box"},
],
)
def test_simple_set_success(self):
res = nested_set(self.simple_get_data, ["details", "address", "city"], "Denver")
self.assertEqual(res, 1)
def test_simple_set_build_success(self):
res = nested_set(
self.simple_get_data,
["details", "address", "state"],
"New Mexico",
create_missing=True,
)
self.assertEqual(res, 1)
self.assertEqual(
self.simple_get_data,
{
"name": "<NAME>",
"details": {"address": {"city": "Albuquerque", "state": "New Mexico"}},
},
)
def test_simple_flatten(self):
res = flatten_data(self.flatten_test_data)
self.assertEqual(res, ["This", "is", "flattened", "data"])
def test_nested_flatten(self):
res = flatten_data(self.flatten_nested_data)
self.assertEqual(res, ["This", "is", "flattened", "data"])
if __name__ == "__main__":
unittest.main()
|
415072
|
import numpy as np
from distancematrix.util import diag_indices_of
from distancematrix.generator.abstract_generator import AbstractGenerator
from distancematrix.generator.abstract_generator import AbstractBoundStreamingGenerator
class MockGenerator(AbstractGenerator):
"""
Mock generator for testing purposes. Simply returns distances from a given distance matrix.
"""
def __init__(self, dist_matrix):
"""
Creates a new mock generator that will return distances from the provided distance matrix.
:param dist_matrix: distances to return.
"""
self._dist_matrix = dist_matrix
# Storage for parameters used for prepare and prepare_streaming
self.m = None
self.series_window = None
self.query_window = None
self.series = None
self.query = None
self.bound_gen = None
def prepare_streaming(self, m, series_window, query_window=None):
self.m = m
self.series_window = series_window
self.query_window = query_window
if query_window is None:
query_window = series_window
self_join = True
else:
self_join = False
s_subseqs = series_window - m + 1
q_subseqs = query_window - m + 1
self.bound_gen = BoundMockGenerator(self._dist_matrix, s_subseqs, q_subseqs,
self_join, -series_window, -query_window)
return self.bound_gen
def prepare(self, m, series, query=None):
self.m = m
self.series = series
self.query = query
s_win = len(series) - m + 1
if query is None:
q_win = s_win
self_join = True
else:
q_win = len(query) - m + 1
self_join = False
self.bound_gen = BoundMockGenerator(self._dist_matrix, s_win, q_win, self_join, 0, 0)
return self.bound_gen
class BoundMockGenerator(AbstractBoundStreamingGenerator):
"""
Mock generator for testing purposes. Simply returns distances from a given distance matrix.
"""
def __init__(self, dist_matrix, s_win, q_win, self_join, s_view_index, q_view_index):
"""
Creates a new mock generator that will return distances from the provided distance matrix.
:param dist_matrix: 2D matrix, base distance values to use, a view will be used to determine
which values to return for mocked calculations
:param s_win: window size of the view over the series axis
:param q_win: window size of the view over the query axis
:param self_join: are we doing a self-join (does adding series data also implicitly add query data)
:param s_view_index: start index of the view of dist_matrix (for series)
:param q_view_index: start index of the view of dist_matrix (for query)
"""
self._dist_matrix = dist_matrix
self._s_win = s_win
self._q_win = q_win
self._self_join = self_join
self._s_index = s_view_index
self._q_index = q_view_index
self.appended_series = np.empty((0,), dtype=float)
self.appended_query = np.empty((0,), dtype=float)
def calc_diagonal(self, diag):
view = self._dist_matrix[
max(self._q_index, 0): max(self._q_index + self._q_win, 0),
max(self._s_index, 0): max(self._s_index + self._s_win, 0)
]
return view[diag_indices_of(view, diag)]
def calc_column(self, column):
view = self._dist_matrix[
max(self._q_index, 0): max(self._q_index + self._q_win, 0),
max(self._s_index, 0): max(self._s_index + self._s_win, 0)
]
return view[:, column]
def append_series(self, values):
self.appended_series = np.concatenate([self.appended_series, values])
self._s_index += len(values)
if self._self_join:
self._q_index += len(values)
def append_query(self, values):
if self._self_join:
raise RuntimeError("Should not append query if self-joining.")
self.appended_query = np.concatenate([self.appended_query, values])
self._q_index += len(values)
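# Usage sketch (added for illustration, not part of the original module): drive
# the mock through the non-streaming path with a small hand-built distance
# matrix. The matrix shape, m and the series lengths below are assumptions,
# and running this requires the distancematrix package imported above.
if __name__ == '__main__':
    dist = np.arange(16, dtype=float).reshape(4, 4)
    gen = MockGenerator(dist).prepare(m=2, series=np.zeros(5), query=np.zeros(5))
    print(gen.calc_column(1))    # second column of the 4x4 view
    print(gen.calc_diagonal(0))  # main diagonal of the 4x4 view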
|
415115
|
import gzip
import logging
from http import HTTPStatus
from io import BytesIO
from apscheduler.schedulers import SchedulerAlreadyRunningError
from flask import Flask
from flask import render_template as rt
from flask_assets import Bundle
from htmlmin.main import minify
from webassets.env import RegisterError
from shhh import __version__
from shhh.api import api
from shhh.enums import EnvConfig
from shhh.extensions import assets, db, scheduler
from shhh.views import views
def create_app(env):
"""Application factory."""
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] [sev %(levelno)s] [%(levelname)s] [%(name)s]> %(message)s",
datefmt="%a, %d %b %Y %H:%M:%S",
)
if env == EnvConfig.TESTING.value:
logging.getLogger("shhh").setLevel(logging.CRITICAL)
logging.getLogger("apscheduler").setLevel(logging.CRITICAL)
logging.getLogger("tasks").setLevel(logging.CRITICAL)
app = Flask(__name__)
configurations = {
EnvConfig.TESTING.value: "shhh.config.TestConfig",
EnvConfig.DEV_LOCAL.value: "shhh.config.DefaultConfig",
EnvConfig.DEV_DOCKER.value: "shhh.config.DockerConfig",
EnvConfig.HEROKU.value: "shhh.config.HerokuConfig",
EnvConfig.PRODUCTION.value: "shhh.config.ProductionConfig",
}
app.config.from_object(configurations.get(env, "shhh.config.ProductionConfig"))
register_extensions(app)
with app.app_context():
register_blueprints(app)
db.create_all()
try:
scheduler.start()
except SchedulerAlreadyRunningError:
pass
assets.manifest = False
assets.cache = False
try:
compile_assets(assets)
except RegisterError:
pass
app.context_processor(inject_global_vars)
app.after_request(optimize_response)
app.after_request(security_headers)
app.register_error_handler(HTTPStatus.NOT_FOUND.value, not_found_error)
app.register_error_handler(HTTPStatus.INTERNAL_SERVER_ERROR.value, internal_server_error)
return app
def register_blueprints(app):
"""Register application blueprints."""
app.register_blueprint(api)
app.register_blueprint(views)
def register_extensions(app):
"""Register application extensions."""
assets.init_app(app)
db.init_app(app)
try:
scheduler.init_app(app)
except SchedulerAlreadyRunningError:
pass
def compile_assets(app_assets):
"""Configure and build asset bundles."""
js_assets = ("create", "created", "read")
css_assets = ("styles",)
for code in js_assets:
bundle = Bundle(f"src/js/{code}.js", filters="jsmin", output=f"dist/js/{code}.min.js")
app_assets.register(code, bundle)
bundle.build()
for style in css_assets:
bundle = Bundle(
f"src/css/{style}.css", filters="cssmin", output=f"dist/css/{style}.min.css"
)
app_assets.register(style, bundle)
bundle.build()
def inject_global_vars():
"""Global Jinja variables."""
return {"version": __version__}
def not_found_error(error):
"""Not found error handler."""
return rt("error.html", error=error), HTTPStatus.NOT_FOUND.value
def internal_server_error(error):
"""Internal server error handler."""
return rt("error.html", error=error), HTTPStatus.INTERNAL_SERVER_ERROR.value
def optimize_response(response):
"""Minify HTML and use gzip compression."""
if response.mimetype == "text/html":
response.set_data(minify(response.get_data(as_text=True)))
# Do not gzip below 500 bytes or on JSON content
if response.content_length < 500 or response.mimetype == "application/json":
return response
response.direct_passthrough = False
gzip_buffer = BytesIO()
gzip_file = gzip.GzipFile(mode="wb", compresslevel=6, fileobj=gzip_buffer)
gzip_file.write(response.get_data())
gzip_file.close()
response.set_data(gzip_buffer.getvalue())
response.headers.add("Content-Encoding", "gzip")
return response
# pylint: disable=line-too-long
def security_headers(response):
"""Add required security headers."""
response.headers.add("X-Frame-Options", "SAMEORIGIN")
response.headers.add("X-Content-Type-Options", "nosniff")
response.headers.add("X-XSS-Protection", "1; mode=block")
response.headers.add("Referrer-Policy", "no-referrer-when-downgrade")
response.headers.add(
"Strict-Transport-Security", "max-age=63072000; includeSubdomains; preload"
)
response.headers.add(
"Content-Security-Policy",
"default-src 'self'; img-src 'self'; object-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'",
)
response.headers.add(
"feature-policy",
"accelerometer 'none'; camera 'none'; geolocation 'none'; gyroscope 'none'; magnetometer 'none'; microphone 'none'; payment 'none'; usb 'none'",
)
return response
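# Standalone sketch (illustrative) of the gzip path used in optimize_response:
# compress a payload the same way the hook does, then check that it
# round-trips. Uses only the stdlib modules already imported above.
if __name__ == "__main__":
    payload = b"<html><body>hello</body></html>"
    buffer = BytesIO()
    gz = gzip.GzipFile(mode="wb", compresslevel=6, fileobj=buffer)
    gz.write(payload)
    gz.close()
    assert gzip.decompress(buffer.getvalue()) == payload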
|
415156
|
import torch.nn as nn
import torch
from at_learner_core.models.wrappers.losses import get_loss
from at_learner_core.models.wrappers.simple_classifier_wrapper import SimpleClassifierWrapper
from at_learner_core.models.architectures import get_backbone_block
from ..architectures.transformer import TransformerEncoder
from collections import OrderedDict
class DLASWrapper(SimpleClassifierWrapper):
def __init__(self, wrapper_config):
super().__init__(wrapper_config)
def _init_modules(self, wrapper_config):
self.input_modalities = wrapper_config.input_modalities
for modal_key in self.input_modalities:
for idx in range(0, 4):
if 'optical_flow' in modal_key and idx == 0:
backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True,
in_size=2)
else:
backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True)
setattr(self, f'{modal_key}_block{idx}', backbone)
feature_sizes = []
for idx in range(1, 4):
backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True)
feature_sizes.append(feature_size)
setattr(self, f'agg_block{idx}', backbone)
for idx in range(1, 4):
planes = feature_sizes[idx-1]
adaptive_block = nn.Sequential(nn.Conv2d(planes, planes, 1), nn.ReLU(inplace=True))
setattr(self, f'agg_adaptive_block{idx}', adaptive_block)
self.backbone_pooling = nn.AdaptiveAvgPool2d((1, 1))
self.backbone_feature_size = feature_size
self.pooling = nn.AdaptiveAvgPool2d((1, feature_size))
self.pooling2 = nn.AdaptiveMaxPool2d((1, feature_size))
self.pooling3 = nn.AdaptiveMaxPool2d((1, feature_size))
self.classifier = nn.Linear(3*feature_size, wrapper_config.nclasses)
def forward(self, x):
B, C, W, H = x[self.input_modalities[0]].size()
device = x[self.input_modalities[0]].device
features_dict = OrderedDict()
for modal_key in self.input_modalities:
features_dict[modal_key] = getattr(self, f'{modal_key}_block0')(x[modal_key])
features_agg = features_dict[self.input_modalities[0]]
for modal_key in self.input_modalities[1:]:
features_agg = features_agg + features_dict[modal_key]
features_dict['agg'] = features_agg
for idx in range(1, 4):
for modal_key in self.input_modalities + ['agg']:
features_dict[modal_key] = getattr(self, f'{modal_key}_block{idx}')(features_dict[modal_key])
features_agg = features_dict[self.input_modalities[0]]
for modal_key in self.input_modalities[1:]:
features_agg = features_agg + features_dict[modal_key]
features_dict['agg'] = features_dict['agg'] + getattr(self, f'agg_adaptive_block{idx}')(features_agg)
for modal_key in self.input_modalities + ['agg']:
features_dict[modal_key] = self.backbone_pooling(features_dict[modal_key]).squeeze()
M = len(self.input_modalities) + 1
features = torch.empty((B, M, self.backbone_feature_size)).to(device)
for idx, key in enumerate(self.input_modalities + ['agg']):
features[:, idx, :] = features_dict[key]
features = features.view((B, M, -1))
"""
results_dict = OrderedDict()
for modal_key in self.input_modalities + ['agg']:
results_dict[modal_key] = getattr(self, f'{modal_key}_clf')(features_dict[modal_key])
"""
features1 = self.pooling(features)
features2 = self.pooling2(features)
features3 = self.pooling3(-features)
        features = torch.cat([features1, features2, features3], dim=2)
features = features.squeeze()
output = self.classifier(features)
sigmoid_output = torch.sigmoid(output)
if isinstance(self.loss, nn.modules.loss.CrossEntropyLoss):
x['target'] = x['target'].squeeze()
output_dict = {'output': sigmoid_output.detach().cpu().numpy(),
'target': x['target'].detach().cpu().numpy()}
for k, v in x.items():
if k not in ['data', 'target'] + self.input_modalities:
output_dict[k] = v
loss = self.loss(output, x['target'])
return output_dict, loss
|
415165
|
from .classification_models.classification_models import *
from .inception_resnet_v2 import InceptionResNetV2
from .inception_v3 import InceptionV3
from .backbones import get_backbone
from .preprocessing import get_preprocessing
|
415166
|
from datetime import datetime, timedelta
from typing import Any, List, Optional
from drillsrs import db
THRESHOLDS = [
timedelta(seconds=0),
timedelta(hours=1),
timedelta(hours=3),
timedelta(hours=8),
timedelta(days=1),
timedelta(days=3),
timedelta(days=7),
timedelta(days=14),
timedelta(days=30),
timedelta(days=60),
timedelta(days=120),
]
def consecutive_correct_answers(card: db.Card) -> int:
for i, user_answer in enumerate(reversed(card.user_answers)):
if not user_answer.is_correct:
return i
return len(card.user_answers)
def next_due_date(card: db.Card) -> Optional[datetime]:
if not card.is_active:
return None
if not card.user_answers:
return datetime.now()
index = 0
for user_answer in card.user_answers:
if user_answer.is_correct:
index = min(index + 1, len(THRESHOLDS) - 1)
else:
index = max(index - 1, 0)
return card.user_answers[-1].date + THRESHOLDS[index]
def get_cards_to_study(
session: Any, deck: db.Deck, how_many: int
) -> List[db.Card]:
return list(
session.query(db.Card)
.filter(db.Card.deck_id == deck.id)
.filter(db.Card.is_active == 0)
.order_by(db.Card.num.asc())
.limit(how_many)
)
def get_due_cards(session: Any, deck: db.Deck) -> List[db.Card]:
return list(
session.query(db.Card)
.filter(db.Card.deck_id == deck.id)
.filter(db.Card.is_active)
.order_by(db.Card.due_date.asc())
)
def get_cards_to_review(session: Any, deck: db.Deck) -> List[db.Card]:
return [
card
for card in get_due_cards(session, deck)
if card.due_date and datetime.now() >= card.due_date
]
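# Worked example (illustrative, not part of the original module) of the
# interval walk in next_due_date: a correct answer moves one step up the
# THRESHOLDS ladder, a mistake one step down, clamped at both ends. The
# answer history below is made up.
if __name__ == "__main__":
    history = [True, True, False, True]  # True = correct answer
    index = 0
    for correct in history:
        if correct:
            index = min(index + 1, len(THRESHOLDS) - 1)
        else:
            index = max(index - 1, 0)
    print(THRESHOLDS[index])  # 3:00:00 -> due three hours after the last answer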
|
415223
|
import logging
import pi_weatherstation
import pi_weatherstation.config as config
from aioprometheus import Gauge, Service
TEMPERATURE = Gauge("temperature", "Temperature")
PRESSURE = Gauge("pressure", "Pressure")
HUMIDITY = Gauge("humidity", "Humidity")
GAS_RESISTANCE = Gauge("gas_resistance", "Gas resistance")
AIR_QUALITY = Gauge("air_quality", "Air quality")
PROMETHEUS_SERVER = Service()
class PrometheusMetrics:
def __init__(self, store):
self.store = store
PROMETHEUS_SERVER.register(TEMPERATURE)
PROMETHEUS_SERVER.register(PRESSURE)
PROMETHEUS_SERVER.register(HUMIDITY)
PROMETHEUS_SERVER.register(GAS_RESISTANCE)
PROMETHEUS_SERVER.register(AIR_QUALITY)
async def start_prometheus_server(self):
await PROMETHEUS_SERVER.start(
addr=config.get("metrics_host"),
port=config.getint("metrics_port"),
)
        logging.info(f"Serving prometheus metrics on: {PROMETHEUS_SERVER.metrics_url}")
async def push_weather_data(self):
weather = self.store.stored_data.get("weather_sensor")
if not weather:
logging.debug("Weather data unavailable")
return
labels = {
"pi_weatherstation_version": pi_weatherstation.VERSION,
"location": config.get("metrics_location_label"),
}
TEMPERATURE.set(labels, weather.get("temperature"))
PRESSURE.set(labels, weather.get("pressure"))
HUMIDITY.set(labels, weather.get("humidity"))
GAS_RESISTANCE.set(labels, weather.get("gas_resistance"))
if "air_quality" in weather:
AIR_QUALITY.set(labels, weather.get("air_quality"))
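# Minimal sketch (illustrative): feed a stubbed store into push_weather_data.
# Assumes pi_weatherstation's config has been initialised so that
# config.get("metrics_location_label") resolves; the sensor values are made up.
if __name__ == "__main__":
    import asyncio
    import types
    store = types.SimpleNamespace(stored_data={"weather_sensor": {
        "temperature": 21.5, "pressure": 1013.2,
        "humidity": 40.0, "gas_resistance": 12000.0,
    }})
    asyncio.run(PrometheusMetrics(store).push_weather_data())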
|
415276
|
from typing import List, Optional
class Node:
def __init__(self, val: int):
self.val = val
self.l = None
self.r = None
def __repr__(self):
return "{}=[l->{}, r->{}]".format(self.val, self.l, self.r)
def make_cartree(arr: List[int], last: Optional[Node], root: Optional[Node]) -> Optional[Node]:
if not arr:
return root
node = Node(arr[0])
if not last:
return make_cartree(arr[1:], node, node)
if last.val > node.val:
node.l = last
return make_cartree(arr[1:], node, node)
last.r = node
return make_cartree(arr[1:], last, last)
# Tests
cartree = make_cartree([3, 2, 6, 1, 9], None, None)
assert str(cartree) == \
"1=[l->2=[l->3=[l->None, r->None], " + \
"r->6=[l->None, r->None]], " + \
"r->9=[l->None, r->None]]"
|
415282
|
from django.urls import re_path
from . import views
urlpatterns = (
# returns token only
re_path(r'^social/jwt-pair/(?:(?P<provider>[a-zA-Z0-9_-]+)/?)?$',
views.SocialJWTPairOnlyAuthView.as_view(),
name='login_social_jwt_pair'),
# returns token + user_data
re_path(r'^social/jwt-pair-user/(?:(?P<provider>[a-zA-Z0-9_-]+)/?)?$',
views.SocialJWTPairUserAuthView.as_view(),
name='login_social_jwt_pair_user'),
)
|
415310
|
import datetime
import constant
import json
import re
def getDate():
    return datetime.datetime.now().strftime("%Y-%m-%d")
def buildFileName(name, ext):
    return constant.DATA_DIR + getDate() + name + "." + ext
def writeCsvAndFilterHashtags(popular_posts, hashtags, writer):
hashtagregex = re.compile(r"#(\w+)")
for post in popular_posts:
print(str(post.author_user_id), str(post.share_url), str(post.desc),
post.statistics.comment_count, post.statistics.digg_count)
writer.writerow({'User ID': str(post.author_user_id), 'URL': str(post.share_url), 'Description': str(
post.desc), 'Comments': post.statistics.comment_count, 'Likes': post.statistics.digg_count})
posttags = hashtagregex.findall(str(post.desc))
new_hashtags = hashtagCounter(posttags, hashtags)
saveHashtag(new_hashtags)
def hashtagCounter(posttags, hashtags):
for tag in posttags:
if tag in hashtags:
hashtags[tag] += 1
else:
hashtags[tag] = 1
return hashtags
def saveHashtag(hashtags):
with open((constant.DATA_DIR + "hashtags.json"), 'w') as fout:
json.dump(hashtags, fout)
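# Quick check (illustrative) of hashtagCounter: counts accumulate into the
# dict passed in. The sample tags below are made up.
if __name__ == "__main__":
    counts = hashtagCounter(["fyp", "dance", "fyp"], {})
    assert counts == {"fyp": 2, "dance": 1}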
|
415312
|
import psp.PSP_lib as bd
import numpy as np
import pandas as pd
import random
import math
from openbabel import openbabel as ob
obConversion = ob.OBConversion()
obConversion.SetInAndOutFormats("xyz", "xyz")
ff = ob.OBForceField.FindForceField('UFF')
mol = ob.OBMol()
np.set_printoptions(precision=20)
# define objective function
def f(
unit_name,
sl,
unit,
bond,
angle,
neigh_atoms_info,
xyz_tmp_dir,
dum1,
dum2,
atom1,
atom2,
):
file_name, conf_unit, dis_dum1_dum2, ang_1st_2nd, penalty = bd.create_conformer(
unit_name,
sl,
unit,
bond,
neigh_atoms_info,
angle,
xyz_tmp_dir,
dum1,
dum2,
atom1,
atom2,
)
obConversion.ReadFile(mol, file_name)
ff.Setup(mol)
    energy = ff.Energy()
    E_cost = (
        energy
        + energy * (1 - (ang_1st_2nd / 180.0))
        + energy * penalty * 10
    )
return E_cost, conf_unit, file_name
######################################################
# Simulated Annealing
######################################################
def SA(
unit_name,
unit,
bonds,
angle,
neigh_atoms_info,
xyz_tmp_dir,
dum1,
dum2,
atom1,
atom2,
Steps,
Substeps,
):
i1 = bonds.index.values
i2 = angle
# Start location
x_start = [i1[0], i2[0]]
# Number of cycles
n = Steps
# Number of trials per cycle
m = Substeps
# Number of accepted solutions
na = 0.0
# Probability of accepting worse solution at the start
p1 = 0.3
# Probability of accepting worse solution at the end
p50 = 0.001
# Initial temperature
t1 = -1.0 / math.log(p1)
# Final temperature
t50 = -1.0 / math.log(p50)
# Fractional reduction every cycle
frac = (t50 / t1) ** (1.0 / (n - 1.0))
# Initialize x
x = np.zeros((n + 1, 2))
x[0] = x_start
results = []
xi = np.zeros(2)
xi = x_start
na = na + 1.0
# Current best results so far
xc = np.zeros(2)
xc = x[0]
fc, unit_new, file_name = f(
unit_name,
0,
unit,
bonds.loc[0],
0.0,
neigh_atoms_info,
xyz_tmp_dir,
dum1,
dum2,
atom1,
atom2,
)
fs = np.zeros(n + 1)
fs[0] = fc
results.append([0, fc, file_name])
# Current temperature
t = t1
# DeltaE Average
DeltaE_avg = 0.0
for i in range(n):
for j in range(m):
unit_prev = unit.copy()
xi[0] = np.random.choice(i1)
xi[1] = np.random.choice(i2)
fc_new, unit, file_name = f(
unit_name,
i,
unit,
bonds.loc[xi[0]],
xi[1],
neigh_atoms_info,
xyz_tmp_dir,
dum1,
dum2,
atom1,
atom2,
)
DeltaE = abs(fc_new - fc)
if fc_new > fc:
# Initialize DeltaE_avg if a worse solution was found
# on the first iteration
if i == 0 and j == 0:
DeltaE_avg = DeltaE
# To avoid divide by ZERO add a small number to DeltaE_avg
if DeltaE_avg == 0.0:
DeltaE_avg = DeltaE_avg + 1.0e-13
# objective function is worse
# generate probability of acceptance
p = math.exp(-DeltaE / (DeltaE_avg * t))
# determine whether to accept worse point
if random.random() < p:
# accept the worse solution
accept = True
else:
# don't accept the worse solution
accept = False
else:
# objective function is lower, automatically accept
accept = True
if accept is True:
# update currently accepted solution
xc[0] = xi[0]
xc[1] = xi[1]
fc = fc_new
best_xyz = file_name
# increment number of accepted solutions
na = na + 1.0
# update DeltaE_avg
DeltaE_avg = (DeltaE_avg * (na - 1.0) + DeltaE) / na
else:
unit = unit_prev.copy()
# Record the best x values at the end of every cycle
x[i + 1][0] = xc[0]
x[i + 1][1] = xc[1]
try:
results.append([i, fc, best_xyz])
except Exception:
results.append([i, fc, 'XXX'])
fs[i + 1] = fc
if np.around(fs[i], decimals=15) == np.around(
fs[i + 1], decimals=15
) and np.around(fs[i - 1], decimals=15) == np.around(fs[i + 1], decimals=15):
break
# Lower the temperature for next cycle
t = frac * t
results = pd.DataFrame(results, columns=['i', 'Energy+', 'xyzFile'])
results = results[results['xyzFile'] != 'XXX']
results = results.drop_duplicates(subset='xyzFile', keep="last")
return results
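######################################################
# Worked example (illustrative) of the geometric cooling
# schedule above, using the same defaults p1=0.3, p50=0.001
# and an assumed n=50 cycles.
######################################################
if __name__ == '__main__':
    p1, p50, n = 0.3, 0.001, 50
    t1 = -1.0 / math.log(p1)    # ~0.83, hot start
    t50 = -1.0 / math.log(p50)  # ~0.14, cold finish
    frac = (t50 / t1) ** (1.0 / (n - 1.0))
    print(t1, t50, frac)        # temperature shrinks by `frac` each cycle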
|
415343
|
import json
from hashlib import sha3_256
from django.conf import settings
from nacl.exceptions import CryptoError
from nacl.signing import SigningKey as NaClSigningKey
from nacl.signing import VerifyKey
from node.blockchain.types import AccountNumber, Hash, KeyPair, Signature, SigningKey
from .misc import bytes_to_hex, hex_to_bytes
def generate_signature(signing_key: SigningKey, message: bytes) -> Signature:
return NaClSigningKey(hex_to_bytes(signing_key)).sign(message).signature.hex()
def derive_public_key(signing_key: SigningKey) -> AccountNumber:
return AccountNumber(bytes_to_hex(NaClSigningKey(hex_to_bytes(signing_key)).verify_key))
def normalize_dict(dict_: dict) -> bytes:
return json.dumps(dict_, separators=(',', ':'), sort_keys=True).encode('utf-8')
def hash_binary_data(binary_data: bytes) -> Hash:
return Hash(sha3_256(binary_data).digest().hex())
def get_signing_key():
return settings.NODE_SIGNING_KEY
def get_node_identifier():
return derive_public_key(get_signing_key())
def is_signature_valid(verify_key: AccountNumber, message: bytes, signature: Signature) -> bool:
try:
verify_key_bytes = hex_to_bytes(verify_key)
signature_bytes = hex_to_bytes(signature)
except ValueError:
return False
try:
VerifyKey(verify_key_bytes).verify(message, signature_bytes)
except CryptoError:
return False
return True
def generate_key_pair() -> KeyPair:
signing_key = NaClSigningKey.generate()
return KeyPair(bytes_to_hex(signing_key.verify_key), bytes_to_hex(signing_key))
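# Round-trip sketch (illustrative): generate a key pair, sign a normalized
# payload and verify it. Assumes KeyPair orders its fields as
# (public_key, signing_key), as the constructor call above suggests, and
# that PyNaCl is installed.
if __name__ == '__main__':
    public_key, signing_key = generate_key_pair()
    message = normalize_dict({'amount': 1, 'recipient': 'alice'})
    signature = generate_signature(signing_key, message)
    assert is_signature_valid(public_key, message, signature)
    assert derive_public_key(signing_key) == public_key
    print(hash_binary_data(message))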
|
415391
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LdpInitialization(Base):
__slots__ = ()
_SDM_NAME = 'ldpInitialization'
_SDM_ATT_MAP = {
'HeaderVersion': 'ldpInitialization.header.version-1',
'HeaderPduLengthinOctets': 'ldpInitialization.header.pduLengthinOctets-2',
'HeaderLsrID': 'ldpInitialization.header.lsrID-3',
'HeaderLabelSpace': 'ldpInitialization.header.labelSpace-4',
'HeaderUBit': 'ldpInitialization.header.uBit-5',
'HeaderType': 'ldpInitialization.header.type-6',
'HeaderLength': 'ldpInitialization.header.length-7',
'HeaderMessageID': 'ldpInitialization.header.messageID-8',
'CommonSessionParametersTLVUBit': 'ldpInitialization.header.commonSessionParametersTLV.uBit-9',
'CommonSessionParametersTLVFBit': 'ldpInitialization.header.commonSessionParametersTLV.fBit-10',
'CommonSessionParametersTLVType': 'ldpInitialization.header.commonSessionParametersTLV.type-11',
'CommonSessionParametersTLVLength': 'ldpInitialization.header.commonSessionParametersTLV.length-12',
'CommonSessionParametersTLVVersion': 'ldpInitialization.header.commonSessionParametersTLV.version-13',
'CommonSessionParametersTLVKeepaliveTime': 'ldpInitialization.header.commonSessionParametersTLV.keepaliveTime-14',
'CommonSessionParametersTLVABit': 'ldpInitialization.header.commonSessionParametersTLV.aBit-15',
'CommonSessionParametersTLVDBit': 'ldpInitialization.header.commonSessionParametersTLV.dBit-16',
'CommonSessionParametersTLVReserved': 'ldpInitialization.header.commonSessionParametersTLV.reserved-17',
'CommonSessionParametersTLVPathVectorLimit': 'ldpInitialization.header.commonSessionParametersTLV.pathVectorLimit-18',
'CommonSessionParametersTLVMaxPDULength': 'ldpInitialization.header.commonSessionParametersTLV.maxPDULength-19',
'CommonSessionParametersTLVLsrID': 'ldpInitialization.header.commonSessionParametersTLV.lsrID-20',
'CommonSessionParametersTLVLabelSpace': 'ldpInitialization.header.commonSessionParametersTLV.labelSpace-21',
'AtmSessionParametersTLVUBit': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.uBit-22',
'AtmSessionParametersTLVFBit': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.fBit-23',
'AtmSessionParametersTLVType': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.type-24',
'AtmSessionParametersTLVLength': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.length-25',
'AtmSessionParametersTLVAtmMergeCapabilities': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmMergeCapabilities-26',
'AtmSessionParametersTLVNumberOfLabelRangeComponents': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.numberOfLabelRangeComponents-27',
'AtmSessionParametersTLVDBit': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.dBit-28',
'AtmSessionParametersTLVReserved': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.reserved-29',
'AtmLabelRangeComponentReserved': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.reserved-30',
'AtmLabelRangeComponentMinimumVPI': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.minimumVPI-31',
'AtmLabelRangeComponentMinimumVCI': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.minimumVCI-32',
'AtmsessionparameterstlvAtmLabelRangeComponentReserved': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.reserved-33',
'AtmLabelRangeComponentMaximumVPI': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.maximumVPI-34',
'AtmLabelRangeComponentMaximumVCI': 'ldpInitialization.header.optionalParameter.atmSessionParametersTLV.atmLabelRangeComponent.maximumVCI-35',
'FrameRelaySessionParametersTLVUBit': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.uBit-36',
'FrameRelaySessionParametersTLVFBit': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.fBit-37',
'FrameRelaySessionParametersTLVType': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.type-38',
'FrameRelaySessionParametersTLVLength': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.length-39',
'FrameRelaySessionParametersTLVFrameRelayMergeCapabilities': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayMergeCapabilities-40',
'FrameRelaySessionParametersTLVNumberOfLabelRangeComponents': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.numberOfLabelRangeComponents-41',
'FrameRelaySessionParametersTLVDBit': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.dBit-42',
'FrameRelaySessionParametersTLVReserved': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.reserved-43',
'FrameRelayLabelRangeComponentReserved': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayLabelRangeComponent.reserved-44',
'FrameRelayLabelRangeComponentDlciLength': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayLabelRangeComponent.dlciLength-45',
'FrameRelayLabelRangeComponentMinimumDLCI': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayLabelRangeComponent.minimumDLCI-46',
'FramerelaysessionparameterstlvFrameRelayLabelRangeComponentReserved': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayLabelRangeComponent.reserved-47',
'FrameRelayLabelRangeComponentMaximumDLCI': 'ldpInitialization.header.optionalParameter.frameRelaySessionParametersTLV.frameRelayLabelRangeComponent.maximumDLCI-48',
'P2mpCapabilityParametersTLVUBit': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.uBit-49',
'P2mpCapabilityParametersTLVFBit': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.fBit-50',
'P2mpCapabilityParametersTLVTclP2mpCapabilityParameter': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.tclP2mpCapabilityParameter-51',
'P2mpCapabilityParametersTLVTclP2mpCapabilityParameterLength': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.tclP2mpCapabilityParameterLength-52',
'P2mpCapabilityParametersTLVSBit': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.sBit-53',
'P2mpCapabilityParametersTLVTclP2mpCapabilityParameterReserved': 'ldpInitialization.header.optionalParameter.p2mpCapabilityParametersTLV.tclP2mpCapabilityParameterReserved-54',
}
def __init__(self, parent, list_op=False):
super(LdpInitialization, self).__init__(parent, list_op)
@property
def HeaderVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderVersion']))
@property
def HeaderPduLengthinOctets(self):
"""
Display Name: PDU length(in octets)
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderPduLengthinOctets']))
@property
def HeaderLsrID(self):
"""
Display Name: LSR ID
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLsrID']))
@property
def HeaderLabelSpace(self):
"""
Display Name: Label space
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLabelSpace']))
@property
def HeaderUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderUBit']))
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 0x0200
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
@property
def HeaderLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLength']))
@property
def HeaderMessageID(self):
"""
Display Name: Message ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMessageID']))
@property
def CommonSessionParametersTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVUBit']))
@property
def CommonSessionParametersTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVFBit']))
@property
def CommonSessionParametersTLVType(self):
"""
Display Name: Type
Default Value: 0x0500
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVType']))
@property
def CommonSessionParametersTLVLength(self):
"""
Display Name: Length
Default Value: 14
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVLength']))
@property
def CommonSessionParametersTLVVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVVersion']))
@property
def CommonSessionParametersTLVKeepaliveTime(self):
"""
Display Name: Keepalive time
Default Value: 30
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVKeepaliveTime']))
@property
def CommonSessionParametersTLVABit(self):
"""
Display Name: A bit
Default Value: 0
Value Format: decimal
Available enum values: Downstream unsolicited, 0, Downstream on demand, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVABit']))
@property
def CommonSessionParametersTLVDBit(self):
"""
Display Name: D bit
Default Value: 0
Value Format: decimal
Available enum values: Loop detection disabled, 0, Loop detection enabled, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVDBit']))
@property
def CommonSessionParametersTLVReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVReserved']))
@property
def CommonSessionParametersTLVPathVectorLimit(self):
"""
Display Name: Path vector limit
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVPathVectorLimit']))
@property
def CommonSessionParametersTLVMaxPDULength(self):
"""
Display Name: Max PDU length
Default Value: 4096
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVMaxPDULength']))
@property
def CommonSessionParametersTLVLsrID(self):
"""
Display Name: LSR ID
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVLsrID']))
@property
def CommonSessionParametersTLVLabelSpace(self):
"""
Display Name: Label space
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommonSessionParametersTLVLabelSpace']))
@property
def AtmSessionParametersTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVUBit']))
@property
def AtmSessionParametersTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVFBit']))
@property
def AtmSessionParametersTLVType(self):
"""
Display Name: Type
Default Value: 0x0501
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVType']))
@property
def AtmSessionParametersTLVLength(self):
"""
Display Name: Length
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVLength']))
@property
def AtmSessionParametersTLVAtmMergeCapabilities(self):
"""
Display Name: ATM merge capabilities
Default Value: 0
Value Format: decimal
Available enum values: Merge not supported, 0, VP merge supported, 1, VC merge supported, 2, VP and VC merge supported, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVAtmMergeCapabilities']))
@property
def AtmSessionParametersTLVNumberOfLabelRangeComponents(self):
"""
Display Name: Number of label range components
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVNumberOfLabelRangeComponents']))
@property
def AtmSessionParametersTLVDBit(self):
"""
Display Name: D bit
Default Value: 0
Value Format: decimal
Available enum values: Bidirectional VC capability, 0, Unidirectional VC capability, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVDBit']))
@property
def AtmSessionParametersTLVReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmSessionParametersTLVReserved']))
@property
def AtmLabelRangeComponentReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelRangeComponentReserved']))
@property
def AtmLabelRangeComponentMinimumVPI(self):
"""
Display Name: Minimum VPI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelRangeComponentMinimumVPI']))
@property
def AtmLabelRangeComponentMinimumVCI(self):
"""
Display Name: Minimum VCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelRangeComponentMinimumVCI']))
@property
def AtmsessionparameterstlvAtmLabelRangeComponentReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmsessionparameterstlvAtmLabelRangeComponentReserved']))
@property
def AtmLabelRangeComponentMaximumVPI(self):
"""
Display Name: Maximum VPI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelRangeComponentMaximumVPI']))
@property
def AtmLabelRangeComponentMaximumVCI(self):
"""
Display Name: Maximum VCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelRangeComponentMaximumVCI']))
@property
def FrameRelaySessionParametersTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVUBit']))
@property
def FrameRelaySessionParametersTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVFBit']))
@property
def FrameRelaySessionParametersTLVType(self):
"""
Display Name: Type
Default Value: 0x0502
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVType']))
@property
def FrameRelaySessionParametersTLVLength(self):
"""
Display Name: Length
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVLength']))
@property
def FrameRelaySessionParametersTLVFrameRelayMergeCapabilities(self):
"""
Display Name: Frame Relay merge capabilities
Default Value: 0
Value Format: decimal
Available enum values: Merge not supported, 0, Merge supported, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVFrameRelayMergeCapabilities']))
@property
def FrameRelaySessionParametersTLVNumberOfLabelRangeComponents(self):
"""
Display Name: Number of label range components
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVNumberOfLabelRangeComponents']))
@property
def FrameRelaySessionParametersTLVDBit(self):
"""
Display Name: D bit
Default Value: 0
Value Format: decimal
Available enum values: Bidirectional VC capability, 0, Unidirectional VC capability, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVDBit']))
@property
def FrameRelaySessionParametersTLVReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelaySessionParametersTLVReserved']))
@property
def FrameRelayLabelRangeComponentReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelRangeComponentReserved']))
@property
def FrameRelayLabelRangeComponentDlciLength(self):
"""
Display Name: DLCI length
Default Value: 0
Value Format: decimal
Available enum values: 10 bits, 0, 23 bits, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelRangeComponentDlciLength']))
@property
def FrameRelayLabelRangeComponentMinimumDLCI(self):
"""
Display Name: Minimum DLCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelRangeComponentMinimumDLCI']))
@property
def FramerelaysessionparameterstlvFrameRelayLabelRangeComponentReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FramerelaysessionparameterstlvFrameRelayLabelRangeComponentReserved']))
@property
def FrameRelayLabelRangeComponentMaximumDLCI(self):
"""
Display Name: Maximum DLCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelRangeComponentMaximumDLCI']))
@property
def P2mpCapabilityParametersTLVUBit(self):
"""
Display Name: U bit
Default Value: 1
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVUBit']))
@property
def P2mpCapabilityParametersTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVFBit']))
@property
def P2mpCapabilityParametersTLVTclP2mpCapabilityParameter(self):
"""
Display Name: P2MP Capability Parameter
Default Value: 0x0508
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVTclP2mpCapabilityParameter']))
@property
def P2mpCapabilityParametersTLVTclP2mpCapabilityParameterLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVTclP2mpCapabilityParameterLength']))
@property
def P2mpCapabilityParametersTLVSBit(self):
"""
Display Name: S bit
Default Value: 1
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVSBit']))
@property
def P2mpCapabilityParametersTLVTclP2mpCapabilityParameterReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['P2mpCapabilityParametersTLVTclP2mpCapabilityParameterReserved']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
415405
|
from unittest import TestCase
from nose.tools import *
import os
import datetime as dt
import mock
from StringIO import StringIO
from phillyleg.management.scraper_wrappers import PhillyLegistarSiteWrapper
from phillyleg.models import *
class Test_LegFile_uniqueWords:
@istest
def FindsUniqueWordsCorrectly(self):
legfile = LegFile()
legfile.title = "Word1 word2 hyphen-word1 word1. Word2 hyphen-word2... Word3..."
words = legfile.unique_words()
assert_equal(words, set(['word1', 'word2', 'word3', 'hyphen-word1', 'hyphen-word2']))
class Test__LegFile_mentionedLegfiles:
def setup(self):
LegFile.objects.all().delete()
@istest
def FindsMentionedLegfilesCorrectly(self):
l123456 = LegFile(id='123456', key=1)
l123456.save()
l123456a = LegFile(id='123456-A', key=2)
l123456a.save()
l123456aa = LegFile(id='123456-AA', key=3)
l123456aa.save()
legfile = LegFile(title='This legfile mentions files 123456, 123457, and 123456-AA.')
files = set(legfile.mentioned_legfiles())
assert_equal(files, set([l123456, l123456aa]))
class Test__LegFile_lastActionDate:
def setup(self):
LegFile.objects.all().delete()
@istest
def is_none_when_legfile_has_no_actions (self):
legfile = LegFile()
assert_is_none(legfile.last_action_date)
@istest
def is_last_action_dateTaken (self):
legfile = LegFile(id='123456', key=1)
legfile.save()
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 11)))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 19)))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 12)))
assert_equal(legfile.last_action_date, dt.date(2011, 8, 19))
class Test__LegFile_timeline:
def setup(self):
LegFile.objects.all().delete()
@istest
def is_empty_when_legfile_has_no_actions (self):
legfile = LegFile(id='123456', key=1)
legfile.save()
assert_equal(len(legfile.timeline), 0)
@istest
def collects_actions_by_date_taken (self):
legfile = LegFile(id='123456', key=1)
legfile.save()
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 11), description='a'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 11), description='b'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 12), description='c'))
legfile.save()
assert_equal(len(legfile.timeline), 2)
assert_equal(len(legfile.timeline[dt.date(2011, 8, 11)]), 2)
assert_equal(len(legfile.timeline[dt.date(2011, 8, 12)]), 1)
@istest
def always_iterates_through_keys_sorted (self):
legfile = LegFile(id='123456', key=1)
legfile.save()
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 11), description='a'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 12), description='b'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 10), description='c'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 15), description='d'))
legfile.actions.add(LegAction(date_taken=dt.date(2011, 8, 13), description='e'))
legfile.save()
dates = [date for date in legfile.timeline]
assert_equal(dates, [dt.date(2011, 8, 10),
dt.date(2011, 8, 11),
dt.date(2011, 8, 12),
dt.date(2011, 8, 13),
dt.date(2011, 8, 15)])
class Test__LegFile_refresh:
def setUp(self):
self.legfiles_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testlegfiles')
class PhillyLegistarFileWrapper (PhillyLegistarSiteWrapper):
def urlopen(wrapper, *args, **kwargs):
return self.open_legfile(73)
self.PhillyLegistarFileWrapper = PhillyLegistarFileWrapper
def open_legfile(self, key):
return open(os.path.join(self.legfiles_dir, 'key%s.html' % key))
@istest
def updates_file_if_data_is_stale(self):
last_update_time = dt.datetime.now() - dt.timedelta(days=1, hours=1)
legfile = LegFile(id='123456', key=73, updated_datetime=last_update_time, title='abcde')
legfile.get_data_source = lambda: self.PhillyLegistarFileWrapper()
legfile.refresh()
assert_equal(legfile.title, '''Providing for the approval by the Council of the City of Philadelphia of a Revised Five Year Financial Plan for the City of Philadelphia covering Fiscal Years 2001 through 2005, and incorporating proposed changes with respect to Fiscal Year 2000, which is to be submitted by the Mayor to the Pennsylvania Intergovernmental Cooperation Authority (the "Authority") pursuant to the Intergovernmental Cooperation Agreement, authorized by an ordinance of this Council approved by the Mayor on January 3, 1992 (Bill No. 1563-A), by and between the City and the Authority.''')
@istest
    def does_not_update_file_if_data_is_fresh(self):
last_update_time = dt.datetime.now() - dt.timedelta(hours=23)
legfile = LegFile(id='123456', key=73, updated_datetime=last_update_time, title='abcde')
legfile.get_data_source = lambda: self.PhillyLegistarFileWrapper()
legfile.refresh()
assert_equal(legfile.title, '''abcde''')
|
415453
|
import httplib2
import logging
import os
import shutil
import socket
import sys
import urllib
import zipfile
def unzip(source_filename, dest_dir):
with zipfile.ZipFile(source_filename) as zf:
zf.extractall(dest_dir)
def create_config_files(directory):
"""
    Initialize a directory of IPVanish OpenVPN configs for the vpn walker.
    :param directory: path in which the per-server .ovpn files will be created
:return:
"""
# Some constant strings
config_zip_url = "http://www.ipvanish.com/software/configs/configs.zip"
if not os.path.exists(directory):
os.makedirs(directory)
logging.info("Starting to download IPVanish config file zip")
url_opener = urllib.URLopener()
zip_path = os.path.join(directory, '../configs.zip')
unzip_path = os.path.join(directory, '../unzipped')
if not os.path.exists(unzip_path):
os.makedirs(unzip_path)
url_opener.retrieve(config_zip_url, zip_path)
logging.info("Extracting zip file")
unzip(zip_path, unzip_path)
# remove zip file
os.remove(zip_path)
    # copy ca cert to root path
shutil.copyfile(os.path.join(unzip_path, 'ca.ipvanish.com.crt'),
os.path.join(directory, '../ca.ipvanish.com.crt'))
# move all config files to /vpns
server_country = {}
for filename in os.listdir(unzip_path):
if filename.endswith('.ovpn'):
country = filename.split('-')[1]
file_path = os.path.join(unzip_path, filename)
lines = [line.rstrip('\n') for line in open(file_path)]
# get ip address for this vpn
ip = ""
for line in lines:
if line.startswith('remote'):
hostname = line.split(' ')[1]
ip = socket.gethostbyname(hostname)
break
if len(ip) > 0:
new_path = os.path.join(directory, ip + '.ovpn')
shutil.copyfile(file_path, new_path)
server_country[ip] = country
else:
                logging.warn("Unable to resolve hostname; removing %s" % filename)
os.remove(file_path)
with open(os.path.join(directory, 'servers.txt'), 'w') as f:
for ip in server_country:
f.write('|'.join([ip, server_country[ip]]) + '\n')
# remove extracted folder
shutil.rmtree(unzip_path)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage {0} <directory to create VPNs in>".format(sys.argv[0])
sys.exit(1)
create_config_files(sys.argv[1])
|
415479
|
from cryptography.x509.extensions import (
AuthorityKeyIdentifier,
BasicConstraints,
KeyUsage,
GeneralNames,
)
from cryptography.x509.general_name import GeneralName
import six
def _gn_patched__init__(self, general_names):
general_names = list(general_names)
if not all(isinstance(x, GeneralName) for x in general_names):
raise TypeError(
"Every item in the general_names list must be an "
"object conforming to the GeneralName interface"
)
self._general_names = general_names
def _bc_patched__init__(self, ca, path_length):
self._ca = ca
self._path_length = path_length
def _ku_patched__init__(
self,
digital_signature,
content_commitment,
key_encipherment,
data_encipherment,
key_agreement,
key_cert_sign,
crl_sign,
encipher_only,
decipher_only,
):
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
def _aki_patched__init__(
self, key_identifier, authority_cert_issuer, authority_cert_serial_number
):
if authority_cert_issuer is not None:
authority_cert_issuer = list(authority_cert_issuer)
if not all(isinstance(x, GeneralName) for x in authority_cert_issuer):
raise TypeError(
"authority_cert_issuer must be a list of GeneralName "
"objects"
)
if authority_cert_serial_number is not None and not isinstance(
authority_cert_serial_number, six.integer_types
):
raise TypeError("authority_cert_serial_number must be an integer")
self._key_identifier = key_identifier
self._authority_cert_issuer = authority_cert_issuer
self._authority_cert_serial_number = authority_cert_serial_number
def patch_basicconstraints():
BasicConstraints.__init__ = _bc_patched__init__
def patch_keyusage():
KeyUsage.__init__ = _ku_patched__init__
def patch_authoritykeyidentifier():
AuthorityKeyIdentifier.__init__ = _aki_patched__init__
def patch_generalnames():
GeneralNames.__init__ = _gn_patched__init__
def patch_all():
patch_basicconstraints()
patch_keyusage()
patch_authoritykeyidentifier()
patch_generalnames()
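# Sketch (illustrative) of what the patches change: after patch_all(), the
# extension constructors skip upstream validation, so values that the stock
# cryptography classes would reject can be set directly.
if __name__ == "__main__":
    patch_all()
    bc = BasicConstraints(ca=False, path_length=3)  # stock __init__ rejects this
    print(bc.ca, bc.path_length)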
|
415538
|
import os, json, zipfile, io, urllib.request, shutil, glob, subprocess, sys, time, importlib, threading, tempfile, traceback
from urllib.parse import urlparse
from Alpha_SharedFunctions import get_set_root, download, check_cuda, check_cudnn, get_gpu_vendor, compact, get_cuda_ver, create_vsgan_folder
mxurl = "https://api.github.com/repos/kice/vs_mxnet/releases/latest"
pipmodules = ["pyperclip", "Pillow", "pySmartDL", "numpy", "opencv-python", "onnxruntime", "block", "scipy"]
modelurl = "https://github.com/WolframRhodium/Super-Resolution-Zoo/trunk"
svnurlurl = "https://raw.githubusercontent.com/AlphaAtlas/VapourSynth-Super-Resolution-Helper/master/URLs/SVN_URL"
cpumxmodule = "mxnet"
torchstuff = ["torch===1.3.0+cpu", "torchvision===0.4.1+cpu", "-f", r"""https://download.pytorch.org/whl/torch_stable.html"""]
ffurl = "https://ffmpeg.zeranoe.com/builds/win64/static/ffmpeg-latest-win64-static.zip"
def install_mxnet_cpu():
#Installs the appropriate version of mxnet with pip
root = get_set_root()
subprocess.run([sys.executable, "-m", "pip", "install", cpumxmodule, "--upgrade"], shell=True, check=True)
#TODO: Use pySmartDL JSON fetcher instead
def get_latest_release_github(url):
#tries to automatically get the latest github release from a repo
attempt = 0
    urlstuff = None
while attempt <= 3:
attempt = attempt + 1
with urllib.request.urlopen(url) as urlstuff:
data = json.loads(urlstuff.read().decode())
if urlstuff.getcode() == 200:
return data['assets'][0]["browser_download_url"]
print("Error fetching Github release.")
time.sleep(1)
print("Trying again...")
    raise Exception("Failed fetching GitHub release with response code " + str(urlstuff.getcode()) + " from " + url)
def download_mx_plugin():
#Downloads and moves kice's MXNet plugin. Automatically gets newest release.
root = get_set_root()
print("Downloading MXNet Plugin...")
print(" ")
rurl = get_latest_release_github(mxurl)
d = download(rurl)
if os.path.isdir("MXNet"):
shutil.rmtree("MXNet")
with tempfile.TemporaryDirectory() as t:
zipfile.ZipFile(d).extractall(path=t)
        mxfile = glob.glob(os.path.join(t, "**", "vs_mxnet.dll"), recursive=True)
os.mkdir("MXNet")
shutil.move(src=mxfile[0], dst="MXNet")
def download_ffmpeg():
root = get_set_root()
ffmpegdir = os.path.join(root, "../bin/ffmpeg.exe")
if not os.path.isfile(ffmpegdir):
print("Downloading ffmpeg...")
print(" ")
d = download(ffurl)
zipexedir = os.path.join(root, "../bin/7za.exe")
with tempfile.TemporaryDirectory() as t:
subprocess.run([zipexedir, "x", d, "-o" + t], shell=True, check=True)
            ffmpegfile = glob.glob(os.path.join(t, "**", "ffmpeg.exe"), recursive=True)
shutil.move(src=ffmpegfile[0], dst=ffmpegdir)
def install_svn():
#Downloads SVN, used for selectively pulling giant repos
root = get_set_root()
if not os.path.isfile(os.path.join(root, "../bin/PortableSub/bin/svn.exe")):
#get URL for SVN download
print("Fetching SVN URL...")
svnurl = str(urllib.request.urlopen(svnurlurl).read().decode()).rstrip()
print("Downloading SVN archive...")
svnarchivedir = download(svnurl)
s = subprocess.run([os.path.join(root, "../bin/7za.exe"), "x", svnarchivedir, "-o" + os.path.join(root, "../bin/PortableSub"), "-aoa"], check=True, shell=True)
def install_neural_networks():
#Sets up Neural Networks folder
#Another script will use SVN to selectively pull stuff as needed.
root = get_set_root()
if os.path.isdir(os.path.join(root, "../NeuralNetworks")):
s = subprocess.run([os.path.join(root, "../bin/PortableSub/bin/svn.exe"), "update", "--set-depth", "immediates", os.path.join(root, "../NeuralNetworks")], check=True, shell=True)
else:
s = subprocess.run([os.path.join(root, "../bin/PortableSub/bin/svn.exe"), "checkout", "--depth", "immediates", modelurl, os.path.join(root, "../NeuralNetworks")], check=True, shell=True)
os.chdir(root)
def install_python_modules():
#Pip!
root = get_set_root()
subprocess.run([sys.executable, "-m", "pip", "install"] + pipmodules + ["--upgrade"], shell=True, check=True)
def install_vsgan_cpu():
root = get_set_root()
if not get_gpu_vendor()[0]:
subprocess.run([sys.executable, "-m", "pip", "install"] + torchstuff + ["--upgrade"], shell=True, check=True)
subprocess.run([sys.executable, "-m", "pip", "install", "https://github.com/AlphaAtlas/VSGAN/tarball/master", "--upgrade"], shell=True, check=True)
subprocess.run([sys.executable, "-m", "pip", "install", "torch"], shell=True, check=True)
create_vsgan_folder()
#TODO: Thread Updates
if __name__ == "__main__":
try:
root = get_set_root()
if not get_gpu_vendor()[0]:
print("No CUDA GPU detected! Exiting...")
sys.exit()
install_python_modules()
import pySmartDL
install_svn()
install_neural_networks()
download_mx_plugin()
download_ffmpeg()
#install_mxnet_cpu() #TODO: Get CPU version of MXNet working, or remove it.
install_vsgan_cpu()
root = get_set_root()
compact(os.path.join(root, ".."))
if get_gpu_vendor()[0]:
print("Would you like to install CUDA and cuDNN?")
i = input("Y/N: ")
if i.lower() == "y":
                #This script needs to relaunch itself for admin privileges
#Hence it needs to be called as a subprocess
#cudascriptpath = os.path.normpath(os.path.join(root, "../Scripts/Alpha_InstallCUDA.py"))
#subprocess.Popen([sys.executable, cudascriptpath], creationflags=subprocess.CREATE_NEW_CONSOLE, shell=True, cwd=os.path.normpath(os.path.join(root, "../Scripts")))
#As it turns out, the script doesn't like popen.
os.chdir(os.path.normpath(os.path.join(root, "../Scripts")))
os.system(r"""..\VapourSynth64\python.exe Alpha_InstallCUDA.py""")
except Exception as e:
#SHOW ME WHAT YOU GOT
print(" ")
traceback.print_exc()
input("Press ENTER to continue...")
|
415552
|
import gym
import numpy as np
from gym.spaces import Box, Discrete
class DummyAtari(gym.Env):
def __init__(self, grayscale=True, squeeze=False):
if grayscale:
shape = (84, 84) if squeeze else (84, 84, 1)
else:
shape = (84, 84, 3)
self.observation_space = Box(
low=np.zeros(shape),
high=np.zeros(shape) + 255,
shape=shape,
dtype=np.uint8,
)
self.action_space = Discrete(4)
self.t = 1
    def step(self, action):
        observation = self.observation_space.sample()
        reward = np.random.random()
        self.t += 1  # advance the step counter so the done flag can fire every 80 steps
        return observation, reward, self.t % 80 == 0, {}
def reset(self):
self.t = 1
return self.observation_space.sample()
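# Usage sketch (illustrative, not part of the original module): run a few
# steps to sanity-check observation shapes and the periodic done flag.
if __name__ == "__main__":
    env = DummyAtari(grayscale=True)
    obs = env.reset()
    assert obs.shape == (84, 84, 1)
    for _ in range(3):
        obs, reward, done, info = env.step(env.action_space.sample())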
|
415553
|
import json
import requests
from requests.structures import CaseInsensitiveDict
from office365.runtime.client_request import ClientRequest
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
class ODataV4BatchRequest(ClientRequest):
""" JSON batch request """
def __init__(self, context):
super(ODataV4BatchRequest, self).__init__(context)
def build_request(self, query):
"""
:type query: office365.runtime.queries.client_query.ClientQuery
"""
url = "{0}/$batch".format(self.context.service_root_url())
request = RequestOptions(url)
request.method = HttpMethod.Post
request.ensure_header('Content-Type', "application/json")
request.ensure_header('Accept', "application/json")
request.data = self._prepare_payload()
return request
def process_response(self, batch_response):
"""Parses an HTTP response.
:type batch_response: requests.Response
"""
for query_id, resp in self._extract_response(batch_response):
resp.raise_for_status()
sub_qry = self.current_query.ordered_queries[query_id]
self.context.pending_request().add_query(sub_qry)
self.context.pending_request().process_response(resp)
self.context.pending_request().clear()
def _extract_response(self, batch_response):
"""
type batch_response: requests.Response
"""
json_responses = batch_response.json()
for json_resp in json_responses["responses"]:
resp = requests.Response()
resp.status_code = int(json_resp['status'])
resp.headers = CaseInsensitiveDict(json_resp['headers'])
resp._content = json.dumps(json_resp["body"]).encode('utf-8')
yield int(json_resp["id"]), resp
def _prepare_payload(self):
"""
Serializes a batch request body.
"""
requests_json = []
for qry in self.current_query.queries:
request_id = str(len(requests_json))
request = qry.build_request()
requests_json.append(self._normalize_request(request, request_id))
return {"requests": requests_json}
def _normalize_request(self, request, _id, depends_on=None):
"""
:type request: RequestOptions
:type _id: str
:type depends_on: list[str] or None
"""
allowed_props = ["id", "method", "headers", "url", "body"]
request_json = dict((k, v) for k, v in vars(request).items() if v is not None and k in allowed_props)
request_json["id"] = _id
if depends_on is not None:
request_json["dependsOn"] = depends_on
request_json["url"] = request_json["url"].replace(self.context.service_root_url(), "")
return request_json
@property
def current_query(self):
"""
:rtype: office365.runtime.queries.batch_query.BatchQuery
"""
return self._current_query
|
415584
|
import argparse
import itertools
from util import numeric
def parse_cli():
parser = argparse.ArgumentParser(description='Prints the Cartesian product of a group of lists.')
parser.add_argument('-l', '--list', nargs='+', action='append',
help='Start of list', required=True, type=numeric)
args = parser.parse_args()
prod = itertools.product(*args.list)
for items in prod:
print(','.join(str(s) for s in items))
if __name__ == "__main__":
parse_cli()
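# Example invocation (file name is illustrative; `numeric` from util is
# assumed to parse each token into an int or float):
#   python product.py -l 1 2 -l 3 4
# prints the Cartesian product:
#   1,3
#   1,4
#   2,3
#   2,4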
|
415591
|
import bpy
import os
import numpy as np
from PIL import Image
from imageio import imread
from render.camera import set_camera
from utils.geometry import convert_euler
def get_intro_camera(rendering, n):
nn = int(n / 8)
phi_s = np.concatenate([
np.linspace(rendering.camera_phi, rendering.camera_phi + 10, nn),
np.linspace(rendering.camera_phi + 10, rendering.camera_phi, nn),
np.linspace(rendering.camera_phi, rendering.camera_phi - 10, nn),
np.linspace(rendering.camera_phi - 10, rendering.camera_phi, nn),
np.linspace(rendering.camera_phi, rendering.camera_phi, nn * 4)
])
theta_s = np.concatenate([
np.linspace(rendering.camera_theta, rendering.camera_theta, nn * 4),
np.linspace(rendering.camera_theta, rendering.camera_theta - 10, nn),
np.linspace(rendering.camera_theta - 10, rendering.camera_theta, nn),
np.linspace(rendering.camera_theta, rendering.camera_theta + 10, nn),
np.linspace(rendering.camera_theta + 10, rendering.camera_theta, nn)
])
return phi_s, theta_s
def render_intro(om, rendering, m):
render_args = bpy.context.scene.render
time_step = 1 / rendering.fps
phi_s, theta_s = get_intro_camera(rendering, int(rendering.intro_time * rendering.fps))
for n in range(int(rendering.intro_time * rendering.fps)):
if "ABORT" in globals():
if globals()["ABORT"]:
print("Aborted")
raise KeyboardInterrupt
set_camera(rendering.camera_rho, theta_s[n], phi_s[n], look_at=rendering.camera_look_at)
# objects are before occluders
for i, obj_motion in enumerate(m["objects"] + m["occluders"]):
loc = obj_motion['location']
euler = convert_euler(obj_motion['orientation'])
om.set_position(om.obj_names[i], loc, euler)
i = len(m["objects"]) + len(m["occluders"])
for desk_motion in m["desks"]:
for obj_motion in desk_motion:
loc = obj_motion['location']
euler = convert_euler(obj_motion['orientation'])
om.set_position(om.obj_names[i], loc, euler)
i += 1
image_path = os.path.join(rendering.output_dir, 'imgs',
'%s_-%05.2fs.png' % (rendering.image_prefix, n * time_step))
render_args.filepath = image_path
bpy.ops.render.render(write_still=True)
set_camera(rendering.camera_rho, rendering.camera_theta, rendering.camera_phi,
look_at=rendering.camera_look_at)
|
415604
|
from __future__ import absolute_import, division, print_function, unicode_literals
try:
from cechomesh import get_device_names
except ImportError:
    get_device_names = lambda x: ('(none)',)  # fallback must return an iterable of names, not a tuple containing a lambda
def info():
return {
'input': ', '.join(get_device_names(True)),
'output': ', '.join(get_device_names(False)),
}
|
415663
|
import pytest
from aiohttp import web
from aioapi.middlewares import validation_error_middleware
@pytest.fixture
def client_for(aiohttp_client):
async def _client_for(*, routes):
app = web.Application()
app.add_routes(routes)
app.middlewares.append(validation_error_middleware)
return await aiohttp_client(app)
return _client_for
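# Hypothetical test (not part of the original module) showing how the fixture
# is meant to be used: register routes, build the client, hit an endpoint.
async def test_client_for_smoke(client_for):
    routes = web.RouteTableDef()

    @routes.get("/ping")
    async def ping(request):
        return web.json_response({"ok": True})

    client = await client_for(routes=routes)
    resp = await client.get("/ping")
    assert resp.status == 200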
|
415705
|
from __future__ import print_function, division
import json
import h5py
import numpy as np
import sys
import csv
import torch
import base64
import copy
from torch.utils.data import Dataset, DataLoader
from nltk.tokenize import word_tokenize
import pickle
sys.path.insert(0,'./data')
from build_vocab_coco import Vocabulary
import nltk
from nltk.tokenize import word_tokenize as tokenize
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import inflect
inflect = inflect.engine() # for handling plural forms in the captions for pseudo-supervision
csv.field_size_limit(sys.maxsize)
class COCO_Dataset(Dataset):
"""COCO dataset."""
def __init__(self, text_data_path,image_data_path, vocab_path,coco_class, maxlength=20):
"""
Args:
text_data_path (string): Path to the json file with captions or annotations.
image_data_path (string): tsv file with image features
vocab_path (string): Path to the vocab pickle file.
coco_class (coco_class): list of coco classes removed from captions to get contexual descriptions
"""
"""
Returns:
captions: The ground-truth captions
bottom_up_features: Features from bounding boxes extracted from Faster-RCNN [4]
bottom_up_classes: Classes from bounding boxes corresponding to bottom_up_features
x_m_caps: Contextual descriptions after removing COCO classes from captions
caption_length: Caption lengths
x_o_caps: Object descriptions of COCO classes from captions
image_idx: image-id in the annotations file
"""
self.image_data_path = image_data_path
self.vocab = pickle.load(open(str(vocab_path),'rb'))
self.word2idx = self.vocab.word2idx
        self.idx2word = self.vocab.idx2word
self.maxlength = maxlength
coco_class_all = []
coco_class_name = open(coco_class, 'r')
for line in coco_class_name:
coco_class = line.rstrip("\n").split(', ')
coco_class_all.append(coco_class)
self.wtod = {}
for i in range(len(coco_class_all)):
for w in coco_class_all[i]:
self.wtod[w] = i
self.wtol = {}
lemmatizer = WordNetLemmatizer()
for w in self.word2idx:
tok = tokenize(w)[0]
self.wtol[w] = lemmatizer.lemmatize(tok)
self.dtoi = {w:i+1 for i,w in enumerate(self.wtod.keys())}
self.imagefeatures = {}
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
feats_count = 0
if isinstance(self.image_data_path,list):
for _image_data_path in self.image_data_path:
with open(_image_data_path, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['height'] = int(item['image_h'])
item['width'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
item[field] = np.frombuffer(base64.b64decode(item[field]), dtype=np.float32).reshape((item['num_boxes'],-1))
self.imagefeatures[feats_count] = item #item['image_id']
feats_count += 1
self.inv_annotations = {}
self.image_filenames = {}
if isinstance(text_data_path,list):
for _text_data_path in text_data_path:
annotations = json.load(open(_text_data_path))
for c in annotations["annotations"]:
if str(c["image_id"]) in self.inv_annotations.keys():
self.inv_annotations[str(c["image_id"])].append(c["caption"])
else:
self.inv_annotations[str(c["image_id"])] = []
self.inv_annotations[str(c["image_id"])].append(c["caption"])
for c in annotations["images"]:
if str(c["id"]) not in self.image_filenames.keys():
self.image_filenames[str(c["id"])] = c["file_name"]
self.blacklist_classes = {
"auto part":'vehicle', "bathroom accessory":'furniture', "bicycle wheel":'bicycle', "boy":'boy',
"door handle":'door', "fashion accessory":'clothing', "footwear":'shoes', "human arm":'person',
"human beard":'person', "human body":'person', "human ear":'person', "human eye":'person', "human face":'person', "human foot":'person',
"human hair":'person', "human hand":'person', "human head":'person', "human leg":'person', "human mouth":'person', "human nose":'person',
"land vehicle":'vehicle', "plumbing fixture":'toilet',
"seat belt":'vehicle', "vehicle registration plate":'vehicle',
"face":'person',"hair":'person',"head":'person',"ear":'person',"tail":'giraffe',"neck":'giraffe',
"hat":'person',"helmet":'person',"nose":'person',"tire":'bus',"tour":'bus',"hand":'person',"shadow":'person'
}
self.punctuations = [
"''", "'", "``", "`", "(", ")", "{", "}",
".", "?", "!", ",", ":", "-", "--", "...", ";"
]
self.vg_classes_to_vocab = {}
self.vg_classes_to_vocab[0] = 0
self.vg_classes_to_vocab_p = {}
self.vg_classes_to_vocab_p[0] = 0
classes = ['__background__']
vg_obj_counter = 1
with open('./data/visual_genome_classes.txt') as f:
for _object in f.readlines():
#classes.append(object.split(',')[0].lower().strip())
_object = _object.split(',')[0].lower().strip()
if _object in self.word2idx:
if _object in self.blacklist_classes:
self.vg_classes_to_vocab[vg_obj_counter] = self.word2idx[self.blacklist_classes[_object]]
self.vg_classes_to_vocab_p[vg_obj_counter] = self.word2idx[self.blacklist_classes[_object]]
else:
self.vg_classes_to_vocab[vg_obj_counter] = self.word2idx[_object]
if inflect.singular_noun( _object ) == False:
_object_p = inflect.plural(_object)
else:
_object_p = _object
if _object_p in self.word2idx:
self.vg_classes_to_vocab_p[vg_obj_counter] = self.word2idx[_object_p]
else:
self.vg_classes_to_vocab_p[vg_obj_counter] = self.word2idx[_object]
else:
self.vg_classes_to_vocab[vg_obj_counter] = 0
self.vg_classes_to_vocab_p[vg_obj_counter] = 0
vg_obj_counter += 1
def get_det_word(self,captions, ngram=2):
# get the present category. taken from NBT []
indicator = []
stem_caption = []
for s in captions:
tmp = []
for w in s:
if w in self.wtol.keys():
tmp.append(self.wtol[w])
stem_caption.append(tmp)
indicator.append([(-1, -1, -1)]*len(s)) # category class, binary class, fine-grain class.
ngram_indicator = {i+1:copy.deepcopy(indicator) for i in range(ngram)}
# get the 2 gram of the caption.
for i, s in enumerate(stem_caption):
for n in range(ngram,0,-1):
#print('stem_caption ', s)
for j in range(len(s)-n+1):
ng = ' '.join(s[j:j+n])
#print('ng ', ng)
# if the n-gram exist in word_to_detection dictionary.
                    if ng in self.wtod and indicator[i][j][0] == -1: #and self.wtod[ng] in pcats: # make sure a larger n-gram is not overwritten by a smaller one.
bn = (ng != ' '.join(captions[i][j:j+n])) + 1
fg = self.dtoi[ng]
#print('fg ',fg)
ngram_indicator[n][i][j] = (int(self.wtod[ng]), int(bn), int(fg))
indicator[i][j:j+n] = [(int(self.wtod[ng]), int(bn), int(fg))] * n
#sys.exit(0)
return ngram_indicator
def get_caption_seq(self,captions):
cap_seq = np.zeros([len(captions), self.maxlength])
masked_cap_seq = np.zeros([len(captions), self.maxlength])
object_cap_seq = np.zeros([len(captions), self.maxlength])
det_indicator = self.get_det_word(captions, ngram=2)
for i, caption in enumerate(captions):
j = 0
k = 0
o = 0
while j < len(caption) and j < self.maxlength:
is_det = False
for n in range(2, 0, -1):
if det_indicator[n][i][j][0] != -1:
cap_seq[i,k] = int(self.word2idx[caption[j]] if caption[j] in self.word2idx.keys() else 0)
if inflect.singular_noun( caption[j] ) == False:
masked_cap_seq[i,k] = 4 #placeholder in vocab for singular visual genome class object
else:
masked_cap_seq[i,k] = 3 #placeholder in vocab for plural visual genome class object
object_cap_seq[i,o] = int(self.word2idx[caption[j]] if caption[j] in self.word2idx.keys() else 0)
is_det = True
j += n # skip the ngram.
o += 1
break
if is_det == False:
cap_seq[i,k] = int(self.word2idx[caption[j]] if caption[j] in self.word2idx.keys() else 0)
masked_cap_seq[i,k] = cap_seq[i,k]
j += 1
k += 1
return cap_seq, masked_cap_seq, object_cap_seq
def __len__(self):
return len(self.imagefeatures)
    def close(self):
        # imagefeatures is a plain dict (not a file handle); clear it to free the cached features
        self.imagefeatures.clear()
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
captions = []
masked_images = []
x_m_caps = []
x_o_caps = []
image_idx = self.imagefeatures[idx]["image_id"]
if str(image_idx) in self.image_filenames.keys():
i=idx
else:
image_idx = self.imagefeatures[1]["image_id"]
i=1
bottom_up_features = []
bottom_up_classes = []
bottom_up_classes_p = []
total_boxes = 0
nms_boxes = []
nms_class_names = []
for j in range(self.imagefeatures[i]["num_boxes"]):
bottom_up_features.append( self.imagefeatures[i]["features"][j,:] )
bottom_up_features = np.array(bottom_up_features)
if bottom_up_features.shape[0] < 100:
bottom_up_features_pad = np.zeros((100 - bottom_up_features.shape[0], 2048))
bottom_up_features = np.concatenate([bottom_up_features,bottom_up_features_pad],axis=0)
caps = self.inv_annotations[str(image_idx)]
targets = []
caption_length = []
for c in caps:
caption_tokens = nltk.tokenize.word_tokenize(c.lower().strip())
caption_tokens = [ct for ct in caption_tokens if ct not in self.punctuations]
caption = []
caption.append('<start>')
caption.extend(caption_tokens)
caption = caption[0:(self.maxlength-1)]
caption.append('<end>')
targets.append(caption)
caption_length.append(len(caption))
gt_cap, mask_cap, obj_cap = self.get_caption_seq([targets[i] for i in range(5)]) #5 human annotations
caption_length = [caption_length[i] for i in range(5)]
captions.append(gt_cap)
x_m_caps.append(mask_cap)
x_o_caps.append(obj_cap)
return captions, x_m_caps, x_o_caps, bottom_up_features, np.array(caption_length), image_idx
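# Usage sketch (all paths are placeholders; requires the bottom-up feature
# tsv files, COCO caption json, vocab pickle, and class list on disk):
# dataset = COCO_Dataset(
#     text_data_path=["data/captions_train2014.json"],
#     image_data_path=["data/trainval_36.tsv"],
#     vocab_path="data/vocab.pkl",
#     coco_class="data/coco_classes.txt",
# )
# loader = DataLoader(dataset, batch_size=1, shuffle=True)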
|
415744
|
import sys
import json
import asyncio
from .core import Bot, run
from .utils import load_plugin
def main():
"""
CLI entrypoint for testing.
"""
_, config, *args = sys.argv
with open(config, 'r') as fd:
bot_config = json.load(fd)
bots = []
for config in bot_config:
params = config.get('params') or {}
daemons = config.get('daemons')
bot = Bot(config.get("key"), daemons=daemons, **params)
for plugin in config.get("plugins", []):
bot.listen(plugin)
bots.append(bot)
run(*bots)
if __name__ == '__main__':
main()
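# Example config file shape, inferred from the keys read above (token and
# plugin names are placeholders):
# [
#     {
#         "key": "BOT-TOKEN",
#         "params": {},
#         "daemons": [],
#         "plugins": ["my_plugins.echo"]
#     }
# ]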
|
415787
|
import os
import json
import yaml
import argparse
from easydict import EasyDict
from utils.dirs import create_dirs
def get_config_from_json(json_file):
"""
Get the config from a json file
Input:
- json_file: json configuration file
Return:
- config: namespace
- config_dict: dictionary
"""
# parse the configurations from the config json file provided
with open(json_file, 'r') as config_file:
config_dict = json.load(config_file)
    # convert the dictionary to a namespace using EasyDict
config = EasyDict(config_dict)
return config, config_dict
def get_config_from_yaml(yaml_file):
"""
Get the config from yaml file
Input:
- yaml_file: yaml configuration file
Return:
- config: namespace
- config_dict: dictionary
"""
with open(yaml_file) as fp:
        config_dict = yaml.safe_load(fp)
    # convert the dictionary to a namespace using EasyDict
config = EasyDict(config_dict)
return config, config_dict
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
metavar='C',
default='None',
help='The Configuration file')
argparser.add_argument(
'-s', '--seed',
default=100,
type=int,
help='The random seed')
args = argparser.parse_args()
return args
def get_config():
args = get_args()
config_file = args.config
random_seed = args.seed
if config_file.endswith('json'):
config, _ = get_config_from_json(config_file)
elif config_file.endswith('yaml'):
config, _ = get_config_from_yaml(config_file)
else:
raise Exception("Only .json and .yaml are supported!")
config.random_seed = random_seed
config.cache_dir = os.path.join("cache", '{}_{}'.format(config.exp_name, config.random_seed))
config.model_dir = os.path.join(config.cache_dir, 'models')
config.log_dir = os.path.join(config.cache_dir, 'logs')
config.img_dir = os.path.join(config.cache_dir, 'imgs')
# create the experiments dirs
create_dirs([config.cache_dir, config.model_dir,
config.log_dir, config.img_dir])
return config
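# Usage sketch (paths are illustrative):
#   python main.py -c configs/exp1.yaml -s 42
# With exp_name "exp1" in the config, this creates and returns
# cache/exp1_42 with models, logs and imgs subdirectories via create_dirs.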
|
415813
|
import graphene
from flask import abort
from flask_login import login_required, current_user, login_user
from graphene import relay
from graphene.contrib.sqlalchemy import SQLAlchemyNode, \
SQLAlchemyConnectionField
from graphene.core.types.custom_scalars import JSONString
from relask import Relask
from . import models
relask = Relask()
@relask.schema.register
class User(SQLAlchemyNode):
class Meta:
model = models.User
@login_required
def resolve_email(self, args, info):
if getattr(current_user, 'id', None) == self.instance.id:
return self.instance.email
else:
abort(403)
class Admin(relay.Node):
title = graphene.String()
users = SQLAlchemyConnectionField(User)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def resolve_title(self, args, info):
return 'Relask Admin'
@classmethod
def get_node(cls, id_, info):
return cls(id=id_)
@classmethod
def instance(cls):
return cls.get_node('admin', None)
class Viewer(relay.Node):
website = graphene.String()
currentUser = graphene.Field(User)
isAuthenticated = graphene.Boolean()
contact = graphene.Field(User)
admin = graphene.Field(Admin)
def resolve_website(self, args, info):
return 'http://decentfox.com'
def resolve_currentUser(self, args, info):
uid = current_user.get_id()
return User.get_node(uid) if uid else None
def resolve_isAuthenticated(self, args, info):
return current_user.is_authenticated
def resolve_contact(self, args, info):
return User.get_node(1)
def resolve_admin(self, args, info):
return Admin.instance()
@classmethod
def get_node(cls, id_, info):
return cls(id=id_)
@classmethod
def instance(cls):
return cls.get_node('viewer', None)
class LoginMutation(relay.ClientIDMutation):
class Input:
login = graphene.String()
password = graphene.String()
viewer = graphene.Field(Viewer)
errors = JSONString()
@classmethod
def mutate_and_get_payload(cls, args, info):
import time
import random
if random.random() > 0.5:
time.sleep(3)
user = models.db.session.query(models.User).filter(
models.User.login == args.get('login')).first()
if not user:
return cls(viewer=Viewer.instance(),
errors=dict(login='No such user!'))
elif user.password == args.get('password'):
login_user(user)
return cls(viewer=Viewer.instance())
else:
return cls(viewer=Viewer.instance(),
errors=dict(password='<PASSWORD>!'))
class Query(graphene.ObjectType):
node = relay.NodeField()
viewer = graphene.Field(Viewer)
def resolve_viewer(self, args, info):
return Viewer.instance()
class Mutations(graphene.ObjectType):
login = graphene.Field(LoginMutation)
relask.schema.query = Query
relask.schema.mutation = Mutations
|
415854
|
from distutils.core import setup
setup(name='HaPy-ffi',
version='0.1.3',
description='Haskell bindings for Python',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/sakana/HaPy',
py_modules=['HaPy'],
)
|
415861
|
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.test.utils import override_settings
from django_mail_admin.models import OutgoingEmail, STATUS, PRIORITY, EmailTemplate, Attachment, create_attachments, \
send_mail
from django_mail_admin.utils import (parse_emails,
parse_priority, split_emails)
from django_mail_admin.validators import validate_email_with_name, validate_comma_separated_emails
from django_mail_admin.mail import send
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
class UtilsTest(TestCase):
def test_mail_status(self):
"""
Check that send_mail assigns the right status field to Email instances
"""
send_mail('subject', 'message', '<EMAIL>', ['<EMAIL>'],
priority=PRIORITY.medium)
email = OutgoingEmail.objects.latest('id')
self.assertEqual(email.status, STATUS.queued)
        # Emails sent with "now" priority are sent right away
send_mail('subject', 'message', '<EMAIL>', ['<EMAIL>'],
priority=PRIORITY.now)
email = OutgoingEmail.objects.latest('id')
self.assertEqual(email.status, STATUS.sent)
def test_email_validator(self):
# These should validate
validate_email_with_name('<EMAIL>')
validate_email_with_name('<NAME> <<EMAIL>>')
OutgoingEmail.objects.create(to=['<EMAIL>'], from_email='Alice <<EMAIL>>',
subject='Test', message='Message', status=STATUS.sent)
# Should also support international domains
validate_email_with_name('<NAME> <<EMAIL>>')
# These should raise ValidationError
self.assertRaises(ValidationError, validate_email_with_name, 'invalid')
self.assertRaises(ValidationError, validate_email_with_name, 'Al <ab>')
def test_comma_separated_email_list_validator(self):
# These should validate
validate_comma_separated_emails(['<EMAIL>'])
validate_comma_separated_emails(
['<EMAIL>', '<EMAIL>', '<EMAIL>']
)
validate_comma_separated_emails(['<NAME> <<EMAIL>>'])
# Should also support international domains
validate_comma_separated_emails(['<EMAIL>'])
# These should raise ValidationError
self.assertRaises(ValidationError, validate_comma_separated_emails,
['<EMAIL>', 'invalid_mail', '<EMAIL>'])
def test_split_emails(self):
"""
        Check that split_emails correctly divides email lists for multiprocessing
"""
for i in range(225):
OutgoingEmail.objects.create(from_email='<EMAIL>', to=['<EMAIL>'])
expected_size = [57, 56, 56, 56]
email_list = split_emails(OutgoingEmail.objects.all(), 4)
self.assertEqual(expected_size, [len(emails) for emails in email_list])
def test_create_attachments(self):
attachments = create_attachments({
'attachment_file1.txt': ContentFile('content'),
'attachment_file2.txt': ContentFile('content'),
})
self.assertEqual(len(attachments), 2)
self.assertIsInstance(attachments[0], Attachment)
self.assertTrue(attachments[0].pk)
self.assertEqual(attachments[0].file.read(), b'content')
self.assertTrue(attachments[0].name.startswith('attachment_file'))
        self.assertEqual(attachments[0].mimetype, '')
def test_create_attachments_with_mimetype(self):
attachments = create_attachments({
'attachment_file1.txt': {
'file': ContentFile('content'),
'mimetype': 'text/plain'
},
'attachment_file2.jpg': {
'file': ContentFile('content'),
'mimetype': 'text/plain'
}
})
self.assertEqual(len(attachments), 2)
self.assertIsInstance(attachments[0], Attachment)
self.assertTrue(attachments[0].pk)
        self.assertEqual(attachments[0].file.read(), b'content')
self.assertTrue(attachments[0].name.startswith('attachment_file'))
        self.assertEqual(attachments[0].mimetype, 'text/plain')
def test_create_attachments_open_file(self):
attachments = create_attachments({
'attachment_file.py': __file__,
})
self.assertEqual(len(attachments), 1)
self.assertIsInstance(attachments[0], Attachment)
self.assertTrue(attachments[0].pk)
self.assertTrue(attachments[0].file.read())
        self.assertEqual(attachments[0].name, 'attachment_file.py')
        self.assertEqual(attachments[0].mimetype, '')
def test_parse_priority(self):
self.assertEqual(parse_priority('now'), PRIORITY.now)
self.assertEqual(parse_priority('high'), PRIORITY.high)
self.assertEqual(parse_priority('medium'), PRIORITY.medium)
self.assertEqual(parse_priority('low'), PRIORITY.low)
def test_parse_emails(self):
# Converts a single email to list of email
self.assertEqual(
parse_emails('<EMAIL>'),
['<EMAIL>']
)
# None is converted into an empty list
self.assertEqual(parse_emails(None), [])
# Raises ValidationError if email is invalid
self.assertRaises(
ValidationError,
parse_emails, 'invalid_email'
)
self.assertRaises(
ValidationError,
parse_emails, ['invalid_email', '<EMAIL>']
)
|
415883
|
import nltk
gram1 = nltk.data.load('grammars/large_grammars/atis.cfg')
sent = nltk.data.load('grammars/large_grammars/atis_sentences.txt')
sent = nltk.parse.util.extract_test_sentences(sent)
testingsent=sent[25]
sent=testingsent[0]
parser6 = nltk.parse.IncrementalBottomUpLeftCornerChartParser(gram1)
chart6 = parser6.chart_parse(sent)
print(chart6.num_edges())
print(len(list(chart6.parses(gram1.start()))))
|
415908
|
import json
import sys
from kurobako import solver
from kurobako.solver.optuna import OptunaSolverFactory
import optuna
optuna.logging.disable_default_handler()
def create_study(seed: int) -> optuna.Study:
    # Reference the unused argument so `flake8` does not flag it.
    seed
n_objectives = 2
directions = ["minimize"] * n_objectives
sampler_name = sys.argv[1]
# Sampler.
sampler_cls = getattr(
optuna.samplers,
sampler_name,
getattr(optuna.integration, sampler_name, None),
)
if sampler_cls is None:
raise ValueError("Unknown sampler: {}.".format(sampler_name))
sampler_kwargs = json.loads(sys.argv[2])
sampler = sampler_cls(**sampler_kwargs)
return optuna.create_study(
directions=directions,
sampler=sampler,
pruner=optuna.pruners.NopPruner(),
)
if __name__ == "__main__":
factory = OptunaSolverFactory(create_study)
runner = solver.SolverRunner(factory)
runner.run()
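# Example invocation (script name is illustrative; the sampler class and its
# JSON kwargs come from argv, as parsed above):
#   python mo_solver.py NSGAIISampler '{"seed": 1}'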
|
415923
|
value = 'some' #modify this line
if value == 'Y' or value == 'y':
print('yes')
elif value == 'N' or value == 'n':
print('no')
else:
print('error')
|
415978
|
import IECore
class cobReader( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"Op that reads a COB from disk.",
IECore.ObjectParameter(
name = "result",
description = "The Cortex Object read from disk.",
defaultValue = IECore.NullObject(),
type = IECore.TypeId.Object
)
)
self.parameters().addParameter(
IECore.PathParameter(
name = "filename",
description = "The path to the COB on disk.",
defaultValue = "",
)
)
def doOperation( self, args ) :
filename = args['filename'].value
obj = IECore.Reader.create(filename).read()
return obj
IECore.registerRunTimeTyped( cobReader )
|
416042
|
from django import forms
from django.contrib.auth.models import User
class EmailForm(forms.ModelForm):
class Meta:
model = User
fields = ["email"]
def __init__(self, *args, **kwargs):
super(EmailForm, self).__init__(*args, **kwargs)
self.fields["email"].required = True
|
416046
|
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.nn as nn
from parameterized import parameterized
from torch.testing._internal.common_fx2trt import AccTestCase
class TestChunkConverter(AccTestCase):
@parameterized.expand(
[
("chunk", 3, 1),
("chunk", 2000, 2),
("chunk", 3, -2),
]
)
def test_chunk(self, _, chunk, dim):
class Chunk(nn.Module):
def forward(self, x):
return x.chunk(chunk, dim)[0]
inputs = [torch.randn(3, 10, 20)]
self.run_test(
Chunk(),
inputs,
expected_ops={acc_ops.chunk},
)
|
416057
|
import cherrypy
import os
import tangelo
import tangelo.util
from tangelo.server import analyze_url
from tangelo.server import Content
import autobahn.websocket as ab_websocket
import autobahn.wamp as wamp
import twisted.internet.reactor
import subprocess
import threading
import ws4py
import sys
import time
vtkpython = None
weblauncher = None
def initialize():
global vtkpython
global weblauncher
# Get the module config.
config = tangelo.plugin_config()
# Raise an error if there's no vtkpython executable.
vtkpython = config.get("vtkpython", None)
if not vtkpython:
        msg = "No 'vtkpython' option specified in the plugin configuration"
tangelo.log_warning("VTKWEB", "[initialization] fatal error: %s" % (msg))
# Construct a run() function that will mask the restful API and just
# inform the caller about the configuration problem.
def run():
tangelo.http_status(400, "Bad Configuration")
return {"error": msg}
sys.modules[__name__].__dict__["run"] = run
return
vtkpython = tangelo.util.expandpath(vtkpython)
tangelo.log("VTKWEB", "[initialization] Using vtkpython executable %s" % (vtkpython))
# Use the "web launcher" included with the plugin.
weblauncher = os.path.realpath("%s/../include/vtkweb-launcher.py" % (os.path.dirname(__file__)))
# Initialize a table of VTKWeb processes.
if tangelo.plugin_store().get("processes") is None:
tangelo.plugin_store()["processes"] = {}
# Check to see if a reactor is running already.
if twisted.internet.reactor.running:
threads = [t for t in threading.enumerate() if t.name == "tangelo-vtkweb-plugin"]
if len(threads) > 0:
tangelo.log_warning("VTKWEB", "[initialization] A reactor started by a previous loading of this plugin is already running")
else:
tangelo.log_warning("VTKWEB", "[initialization] A reactor started by someone other than this plugin is already running")
else:
# Start the Twisted reactor, but in a separate thread so it doesn't
# block the CherryPy main loop. Mark the thread as "daemon" so that
# when Tangelo's main thread exits, the reactor thread will be killed
# immediately.
reactor = threading.Thread(
target=twisted.internet.reactor.run,
kwargs={"installSignalHandlers": False},
name="tangelo-vtkweb-plugin"
)
reactor.daemon = True
reactor.start()
tangelo.log_info("VTKWEB", "[initialization] Starting Twisted reactor")
initialize()
@tangelo.restful
def get(key=None):
processes = tangelo.plugin_store()["processes"]
# If no key was supplied, return list of running processes.
if key is None:
return processes.keys()
# Error for bad key.
if key not in processes:
tangelo.http_status(400, "No Such Process Key")
return {"error": "Requested key not in process table"}
# Retrieve the process entry.
rec = processes[key]
response = {"status": "complete",
"process": "running",
"port": rec["port"],
"stdout": rec["stdout"].readlines(),
"stderr": rec["stderr"].readlines()}
# Check the status of the process.
returncode = rec["process"].poll()
if returncode is not None:
# Since the process has ended, delete the process object.
del processes[key]
# Fill out the report response.
response["process"] = "terminated"
response["returncode"] = returncode
return response
@tangelo.restful
def post(*pargs, **query):
args = query.get("args", "")
timeout = float(query.get("timeout", 0))
processes = tangelo.plugin_store()["processes"]
if len(pargs) == 0:
tangelo.http_status(400, "Required Argument Missing")
return {"error": "No program path was specified"}
program_url = "/" + "/".join(pargs)
content = analyze_url(program_url).content
if content is None or content.type != Content.File:
tangelo.http_status(404, "Not Found")
return {"error": "Could not find a script at %s" % (program_url)}
    elif content.path is None:
        tangelo.http_status(403, "Restricted")
        return {"error": "The script at %s is access-restricted" % (program_url)}
program = content.path
# Check the user arguments.
userargs = args.split()
if "--port" in userargs:
tangelo.http_status(400, "Illegal Argument")
return {"error": "You may not specify '--port' among the arguments passed in 'args'"}
# Obtain an available port.
port = tangelo.util.get_free_port()
# Generate a unique key.
key = tangelo.util.generate_key(processes.keys())
# Detect http vs. https
scheme = "ws"
ssl_key = cherrypy.config.get("server.ssl_private_key")
ssl_cert = cherrypy.config.get("server.ssl_certificate")
# Generate command line.
cmdline = [vtkpython, weblauncher, program, "--port", str(port)] + userargs
if ssl_key and ssl_cert:
scheme = "wss"
cmdline.extend(["--sslKey", ssl_key, "--sslCert", ssl_cert])
# Launch the requested process.
tangelo.log_info("VTKWEB", "Starting process: %s" % (" ".join(cmdline)))
try:
process = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, IOError) as e:
tangelo.log_warning("VTKWEB", "Error: could not launch VTKWeb process")
return {"error": e.strerror}
# Capture the new process's stdout and stderr streams in
# non-blocking readers.
stdout = tangelo.util.NonBlockingReader(process.stdout)
stderr = tangelo.util.NonBlockingReader(process.stderr)
# Read from stdout to look for the signal that the process has
# started properly.
class FactoryStarted:
pass
class Failed:
pass
class Timeout:
pass
signal = "Starting factory"
if timeout <= 0:
timeout = 10
sleeptime = 0.5
wait = 0
saved_lines = []
try:
while True:
lines = stdout.readlines()
saved_lines += lines
for line in lines:
if line == "":
# This indicates that stdout has closed without
# starting the process.
raise Failed()
elif signal in line:
# This means that the server has started.
raise FactoryStarted()
# If neither failure nor success occurred in the last block
# of lines from stdout, either time out, or try again after
# a short delay.
if wait >= timeout:
raise Timeout()
wait += sleeptime
time.sleep(sleeptime)
except Timeout:
tangelo.http_status(524, "Timeout")
return {"error": "Process startup timed out"}
except Failed:
tangelo.http_status(500)
return {"error": "Process did not start up properly",
"stdout": saved_lines,
"stderr": stderr.readlines()}
except FactoryStarted:
stdout.pushlines(saved_lines)
# Create a websocket handler path dedicated to this process.
host = "localhost" if cherrypy.server.socket_host == "0.0.0.0" else cherrypy.server.socket_host
tangelo.websocket.mount(key, WebSocketRelay(host, port, key), "wamp")
# Log the new process in the process table, including non-blocking
# stdout and stderr readers.
processes[key] = {"port": port,
"process": process,
"stdout": stdout,
"stderr": stderr}
# Form the websocket URL from the hostname/port used in the
# request, and the newly generated key.
url = "%s://%s/ws/%s/ws" % (scheme, cherrypy.request.base.split("//")[1], key)
return {"key": key,
"url": url}
@tangelo.restful
def delete(key=None):
# TODO(choudhury): shut down a vtkweb process by key after a given timeout.
processes = tangelo.plugin_store()["processes"]
if key is None:
tangelo.http_status(400, "Required Argument Missing")
return {"error": "'key' argument is required"}
# Check for the key in the process table.
if key not in processes:
tangelo.http_status(400, "Key Not Found")
return {"error": "Key %s not in process table" % (key)}
# Terminate the process.
tangelo.log_info("VTKWEB", "Shutting down process %s" % (key))
proc = processes[key]
proc["process"].terminate()
proc["process"].wait()
tangelo.log_info("VTKWEB", "Process terminated")
# Remove the process entry from the table.
del processes[key]
return {"key": key}
def VTKWebSocketAB(url, relay):
class RegisteringWebSocketClientFactory(wamp.WampClientFactory):
def register(self, client):
self.client = client
class Protocol(wamp.WampClientProtocol):
def onOpen(self):
self.factory.register(self)
def onMessage(self, msg, is_binary):
relay.send(msg)
class Connection(threading.Thread):
def run(self):
self.factory = RegisteringWebSocketClientFactory(url)
self.factory.protocol = Protocol
twisted.internet.reactor.callFromThread(ab_websocket.connectWS,
self.factory)
def send(self, data):
twisted.internet.reactor.callFromThread(Protocol.sendMessage,
self.factory.client,
data)
c = Connection()
c.start()
return c
def WebSocketRelay(hostname, port, key):
class Class(ws4py.websocket.WebSocket):
def __init__(self, *pargs, **kwargs):
ws4py.websocket.WebSocket.__init__(self, *pargs, **kwargs)
scheme = "ws"
if cherrypy.config.get("server.ssl_private_key"):
scheme = "wss"
url = "%s://%s:%d/ws" % (scheme, hostname, port)
tangelo.log_info(
"VTKWEB",
"websocket created at %s:%d/%s (proxy to %s)" % (hostname, port, key, url)
)
self.client = VTKWebSocketAB(url, self)
def closed(self, code, reason=None):
# TODO(choudhury): figure out if recovery, etc. is possible if the
# socket is closed for some reason.
tangelo.log_info(
"VTKWEB",
"websocket at %s:%d/%s closed with code %d (%s)" % (
hostname, port, key, code, reason
)
)
def received_message(self, msg):
self.client.send(msg.data)
return Class
|
416058
|
from im2mesh.encoder import pointnet
encoder_dict = {
'pointnet_simple': pointnet.SimplePointnet,
}
encoder_temporal_dict = {
'pointnet_spatiotemporal': pointnet.SpatioTemporalResnetPointnet,
'pointnet_spatiotemporal2': pointnet.SpatioTemporalResnetPointnet2,
}
|
416111
|
from django.conf import settings
def google_analytics_code(request):
return {
'google_analytics_code': settings.GOOGLE_ANALYTICS_CODE
}
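# To activate this context processor, add its dotted path to the template
# settings (project path is illustrative):
# TEMPLATES[0]["OPTIONS"]["context_processors"].append(
#     "myproject.context_processors.google_analytics_code"
# )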
|
416122
|
import numpy as np
from numpy.typing import ArrayLike
def gaussian(energy: float, width: float, xs: ArrayLike) -> np.ndarray:
xs = np.asarray(xs)
return np.exp(-((xs - energy) ** 2) / (2 * width ** 2))
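# Usage sketch: an unnormalized Gaussian line shape centered at `energy`.
# gaussian(2.0, 0.5, [1.0, 2.0, 3.0])
# -> array([0.13533528, 1.        , 0.13533528])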
|
416126
|
from torchtext.data import Field
from rnng.actions import NT, REDUCE, SHIFT
from rnng.fields import ActionField
from rnng.models import DiscRNNG
class TestActionField(object):
def make_action_field(self):
nonterm_field = Field(pad_token=None)
return ActionField(nonterm_field)
def test_init(self):
nonterm_field = Field(pad_token=None)
field = ActionField(nonterm_field)
assert field.nonterm_field is nonterm_field
assert field.unk_token is None
assert field.pad_token is None
def test_build_vocab(self):
field = self.make_action_field()
nonterms = 'S NP VP'.split()
field.nonterm_field.build_vocab([nonterms])
field.build_vocab()
assert len(field.vocab) == len(field.nonterm_field.vocab) + 2
assert field.vocab.stoi[REDUCE] == DiscRNNG.REDUCE_ID
assert field.vocab.stoi[SHIFT] == DiscRNNG.SHIFT_ID
for nonterm in nonterms:
nid = field.nonterm_field.vocab.stoi[nonterm]
action = NT(nonterm)
assert field.vocab.stoi[action] == nid + 2
assert NT(field.nonterm_field.unk_token) in field.vocab.stoi
def test_numericalize(self):
field = self.make_action_field()
nonterms = 'S NP VP'.split()
field.nonterm_field.build_vocab([nonterms])
field.build_vocab()
arr = [
NT('S'),
NT('NP'),
NT('VP'),
SHIFT,
REDUCE,
]
tensor = field.numericalize([arr], device=-1)
assert tensor.size() == (len(arr), 1)
assert tensor.squeeze().data.tolist() == [field.vocab.stoi[a] for a in arr]
def test_numericalize_with_unknown_nt_action(self):
field = self.make_action_field()
nonterms = 'S NP VP'.split()
field.nonterm_field.build_vocab([nonterms])
field.build_vocab()
arr = [
NT('PP'),
]
tensor = field.numericalize([arr], device=-1)
assert tensor.squeeze().data.tolist() == [
field.vocab.stoi[NT(field.nonterm_field.unk_token)]
]
|
416133
|
from .abstract import AbstractBJSNode
#===============================================================================
class FresnelBJSNode(AbstractBJSNode):
bpyType = 'ShaderNodeFresnel'
def __init__(self, bpyNode, socketName, overloadChannels):
super().__init__(bpyNode, socketName, overloadChannels)
self.indexOfRefraction = self.findInput('IOR')
|
416142
|
from ._traces import TracesValidator
from ._name import NameValidator
from ._layout import LayoutValidator
from ._group import GroupValidator
from ._data import DataValidator
from ._baseframe import BaseframeValidator
|
416147
|
import sys,threading
sys.setrecursionlimit(3000000)
threading.stack_size(67108864)
def firstdfs(vertexind):
global fs,isexplored,visitordered,mapDictT
if len(mapDictT[vertexind])>0:
for ind in mapDictT[vertexind]:
if not isexplored[ind-1]:
isexplored[ind-1]=True
firstdfs(ind)
visitordered[fs-1]=vertexind
#print(str(vertexind)+' fs: '+str(fs))
fs=fs-1
def seconddfs(vertexind):
global s,secisexplored,header,mapDict
if len(mapDict[vertexind])==0:return
for ind in mapDict[vertexind]:
if not secisexplored[ind-1]:
secisexplored[ind-1]=True
seconddfs(ind)
header[s-1]+=1
def sccmain():
global mapDict,mapDictT,fs,isexplored,visitordered,s,secisexplored,header
maplength=875714
#maplength=11
f=open('SCC.txt','r')
mapDict={x:[] for x in range(1,maplength+1)}
mapDictT={x:[] for x in range(1,maplength+1)}
for line in f.readlines():
tmp=[int(x) for x in line.split()]
mapDict[tmp[0]].append(tmp[1])
mapDictT[tmp[1]].append(tmp[0])
    f.close()
fs=maplength
isexplored=[False for x in range(1,maplength+1)]
secisexplored=[False for x in range(1,maplength+1)]
visitordered=[0 for x in range(1,maplength+1)]
header=[0 for x in range(1,maplength+1)]
for ind in range(1,maplength+1):
if not isexplored[ind-1]:
#print('Begin from: '+str(ind))
isexplored[ind-1]=True
firstdfs(ind)
print('Second DFS')
for ind in visitordered:
if not secisexplored[ind-1]:
s=ind
secisexplored[ind-1]=True
seconddfs(ind)
header.sort(reverse=True)
print(header[0:20])
if __name__ =='__main__':
thread=threading.Thread(target=sccmain)
thread.start()
|
416218
|
import hcl2
import unittest
from checkov.terraform.checks.resource.aws.MQBrokerNotPubliclyExposed import check
from checkov.common.models.enums import CheckResult
class TestMQBrokerNotPubliclyExposed(unittest.TestCase):
    def test_failure_mqbroker_publicly_accessible(self):
hcl_res = hcl2.loads("""
resource "aws_mq_broker" "example" {
broker_name = "example"
engine_type = "ActiveMQ"
engine_version = "5.15.0"
host_instance_type = "mq.t2.micro"
publicly_accessible = true
user {
username = "ExampleUser"
password = "<PASSWORD>"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_mq_broker']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
    def test_success_mqbroker_not_publicly_accessible(self):
hcl_res = hcl2.loads("""
resource "aws_mq_broker" "example" {
broker_name = "example"
engine_type = "ActiveMQ"
engine_version = "5.15.0"
host_instance_type = "mq.t2.micro"
publicly_accessible = false
user {
username = "ExampleUser"
password = "<PASSWORD>"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_mq_broker']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
    def test_success_missing_publicly_accessible_attribute(self):
hcl_res = hcl2.loads("""
resource "aws_mq_broker" "example" {
broker_name = "example"
engine_type = "ActiveMQ"
engine_version = "5.15.0"
host_instance_type = "mq.t2.micro"
user {
username = "ExampleUser"
password = "<PASSWORD>"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_mq_broker']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
416225
|
import requests
r = requests.post("http://localhost:8000/tag", data="Fördomen har alltid sin rot i vardagslivet - <NAME>".encode("utf-8"))
print(r.text)
|
416227
|
import scipy.optimize as opt
def f(variables):
(x, y) = variables
first_eq = x + 2 * y + 4
second_eq = 2 * x + y + 3
return [first_eq, second_eq]
# use scipy.optimize.fsolve to solve n-equations with n-unknowns
(x, y) = opt.fsolve(f, (.01, .01))
print(x,y)
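# Analytic check: x + 2y + 4 = 0 and 2x + y + 3 = 0 give x = -2/3, y = -5/3,
# which fsolve should reproduce to floating-point precision.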
|
416251
|
from loudml.model import Model
from loudml.opentsdb import (
_build_tags_predicates,
OpenTSDBBucket,
)
from loudml.membucket import MemBucket
from loudml.misc import (
nan_to_none,
make_ts,
)
import copy
import datetime
import logging
import numpy as np
import os
import unittest
logging.getLogger('tensorflow').disabled = True
FEATURES = [
{
'name': 'avg_foo',
'metric': 'avg',
'field': 'foo',
'default': 0,
},
{
'name': 'count_bar',
'metric': 'count',
'field': 'bar',
'default': 0,
},
{
'name': 'avg_baz',
'metric': 'avg',
'field': 'baz',
'match_all': [
{'tag': 'mytag', 'value': 'myvalue'},
],
'default': 0,
},
]
FEATURES_MATCH_ALL_TAG1 = [
{
'name': 'avg_baz',
'metric': 'avg',
'field': 'baz',
'match_all': [
{'tag': 'tag_kw', 'value': 'tag1'},
],
},
]
FEATURES_MATCH_ALL_TAG2 = [
{
'name': 'avg_baz',
'metric': 'avg',
'field': 'baz',
'match_all': [
{'tag': 'tag_kw', 'value': 'tag2'},
{'tag': 'tag_int', 'value': 7},
{'tag': 'tag_bool', 'value': True},
],
},
]
if 'OPENTSDB_ADDR' in os.environ:
ADDR = os.environ['OPENTSDB_ADDR']
else:
ADDR = 'localhost:4242'
class TestOpenTSDBQuick(unittest.TestCase):
@classmethod
def setUpClass(cls):
bucket_interval = 3
t0 = int(datetime.datetime.now().timestamp())
t0 -= t0 % bucket_interval
cls.t0 = t0
cls.source = OpenTSDBBucket({
'name': 'test',
'addr': ADDR,
})
cls.source.drop()
cls.source.init()
cls.model = Model(dict(
name="test-model",
offset=30,
span=300,
bucket_interval=3,
interval=60,
features=FEATURES,
))
data = [
# (foo, bar, timestamp)
(1, 33, t0 - 1), # excluded
(2, 120, t0), (3, 312, t0 + 1),
# empty
(4, 18, t0 + 7),
(5, 78, t0 + 9), # excluded
]
for foo, bar, ts in data:
cls.source.insert_times_data(
ts=ts,
data={
'foo': foo,
}
)
cls.source.insert_times_data(
ts=ts,
data={
'bar': bar,
}
)
cls.source.insert_times_data(
ts=ts,
tags={
'tag_kw': 'tag1',
'tag_int': 9,
'tag_bool': False,
},
data={
'baz': bar,
}
)
cls.source.insert_times_data(
ts=ts,
tags={
'tag_kw': 'tag2',
'tag_int': 7,
'tag_bool': True,
},
data={
'baz': -bar,
}
)
cls.source.commit()
@classmethod
def tearDownClass(cls):
cls.source.drop()
def test_build_tags_predicates(self):
self.assertDictEqual(
_build_tags_predicates(), {},
)
self.assertDictEqual(
_build_tags_predicates([
{'tag': 'foo', 'value': 'bar'},
{'tag': 'a "', 'value': 'b \''},
{'tag': 'int', 'value': 42},
{'tag': 'bool', 'value': True},
]), {
'foo': 'bar',
'a "': 'b \'',
'int': 42,
'bool': True,
}
)
def test_build_times_queries(self):
queries = self.source._build_times_queries(
bucket_interval=self.model.bucket_interval,
features=self.model.features,
from_date=1515404367.1234,
to_date="2018-01-08T14:59:27.456Z",
)
self.assertEqual(len(queries), 3)
self.assertDictEqual(
queries[0],
{
'start': 1515404367,
'end': 1515423564,
'metric': 'avg',
'down_sampler': '3s-avg-nan',
'field': 'foo',
'tags': {}
}
)
def test_get_times_data(self):
res = self.source.get_times_data(
bucket_interval=self.model.bucket_interval,
features=self.model.features,
from_date=self.t0,
to_date=self.t0 + 9,
)
foo_avg = []
bar_count = []
for line in res:
foo_avg.append(nan_to_none(line[1][0]))
bar_count.append(nan_to_none(line[1][1]))
self.assertEqual(foo_avg, [2.5, None, 4.0])
self.assertEqual(bar_count, [2.0, 0, 1.0])
def test_get_times_data2(self):
res = self.source.get_times_data(
bucket_interval=self.model.bucket_interval,
features=self.model.features,
from_date=self.t0,
to_date=self.t0 + 9,
)
# _source to write aggregate data to RAM
_source = MemBucket()
_features = copy.deepcopy(self.model.features)
for _, feature in enumerate(self.model.features):
feature.metric = 'avg'
i = None
for i, (_, val, timeval) in enumerate(res):
bucket = {
feature.field: val[i]
for i, feature in enumerate(self.model.features)
}
bucket.update({'timestamp': make_ts(timeval)})
_source.insert_times_data(bucket)
res2 = _source.get_times_data(
bucket_interval=self.model.bucket_interval,
features=self.model.features,
from_date=self.t0,
to_date=self.t0 + 9,
)
self.model.features = _features
for i, (_, val2, timeval2) in enumerate(res2):
(_, val, timeval) = res[i]
np.testing.assert_allclose(val, val2)
def test_match_all(self):
model = Model(dict(
name="test-model",
offset=30,
span=300,
bucket_interval=3,
interval=60,
features=FEATURES_MATCH_ALL_TAG1,
))
res = self.source.get_times_data(
bucket_interval=model.bucket_interval,
features=model.features,
from_date=self.t0,
to_date=self.t0 + 9,
)
baz_avg = []
for line in res:
baz_avg.append(nan_to_none(line[1][0]))
self.assertEqual(
baz_avg, [216.0, None, 18.0])
model = Model(dict(
name="test-model",
offset=30,
span=300,
bucket_interval=3,
interval=60,
features=FEATURES_MATCH_ALL_TAG2,
))
res = self.source.get_times_data(
bucket_interval=model.bucket_interval,
features=model.features,
from_date=self.t0,
to_date=self.t0 + 9,
)
baz_avg = []
for line in res:
baz_avg.append(nan_to_none(line[1][0]))
self.assertEqual(
baz_avg, [-216.0, None, -18.0])
|
416267
|
from scipy.spatial.distance import pdist,squareform
#from scipy.cluster.hierarchy import linkage, dendrogram,fcluster
import os
import re
import numpy as np
import pandas as pd
import pickle
import scipy.sparse as sp
#a=np.array(['1','0','0','1','1','1','0'])
#b=np.array(['0','0','1','1','1','1','1'])
def cal_dist(u,v):
l=np.count_nonzero(u!=v)
return l
#print(l)
def remove_1per(in_csv,idp,out):
#data=pd.read_csv("all_strain.csv")
#data=pd.read_csv(in_csv)
    data=sp.load_npz(in_csv) # despite the parameter name, this is a sparse .npz matrix
    data=data.A
#X=data.to_numpy()
X=data.T
total_kmer=np.sum(X,axis=1)
total_kmer=np.array(total_kmer)
total_kmer[total_kmer==0]=1
#total_kmer=np.sum(X,axis=1)
dm=squareform(pdist(X,cal_dist))
distance_matrix=dm/total_kmer[:,None]
sid_match=pickle.load(open(idp, "rb"))
sk=dict(zip(sid_match,list(total_kmer)))# Dict : strain -> total kmer
temd=pd.DataFrame(distance_matrix,index=sid_match,columns=sid_match)
temd.to_csv(out+'/tem_dist.csv',sep="\t")
ot=open(out+'/tem_hier.R','w+')
ot.write('x<-read.table(\"'+out+'/tem_dist.csv\", header=T, row.names=1)\nd<-as.dist(as(x,\"matrix\"))\nhc<-hclust(d,method=\"complete\")\nres<-sort(cutree(hc,h=0.01))\nres') # Cutoff: 99.9% or 99%
ot.close()
os.system('Rscript '+out+'/tem_hier.R > '+out+'/cls_res.txt')
os.system('rm '+out+'/tem_hier.R '+out+'/tem_dist.csv')
f=open(out+'/cls_res.txt','r')
a=[]
while True:
line=f.readline().strip()
if not line:break
a.append(line)
d={}
#dmap={}
c=0
for l in a[::-1]:
c+=1
if not c%2==0:
ele=l.split()
if len(ele)==1:
if l not in d:
d[int(l)]={}
#dmap[l]={}
name=int(l)
else:
for e in ele:
if int(e) not in d:
d[int(e)]={}
name=ele
else:
ele=l.split()
if len(ele)==1:
d[name][l]=''
else:
i=0
for e in ele:
d[int(name[i])][e]=''
i+=1
f.close()
os.system('rm '+out+'/cls_res.txt')
nsid_match={}
ni=1
for s in sid_match:
nsid_match[s]=str(ni)
ni+=1
def pick_rep(in_cls,sk):
max_kmer=0
rep=''
for s in in_cls:
if sk[s]>max_kmer:
max_kmer=sk[s]
rep=s
return rep
o1=open(out+'/Re_Cluster_info.txt','w+')
left=[]
remain=[]
strains=[]
#print(sorted(d.keys()))
#exit()
for cid in sorted(d.keys()):
rep=pick_rep(d[cid],sk)
#print(cid,rep,nsid_match[rep])
left.append(nsid_match[rep])
remain.append(int(nsid_match[rep])-1)
strains.append(rep)
o1.write(str(cid)+'\t'+rep+'\t'+str(sk[rep])+'\t'+str(len(d[cid]))+'\t'+','.join(list(d[cid].keys()))+'\n')
#ndata=data.loc[:, left]
#ndata.to_csv(out+'/all_strains_re.csv',index=False)
#print(remain)
ndata=data[:,remain]
ndata=sp.csr_matrix(ndata)
sp.save_npz(out+'/all_strains_re.npz',ndata)
with open(out+'/id2strain_re.pkl','wb') as o2:
pickle.dump(strains, o2, pickle.HIGHEST_PROTOCOL)
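# Usage sketch (file names are placeholders): clusters strains whose k-mer
# profiles differ by less than 1% and keeps the representative with the most
# k-mers per cluster. Requires Rscript on PATH for the hierarchical clustering.
# remove_1per('all_strains.npz', 'id2strain.pkl', 'output_dir')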
|
416278
|
from sage.misc.lazy_import import lazy_import
lazy_import('sage.symbolic.expression',
['print_order', '_print_key', 'print_sorted', '_math_key',
'math_sorted', 'mixed_order', '_mixed_key', 'mixed_sorted'],
deprecation=32386)
|