| id | content |
|---|---|
11476645
|
import random
import re
import six
from geodata.address_expansions.gazetteers import *
from geodata.encoding import safe_decode, safe_encode
from geodata.text.tokenize import tokenize, tokenize_raw, token_types
from geodata.text.utils import non_breaking_dash_regex
LOWER, UPPER, TITLE, MIXED = range(4)
def token_capitalization(s):
if s.istitle():
return TITLE
elif s.islower():
return LOWER
elif s.isupper():
return UPPER
else:
return MIXED
expansion_token_regex = re.compile(r'([^ \-\.]+)([\.\- ]+|$)')
def recase_abbreviation(expansion, tokens, space_token=six.u(' ')):
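    '''
    Recase `expansion` so that its capitalization follows the original `tokens`,
    preserving lower/UPPER/Title case token-by-token where the token counts allow it.
    '''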
expansion_tokens = expansion_token_regex.findall(expansion)
if len(tokens) > len(expansion_tokens) and all((token_capitalization(t) != LOWER for t, c in tokens)):
expansion_tokenized = tokenize(expansion)
is_acronym = len(expansion_tokenized) == 1 and expansion_tokenized[0][1] == token_types.ACRONYM
if len(expansion) <= 3 or is_acronym:
return expansion.upper()
else:
return expansion.title()
elif len(tokens) == len(expansion_tokens):
strings = []
for (t, c), (e, suf) in zip(tokens, expansion_tokens):
cap = token_capitalization(t)
if suf == six.u(' '):
suf = space_token
if cap == LOWER:
strings.append(six.u('').join((e.lower(), suf)))
elif cap == UPPER:
strings.append(six.u('').join((e.upper(), suf)))
elif cap == TITLE:
strings.append(six.u('').join((e.title(), suf)))
elif t.lower() == e.lower():
strings.append(t)
else:
strings.append(six.u('').join((e.title(), suf)))
return six.u('').join(strings)
else:
strings = []
for e, suf in expansion_tokens:
strings.append(e.title())
if suf == six.u(' '):
strings.append(space_token)
else:
strings.append(suf)
return six.u('').join(strings)
def abbreviate(gazetteer, s, language, abbreviate_prob=0.3, separate_prob=0.2, add_period_hyphen_prob=0.3):
'''
Abbreviations
-------------
OSM discourages abbreviations, but to make our training data map better
to real-world input, we can safely replace the canonical phrase with an
abbreviated version and retain the meaning of the words
'''
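    # Illustrative sketch (hypothetical gazetteer contents): with a street-types
    # gazetteer whose canonical "street" lists the abbreviation "st",
    #   abbreviate(street_types_gazetteer, u'West Main Street', 'en')
    # could return u'West Main St' -- only sometimes, since the replacement is
    # applied with probability abbreviate_prob.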
raw_tokens = tokenize_raw(s)
s_utf8 = safe_encode(s)
tokens = [(safe_decode(s_utf8[o:o + l]), token_types.from_id(c)) for o, l, c in raw_tokens]
norm_tokens = [(t.lower() if c in token_types.WORD_TOKEN_TYPES else t, c) for t, c in tokens]
n = len(tokens)
abbreviated = []
i = 0
def abbreviated_tokens(i, tokens, t, c, length, data, space_token=six.u(' ')):
data = [d.split(six.b('|')) for d in data]
# local copy
abbreviated = []
n = len(t)
# Append the original tokens with whitespace if there is any
if random.random() > abbreviate_prob or not any((int(is_canonical) and lang in (language, 'all') for lang, dictionary, is_canonical, canonical in data)):
for j, (t_i, c_i) in enumerate(t):
abbreviated.append(tokens[i + j][0])
if j < n - 1:
abbreviated.append(space_token)
return abbreviated
for lang, dictionary, is_canonical, canonical in data:
if lang not in (language, 'all'):
continue
is_canonical = int(is_canonical)
is_stopword = dictionary == 'stopword'
is_prefix = dictionary.startswith('concatenated_prefixes')
is_suffix = dictionary.startswith('concatenated_suffixes')
is_separable = is_prefix or is_suffix and dictionary.endswith('_separable') and len(t[0][0]) > length
suffix = None
prefix = None
if not is_canonical:
continue
if not is_prefix and not is_suffix:
abbreviations = gazetteer.canonicals.get((canonical, lang, dictionary))
# TODO: maybe make this a Zipfian choice e.g. so "St" gets chosen most often for "Street"
# would require an audit of the dictionaries though so abbreviations are listed from
# left-to-right by frequency of usage
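                # Hedged sketch of that Zipfian idea (not enabled; assumes the
                # dictionaries list abbreviations from most to least frequent,
                # and random.choices requires Python 3):
                #   weights = [1.0 / (rank + 1) for rank in range(len(abbreviations))]
                #   token = random.choices(abbreviations, weights=weights, k=1)[0]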
token = random.choice(abbreviations) if abbreviations else canonical
token = recase_abbreviation(token, tokens[i:i + len(t)], space_token=space_token)
abbreviated.append(token)
break
elif is_prefix:
token = tokens[i][0]
prefix, token = token[:length], token[length:]
abbreviated.append(prefix)
if random.random() < separate_prob:
sub_tokens = tokenize(token)
if sub_tokens and sub_tokens[0][1] in (token_types.HYPHEN, token_types.DASH):
token = six.u('').join((t for t, c in sub_tokens[1:]))
abbreviated.append(space_token)
if token.islower():
abbreviated.append(token.title())
else:
abbreviated.append(token)
abbreviated.append(space_token)
break
elif is_suffix:
token = tokens[i][0]
token, suffix = token[:-length], token[-length:]
concatenated_abbreviations = gazetteer.canonicals.get((canonical, lang, dictionary), [])
separated_abbreviations = []
phrase = gazetteer.trie.get(suffix.rstrip('.'))
suffix_data = [safe_decode(d).split(six.u('|')) for d in (phrase or [])]
for l, d, _, c in suffix_data:
if l == lang and c == canonical:
separated_abbreviations.extend(gazetteer.canonicals.get((canonical, lang, d)))
separate = random.random() < separate_prob
if concatenated_abbreviations and not separate:
abbreviation = random.choice(concatenated_abbreviations)
elif separated_abbreviations:
abbreviation = random.choice(separated_abbreviations)
else:
abbreviation = canonical
if separate:
sub_tokens = tokenize(token)
if sub_tokens and sub_tokens[-1][1] in (token_types.HYPHEN, token_types.DASH):
token = six.u('').join((t for t, c in sub_tokens[:-1]))
abbreviated.append(token)
if separate:
abbreviated.append(space_token)
if suffix.isupper():
abbreviated.append(abbreviation.upper())
elif separate:
abbreviated.append(abbreviation.title())
else:
abbreviated.append(abbreviation)
break
else:
for j, (t_i, c_i) in enumerate(t):
abbreviated.append(tokens[i + j][0])
if j < n - 1:
abbreviated.append(space_token)
return abbreviated
for t, c, length, data in gazetteer.filter(norm_tokens):
if c == token_types.PHRASE:
abbrev_tokens = abbreviated_tokens(i, tokens, t, c, length, data)
abbreviated.extend(abbrev_tokens)
if i + len(t) < n and raw_tokens[i + len(t)][0] > sum(raw_tokens[i + len(t) - 1][:2]):
abbreviated.append(six.u(' '))
i += len(t)
else:
token = tokens[i][0]
if not non_breaking_dash_regex.search(token):
abbreviated.append(token)
else:
sub_tokens = tokenize(non_breaking_dash_regex.sub(six.u(' '), token))
sub_tokens_norm = [(t.lower() if c in token_types.WORD_TOKEN_TYPES else t, c) for t, c in sub_tokens]
sub_token_abbreviated = []
sub_i = 0
sub_n = len(sub_tokens)
for t, c, length, data in gazetteer.filter(sub_tokens_norm):
if c == token_types.PHRASE:
abbrev_tokens = abbreviated_tokens(sub_i, sub_tokens, t, c, length, data, space_token=six.u('-'))
sub_token_abbreviated.extend(abbrev_tokens)
sub_i += len(t)
if sub_i < sub_n:
if abbrev_tokens and random.random() < add_period_hyphen_prob and not abbrev_tokens[-1].endswith(six.u('.')) and not abbrev_tokens[-1].lower().endswith(sub_tokens_norm[sub_i - 1][0]):
sub_token_abbreviated.append(six.u('.'))
sub_token_abbreviated.append(six.u('-'))
else:
sub_token_abbreviated.append(sub_tokens[sub_i][0])
sub_i += 1
if sub_i < sub_n:
sub_token_abbreviated.append(six.u('-'))
abbreviated.append(six.u('').join(sub_token_abbreviated))
if i < n - 1 and raw_tokens[i + 1][0] > sum(raw_tokens[i][:2]):
abbreviated.append(six.u(' '))
i += 1
return six.u('').join(abbreviated).strip()
|
11476654
|
from sciwing.infer.interface_client_base import BaseInterfaceClient
from typing import Dict, Any
import wasabi
import sciwing.constants as constants
from sciwing.modules.embedders.trainable_word_embedder import TrainableWordEmbedder
from sciwing.modules.embedders.concat_embedders import ConcatEmbedders
from sciwing.datasets.seq_labeling.conll_dataset import CoNLLDatasetManager
from sciwing.modules.lstm2seqencoder import Lstm2SeqEncoder
from sciwing.models.rnn_seq_crf_tagger import RnnSeqCrfTagger
from sciwing.cli.sciwing_interact import SciWINGInteract
import pathlib
from sciwing.infer.seq_label_inference.conll_inference import Conll2003Inference
PATHS = constants.PATHS
DATA_DIR = PATHS["DATA_DIR"]
class BuildConllNerSeqCrfInfer(BaseInterfaceClient):
def __init__(self, hparams: Dict[str, Any]):
self.hparams = hparams
data_dir = pathlib.Path(DATA_DIR)
self.train_filename = data_dir.joinpath("eng.train")
self.dev_filename = data_dir.joinpath("eng.testa")
self.test_filename = data_dir.joinpath("eng.testb")
self.printer = wasabi.Printer()
self.data_manager = self.build_dataset()
self.model = self.build_model()
self.infer = self.build_infer()
def build_dataset(self):
data_manager = CoNLLDatasetManager(
train_filename=self.train_filename,
dev_filename=self.dev_filename,
test_filename=self.test_filename,
column_names=["POS", "DEP", "NER"],
train_only="ner",
)
return data_manager
def build_model(self):
embedder = TrainableWordEmbedder(
embedding_type=self.hparams.get("emb_type"),
datasets_manager=self.data_manager,
device=self.hparams.get("device"),
)
embedder = ConcatEmbedders([embedder])
lstm2seqencoder = Lstm2SeqEncoder(
embedder=embedder,
dropout_value=self.hparams.get("dropout"),
hidden_dim=self.hparams.get("hidden_dim"),
bidirectional=self.hparams.get("bidirectional"),
combine_strategy=self.hparams.get("combine_strategy"),
rnn_bias=True,
device=self.hparams.get("device"),
num_layers=self.hparams.get("num_layers"),
)
model = RnnSeqCrfTagger(
rnn2seqencoder=lstm2seqencoder,
encoding_dim=2 * self.hparams.get("hidden_dim")
if self.hparams.get("bidirectional")
and self.hparams.get("combine_strategy") == "concat"
else self.hparams.get("hidden_dim"),
device=self.hparams.get("device"),
tagging_type="IOB1",
datasets_manager=self.data_manager,
)
return model
def build_infer(self):
infer = Conll2003Inference(
model=self.model,
model_filepath=self.hparams.get("model_filepath"),
datasets_manager=self.data_manager,
)
return infer
def generate_prediction_file(self, output_filename: str):
self.infer.generate_predictions_for(
task="ner",
test_filename=str(self.test_filename),
output_filename=str(output_filename),
)
if __name__ == "__main__":
dirname = pathlib.Path(".", "output")
model_filepath = dirname.joinpath("checkpoints", "best_model.pt")
hparams = {
"emb_type": "glove_6B_100",
"hidden_dim": 100,
"bidirectional": False,
"combine_strategy": "concat",
"model_filepath": str(model_filepath),
"device": "cpu",
"dropout": 0.5,
"num_layers": 1,
}
conll_inference = BuildConllNerSeqCrfInfer(hparams)
conll_inference.generate_prediction_file(
output_filename=pathlib.Path("conll_2003_ner_predictions.txt")
)
|
11476658
|
from PIL import Image, ImageDraw, ImageFont
import os
import json
import random
def drawing(types,fromQQ):
    # Plugin directory, e.g. C:\Users\asus\Downloads\Programs\github\fortune\server\server\fortune
base_path = os.path.split(os.path.realpath(__file__))[0]
img_dir = f'{base_path}/data/img/{types}/'
img_path = img_dir + random.choice(os.listdir(img_dir))
out_dir = f'{base_path}/data/out/{types}/'
out_path = out_dir + f'{fromQQ}.jpg'
text_path = f'{base_path}/data/text/copywriting.json'
title_path = f'{base_path}/data/text/goodLuck.json'
fontPath = {
'title': f"{base_path}/data/font/Mamelon.otf",
'text': f"{base_path}/data/font/sakura.ttf"
}
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("目录创建成功!")
img = Image.open(img_path)
# Draw title
draw = ImageDraw.Draw(img)
with open(text_path, 'r', encoding='utf-8') as f:
content = f.read()
content = json.loads(content)
text = random.choice(content['copywriting'])
with open(title_path, 'r', encoding='utf-8') as f:
content = f.read()
content = json.loads(content)
for i in content['types_of']:
if i['good-luck'] == text['good-luck']:
title = i['name']
text = text['content']
font_size = 45
color = '#F5F5F5'
image_font_center = (140, 99)
ttfront = ImageFont.truetype(fontPath['title'], font_size)
font_length = ttfront.getsize(title)
draw.text((image_font_center[0]-font_length[0]/2, image_font_center[1]-font_length[1]/2),
title, fill=color,font=ttfront)
# Text rendering
font_size = 25
color = '#323232'
image_font_center = [140, 297]
ttfront = ImageFont.truetype(fontPath['text'], font_size)
result = decrement(text)
if not result[0]:
return
textVertical = []
for i in range(0, result[0]):
font_height = len(result[i + 1]) * (font_size + 4)
textVertical = vertical(result[i + 1])
x = int(image_font_center[0] + (result[0] - 2) * font_size / 2 +
(result[0] - 1) * 4 - i * (font_size + 4))
y = int(image_font_center[1] - font_height / 2)
draw.text((x, y), textVertical, fill = color, font = ttfront)
# Save
img = img.convert("RGB")
img.save(out_path)
return out_path
def vertical(text):
    return '\n'.join(text)
def decrement(text):
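    # Split `text` into vertical columns of at most 9 characters each.
    # Returns [number_of_columns, column_1, ..., column_n], or [False] when the
    # text is longer than 4 * 9 = 36 characters and cannot be rendered.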
length = len(text)
result = []
cardinality = 9
if length > 4 * cardinality:
return [False]
numberOfSlices = 1
while length > cardinality:
numberOfSlices += 1
length -= cardinality
result.append(numberOfSlices)
# Optimize for two columns
space = ' '
length = len(text)
if numberOfSlices == 2:
if length % 2 == 0:
# even
fillIn = space * int(9 - length / 2)
return [numberOfSlices, text[:int(length / 2)] + fillIn, fillIn + text[int(length / 2):]]
else:
# odd number
fillIn = space * int(9 - (length + 1) / 2)
return [numberOfSlices, text[:int((length + 1) / 2)] + fillIn,
fillIn + space + text[int((length + 1) / 2):]]
for i in range(0, numberOfSlices):
if i == numberOfSlices - 1 or numberOfSlices == 1:
result.append(text[i * cardinality:])
else:
result.append(text[i * cardinality:(i + 1) * cardinality])
return result
#drawing("noah","test")
|
11476671
|
import copy
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
def test_reference_node_serialization():
tn.check_serialization(tn.ReferenceNode("a"))
tn.check_serialization(tn.ReferenceNode("a", reference="bar"))
def test_send_to_node_serialization():
tn.check_serialization(tn.SendToNode("a"))
tn.check_serialization(tn.SendToNode("a", reference="bar"))
def test_hyperparameter_node_serialization():
tn.check_serialization(
tn.HyperparameterNode("a",
tn.ReferenceNode("b")))
def test_add_bias_node_serialization():
tn.check_serialization(tn.AddBiasNode("a"))
tn.check_serialization(tn.AddBiasNode(
"a",
inits=[],
# need to make broadcastable a list because json (de)serialization
# converts tuples to lists
broadcastable=[True, False, True]))
def test_linear_mapping_node_serialization():
tn.check_serialization(tn.LinearMappingNode("a"))
tn.check_serialization(tn.LinearMappingNode("a", output_dim=3))
def test_apply_node_serialization():
tn.check_serialization(tn.ApplyNode("a"))
def test_reference_node():
network = tn.SequentialNode("s", [
tn.InputNode("input1", shape=(3, 4, 5)),
tn.InputNode("input2", shape=(5, 4, 3)),
tn.ReferenceNode("ref", reference="input1"),
]).network()
fn = network.function(["input1"], ["ref"])
x = np.random.randn(3, 4, 5).astype(fX)
np.testing.assert_allclose(fn(x)[0], x)
def test_send_to_node():
network = tn.ContainerNode("c", [
tn.SequentialNode(
"s1",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.SendToNode("stn1", reference="s2")]),
tn.SequentialNode(
"s2",
[tn.SendToNode("stn2", reference="stn3")]),
tn.SequentialNode(
"s3",
[tn.SendToNode("stn3", reference="i")]),
tn.IdentityNode("i"),
]).network()
fn = network.function(["in"], ["i"])
x = np.random.randn(3, 4, 5).astype(fX)
np.testing.assert_allclose(fn(x)[0], x)
def test_network_doesnt_mutate():
root_node = tn.ContainerNode("c", [
tn.SequentialNode(
"s1",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.SendToNode("stn1", reference="s2")]),
tn.SequentialNode(
"s2",
[tn.SendToNode("stn2", reference="stn3")]),
tn.SequentialNode(
"s3",
[tn.SendToNode("stn3", reference="i")]),
tn.IdentityNode("i"),
])
original_dict = copy.deepcopy(root_node.__dict__)
root_node.network().build()
nt.assert_equal(original_dict,
root_node.__dict__)
def test_node_with_generated_children_can_serialize():
root_node = tn.ContainerNode("c", [
tn.SequentialNode(
"s1",
[tn.InputNode("in", shape=(3, 4, 5)),
tn.SendToNode("stn1", reference="s2")]),
tn.SequentialNode(
"s2",
[tn.SendToNode("stn2", reference="stn3")]),
tn.SequentialNode(
"s3",
[tn.SendToNode("stn3", reference="i")]),
tn.IdentityNode("i"),
])
root_node.network().build()
root2 = treeano.core.node_from_data(treeano.core.node_to_data(root_node))
nt.assert_equal(root_node, root2)
def test_add_bias_node_broadcastable():
def get_bias_shape(broadcastable):
return tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
(tn.AddBiasNode("b", broadcastable=broadcastable)
if broadcastable is not None
else tn.AddBiasNode("b"))
]).network()["b"].get_vw("bias").shape
nt.assert_equal((1, 4, 5),
get_bias_shape(None))
nt.assert_equal((1, 4, 1),
get_bias_shape((True, False, True)))
nt.assert_equal((3, 1, 5),
get_bias_shape((False, True, False)))
@nt.raises(AssertionError)
def test_add_bias_node_broadcastable_incorrect_size1():
tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.AddBiasNode("b", broadcastable=(True, False))
]).network().build()
@nt.raises(AssertionError)
def test_add_bias_node_broadcastable_incorrect_size2():
tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.AddBiasNode("b", broadcastable=(True, False, True, False))
]).network().build()
def test_add_bias_node():
network = tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.AddBiasNode("b", broadcastable_axes=())
]).network()
bias_var = network["b"].get_vw("bias")
fn = network.function(["in"], ["s"])
x = np.random.randn(3, 4, 5).astype(fX)
y = np.random.randn(3, 4, 5).astype(fX)
# test that bias is 0 initially
np.testing.assert_allclose(fn(x)[0], x)
# set bias_var value to new value
bias_var.value = y
# test that adding works
np.testing.assert_allclose(fn(x)[0], x + y)
def test_linear_mapping_node_shape():
def get_shapes(output_dim):
network = tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.LinearMappingNode("linear", output_dim=output_dim),
]).network()
weight_shape = network["linear"].get_vw("weight").shape
output_shape = network["s"].get_vw("default").shape
return weight_shape, output_shape
nt.assert_equal(((5, 10), (3, 4, 10)), get_shapes(10))
nt.assert_equal(((5, 1), (3, 4, 1)),
get_shapes(1))
def test_linear_mapping_node():
network = tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.LinearMappingNode("linear", output_dim=6),
]).network()
weight_var = network["linear"].get_vw("weight")
fn = network.function(["in"], ["s"])
x = np.random.randn(3, 4, 5).astype(fX)
W = np.random.randn(5, 6).astype(fX)
# test that weight is 0 initially
np.testing.assert_allclose(fn(x)[0], np.zeros((3, 4, 6)))
# set weight_var value to new value
weight_var.value = W
# test that adding works
np.testing.assert_allclose(np.dot(x, W), fn(x)[0], rtol=1e-4, atol=1e-7)
def test_apply_node():
network = tn.SequentialNode("s", [
tn.InputNode("in", shape=(3, 4, 5)),
tn.ApplyNode("a", fn=T.sum, shape_fn=lambda x: ()),
]).network()
fn = network.function(["in"], ["s"])
x = np.random.randn(3, 4, 5).astype(fX)
np.testing.assert_allclose(fn(x)[0],
x.sum(),
rtol=1e-5)
|
11476706
|
import os
import logging
import types
import numpy as np
from glob import glob
from types import TupleType, StringType
from aeon import timer
logger = logging.getLogger(name='finmag')
class Tablewriter(object):
# It is recommended that the comment symbol should end with a
# space so that there is no danger that it gets mangled up with
# the 'time' field because some of the code below relies on them
# being separated by some whitespace.
comment_symbol = '# '
def __init__(self, filename, simulation, override=False, entity_order=None, entities=None):
logger.debug("Creating DataWriter for file '%s'" % (filename))
# formatting for columns (could in principle be customized
# through extra arguments here)
precision = 12
charwidth = 18
self.float_format = "%" + str(charwidth) + '.' + str(precision) + "g "
self.string_format = "%" + str(charwidth) + "s "
# save_head records whether the headings (name and units)
# have been saved already
self.save_head = False
# entities:
# Idea is to have a dictionary of keys where the keys
# are reference names for the entities and
# the value is another dictionary, which has keys 'unit', 'get' and 'header':
# 'get' is the a function that takes a simulation object as the argument
# and returns the data to be saved.
#
# No doubt this can be done neater, more general, etc.
# For example, it would be desirable if we could get ALL
# the fields from the simulation object, i.e. demag, exchange,
# anisotropy and also the corresponding energies.
#
# Ideally this would have the flexiblity to realise when we have
# two different anisotropies in the simulation, and provide both of
# these. It may be that we need create a 'fieldname' that the user
# can provide when creating interactions which summarises what the
# field is about, and which can be used as a useful column header
# here for the ndt file.
if entities is None:
self._entities = {}
self.add_entity('time', {'unit': '<s>',
'get': lambda sim: sim.t,
'header': 'time'})
self.add_entity('m', {'unit': '<>',
'get': lambda sim: sim.m_average,
'header': ('m_x', 'm_y', 'm_z')})
# add time integrator dummy tokens than return NAN as we haven't got
# the integrator yet (or may never create one).
self.add_entity('steps', {
'unit': '<1>',
#'get': lambda sim: sim.integrator.stats()['nsteps'],
'get': lambda sim: np.NAN,
'header': 'steps'})
self.add_entity('last_step_dt', {
'unit': '<1>',
#'get': lambda sim: sim.integrator.stats()['hlast'],
'get': lambda sim: np.NAN,
'header': 'last_step_dt'})
self.add_entity('dmdt', {
'unit': '<A/ms>',
#'get': lambda sim: sim.dmdt_max,
'get': lambda sim: np.array([np.NAN, np.NAN, np.NAN]),
'header': ('dmdt_x', 'dmdt_y', 'dmdt_z')})
else:
self._entities = entities
self.filename = filename
self.sim = simulation
# in what order to write data
if entity_order:
self.entity_order = entity_order
else:
self.entity_order = self.default_entity_order()
# if file exists, cowardly stop
if os.path.exists(filename) and not override:
msg = "File %s exists already; cowardly stopping" % filename
raise RuntimeError(msg)
def add_entity(self, name, dic):
"""
Add an entity to be saved to this ndt file at the next data saving instance. The
arguments are:
name : a reference name for this entity (used to order the entities in the ndt file)
dic : a dictionary containing data for the header lines and a function to retrieve the data.
Examples:
For the time entity, we have
name = 'time'
dic = {'unit': '<s>',
'get': lambda sim: sim.t,
'header': 'time'},
For the magnetisation entity, we have
name = 'm'
dic = {'unit': '<>',
'get': lambda sim: sim.m_average,
'header': ('m_x', 'm_y', 'm_z')}
"""
if self.save_head:
raise RuntimeError("Attempt to add entity '{}'->'{}' to ndt file {}" +
"after file has been created -- this is impossible".
format(name, dic, self.filename))
assert name not in self._entities.keys(), \
"Attempt to add a second '{}' to entities for {}".\
format(name, self.filename)
# check that right keywords are given
entity_descr = "entity '{}' -> '{}'".format(name, dic)
assert 'header' in dic, "Missing 'header' in " + entity_descr
assert 'unit' in dic, "Missing 'unit' in " + entity_descr
assert 'get' in dic, "Missing 'get' in " + entity_descr
self._entities[name] = dic
self.update_entity_order()
def modify_entity_get_method(self, name, new_get_method):
"""Allows changing the get method. Is used for integrators at the moment: we register
dummy get methods when the tablewriter file is created, and then updated those if and
when an integrator has been created."""
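        # Illustrative use (hypothetical integrator), mirroring the dummy entities above:
        #   writer.modify_entity_get_method('steps',
        #       lambda sim: sim.integrator.stats()['nsteps'])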
assert name in self._entities, "Couldn't find '{}' in {}".format(
name, self._entities.keys())
logger.debug("Updating get method for {} in TableWriter(name={})".format(
name, self.filename))
# logger.debug("Updating get method for {} in TableWriter(name={}) old method: {}, new method: {}".format(
# name, self.filename, self._entities[name]['get'], new_get_method))
self._entities[name]['get'] = new_get_method
def delete_entity_get_method(self, name):
"""We cannot delete entities once they are created (as this would change the number of columns in the
data file). Instead, we register a return function that returns numpy.NAN.
"""
assert name in self._entities, "Couldn't find '{}' in {}".format(
name, self._entities.keys())
logger.debug("'Deleting' get method for {} in TableWriter(name={})".format(
name, self.filename))
self._entities[name]['get'] = lambda sim: np.NAN
def delete_entity_get_methods(self):
"""Method to delete all get_methods.
Might need this (trying to find references to the simulation objects are hiding).
"""
logger.debug("'Deletinging all get methods in TableWriter(name={})".format(self.filename))
keys = self._entities.keys()
for key in keys:
self.delete_entity_get_method(key)
def default_entity_order(self):
keys = self._entities.keys()
# time needs to go first
if 'time' in keys:
keys.remove('time')
return ['time'] + sorted(keys)
elif 'step' in keys:
keys.remove('step')
return ['step'] + sorted(keys)
else:
return keys
def update_entity_order(self):
self.entity_order = self.default_entity_order()
def headers(self):
"""return line one and two of ndt data file as string"""
line1 = [self.comment_symbol]
line2 = [self.comment_symbol]
for entityname in self.entity_order:
colheaders = self._entities[entityname]['header']
# colheaders can be a 3-tuple ('mx','my','mz'), say
# or a string ('time'). Avoid iterating over string:
if isinstance(colheaders, str):
colheaders = [colheaders]
for colhead in colheaders:
line1.append(self.string_format % colhead)
line2.append(self.string_format %
self._entities[entityname]['unit'])
return "".join(line1) + "\n" + "".join(line2) + "\n"
@timer.method
def save(self):
"""Append data (spatial averages of fields) for current
configuration"""
if not self.save_head:
f = open(self.filename, 'w')
# Write header
f.write(self.headers())
f.close()
self.save_head = True
# open file
with open(self.filename, 'a') as f:
f.write(' ' * len(self.comment_symbol)) # account for comment
# symbol width
# The commented lines below are Hans' initial attempt to catch when the
# number of columns to be written changes
# but this seems to never happen. So it's not quite right.
# Also, if this was the right place to catch it, i.e. if watching
# self._entities is the critical object that shouldn't change after
# the header has been written, then we should convert this into a
# 'property' which raises an error if called for writing once the
# header lines have been written. HF, 9 June 2014.
# if len(self._entities) == self.ncolumn_headings_written:
# msg = "It seems number of columns to be written" + \
# "to {} has changed".format(self.filename)
# msg += "from {} to {}. This is not supported.".format(
# self.ncolumn_headings_written, len(self.entity_order))
# logger.error(msg)
# raise ValueError(msg)
for entityname in self.entity_order:
value = self._entities[entityname]['get'](self.sim)
if isinstance(value, np.ndarray):
for v in value:
f.write(self.float_format % v)
elif isinstance(value, float) or isinstance(value, int):
f.write(self.float_format % value)
elif isinstance(value, types.NoneType):
#f.write(self.string_format % value)
f.write(self.string_format % "nan")
else:
msg = "Can only deal with numpy arrays, float and int " + \
"so far, but type is %s" % type(value)
raise NotImplementedError(msg)
f.write('\n')
class Tablereader(object):
# open ndt file
def __init__(self, filename):
self.filename = filename
# if file exists, cowardly stop
if not os.path.exists(filename):
raise RuntimeError("Cannot see file '%s'" % self.filename)
# immediatey read file
self.reload()
def reload(self):
"""Read Table data file"""
try:
self.f = open(self.filename, 'r')
except IOError:
raise RuntimeError("Cannot see file '%s'" % self.filename)
line1 = self.f.readline()
line2 = self.f.readline()
headers = line1.split()
units = line2.split()
assert len(headers) == len(units)
# use numpy to read remaining data (genfromtxt will
# complain if there are rows with different sizes)
try:
self.data = np.genfromtxt(self.f)
except ValueError:
raise RuntimeError("Cannot load data from file '{}'." +
"Maybe the file was incompletely written?".
format(self.f))
self.f.close()
# Make sure we have a 2d array even if the file only contains a single
# line (or none)
if self.data.ndim == 1:
self.data = self.data[np.newaxis, :]
# Check if the number of data columns is equal to the number of headers
assert self.data.shape[1] == len(headers) - 1
datadic = {}
# now wrap up data conveniently
for i, entity in enumerate(headers[1:]):
datadic[entity] = self.data[:, i]
self.datadic = datadic
def entities(self):
"""Returns list of available entities"""
return self.datadic.keys()
def timesteps(self):
"""Returns list of available time steps"""
return self.datadic['time']
def __getitem__(self, entity):
"""
        Given a single entity name, return the data as a 1D numpy array.
        If multiple entity names are given as a tuple (e.g. data['m_x', 'm_y']),
        a list of 1D numpy arrays is returned, one per entity.
"""
if isinstance(entity, StringType):
res = self.datadic[entity]
elif isinstance(entity, TupleType):
res = [self.datadic[e] for e in entity]
else:
raise TypeError("'entity' must be a string or a tuple. "
"Got: {} ({})".format(entity, type(entity)))
return res
class FieldSaver(object):
"""
Wrapper class which can incrementally save data to one file or
multiple files (depending on the file type). Internally, this
keeps a counter which is included in the file name if multiple
files need to be created.
Supported file types:
.npy -- Creates multiple, incrementally numbered .npy files.
"""
cnt_pattern = '_{:06d}'
def __init__(self, filename, overwrite=False, incremental=False):
if not filename.endswith('.npy'):
filename += '.npy'
# Create any non-existing directory components
dirname = os.path.dirname(filename)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
self.filename = filename
self.basename, self.ext = os.path.splitext(filename)
self.incremental = incremental
self.counter = 0
if incremental:
existing_files = glob(self.basename + '_*' + self.ext)
else:
existing_files = glob(self.filename)
if len(existing_files) > 0:
            if not overwrite:
                raise IOError(
                    "Will not overwrite existing file(s) '{}*'. Use 'overwrite=True' "
                    "if this is what you want.".format(self.basename))
else:
logger.debug("Overwriting {} existing file(s) "
"'{}*.npy'.".format(len(existing_files), self.basename))
for f in existing_files:
os.remove(f)
def save(self, data):
"""
Save the given data (which should be a numpy array).
"""
if self.incremental:
cur_filename = self.basename + \
self.cnt_pattern.format(self.counter) + self.ext
else:
cur_filename = self.filename
logger.debug("Saving field data to file '{}'.".format(cur_filename))
np.save(cur_filename, data)
self.counter += 1
def demo2():
import finmag
sim = finmag.example.barmini(name='demo2-fileio')
sim.save_averages()
# and write some more data
sim.schedule("save_ndt", every=10e-12)
sim.run_until(0.1e-9)
# read the data
data = Tablereader('demo2_fileio.ndt')
for t, mx, my, mz in zip(data['time'], data['m_x'], data['m_y'], data['m_z']):
print("t={:10g}, m = {:12}, {:12}, {:12}".format(t, mx, my, mz))
def demo1():
# create example simulation
import finmag
import dolfin as df
xmin, ymin, zmin = 0, 0, 0 # one corner of cuboid
xmax, ymax, zmax = 6, 6, 11 # other corner of cuboid
nx, ny, nz = 3, 3, 6 # number of subdivisions (use ~2nm edgelength)
mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
# standard Py parameters
sim = finmag.sim_with(
mesh, Ms=0.86e6, alpha=0.5, unit_length=1e-9, A=13e-12, m_init=(1, 0, 1))
filename = 'data.txt'
ndt = Tablewriter(filename, sim, override=True)
times = np.linspace(0, 3.0e-11, 6 + 1)
for i, time in enumerate(times):
print("In iteration {}, computing up to time {}".format(i, time))
sim.run_until(time)
ndt.save()
# now open file for reading
f = Tablereader(filename)
    print(f.timesteps())
    print(f['m_x'])
if __name__ == "__main__":
print("Demo 1")
demo1()
print("Demo 2")
demo2()
|
11476709
|
from scipy.signal import find_peaks
from tssearch.search.search_utils import lockstep_search, elastic_search
def time_series_segmentation(dict_distances, query, sequence, tq=None, ts=None, weight=None):
"""
Time series segmentation locates the time instants between consecutive query repetitions on a more extended and
repetitive sequence.
Parameters
----------
dict_distances: dict
Configuration file with distances
query: nd-array
Query time series.
sequence: nd-array
Sequence time series.
    tq: nd-array
        Time stamps of the query time series.
    ts: nd-array
        Time stamps of the sequence time series.
    weight: nd-array (Default: None)
        Query weight values.
Returns
-------
segment_results: dict
        Segmented time instants for each given distance.
"""
l_query = len(query)
segment_results = {}
for d_type in dict_distances:
for dist in dict_distances[d_type]:
if "use" not in dict_distances[d_type][dist] or dict_distances[d_type][dist]["use"] == "yes":
segment_results[dist] = {}
if d_type == "lockstep":
distance = lockstep_search(dict_distances[d_type][dist], query, sequence, weight)
elif d_type == "elastic":
distance, ac = elastic_search(dict_distances[d_type][dist], query, sequence, tq, ts, weight)
else:
print("WARNING")
continue
pks, _ = find_peaks(-distance, distance=l_query / 2)
segment_results[dist] = pks
return segment_results
|
11476720
|
import torch.nn.functional as F
import torch
import random
import numpy as np
from fastNLP import Const
from fastNLP import CrossEntropyLoss
from fastNLP import AccuracyMetric
from fastNLP import Tester
import os
from fastNLP import logger
def should_mask(name, t=''):
if 'bias' in name:
return False
if 'embedding' in name:
splited = name.split('.')
if splited[-1]!='weight':
return False
if 'embedding' in splited[-2]:
return False
if 'c0' in name:
return False
if 'h0' in name:
return False
if 'output' in name and t not in name:
return False
return True
def get_init_mask(model):
init_masks = {}
for name, param in model.named_parameters():
if should_mask(name):
init_masks[name+'.mask'] = torch.ones_like(param)
# logger.info(init_masks[name+'.mask'].requires_grad)
return init_masks
def set_seed(seed):
random.seed(seed)
np.random.seed(seed+100)
torch.manual_seed(seed+200)
torch.cuda.manual_seed_all(seed+300)
def get_parameters_size(model):
result = {}
for name,p in model.state_dict().items():
result[name] = p.size()
return result
def prune_by_proportion_model(model,proportion,task):
# print('this time prune to ',proportion*100,'%')
for name, p in model.named_parameters():
# print(name)
if not should_mask(name,task):
continue
tensor = p.data.cpu().numpy()
index = np.nonzero(model.mask[task][name+'.mask'].data.cpu().numpy())
# print(name,'alive count',len(index[0]))
alive = tensor[index]
# print('p and mask size:',p.size(),print(model.mask[task][name+'.mask'].size()))
percentile_value = np.percentile(abs(alive), (1 - proportion) * 100)
# tensor = p
# index = torch.nonzero(model.mask[task][name+'.mask'])
# # print('nonzero len',index)
# alive = tensor[index]
# print('alive size:',alive.shape)
# prune_by_proportion_model()
# percentile_value = torch.topk(abs(alive), int((1-proportion)*len(index[0]))).values
# print('the',(1-proportion)*len(index[0]),'th big')
# print('threshold:',percentile_value)
prune_by_threshold_parameter(p, model.mask[task][name+'.mask'],percentile_value)
# for
def prune_by_proportion_model_global(model,proportion,task):
# print('this time prune to ',proportion*100,'%')
alive = None
for name, p in model.named_parameters():
# print(name)
if not should_mask(name,task):
continue
tensor = p.data.cpu().numpy()
index = np.nonzero(model.mask[task][name+'.mask'].data.cpu().numpy())
# print(name,'alive count',len(index[0]))
if alive is None:
alive = tensor[index]
else:
alive = np.concatenate([alive,tensor[index]],axis=0)
percentile_value = np.percentile(abs(alive), (1 - proportion) * 100)
for name, p in model.named_parameters():
if should_mask(name,task):
prune_by_threshold_parameter(p, model.mask[task][name+'.mask'],percentile_value)
def prune_by_threshold_parameter(p, mask, threshold):
p_abs = torch.abs(p)
new_mask = (p_abs > threshold).float()
# print(mask)
mask[:]*=new_mask
def one_time_train_and_prune_single_task(trainer,PRUNE_PER,
optimizer_init_state_dict=None,
model_init_state_dict=None,
is_global=None,
):
from fastNLP import Trainer
trainer.optimizer.load_state_dict(optimizer_init_state_dict)
trainer.model.load_state_dict(model_init_state_dict)
# print('metrics:',metrics.__dict__)
# print('loss:',loss.__dict__)
# print('trainer input:',task.train_set.get_input_name())
# trainer = Trainer(model=model, train_data=task.train_set, dev_data=task.dev_set, loss=loss, metrics=metrics,
# optimizer=optimizer, n_epochs=EPOCH, batch_size=BATCH, device=device,callbacks=callbacks)
trainer.train(load_best_model=True)
# tester = Tester(task.train_set, model, metrics, BATCH, device=device, verbose=1,use_tqdm=False)
# print('FOR DEBUG: test train_set:',tester.test())
# print('**'*20)
# if task.test_set:
# tester = Tester(task.test_set, model, metrics, BATCH, device=device, verbose=1)
# tester.test()
if is_global:
prune_by_proportion_model_global(trainer.model, PRUNE_PER, trainer.model.now_task)
else:
prune_by_proportion_model(trainer.model, PRUNE_PER, trainer.model.now_task)
# def iterative_train_and_prune_single_task(get_trainer,ITER,PRUNE,is_global=False,save_path=None):
def iterative_train_and_prune_single_task(get_trainer,args,model,train_set,dev_set,test_set,device,save_path=None):
    '''
    Iteratively train and prune a single-task model.
    :param get_trainer: factory that builds a fastNLP Trainer from (args, model, train_set, dev_set, test_set, device)
    :param args: must provide args.prune (final proportion of weights to keep) and args.iter (number of iterations)
    :param save_path: optional directory which will be filled with the pruning mask saved after each iteration
    :return:
    '''
from fastNLP import Trainer
import torch
import math
import copy
PRUNE = args.prune
ITER = args.iter
trainer = get_trainer(args,model,train_set,dev_set,test_set,device)
optimizer_init_state_dict = copy.deepcopy(trainer.optimizer.state_dict())
model_init_state_dict = copy.deepcopy(trainer.model.state_dict())
if save_path is not None:
if not os.path.exists(save_path):
os.makedirs(save_path)
# if not os.path.exists(os.path.join(save_path, 'model_init.pkl')):
# f = open(os.path.join(save_path, 'model_init.pkl'), 'wb')
# torch.save(trainer.model.state_dict(),f)
mask_count = 0
model = trainer.model
task = trainer.model.now_task
for name, p in model.mask[task].items():
mask_count += torch.sum(p).item()
init_mask_count = mask_count
logger.info('init mask count:{}'.format(mask_count))
# logger.info('{}th traning mask count: {} / {} = {}%'.format(i, mask_count, init_mask_count,
# mask_count / init_mask_count * 100))
prune_per_iter = math.pow(PRUNE, 1 / ITER)
for i in range(ITER):
trainer = get_trainer(args,model,train_set,dev_set,test_set,device)
one_time_train_and_prune_single_task(trainer,prune_per_iter,optimizer_init_state_dict,model_init_state_dict)
if save_path is not None:
f = open(os.path.join(save_path,task+'_mask_'+str(i)+'.pkl'),'wb')
torch.save(model.mask[task],f)
mask_count = 0
for name, p in model.mask[task].items():
mask_count += torch.sum(p).item()
        logger.info('{}th training mask count: {} / {} = {}%'.format(i, mask_count, init_mask_count, mask_count / init_mask_count * 100))
def get_appropriate_cuda(task_scale='s'):
if task_scale not in {'s','m','l'}:
logger.info('task scale wrong!')
exit(2)
import pynvml
pynvml.nvmlInit()
total_cuda_num = pynvml.nvmlDeviceGetCount()
for i in range(total_cuda_num):
logger.info(i)
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # the index here is the GPU id
memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
        logger.info('{} mem: {} util: {}'.format(i, memInfo.used / memInfo.total, utilizationInfo.gpu))
if memInfo.used / memInfo.total < 0.15 and utilizationInfo.gpu <0.2:
            logger.info('{} {}'.format(i, memInfo.used / memInfo.total))
return 'cuda:'+str(i)
if task_scale=='s':
max_memory=2000
elif task_scale=='m':
max_memory=6000
else:
max_memory = 9000
max_id = -1
for i in range(total_cuda_num):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # the index here is the GPU id
memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
if max_memory < memInfo.free:
max_memory = memInfo.free
max_id = i
    if max_id == -1:
logger.info('no appropriate gpu, wait!')
exit(2)
return 'cuda:'+str(max_id)
# if memInfo.used / memInfo.total < 0.5:
# return
def print_mask(mask_dict):
def seq_mul(*X):
res = 1
for x in X:
res*=x
return res
for name,p in mask_dict.items():
total_size = seq_mul(*p.size())
        unmasked_size = len(np.nonzero(p.data.cpu().numpy())[0])
print(name,':',unmasked_size,'/',total_size,'=',unmasked_size/total_size*100,'%')
print()
def check_words_same(dataset_1,dataset_2,field_1,field_2):
if len(dataset_1[field_1]) != len(dataset_2[field_2]):
logger.info('CHECK: example num not same!')
return False
for i, words in enumerate(dataset_1[field_1]):
if len(dataset_1[field_1][i]) != len(dataset_2[field_2][i]):
logger.info('CHECK {} th example length not same'.format(i))
logger.info('1:{}'.format(dataset_1[field_1][i]))
            logger.info('2:{}'.format(dataset_2[field_2][i]))
return False
# for j,w in enumerate(words):
# if dataset_1[field_1][i][j] != dataset_2[field_2][i][j]:
# print('CHECK', i, 'th example has words different!')
# print('1:',dataset_1[field_1][i])
# print('2:',dataset_2[field_2][i])
# return False
logger.info('CHECK: totally same!')
return True
def get_now_time():
import time
from datetime import datetime, timezone, timedelta
    dt = datetime.now(timezone.utc)
# print(dt)
tzutc_8 = timezone(timedelta(hours=8))
local_dt = dt.astimezone(tzutc_8)
result = ("_{}_{}_{}__{}_{}_{}".format(local_dt.year, local_dt.month, local_dt.day, local_dt.hour, local_dt.minute,
local_dt.second))
return result
def get_bigrams(words):
result = []
for i,w in enumerate(words):
if i!=len(words)-1:
result.append(words[i]+words[i+1])
else:
result.append(words[i]+'<end>')
return result
def print_info(*inp,islog=False,sep=' '):
from fastNLP import logger
if islog:
print(*inp,sep=sep)
else:
inp = sep.join(map(str,inp))
logger.info(inp)
def better_init_rnn(rnn,coupled=False):
import torch.nn as nn
if coupled:
repeat_size = 3
else:
repeat_size = 4
# print(list(rnn.named_parameters()))
if hasattr(rnn,'num_layers'):
for i in range(rnn.num_layers):
nn.init.orthogonal(getattr(rnn,'weight_ih_l'+str(i)).data)
weight_hh_data = torch.eye(rnn.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, repeat_size)
with torch.no_grad():
getattr(rnn,'weight_hh_l'+str(i)).set_(weight_hh_data)
nn.init.constant(getattr(rnn,'bias_ih_l'+str(i)).data, val=0)
nn.init.constant(getattr(rnn,'bias_hh_l'+str(i)).data, val=0)
if rnn.bidirectional:
for i in range(rnn.num_layers):
nn.init.orthogonal(getattr(rnn, 'weight_ih_l' + str(i)+'_reverse').data)
weight_hh_data = torch.eye(rnn.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, repeat_size)
with torch.no_grad():
getattr(rnn, 'weight_hh_l' + str(i)+'_reverse').set_(weight_hh_data)
nn.init.constant(getattr(rnn, 'bias_ih_l' + str(i)+'_reverse').data, val=0)
nn.init.constant(getattr(rnn, 'bias_hh_l' + str(i)+'_reverse').data, val=0)
else:
nn.init.orthogonal(rnn.weight_ih.data)
weight_hh_data = torch.eye(rnn.hidden_size)
weight_hh_data = weight_hh_data.repeat(repeat_size,1)
with torch.no_grad():
rnn.weight_hh.set_(weight_hh_data)
# The bias is just set to zero vectors.
print('rnn param size:{},{}'.format(rnn.weight_hh.size(),type(rnn)))
if rnn.bias:
nn.init.constant(rnn.bias_ih.data, val=0)
nn.init.constant(rnn.bias_hh.data, val=0)
# print(list(rnn.named_parameters()))
|
11476826
|
import logging
import uuid
from django.utils import timezone
from elasticsearch import Elasticsearch, RequestError
from elasticsearch.client import IlmClient
from zentral.core.exceptions import ImproperlyConfigured
from zentral.contrib.inventory.models import Source
from zentral.contrib.inventory.utils import SourceFilter
from .base import BaseExporter
logger = logging.getLogger("zentral.contrib.inventory.exporters.es_aggregations")
ES_ALIAS = "zentral-inventory-export-aggregations"
ES_LIFECYCLE_POLICY_NAME = ES_ALIAS
ES_LIFECYCLE_POLICY = {
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_size": "1GB",
"max_age": "15d",
"max_docs": 1000000,
}
}
},
"delete": {
"min_age": "30d",
"actions": {
"delete": {}
}
}
}
}
}
ES_TEMPLATE_NAME = ES_ALIAS
ES_INDEX_PATTERN = '{}-*'.format(ES_ALIAS)
ES_TEMPLATE = {
'index_patterns': [ES_INDEX_PATTERN],
'settings': {'number_of_shards': 1,
'number_of_replicas': 0,
'index.lifecycle.name': ES_LIFECYCLE_POLICY_NAME,
'index.lifecycle.rollover_alias': ES_ALIAS},
'mappings': {
'date_detection': False,
'dynamic_templates': [{'strings_as_keyword': {'mapping': {'ignore_above': 1024,
'type': 'keyword'},
'match_mapping_type': 'string'}}],
'properties': {
'source': {
"properties": {
"id": {"type": "integer"},
"module": {"type": "keyword"},
"name": {"type": "keyword"},
"display_name": {"type": "keyword"},
}
},
'filter': {
"properties": {
"title": {"type": "keyword"},
"slug": {"type": "keyword"},
}
},
'value': {"type": "keyword"},
'count': {"type": "integer"},
'@timestamp': {'type': 'date'},
}
}
}
class InventoryExporter(BaseExporter):
name = "elasticsearch aggregations exporter"
def __init__(self, config_g):
super().__init__(config_g)
error_msgs = []
self.es_hosts = config_g["es_hosts"]
if not self.es_hosts:
error_msgs.append("Missing es_hosts")
if not isinstance(self.es_hosts, list):
error_msgs.append("es_hosts must be a list")
if error_msgs:
raise ImproperlyConfigured("{} in {}".format(", ".join(error_msgs), self.name))
def iter_machine_snapshots(self):
for serial_number, machine_snapshots in self.get_ms_query().fetch(paginate=False, for_filtering=True):
for machine_snapshot in machine_snapshots:
yield machine_snapshot
def get_es_client(self):
self._es = Elasticsearch(hosts=self.es_hosts)
self._es_version = [int(i) for i in self._es.info()["version"]["number"].split(".")]
if self._es_version < [7]:
raise ValueError("Inventory exporter {} not compatible with ES < 7.0")
# lifecycle
_esilm = IlmClient(self._es)
_esilm.put_lifecycle(ES_LIFECYCLE_POLICY_NAME, ES_LIFECYCLE_POLICY)
# template
self._es.indices.put_template(ES_TEMPLATE_NAME, ES_TEMPLATE)
# create index
for i in range(10):
existing_indices = self._es.indices.get(ES_INDEX_PATTERN).keys()
if not len(existing_indices):
current_index_name = ES_INDEX_PATTERN.replace("*", "000001")
try:
self._es.indices.create(current_index_name, {"aliases": {ES_ALIAS: {"is_write_index": True}}})
except RequestError:
# probably race
pass
else:
break
return ES_ALIAS
def run(self):
timestamp = timezone.now().isoformat()
index_name = self.get_es_client()
for source in Source.objects.current_machine_snapshot_sources():
ms_query = self.get_ms_query()
source_d = {"id": source.pk,
"module": source.module,
"name": source.name,
"display_name": source.get_display_name()}
ms_query.force_filter(SourceFilter, hidden_value=source.pk)
for f, f_links, _, _ in ms_query.grouping_links():
filter_d = {"title": f.title, "slug": f.get_query_kwarg()}
for label, f_count, _, _, _ in f_links:
if label == "\u2400":
label = "NULL"
elif not isinstance(label, str):
label = str(label)
doc = {"source": source_d,
"filter": filter_d,
"value": label,
"count": f_count,
"@timestamp": timestamp}
doc_id = str(uuid.uuid4())
self._es.create(index_name, doc_id, doc)
|
11476897
|
from edi_835_parser.elements import Element
organization_types = {
'PE': 'payee',
'PR': 'payer',
}
class OrganizationType(Element):
def parser(self, value: str) -> str:
value = value.strip()
return organization_types.get(value, value)
|
11476912
|
from typing import (
List,
Optional,
)
from functools import (
reduce,
)
from ..token import (
Token,
TokenType,
)
from ..node import (
CykNode,
)
from ..peaker import (
Peaker,
)
from .identifiers import (
NoqaIdentifier,
)
def _is(peaker, token_type, index=1):
# type: (Peaker[Token], Optional[TokenType], int) -> bool
try:
token = peaker.peak(lookahead=index)
except IndexError:
token = None
if not token_type and not token:
return True
return bool(token and token.token_type == token_type)
def _are(peaker, *token_types):
# type: (Peaker[Token], Optional[TokenType]) -> bool
return all([
_is(peaker, token_type, i + 1)
for i, token_type in enumerate(token_types)
])
def _parse_noqa_head(peaker):
# type: (Peaker[Token]) -> Optional[CykNode]
if not (
_are(peaker, TokenType.HASH, TokenType.NOQA, TokenType.NEWLINE)
or _are(peaker, TokenType.HASH, TokenType.NOQA, None)
):
return None
noqa_hash = CykNode('hash', value=peaker.next())
noqa = CykNode('noqa', value=peaker.next())
if _is(peaker, TokenType.NEWLINE):
peaker.next()
return CykNode(
'noqa',
lchild=noqa_hash,
rchild=noqa,
annotations=[
NoqaIdentifier,
],
)
def _last_node(node):
# type: (CykNode) -> CykNode
curr = node
rchild = curr.rchild
while rchild:
curr = rchild
rchild = curr.rchild
return curr
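# Right fold: foldr(f, [a, b, c], acc) == f(a, f(b, f(c, acc))).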
def foldr(fun, xs, acc):
return reduce(lambda x, y: fun(y, x), xs[::-1], acc)
def _parse_words_until_newline_or_end(peaker):
if not peaker.has_next() or _is(peaker, TokenType.NEWLINE):
return None
words = [CykNode('word', value=peaker.next())]
while peaker.has_next() and not _is(peaker, TokenType.NEWLINE):
words.append(CykNode('word', value=peaker.next()))
if len(words) == 1:
head = words[0]
head.symbol = 'words'
return head
def join(x, y):
return CykNode(
'words',
lchild=x,
rchild=y,
)
acc = words.pop()
acc.symbol = 'words'
return foldr(join, words, acc)
def _parse_noqa(peaker):
# type: (Peaker[Token]) -> Optional[CykNode]
if not (
_are(peaker, TokenType.HASH, TokenType.NOQA, TokenType.COLON,
TokenType.WORD)
):
return None
noqa_hash = CykNode('hash', value=peaker.next())
noqa = CykNode('noqa', value=peaker.next())
colon = CykNode('colon', value=peaker.next())
targets = _parse_words_until_newline_or_end(peaker)
head = CykNode(
'noqa',
lchild=CykNode(
'noqa-head',
lchild=noqa_hash,
rchild=noqa,
),
rchild=CykNode(
'noqa-statement1',
lchild=colon,
rchild=targets,
),
annotations=[
NoqaIdentifier,
],
)
return head
def _parse_long_description(peaker):
# type: (Peaker[Token]) -> Optional[CykNode]
if not peaker.has_next():
return None
head = _parse_noqa(peaker) or _parse_noqa_head(peaker)
if head:
new_head = CykNode(
'long-description',
lchild=head,
)
head = new_head
else:
head = CykNode(
symbol='long-description',
lchild=CykNode('long-description1', value=peaker.next()),
)
curr = _last_node(head)
while peaker.has_next():
noqa = _parse_noqa(peaker) or _parse_noqa_head(peaker)
if not noqa: # curr.rchild:
curr.rchild = CykNode(
symbol='long-description1',
lchild=CykNode('long-description1', value=peaker.next()),
)
else:
old_left = curr.lchild
curr.lchild = CykNode(
symbol='long-description1',
lchild=old_left,
rchild=noqa,
)
curr = _last_node(curr)
return head
def parse(tokens):
# type: (List[Token]) -> Optional[CykNode]
peaker = Peaker((x for x in tokens), lookahead=5)
if not peaker.has_next():
return None
return _parse_long_description(peaker)
|
11476940
|
from anoncreds.protocol.types import AttribType, AttribDef
GVT = AttribDef('gvt',
[AttribType('name', encode=True),
AttribType('age', encode=False),
AttribType('height', encode=False),
AttribType('sex', encode=True)])
|
11476955
|
import pandas as pd
import pytest
from orion.evaluation.utils import (
from_list_points_labels, from_list_points_timestamps, from_pandas_contextual,
from_pandas_points, from_pandas_points_labels)
def assert_list_tuples(returned, expected_return):
assert len(returned) == len(expected_return)
for ret, exp_ret in zip(returned, expected_return):
assert tuple(ret) == exp_ret
def test_from_pandas_contextual():
anomalies = pd.DataFrame({'start': [2, 8], 'end': [5, 9]})
expected_return = [(2, 5), (8, 9)]
returned = from_pandas_contextual(anomalies)
assert_list_tuples(returned, expected_return)
def test_from_pandas_contextual_severity():
anomalies = pd.DataFrame({'start': [2, 8], 'end': [5, 9],
'severity': [0.1, 0.2]})
expected_return = [(2, 5, 0.1), (8, 9, 0.2)]
returned = from_pandas_contextual(anomalies)
assert_list_tuples(returned, expected_return)
def test_from_pandas_contextual_error():
anomalies = pd.DataFrame({'start': [2, 8]})
with pytest.raises(KeyError):
from_pandas_contextual(anomalies)
def test_from_list_points_timestamps():
anomalies = [2, 3, 4, 5, 8, 9]
expected_return = [(2, 5), (8, 9)]
returned = from_list_points_timestamps(anomalies)
assert_list_tuples(returned, expected_return)
def test_from_pandas_points():
anomalies = pd.DataFrame({'timestamp': [2, 3, 4, 5, 8, 9]})
expected_return = [(2, 5), (8, 9)]
returned = from_pandas_points(anomalies)
assert_list_tuples(returned, expected_return)
def test_from_pandas_points_error():
anomalies = pd.DataFrame({'label': [0, 1]})
with pytest.raises(KeyError):
from_pandas_points(anomalies)
def test_from_pandas_points_labels():
anomalies = pd.DataFrame({'timestamp': [2, 3, 4, 5, 6, 7, 8, 9],
'label': [1, 1, 1, 1, 0, 0, 1, 1]})
expected_return = [(2, 5), (8, 9)]
returned = from_pandas_points_labels(anomalies)
assert_list_tuples(returned, expected_return)
def test_from_pandas_points_labels_error():
anomalies = pd.DataFrame({'timestamp': [2, 8]})
with pytest.raises(KeyError):
from_pandas_points_labels(anomalies)
def test_from_list_points_labels():
anomalies = [0, 0, 1, 1, 1, 1, 0, 0, 1, 1]
expected_return = [(2, 5), (8, 9)]
returned = from_list_points_labels(anomalies)
assert_list_tuples(returned, expected_return)
|
11476997
|
n = int(input())
marksheet = [(input(), float(input())) for _ in range(n)]
marks = [mark for name, mark in marksheet]
second_lowest = sorted(set(marks))[1]
names = [name for name, grade in marksheet if grade == second_lowest]
print("\n".join(sorted(names)))
|
11477042
|
import uvicore
import inspect
import importlib
from uvicore.support import module
from uvicore.container import Binding
from uvicore.support.dumper import dd, dump
from uvicore.contracts import Ioc as IocInterface
from uvicore.typing import Any, Callable, List, Optional, Type, TypeVar, Dict, Union
T = TypeVar('T')
class Ioc(IocInterface):
"""Inversion of Control private class.
Do not import from this location.
Use the uvicore.ioc singleton global instead."""
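    # Illustrative usage via the global singleton (the binding name 'cache' is hypothetical):
    #   import uvicore
    #   cache = uvicore.ioc.make('cache')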
@property
def bindings(self) -> Dict[str, Binding]:
return self._bindings
@property
def overrides(self) -> Dict[str, str]:
# Merge app config bindings with registered overrides (app config wins)
app_config_overrides = self._app_config.get('bindings') or {}
overrides = {**self._overrides, **app_config_overrides}
return overrides
@property
def aliases(self) -> Dict[str, str]:
return self._aliases
def __init__(self, app_config: Dict) -> None:
self._bindings: Dict[str, Binding] = Dict()
self._aliases: Dict[str, str] = Dict()
self._app_config = app_config
self._overrides: Dict[str, str] = Dict()
# Add default binding specific to uvicore framework
# Only some early defaults are here. The rest are bound in
# their service providers register() method
# NO - Deprecated now that I can bind default and make
self.bind_map({
#'Application': {
# 'object': 'uvicore.foundation.application._Application',
# 'singleton': True,
# 'aliases': ['App', 'app', 'application'],
#},
#'ServiceProvider': {
# 'object': 'uvicore.package.provider._ServiceProvider',
# 'aliases': ['service', 'provider'],
#},
#'Package': {
# 'object': 'uvicore.package.package._Package',
# 'aliases': ['package'],
#},
#'Dispatcher': {
# 'object': 'uvicore.events.dispatcher._Dispatcher',
# 'singleton': True,
# 'aliases': ['dispatcher', 'Event', 'event', 'Events', 'events'],
#},
})
#def config(self, config: Dict) -> None:
# self._app_config = config
def binding(self, name: str = None, *, type: str = None, include_overrides: bool = True) -> Union[Binding, Dict]:
if name:
# Get one binding by name
if name in self.bindings:
return self.bindings[name]
elif name in self.aliases:
return self.bindings[self.aliases[name]]
elif type:
# Get all binding of the specified type
#return [binding for binding in self.bindings.values() if binding.type.lower() == type.lower()]
bindings = Dict({key:binding for key, binding in self.bindings.items() if binding.type.lower() == type.lower()})
if include_overrides:
return bindings
# Strip out overridden bindings
new_bindings = Dict()
for key, binding in bindings.items():
# Ignore BASE override models
if binding.path != key: continue
new_bindings[key] = binding
return new_bindings
def make(self, name: str, default: Callable[[], T] = None, **kwargs) -> T:
if default is not None and self.binding(name) is None:
# Default was provided and no binding currently exists
# Bind the default provided but look for bindings override in app_config
#object = default
#bindings = self._app_config.get('bindings') or {}
#object = bindings.get(name) or default
object = self.overrides.get(name) or default
self.bind(name, object, **kwargs)
binding = self.binding(name)
if not binding:
# No binding set yet. If we simply try to import the file only, it may have a
# decorator that will bind itself. If no binding found even after import, treat as not found
if '.' in name: module.load(name)
# Check binding again
binding = self.binding(name)
if not binding:
raise ModuleNotFoundError("Could not find IoC name '{}' in mapping.".format(name))
# If object is not defined, dynamically import it on first make (deferred)
if not binding.object:
# If object is None, dynamically import object from path
binding.object = module.load(binding.path).object
# Determine type
is_class = inspect.isclass(binding.object)
is_singleton = is_class and binding.singleton
kwargs = binding.kwargs or {}
# Odd case
if not is_class and binding.singleton and hasattr(binding.object, '__class__') and '.' in str(getattr(binding.object, '__class__')):
            # If you override a singleton with another singleton (in the case of overriding a table, for example)
            # you get an odd case where the binding object is the singleton instance itself. So here we detect if the object
            # should be a singleton, is NOT a class (because it's already a singleton instance) and the object
            # is an instance (meaning it has a __class__ attribute), then we swap the instance with the object
            # and set the object to the instance's __class__.
            # By checking if __class__ has a . in it we skip cases where someone accidentally marked a function or method as a singleton.
# In case you are wondering, the singleton of the original IS the same singleton as the override!
binding.instance = binding.object
binding.object = binding.instance.__class__
is_singleton = True
# Instantiate a singleton only once
made = None
if is_singleton:
if not binding.instance:
if binding.factory:
factory = module.load(binding.factory).object
binding.instance = factory().make(binding.object, **kwargs)
else:
binding.instance = binding.object(**kwargs)
made = binding.instance
# Instantiate a non-singleton every time
# Unless there is no factory and no kwargs, simply return the object class
elif is_class:
if binding.factory:
if type(binding.factory) == str:
# String factory, dynamically import it
factory = module.load(binding.factory).object
else:
# Direct class factory
factory = binding.factory
made = factory().make(binding.object, **kwargs)
elif binding.kwargs:
made = binding.object(**kwargs)
else:
made = binding.object
# Bind is not a class. Must be a method or module, return it
else:
made = binding.object
# Return made object
return made
def bind_from_decorator(self, cls, name: str = None, *, object_type: str = None, factory: Any = None, kwargs: Dict = None, singleton: bool = False, aliases: List = []) -> None:
"""Bind from a decorator"""
# Check for an override binding in the running app_config
# These can also be defined in a service provider self.binding_override() method
override = self.overrides.get(name)
# If override and they aren't the same class
if override and override != name:
# Override OBJECT path defined in config or service provider self.binding_override() method
self.bind(name=name, object=override, object_type=object_type, override=True, factory=factory, kwargs=kwargs, singleton=singleton, aliases=aliases)
# Also bind the original so I can import it to override it. Originals should never be a singleton
# We solve circular dependencies by adding the cls right to the binding, so it never has to import it!
self.bind(name + '_BASE', cls, object_type=object_type, override=True, factory=factory, kwargs=kwargs, singleton=False, aliases=aliases)
else:
# No override. Check if binding already exists.
existing = self.binding(name)
if existing and existing.path == name and existing.object is None:
# Binding already exists and decorators never override existing bindings.
                # If the existing binding is the same class this decorator is on, add in the
                # cls so .make() doesn't have to "import" the same class (causing circular
                # import issues). If it's not the same class, .make() will make and import it
                # as usual. Use the original decorator's object_type and singleton because those
                # should NEVER be different (a user override might get them wrong). If the user
                # provides no aliases, use the decorator's.
existing.object = cls
existing.type = object_type
existing.singleton = singleton
if not existing.aliases: existing.aliases = aliases
if not existing.kwargs: existing.kwargs = kwargs
if not existing.factory: existing.factory = factory
else:
# Binding does not already exist, create it from decorator
self.bind(name, cls, object_type=object_type, override=False, factory=factory, kwargs=kwargs, singleton=singleton, aliases=aliases)
#self.bind(name, cls, object_type=object_type, override=False, factory=factory, kwargs=kwargs, singleton=singleton, aliases=aliases)
#return cls
# else:
# # Binding already exists and decorators never override existing bindings. If existing binding is the same class as this decorator is on
# # add in the cls so .make() doesn't have to "import" the same class (causing circular import issues). If its not the same class, .make() will
# # make and import it as usual.
# if self.bindings[name].path == name and self.bindings[name].object is None:
# self._bindings[name].object = cls
# self._bindings[name].type = object_type
# Finally return the actual bind make, which if overridden, could be a completely different object!
return self.make(name)
def bind(self, name: str = None, object: Any = None, *, object_type: str = 'service', override: bool = True, factory: Any = None, kwargs: Dict = None, singleton: bool = False, aliases: List = []) -> None:
# Decorator Usage
if object is None:
def decorator(cls):
bind_name = name or cls.__module__ + '.' + cls.__name__
return self.bind_from_decorator(cls, name=bind_name, object_type=object_type, factory=factory, kwargs=kwargs, singleton=singleton, aliases=aliases)
return decorator
# Add each aliases to list of all aliases
for alias in aliases:
self._aliases[alias] = name
# Set path and object based on str or actual class
path = None
if type(object) == str:
path = object
object = None
if path is None and object is not None:
if hasattr(object, '__module__') and hasattr(object, '__name__'):
path = object.__module__ + '.' + object.__name__
else:
path = name
# Add binding, obeying override
if override == True or name not in self.bindings:
#print(name, '----', path)
self._bindings[name] = Binding(
path=path,
object=object,
instance=None,
type=object_type,
factory=factory,
kwargs=kwargs,
singleton=singleton,
aliases=aliases,
)
def bind_override(self, name: str, object: str):
"""Add a binding override to an array to check later"""
self._overrides[name] = object
def bind_map(self, mapping: Dict[str, Dict]) -> None:
# bind_map is not used anymore, though could be cool if passed through from provider class as well, if ever
for name, options in mapping.items():
self.bind(name, **options)
def alias(self, src: str, dest: str) -> None:
if dest not in self.bindings:
            raise Exception('Could not find IoC binding {}'.format(dest))
        if src not in self.bindings[dest].aliases:
            self.bindings[dest].aliases.append(src)
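# Hedged usage sketch (illustration only, not part of the framework): real
# applications use the `uvicore.ioc` module-level singleton mentioned in the
# class docstring instead of instantiating Ioc directly, and the names
# 'app.MyService' / 'my_service' / MyService below are hypothetical.
if __name__ == '__main__':
    class MyService:
        def __init__(self, greeting: str = 'hello'):
            self.greeting = greeting

    ioc = Ioc(app_config=Dict())

    # Explicit binding: a singleton with constructor kwargs and an alias
    ioc.bind('app.MyService', MyService, singleton=True,
             kwargs={'greeting': 'hi'}, aliases=['my_service'])

    # make() resolves by name or alias and instantiates the singleton only once
    svc = ioc.make('my_service')
    assert svc.greeting == 'hi'
    assert svc is ioc.make('app.MyService')

    # The decorator form, `@ioc.bind('app.Other', singleton=True)` above a class
    # definition, registers a binding the same way (see bind() / bind_from_decorator()).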
|
11477053
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from uuid import uuid4
from datetime import datetime
from traildb import TrailDBConstructor, TrailDB
cons = TrailDBConstructor('tiny', ['username', 'action'])
for i in range(3):
uuid = uuid4().hex
username = 'user%d' % i
for day, action in enumerate(['open', 'save', 'close']):
cons.add(uuid, datetime(2016, i + 1, day + 1), (username, action))
cons.finalize()
for uuid, trail in TrailDB('tiny').trails():
print(uuid, list(trail))
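# Sketch of expected behaviour: the loop above should print three lines, one per
# generated uuid, each followed by that trail's three events ('open', 'save',
# 'close') with the timestamps added above.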
|
11477069
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
import uuid
# def auto_constraint_name(constraint, table):
# if constraint.name is None or constraint.name == "_unnamed_":
# return "sa_autoname_%s" % str(uuid.uuid4())[0:5]
# else:
# return constraint.name
# Recommended naming convention used by Alembic, as various different database
# providers will autogenerate vastly different names making migrations more
# difficult.
NAMING_CONVENTION = {
#"auto_constraint_name": auto_constraint_name,
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=NAMING_CONVENTION)
Base = declarative_base(metadata=metadata)
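# Hedged illustration (hypothetical model, not part of this module): any model
# declared against `Base` picks up NAMING_CONVENTION, so the generated DDL
# should carry deterministic constraint names (e.g. 'pk_user', 'uq_user_email')
# rather than backend-specific autogenerated ones.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String, UniqueConstraint
    from sqlalchemy.schema import CreateTable

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        email = Column(String(255))
        __table_args__ = (UniqueConstraint('email'),)

    # Should emit DDL along the lines of:
    #   CONSTRAINT pk_user PRIMARY KEY (id)
    #   CONSTRAINT uq_user_email UNIQUE (email)
    print(CreateTable(User.__table__))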
|
11477080
|
import click
from opnsense_cli.formatters.cli_output import CliOutputFormatter
from opnsense_cli.callbacks.click import \
formatter_from_formatter_name, bool_as_string, available_formats, int_as_string, tuple_to_csv, \
resolve_linked_names_to_uuids
from opnsense_cli.types.click_param_type.int_or_empty import INT_OR_EMPTY
from opnsense_cli.commands.plugin.haproxy import haproxy
from opnsense_cli.api.client import ApiClient
from opnsense_cli.api.plugin.haproxy import Settings, Service
from opnsense_cli.facades.commands.plugin.haproxy.acl import HaproxyAclFacade
pass_api_client = click.make_pass_decorator(ApiClient)
pass_haproxy_acl_svc = click.make_pass_decorator(HaproxyAclFacade)
@haproxy.group()
@pass_api_client
@click.pass_context
def acl(ctx, api_client: ApiClient, **kwargs):
"""
Specify various conditions.
Define custom rules for blocking malicious requests, choosing backends, redirecting to HTTPS and
using cached objects.
"""
settings_api = Settings(api_client)
service_api = Service(api_client)
ctx.obj = HaproxyAclFacade(settings_api, service_api)
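# Hedged usage note (the console-script name is an assumption and may differ in
# your install; subcommands and options come from the definitions below):
#   <cli> haproxy acl list -o json -c uuid,name,expression
#   <cli> haproxy acl create my_acl --expression path_beg --path_beg /api
#   <cli> haproxy acl delete <uuid>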
@acl.command()
@click.option(
'--output', '-o',
help='Specifies the Output format.',
default="table",
type=click.Choice(available_formats()),
callback=formatter_from_formatter_name,
show_default=True,
)
@click.option(
'--cols', '-c',
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
default=(
"uuid,name,description,expression,negate"
),
show_default=True,
)
@pass_haproxy_acl_svc
def list(haproxy_acl_svc: HaproxyAclFacade, **kwargs):
"""
Show all acl
"""
result = haproxy_acl_svc.list_acls()
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@acl.command()
@click.argument('uuid')
@click.option(
'--output', '-o',
help='Specifies the Output format.',
default="table",
type=click.Choice(available_formats()),
callback=formatter_from_formatter_name,
show_default=True,
)
@click.option(
'--cols', '-c',
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
default=(
"name,description,expression,negate,hdr_beg,hdr_end,hdr,hdr_reg,hdr_sub,path_beg,path_end,path,path_reg,"
"path_dir,path_sub,cust_hdr_beg_name,cust_hdr_beg,cust_hdr_end_name,cust_hdr_end,cust_hdr_name,cust_hdr,"
"cust_hdr_reg_name,cust_hdr_reg,cust_hdr_sub_name,cust_hdr_sub,url_param,url_param_value,ssl_c_verify_code,"
"ssl_c_ca_commonname,src,src_bytes_in_rate_comparison,src_bytes_in_rate,src_bytes_out_rate_comparison,"
"src_bytes_out_rate,src_conn_cnt_comparison,src_conn_cnt,src_conn_cur_comparison,src_conn_cur,"
"src_conn_rate_comparison,src_conn_rate,src_http_err_cnt_comparison,src_http_err_cnt,"
"src_http_err_rate_comparison,src_http_err_rate,src_http_req_cnt_comparison,src_http_req_cnt,"
"src_http_req_rate_comparison,src_http_req_rate,src_kbytes_in_comparison,src_kbytes_in,"
"src_kbytes_out_comparison,src_kbytes_out,src_port_comparison,src_port,src_sess_cnt_comparison,"
"src_sess_cnt,src_sess_rate_comparison,src_sess_rate,nbsrv,nbsrv_backend,BackendNrSrv,ssl_fc_sni,ssl_sni,"
"ssl_sni_sub,ssl_sni_beg,ssl_sni_end,ssl_sni_reg,custom_acl,value,urlparam,"
"queryBackend,BackendQuery,allowedUsers,Users,allowedGroups,Groups"
),
show_default=True,
)
@pass_haproxy_acl_svc
def show(haproxy_acl_svc: HaproxyAclFacade, **kwargs):
"""
Show details for acl
"""
result = haproxy_acl_svc.show_acl(kwargs['uuid'])
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@acl.command()
@click.argument('name')
@click.option(
'--description',
help=('Description for this condition.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--expression',
help=('Type of condition'),
type=click.Choice(
[
'http_auth', 'hdr_beg', 'hdr_end', 'hdr', 'hdr_reg', 'hdr_sub', 'path_beg', 'path_end', 'path',
'path_reg', 'path_dir', 'path_sub', 'cust_hdr_beg', 'cust_hdr_end', 'cust_hdr', 'cust_hdr_reg',
'cust_hdr_sub', 'url_param', 'ssl_c_verify', 'ssl_c_verify_code', 'ssl_c_ca_commonname', 'src',
'src_is_local', 'src_port', 'src_bytes_in_rate', 'src_bytes_out_rate', 'src_kbytes_in', 'src_kbytes_out',
'src_conn_cnt', 'src_conn_cur', 'src_conn_rate', 'src_http_err_cnt', 'src_http_err_rate',
'src_http_req_cnt', 'src_http_req_rate', 'src_sess_cnt', 'src_sess_rate', 'nbsrv',
'traffic_is_http', 'traffic_is_ssl', 'ssl_fc', 'ssl_fc_sni', 'ssl_sni', 'ssl_sni_sub',
'ssl_sni_beg', 'ssl_sni_end', 'ssl_sni_reg', 'custom_acl'
]
),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None,
required=True,
)
@click.option(
'--negate/--no-negate',
help=('Use this to invert the meaning of the expression.'),
show_default=True,
is_flag=True,
callback=bool_as_string,
default=True,
required=True,
)
@click.option(
'--hdr_beg',
help=('HTTP host header starts with string (prefix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--hdr_end',
help=('HTTP host header ends with string (suffix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--hdr',
help=('HTTP host header matches exact string'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--hdr_reg',
help=('HTTP host header matches regular expression'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--hdr_sub',
help=('HTTP host header contains string (substring match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path_beg',
help=('HTTP request URL path starts with string (prefix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path_end',
help=('HTTP request URL path ends with string (suffix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path',
help=('HTTP request URL path matches exact string'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path_reg',
help=('HTTP request URL path matches regular expression'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path_dir',
help=('HTTP request URL path contains directory (subdir match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--path_sub',
help=('HTTP request URL path contains string (substring match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_beg_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_beg',
help=('HTTP Header starts with string (prefix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_end_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_end',
help=('HTTP Header ends with string (suffix match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr',
help=('HTTP Header matches exact string'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_reg_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_reg',
help=('HTTP Header matches regular expression'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_sub_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--cust_hdr_sub',
help=('HTTP Header contains string (substring match)'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--url_param',
help=('Specify the URL parameter to be checked for the value specified below.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--url_param_value',
help=('Specify the value for the URL parameter.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_c_verify_code',
help=(
'Specify the SSL/TLS error ID that should be checked for the incoming connection. '
'Please refer to your SSL library\'s documentation for an exhaustive list of error codes.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--ssl_c_ca_commonname',
help=('Verify the CA Common-Name of the certificate presented by the client against the specified string.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--src',
help=('Verify the source IPv4 address of the client of the session matches the specified IPv4 or IPv6 address.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--src_bytes_in_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_bytes_in_rate',
help=('The average bytes rate from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_bytes_out_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_bytes_out_rate',
help=('The average bytes rate to the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_conn_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_conn_cnt',
help=('The cumulative number of connections initiated from the current incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_conn_cur_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_conn_cur',
help=(
'The current amount of concurrent connections initiated from the current incoming connection\'s source address.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_conn_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_conn_rate',
help=('The average connection rate from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_http_err_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_http_err_cnt',
help=('The cumulative number of HTTP errors from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_http_err_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_http_err_rate',
help=('The average rate of HTTP errors from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_http_req_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_http_req_cnt',
help=('The cumulative number of HTTP requests from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_http_req_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_http_req_rate',
help=('The average rate of HTTP requests from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_kbytes_in_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_kbytes_in',
help=('The total amount of data received from the incoming connection\'s source address (in kilobytes).'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_kbytes_out_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_kbytes_out',
help=('The total amount of data sent to the incoming connection\'s source address (in kilobytes).'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_port_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_port',
help=(
'An integer value corresponding to the TCP source port of the connection on the client side, '
'which is the port the client connected from.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_sess_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_sess_cnt',
help=('The cumulative number of connections initiated from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--src_sess_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default='gt',
required=False,
)
@click.option(
'--src_sess_rate',
help=('None'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--nbsrv',
help=('Verify the minimum number of usable servers in the named backend matches the specified value.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None,
required=False,
)
@click.option(
'--nbsrv_backend',
help=('Use the specified backend to count usable servers. Leave empty to use the current backend.'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_fc_sni',
help=('The value of the Server Name TLS extension sent by a client matches the exact string.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_sni',
help=('The value of the Server Name TLS extension sent by a client matches the exact string.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_sni_sub',
help=(
'The value of the Server Name TLS extension sent by a client contains the specified string (substring match).'
),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_sni_beg',
help=(
'The value of the Server Name TLS extension sent by a client starts with the specified string (prefix match).'
),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_sni_end',
help=('The value of the Server Name TLS extension sent by a client ends with the specified string (suffix match).'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--ssl_sni_reg',
help=('The value of the Server Name TLS extension sent by a client matches with the specified regular expression.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--custom_acl',
help=('Specify a HAProxy condition/ACL that is currently not supported by the GUI.'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--value',
help=('None'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--urlparam',
help=('None'),
show_default=True,
default=None,
required=False,
)
@click.option(
'--queryBackend',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None,
required=False,
)
@click.option(
'--allowedUsers',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None,
required=False,
)
@click.option(
'--allowedGroups',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None,
required=False,
)
@click.option(
'--output', '-o',
help='Specifies the Output format.',
default="plain",
type=click.Choice(available_formats()),
callback=formatter_from_formatter_name,
show_default=True,
)
@click.option(
'--cols', '-c',
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
default="result,validations",
show_default=True,
)
@pass_haproxy_acl_svc
def create(haproxy_acl_svc: HaproxyAclFacade, **kwargs):
"""
Create a new acl
"""
json_payload = {
'acl': {
"name": kwargs['name'],
"description": kwargs['description'],
"expression": kwargs['expression'],
"negate": kwargs['negate'],
"hdr_beg": kwargs['hdr_beg'],
"hdr_end": kwargs['hdr_end'],
"hdr": kwargs['hdr'],
"hdr_reg": kwargs['hdr_reg'],
"hdr_sub": kwargs['hdr_sub'],
"path_beg": kwargs['path_beg'],
"path_end": kwargs['path_end'],
"path": kwargs['path'],
"path_reg": kwargs['path_reg'],
"path_dir": kwargs['path_dir'],
"path_sub": kwargs['path_sub'],
"cust_hdr_beg_name": kwargs['cust_hdr_beg_name'],
"cust_hdr_beg": kwargs['cust_hdr_beg'],
"cust_hdr_end_name": kwargs['cust_hdr_end_name'],
"cust_hdr_end": kwargs['cust_hdr_end'],
"cust_hdr_name": kwargs['cust_hdr_name'],
"cust_hdr": kwargs['cust_hdr'],
"cust_hdr_reg_name": kwargs['cust_hdr_reg_name'],
"cust_hdr_reg": kwargs['cust_hdr_reg'],
"cust_hdr_sub_name": kwargs['cust_hdr_sub_name'],
"cust_hdr_sub": kwargs['cust_hdr_sub'],
"url_param": kwargs['url_param'],
"url_param_value": kwargs['url_param_value'],
"ssl_c_verify_code": kwargs['ssl_c_verify_code'],
"ssl_c_ca_commonname": kwargs['ssl_c_ca_commonname'],
"src": kwargs['src'],
"src_bytes_in_rate_comparison": kwargs['src_bytes_in_rate_comparison'],
"src_bytes_in_rate": kwargs['src_bytes_in_rate'],
"src_bytes_out_rate_comparison": kwargs['src_bytes_out_rate_comparison'],
"src_bytes_out_rate": kwargs['src_bytes_out_rate'],
"src_conn_cnt_comparison": kwargs['src_conn_cnt_comparison'],
"src_conn_cnt": kwargs['src_conn_cnt'],
"src_conn_cur_comparison": kwargs['src_conn_cur_comparison'],
"src_conn_cur": kwargs['src_conn_cur'],
"src_conn_rate_comparison": kwargs['src_conn_rate_comparison'],
"src_conn_rate": kwargs['src_conn_rate'],
"src_http_err_cnt_comparison": kwargs['src_http_err_cnt_comparison'],
"src_http_err_cnt": kwargs['src_http_err_cnt'],
"src_http_err_rate_comparison": kwargs['src_http_err_rate_comparison'],
"src_http_err_rate": kwargs['src_http_err_rate'],
"src_http_req_cnt_comparison": kwargs['src_http_req_cnt_comparison'],
"src_http_req_cnt": kwargs['src_http_req_cnt'],
"src_http_req_rate_comparison": kwargs['src_http_req_rate_comparison'],
"src_http_req_rate": kwargs['src_http_req_rate'],
"src_kbytes_in_comparison": kwargs['src_kbytes_in_comparison'],
"src_kbytes_in": kwargs['src_kbytes_in'],
"src_kbytes_out_comparison": kwargs['src_kbytes_out_comparison'],
"src_kbytes_out": kwargs['src_kbytes_out'],
"src_port_comparison": kwargs['src_port_comparison'],
"src_port": kwargs['src_port'],
"src_sess_cnt_comparison": kwargs['src_sess_cnt_comparison'],
"src_sess_cnt": kwargs['src_sess_cnt'],
"src_sess_rate_comparison": kwargs['src_sess_rate_comparison'],
"src_sess_rate": kwargs['src_sess_rate'],
"nbsrv": kwargs['nbsrv'],
"nbsrv_backend": kwargs['nbsrv_backend'],
"ssl_fc_sni": kwargs['ssl_fc_sni'],
"ssl_sni": kwargs['ssl_sni'],
"ssl_sni_sub": kwargs['ssl_sni_sub'],
"ssl_sni_beg": kwargs['ssl_sni_beg'],
"ssl_sni_end": kwargs['ssl_sni_end'],
"ssl_sni_reg": kwargs['ssl_sni_reg'],
"custom_acl": kwargs['custom_acl'],
"value": kwargs['value'],
"urlparam": kwargs['urlparam'],
"queryBackend": kwargs['querybackend'],
"allowedUsers": kwargs['allowedusers'],
"allowedGroups": kwargs['allowedgroups'],
}
}
result = haproxy_acl_svc.create_acl(json_payload)
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@acl.command()
@click.argument('uuid')
@click.option(
'--name',
help=('Name to identify this condition.'),
show_default=True,
default=None
)
@click.option(
'--description',
help=('Description for this condition.'),
show_default=True,
default=None
)
@click.option(
'--expression',
help=('None'),
type=click.Choice(
[
'http_auth', 'hdr_beg', 'hdr_end', 'hdr', 'hdr_reg', 'hdr_sub', 'path_beg', 'path_end', 'path', 'path_reg',
'path_dir', 'path_sub', 'cust_hdr_beg', 'cust_hdr_end', 'cust_hdr', 'cust_hdr_reg', 'cust_hdr_sub',
'url_param', 'ssl_c_verify', 'ssl_c_verify_code', 'ssl_c_ca_commonname', 'src', 'src_is_local',
'src_port', 'src_bytes_in_rate', 'src_bytes_out_rate', 'src_kbytes_in', 'src_kbytes_out',
'src_conn_cnt', 'src_conn_cur', 'src_conn_rate', 'src_http_err_cnt', 'src_http_err_rate',
'src_http_req_cnt', 'src_http_req_rate', 'src_sess_cnt', 'src_sess_rate', 'nbsrv', 'traffic_is_http',
'traffic_is_ssl', 'ssl_fc', 'ssl_fc_sni', 'ssl_sni', 'ssl_sni_sub', 'ssl_sni_beg', 'ssl_sni_end',
'ssl_sni_reg', 'custom_acl'
]
),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--negate/--no-negate',
help=('Use this to invert the meaning of the expression.'),
show_default=True,
is_flag=True,
callback=bool_as_string,
default=None
)
@click.option(
'--hdr_beg',
help=('HTTP host header starts with string (prefix match)'),
show_default=True,
default=None
)
@click.option(
'--hdr_end',
help=('HTTP host header ends with string (suffix match)'),
show_default=True,
default=None
)
@click.option(
'--hdr',
help=('HTTP host header matches exact string'),
show_default=True,
default=None
)
@click.option(
'--hdr_reg',
help=('HTTP host header matches regular expression'),
show_default=True,
default=None
)
@click.option(
'--hdr_sub',
help=('HTTP host header contains string (substring match)'),
show_default=True,
default=None
)
@click.option(
'--path_beg',
help=('HTTP request URL path starts with string (prefix match)'),
show_default=True,
default=None
)
@click.option(
'--path_end',
help=('HTTP request URL path ends with string (suffix match)'),
show_default=True,
default=None
)
@click.option(
'--path',
help=('HTTP request URL path matches exact string'),
show_default=True,
default=None
)
@click.option(
'--path_reg',
help=('HTTP request URL path matches regular expression'),
show_default=True,
default=None
)
@click.option(
'--path_dir',
help=('HTTP request URL path contains directory (subdir match)'),
show_default=True,
default=None
)
@click.option(
'--path_sub',
help=('HTTP request URL path contains string (substring match)'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_beg_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_beg',
help=('HTTP Header starts with string (prefix match)'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_end_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_end',
help=('HTTP Header ends with string (suffix match)'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr',
help=('HTTP Header matches exact string'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_reg_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_reg',
help=('HTTP Header matches regular expression'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_sub_name',
help=('The name of the HTTP Header.'),
show_default=True,
default=None
)
@click.option(
'--cust_hdr_sub',
help=('HTTP Header contains string (substring match)'),
show_default=True,
default=None
)
@click.option(
'--url_param',
help=('Specify the URL parameter to be checked for the value specified below.'),
show_default=True,
default=None
)
@click.option(
'--url_param_value',
help=('Specify the value for the URL parameter.'),
show_default=True,
default=None
)
@click.option(
'--ssl_c_verify_code',
help=(
'Specify the SSL/TLS error ID that should be checked for the incoming connection. '
'Please refer to your SSL library\'s documentation for an exhaustive list of error codes.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--ssl_c_ca_commonname',
help=('Verify the CA Common-Name of the certificate presented by the client against the specified string.'),
show_default=True,
default=None
)
@click.option(
'--src',
help=('Verify the source IPv4 address of the client of the session matches the specified IPv4 or IPv6 address.'),
show_default=True,
default=None
)
@click.option(
'--src_bytes_in_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_bytes_in_rate',
help=('The average bytes rate from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_bytes_out_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_bytes_out_rate',
help=('The average bytes rate to the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_conn_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_conn_cnt',
help=('The cumulative number of connections initiated from the current incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_conn_cur_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_conn_cur',
help=(
'The current amount of concurrent connections initiated from the current incoming connection\'s source address.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_conn_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_conn_rate',
help=('The average connection rate from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_http_err_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_http_err_cnt',
help=('The cumulative number of HTTP errors from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_http_err_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_http_err_rate',
help=('The average rate of HTTP errors from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_http_req_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_http_req_cnt',
help=('The cumulative number of HTTP requests from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_http_req_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_http_req_rate',
help=('The average rate of HTTP requests from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_kbytes_in_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_kbytes_in',
help=('The total amount of data received from the incoming connection\'s source address (in kilobytes).'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_kbytes_out_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_kbytes_out',
help=('The total amount of data sent to the incoming connection\'s source address (in kilobytes).'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_port_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_port',
help=(
'An integer value corresponding to the TCP source port of the connection on the client side, '
'which is the port the client connected from.'
),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_sess_cnt_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_sess_cnt',
help=('The cumulative number of connections initiated from the incoming connection\'s source address.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--src_sess_rate_comparison',
help=('None'),
type=click.Choice(['', 'gt', 'ge', 'eq', 'lt', 'le']),
multiple=False,
callback=tuple_to_csv,
show_default=True,
default=None
)
@click.option(
'--src_sess_rate',
help=('None'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--nbsrv',
help=('Verify the minimum number of usable servers in the named backend matches the specified value.'),
show_default=True,
type=INT_OR_EMPTY,
callback=int_as_string,
default=None
)
@click.option(
'--nbsrv_backend',
help=('Use the specified backend to count usable servers. Leave empty to use the current backend.'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None
)
@click.option(
'--ssl_fc_sni',
help=('The value of the Server Name TLS extension sent by a client matches the exact string.'),
show_default=True,
default=None
)
@click.option(
'--ssl_sni',
help=('The value of the Server Name TLS extension sent by a client matches the exact string.'),
show_default=True,
default=None
)
@click.option(
'--ssl_sni_sub',
help=(
'The value of the Server Name TLS extension sent by a client contains the specified string (substring match).'
),
show_default=True,
default=None
)
@click.option(
'--ssl_sni_beg',
help=(
'The value of the Server Name TLS extension sent by a client starts with the specified string (prefix match).'
),
show_default=True,
default=None
)
@click.option(
'--ssl_sni_end',
help=('The value of the Server Name TLS extension sent by a client ends with the specified string (suffix match).'),
show_default=True,
default=None
)
@click.option(
'--ssl_sni_reg',
help=('The value of the Server Name TLS extension sent by a client matches with the specified regular expression.'),
show_default=True,
default=None
)
@click.option(
'--custom_acl',
help=('Specify a HAProxy condition/ACL that is currently not supported by the GUI.'),
show_default=True,
default=None
)
@click.option(
'--value',
help=('None'),
show_default=True,
default=None
)
@click.option(
'--urlparam',
help=('None'),
show_default=True,
default=None
)
@click.option(
'--queryBackend',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None
)
@click.option(
'--allowedUsers',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None
)
@click.option(
'--allowedGroups',
help=('None'),
callback=resolve_linked_names_to_uuids,
show_default=True,
default=None
)
@click.option(
'--output', '-o',
help='Specifies the Output format.',
default="plain",
type=click.Choice(available_formats()),
callback=formatter_from_formatter_name,
show_default=True,
)
@click.option(
'--cols', '-c',
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
default="result,validations",
show_default=True,
)
@pass_haproxy_acl_svc
def update(haproxy_acl_svc: HaproxyAclFacade, **kwargs):
"""
    Update an acl.
"""
json_payload = {
'acl': {}
}
options = [
'name', 'description', 'expression', 'negate', 'hdr_beg', 'hdr_end', 'hdr', 'hdr_reg', 'hdr_sub', 'path_beg',
'path_end', 'path', 'path_reg', 'path_dir', 'path_sub', 'cust_hdr_beg_name', 'cust_hdr_beg',
'cust_hdr_end_name', 'cust_hdr_end', 'cust_hdr_name', 'cust_hdr', 'cust_hdr_reg_name', 'cust_hdr_reg',
'cust_hdr_sub_name', 'cust_hdr_sub', 'url_param', 'url_param_value', 'ssl_c_verify_code', 'ssl_c_ca_commonname',
'src', 'src_bytes_in_rate_comparison', 'src_bytes_in_rate', 'src_bytes_out_rate_comparison',
'src_bytes_out_rate', 'src_conn_cnt_comparison', 'src_conn_cnt', 'src_conn_cur_comparison', 'src_conn_cur',
'src_conn_rate_comparison', 'src_conn_rate', 'src_http_err_cnt_comparison', 'src_http_err_cnt',
'src_http_err_rate_comparison', 'src_http_err_rate', 'src_http_req_cnt_comparison', 'src_http_req_cnt',
'src_http_req_rate_comparison', 'src_http_req_rate', 'src_kbytes_in_comparison', 'src_kbytes_in',
'src_kbytes_out_comparison', 'src_kbytes_out', 'src_port_comparison', 'src_port', 'src_sess_cnt_comparison',
'src_sess_cnt', 'src_sess_rate_comparison', 'src_sess_rate', 'nbsrv', 'nbsrv_backend', 'ssl_fc_sni', 'ssl_sni',
'ssl_sni_sub', 'ssl_sni_beg', 'ssl_sni_end', 'ssl_sni_reg', 'custom_acl', 'value', 'urlparam', 'queryBackend',
'allowedUsers', 'allowedGroups'
]
for option in options:
if kwargs[option.lower()] is not None:
json_payload['acl'][option] = kwargs[option.lower()]
result = haproxy_acl_svc.update_acl(kwargs['uuid'], json_payload)
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
@acl.command()
@click.argument('uuid')
@click.option(
'--output', '-o',
help='Specifies the Output format.',
default="plain",
type=click.Choice(available_formats()),
callback=formatter_from_formatter_name,
show_default=True,
)
@click.option(
'--cols', '-c',
    help="Which columns should be printed? Pass empty string (-c '') to show all columns",
default="result,validations",
show_default=True,
)
@pass_haproxy_acl_svc
def delete(haproxy_acl_svc: HaproxyAclFacade, **kwargs):
"""
Delete acl
"""
result = haproxy_acl_svc.delete_acl(kwargs['uuid'])
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
|
11477138
|
import pickle as pkl
import matplotlib.pyplot as plt
import seaborn as sns
filename = 'results_all_2021-05-21-11-21-42.pickle'
# change to your pickle name which includes the concatenated dataframe
with open(filename, "rb") as f:
results = pkl.load(f)
df = results["results"]
df_pivot = (
df[["dataset", "classifier", "roc_auc", "fit_time"]]
.pivot(index="dataset", columns="classifier")
)
datasets = [
'adult',
'breastcancer',
'car',
'covtype',
'letter',
'satimage',
'sensorless',
'spambase'
]
df_roc_auc = (
df_pivot["roc_auc"]
.reset_index()
[["dataset", "XGBClassifier", "LGBMClassifier", "CatBoostClassifier", "WildWood"]]
.rename(
columns={
"XGBClassifier": "XGBoost",
"LGBMClassifier": "LightGBM",
"CatBoostClassifier": "CatBoost",
"WildWood": "WildWood",
}
)
.melt(id_vars=["dataset"])
)
df_roc_auc = df_roc_auc[df_roc_auc["dataset"].isin(datasets)]
df_fit_time = (
df_pivot["fit_time"]
.reset_index()
[["dataset", "XGBClassifier", "LGBMClassifier", "CatBoostClassifier", "WildWood"]]
.rename(
columns={
"XGBClassifier": "XGBoost",
"LGBMClassifier": "LightGBM",
"CatBoostClassifier": "CatBoost",
"WildWood": "WildWood",
}
)
.melt(id_vars=["dataset"])
)
df_fit_time = df_fit_time[df_fit_time["dataset"].isin(datasets)]
sns.set_context("paper", font_scale=1.5)
f, (ax1, ax2) = plt.subplots(nrows=2, figsize=(14, 5))
sns.barplot(x="dataset", y="value", hue="classifier", data=df_roc_auc, ax=ax1)
# ax.set_yscale("log")
# ax2.legend([])
# ax1.legend(ncol=7, loc="upper center", fontsize=17, bbox_to_anchor=(0.5, 1.3))
ax1.legend(ncol=7, loc="upper center", bbox_to_anchor=(0.5, 1.3))
ax1.set_xlabel(None)
# ax1.set_ylabel("AUC", fontsize=13)
ax1.set_ylabel("AUC")
# ax2.set_yticklabels(fontsize=15)
ax1.set_ylim((0.75, 1.04))
# g1.set_ticks(fontsize=14)
# ax1.set_xticklabels(datasets, fontsize=14)
# ax1.set_yticklabels(labels=ax1.get_yticklabels(), fontsize=12)
ax1.set_xticklabels([])
# ax1.set_yticks()
# plt.yticks(fontsize=12)
sns.barplot(x="dataset", y="value", hue="classifier", data=df_fit_time, ax=ax2)
ax2.set_xlabel(None)
ax2.set_yscale("log")
# ax2.legend(ncol=7, loc="upper left", fontsize=15)
# ax2.legend([], )
ax2.get_legend().remove()
# ax2.set_ylabel("Fit time (seconds)", fontsize=13)
ax2.set_ylabel("time (sec.)")
# plt.xticks(fontsize=14)
# ax2.set_xticklabels(datasets, fontsize=15)
ax2.set_xticklabels(datasets)
# plt.yticks(fontsize=12)
plt.tight_layout()
plt.savefig("fig_auc_timings.pdf")
|
11477144
|
from os import listdir
from os.path import join
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from torch.utils import data
import torchvision.transforms as transforms
def build_file_paths(base):
img_paths = []
names = []
img_names = sorted(listdir(base))
img_paths = [join(base, img_name) for img_name in img_names]
names = [img_name[:-4] for img_name in img_names]
return img_paths, names
class ImageData(data.Dataset):
def __init__(self, roots, request, img_transform=None, depth_transform=None):
self.need_name = True if 'name' in request else False
self.need_depth = True if 'depth' in request else False
img_paths, names = build_file_paths(roots['img'])
if self.need_depth: depth_paths, _ = build_file_paths(roots['depth'])
else: depth_paths = None
self.img_paths = img_paths
self.depth_paths = depth_paths
self.names = names
self.img_transform = img_transform
self.depth_transform = depth_transform
def __getitem__(self, item):
img = Image.open(self.img_paths[item]).convert('RGB')
depth = Image.open(self.depth_paths[item]).convert('L') if self.need_depth else None
name = self.names[item] if self.need_name else None
if self.img_transform is not None:
img = self.img_transform(img)
if self.depth_transform is not None and self.need_depth:
depth = self.depth_transform(depth)
results = {}
results['img'] = img
if self.need_depth: results['depth'] = depth
if self.need_name: results['name'] = name
return results
def __len__(self):
return len(self.img_paths)
def get_loader(roots, request, batch_size, num_thread=4, pin=True):
img_transform = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
depth_transform = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor()
])
dataset = ImageData(roots, request, img_transform, depth_transform)
data_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_thread, pin_memory=pin)
return data_loader
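# Hedged usage sketch: 'data/imgs' and 'data/depths' are hypothetical directories
# of RGB images and single-channel depth maps; `request` controls whether batches
# also carry file names and depth tensors alongside the images.
if __name__ == '__main__':
    roots = {'img': 'data/imgs', 'depth': 'data/depths'}
    loader = get_loader(roots, request=('name', 'depth'), batch_size=4, num_thread=0)
    for batch in loader:
        print(batch['img'].shape, batch['depth'].shape, batch['name'][:2])
        break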
|
11477178
|
import os
import setuptools
README = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst')
setuptools.setup(
name='theanets',
version='0.8.0pre',
packages=setuptools.find_packages(),
author='lmjohns3',
author_email='<EMAIL>',
description='Feedforward and recurrent neural nets using Theano',
long_description=open(README).read(),
license='MIT',
url='http://github.com/lmjohns3/theanets',
keywords=('machine-learning '
'neural-network '
'deep-neural-network '
'recurrent-neural-network '
'autoencoder '
'sparse-autoencoder '
'classifier '
'theano '
),
install_requires=['click', 'downhill', 'theano',
# TODO(leif): remove when theano is fixed.
'nose-parameterized'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
11477207
|
import numpy as np
import os
import pickle
import pysam
from math import ceil, floor
from arcsv.constants import ALTERED_QNAME_MAX_LEN
from arcsv.helper import path_to_string, block_gap, fetch_seq, GenomeInterval
from arcsv.sv_affected_len import sv_affected_len
from arcsv.sv_classify import classify_paths
from arcsv.sv_filter import get_filter_string
from arcsv.sv_validate import altered_reference_sequence
from arcsv.vcf import vcf_line_template
def sv_extra_lines(sv_ids, info_extra, format_extra):
info_tags = ('='.join((str(a), str(b))) for (a, b) in info_extra.items())
format_tags = ('='.join((str(a), str(b))) for (a, b) in format_extra.items())
info_line = ';'.join(x for x in info_tags)
format_line = ':'.join(x for x in format_tags)
return '\n'.join('\t'.join((sv_id, info_line, format_line)) for sv_id in sv_ids) + '\n'
# VALID need to update this, at least the reference to sv_output
# note: only used while converting other SV file formats
def do_sv_processing(opts, data, outdir, reffile,
log, verbosity, write_extra=False):
ref = pysam.FastaFile(reffile)
skipped_altered_size = 0
skipped_too_small = 0
altered_reference_file = open(os.path.join(outdir, 'altered.fasta'), 'w')
altered_reference_data = open(os.path.join(outdir, 'altered.pkl'), 'wb')
qnames, block_positions, insertion_sizes, del_sizes = [], [], [], []
simplified_blocks, simplified_paths = [], []
has_left_flank, has_right_flank = [], []
sv_outfile = open(os.path.join(outdir, 'arcsv_out.tab'), 'w')
sv_outfile.write(svout_header_line())
if write_extra:
sv_extra = open(os.path.join(outdir, 'sv_vcf_extra.bed'), 'w')
for datum in data:
paths, blocks, left_bp, right_bp, score, \
filterstring, id_extra, info_extra, format_extra = datum
path1, path2 = paths
# start, end = 0, len(blocks) - 1
graphsize = 2 * len(blocks)
# classify sv
cpout = classify_paths(path1, path2, blocks, graphsize, left_bp, right_bp, verbosity)
(event1, event2), svs, complex_types = cpout
# if only one simple SV is present and < 50 bp, skip it
if len(svs) == 1 and \
svs[0].type != 'BND' and \
svs[0].length < opts['min_simplesv_size']:
skipped_too_small += 1
continue
# write output
# VALID args have changed -- frac1/2, no event_filtered
outlines = sv_output(path1, path2, blocks, event1, event2,
svs, complex_types, score, 0, 0, '.', 0,
False, [], filterstring_manual=filterstring,
id_extra=id_extra)
sv_outfile.write(outlines)
# write out extra info from VCF if necessary
if write_extra:
sv_ids = (l.split('\t')[3] for l in outlines.strip().split('\n'))
sv_extra.write(sv_extra_lines(sv_ids, info_extra, format_extra))
# write altered reference to file
# CLEANUP tons of stuff duplicated here from sv_inference.py
s1 = path_to_string(path1, blocks=blocks)
s2 = path_to_string(path2, blocks=blocks)
sv1 = [sv for sv in svs if sv.genotype == '1/1' or sv.genotype == '1/0']
sv2 = [sv for sv in svs if sv.genotype == '1/1' or sv.genotype == '0/1']
compound_het = (path1 != path2) and (len(sv1) > 0) and (len(sv2) > 0)
for (k, path, ev, pathstring, svlist) in [(0, path1, event1, s1, sv1),
(1, path2, event2, s2, sv2)]:
if k == 1 and path1 == path2:
continue
if len(svlist) == 0:
continue
id = ','.join(svlist[0].event_id.split(',')[0:2])
if compound_het:
id += ',' + str(k + 1)
id += id_extra
qname = id
qname += ':{0}'.format(pathstring)
for sv in svlist:
qname += ':{0}'.format(sv.type.split(':')[0]) # just write DUP, not DUP:TANDEM
ars_out = altered_reference_sequence(path, blocks, ref,
flank_size=opts['altered_flank_size'])
seqs, block_pos, insertion_size, del_size, svb, svp, hlf, hrf = ars_out
if sum(len(s) for s in seqs) > opts['max_size_altered']:
skipped_altered_size += 1
continue
qnames.append(qname)
block_positions.append(block_pos)
insertion_sizes.append(insertion_size)
del_sizes.append(del_size)
simplified_blocks.append(svb)
simplified_paths.append(svp)
has_left_flank.append(hlf)
has_right_flank.append(hrf)
seqnum = 1
qname = qname[:(ALTERED_QNAME_MAX_LEN-4)]
for seq in seqs:
altered_reference_file.write('>{0}\n{1}\n'.
format(qname + ':' + str(seqnum), seq))
seqnum += 1
log.write('altered_skip_size\t{0}\n'.format(skipped_altered_size))
log.write('skipped_small_simplesv\t{0}\n'.format(skipped_too_small))
for x in (qnames, block_positions, insertion_sizes, del_sizes, simplified_blocks,
simplified_paths, has_left_flank, has_right_flank):
pickle.dump(x, altered_reference_data)
altered_reference_file.close()
altered_reference_data.close()
sv_outfile.close()
def get_bp_string(sv):
if sv.type == 'INS':
bp = int(floor(np.median(sv.bp1)))
return str(bp)
else:
bp1 = int(floor(np.median(sv.bp1)))
bp2 = int(floor(np.median(sv.bp2)))
return '{0},{1}'.format(bp1, bp2)
def get_bp_uncertainty_string(sv):
if sv.type == 'INS':
bpu = sv.bp1[1] - sv.bp1[0] - 2
return str(bpu)
else:
bp1u = sv.bp1[1] - sv.bp1[0] - 2
bp2u = sv.bp2[1] - sv.bp2[0] - 2
return '{0},{1}'.format(bp1u, bp2u)
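# Worked example for get_bp_ci below: with sv.bp1 == (100, 110) the interval
# length is 110 - 100 - 2 = 8, so the confidence interval returned for that
# breakpoint is (-4, 4); the same arithmetic applies to bp2.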
def get_bp_ci(sv):
bp1_cilen = sv.bp1[1] - sv.bp1[0] - 2
bp1_ci = (-int(floor(bp1_cilen/2)), int(ceil(bp1_cilen/2)))
bp2_cilen = sv.bp2[1] - sv.bp2[0] - 2
bp2_ci = (-int(floor(bp2_cilen/2)), int(ceil(bp2_cilen/2)))
return bp1_ci, bp2_ci
def get_sv_ins(sv):
if sv.type == 'INS':
return sv.length
elif sv.type == 'BND':
return sv.bnd_ins
else:
return 0
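# Worked example for bnd_alt_string below: joining to chr2:3000 with reference
# base 'A', orient '-' with other_orient '-' yields 'A]chr2:3000]', while
# orient '+' with other_orient '+' yields '[chr2:3000[A' (VCF breakend ALT notation).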
def bnd_alt_string(orient, other_orient, chrom, other_pos, ref_base):
alt_after = True if orient == '-' else False
alt_location_template = ']{0}]' if other_orient == '-' else '[{0}['
alt_location = alt_location_template.format(str(chrom) + ':' + str(other_pos))
alt_string = (ref_base + alt_location) if alt_after else (alt_location + ref_base)
return alt_string
# writes out svs
# NOTE: main output consists of one line per unique non-reference path
def sv_output(path1, path2, blocks, event1, event2,
frac1, frac2, sv_list, complex_types,
event_lh, ref_lh, next_best_lh,
next_best_pathstring, num_paths,
filter_criteria,
filterstring_manual=None, id_extra='',
output_vcf=False,
reference=False,
output_split_support=False):
lines = ''
splitlines = ''
vcflines = []
sv1 = [sv for sv in sv_list if sv.genotype == '1/1' or sv.genotype == '1/0']
sv2 = [sv for sv in sv_list if sv.genotype == '1/1' or sv.genotype == '0/1']
compound_het = (path1 != path2) and (len(sv1) > 0) and (len(sv2) > 0)
is_het = (path1 != path2)
num_paths = str(num_paths)
for (k, path, event, svs, complex_type, frac) in [(0, path1, event1, sv1,
complex_types[0], frac1),
(1, path2, event2, sv2,
complex_types[1], frac2)]:
if k == 1 and path1 == path2:
continue
if len(svs) == 0:
continue
chrom = blocks[int(floor(path1[0]/2))].chrom
# CLEANUP this code is duplicated up above -- should be merged
id = '_'.join(svs[0].event_id.split(',')[0:2])
if compound_het:
id = id + '_' + str(k + 1)
id += id_extra
num_sv = len(svs)
if filterstring_manual is None:
fs = sorted(set((get_filter_string(sv, filter_criteria) for sv in svs)))
if all(x == 'PASS' for x in fs):
filters = 'PASS'
else:
filters = ','.join(x for x in fs if x != 'PASS')
else:
filters = filterstring_manual
all_sv_bp1 = [int(floor(np.median(sv.bp1))) for sv in svs]
all_sv_bp2 = [int(floor(np.median(sv.bp2))) for sv in svs]
all_sv_bp = all_sv_bp1 + all_sv_bp2
minbp, maxbp = min(all_sv_bp), max(all_sv_bp)
total_span = maxbp - minbp
# sv_span = maxbp - minbp
# bp_cis = bp_ci for sv in svs
# (bp1, bp2) in bp_cis
sv_bp_joined = ';'.join(get_bp_string(sv) for sv in svs)
sv_bp_uncertainty_joined = ';'.join(get_bp_uncertainty_string(sv) for sv in svs)
sv_bp_ci = [get_bp_ci(sv) for sv in svs]
svtypes = list(sv.type.split(':')[0] for sv in svs) # use DUP not DUP:TANDEM
svtypes_joined = ','.join(svtypes)
nonins_blocks = [b for b in blocks if not b.is_insertion()]
nni = len(nonins_blocks)
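        # segment endpoints: boundaries between adjacent non-insertion blocks are
        # taken as the median of (previous block end, next block start)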
block_bp = [nonins_blocks[0].start] + \
[int(floor(np.median((blocks[i-1].end, blocks[i].start))))
for i in range(1, nni)] + \
[nonins_blocks[-1].end]
block_bp_joined = ','.join(str(x) for x in block_bp)
block_bp_uncertainty = [0] + \
[block_gap(blocks, 2*i) for i in range(1, nni)] + \
[0]
block_bp_uncertainty_joined = ','.join(str(x) for x in block_bp_uncertainty)
blocks_midpoints = [GenomeInterval(chrom, block_bp[i], block_bp[i+1])
for i in range(nni)]
blocks_midpoints.extend([b for b in blocks if b.is_insertion()])
len_affected = sv_affected_len(path, blocks_midpoints)
pathstring = path_to_string(path, blocks=blocks)
nblocks = len([b for b in blocks if not b.is_insertion()])
refpath = list(range(2 * nblocks))
ref_string = path_to_string(refpath, blocks=blocks)
gt = 'HET' if is_het else 'HOM'
insertion_lengths = [get_sv_ins(sv) for sv in svs if get_sv_ins(sv) > 0]
if len(insertion_lengths) == 0:
inslen_joined = 'NA'
else:
inslen_joined = ','.join(str(l) for l in insertion_lengths)
sr = list(sv.split_support for sv in svs)
pe = list(sv.pe_support for sv in svs)
sr_joined = ','.join(map(str, sr))
pe_joined = ','.join(map(str, pe))
lhr = '%.2f' % (event_lh - ref_lh)
lhr_next = '%.2f' % (event_lh - next_best_lh)
frac_str = '%.3f' % frac
line = '\t'.join(str(x) for x in
(chrom, minbp, maxbp, id,
svtypes_joined, complex_type, num_sv,
block_bp_joined, block_bp_uncertainty_joined,
ref_string, pathstring, len_affected, filters,
sv_bp_joined, sv_bp_uncertainty_joined,
gt, frac_str, inslen_joined,
sr_joined, pe_joined, lhr, lhr_next,
next_best_pathstring, num_paths))
# num_sv
# block_bp_joined
# block_bp_uncertainty_joined
line += '\n'
lines = lines + line
if output_vcf:
template = vcf_line_template()
info_tags_ordered = ['SV_TYPE', 'HAPLOID_CN', 'COMPLEX_TYPE', 'MATE_ID', 'END',
'CI_POS', 'CI_END', 'INS_LEN', 'SR', 'PE', 'SV_SPAN',
'EVENT_SPAN', 'EVENT_START', 'EVENT_END', 'EVENT_AFFECTED_LEN',
'EVENT_NUM_SV', 'REF_STRUCTURE', 'ALT_STRUCTURE',
'SEGMENT_ENDPTS', 'SEGMENT_ENDPTS_CIWIDTH',
'AF', 'SCORE_VS_REF',
'SCORE_VS_NEXT', 'NEXT_BEST_STRUCTURE', 'NUM_PATHS']
info_tags_ordering = {y: x for x, y in enumerate(info_tags_ordered)}
for (i, sv) in enumerate(svs):
info_list = []
sv_chrom = sv.ref_chrom
# pos
pos = all_sv_bp1[i] + 1
if num_sv > 1:
id_vcf = id + '_' + str(i + 1)
else:
id_vcf = id
ref_base = fetch_seq(reference, sv_chrom, pos-1, pos) # pysam is 0-indexed
alt = '<{0}>'.format(sv.type)
qual = '.'
svtype = svtypes[i]
info_list.append(('SV_TYPE', svtype))
end = all_sv_bp2[i] + 1
info_list.append(('END', end))
block_bp_vcf = ','.join(str(x+1) for x in block_bp)
info_list.append(('SEGMENT_ENDPTS', block_bp_vcf))
info_list.append(('SEGMENT_ENDPTS_CIWIDTH', block_bp_uncertainty_joined))
if svtype == 'INS':
svlen = sv.length
else:
svlen = end - pos
info_list.append(('SV_SPAN', svlen))
info_list.append(('EVENT_SPAN', total_span))
info_list.append(('EVENT_AFFECTED_LEN', len_affected))
if svtype == 'DUP':
info_list.append(('HAPLOID_CN', sv.copynumber))
bp1_ci, bp2_ci = sv_bp_ci[i]
bp1_ci_str = str(bp1_ci[0]) + ',' + str(bp1_ci[1])
bp2_ci_str = str(bp2_ci[0]) + ',' + str(bp2_ci[1])
if bp1_ci_str != '0,0':
info_list.append(('CI_POS', bp1_ci_str))
if bp2_ci_str != '0,0' and svtype != 'INS':
info_list.append(('CI_END', bp2_ci_str))
info_list.extend([('REF_STRUCTURE', ref_string), ('ALT_STRUCTURE', pathstring),
('AF', frac_str), ('SR', sr[i]), ('PE', pe[i]),
('SCORE_VS_REF', lhr), ('SCORE_VS_NEXT', lhr_next),
('NEXT_BEST_STRUCTURE', next_best_pathstring),
('NUM_PATHS', num_paths), ('EVENT_START', minbp + 1),
('EVENT_END', maxbp), ('EVENT_NUM_SV', num_sv)])
# FORMAT/GT
format_str = 'GT'
gt_vcf = sv.genotype
if svtype != 'BND':
# write line
info_list.sort(key=lambda x: info_tags_ordering[x[0]])
info = ';'.join(['{0}={1}'.format(el[0], el[1]) for el in info_list])
line = template.format(chr=chrom, pos=pos, id=id_vcf,
ref=ref_base, alt=alt, qual=qual,
filter=filters, info=info,
format_str=format_str, gt=gt_vcf)
vcflines.append(line)
else: # breakend type --> 2 lines in vcf
id_bnd1, id_bnd2 = id_vcf + 'A', id_vcf + 'B'
mateid_bnd1, mateid_bnd2 = id_bnd2, id_bnd1
orientation_bnd1, orientation_bnd2 = sv.bnd_orientation
pos_bnd1 = all_sv_bp1[i] + 1
pos_bnd2 = all_sv_bp2[i] + 1
if orientation_bnd1 == '-':
pos_bnd1 -= 1
if orientation_bnd2 == '-':
pos_bnd2 -= 1
ref_bnd1 = fetch_seq(reference, sv_chrom, pos_bnd1 - 1, pos_bnd1)
ref_bnd2 = fetch_seq(reference, sv_chrom, pos_bnd2 - 1, pos_bnd2)
alt_bnd1 = bnd_alt_string(orientation_bnd1, orientation_bnd2,
sv.ref_chrom, pos_bnd2, ref_bnd1)
alt_bnd2 = bnd_alt_string(orientation_bnd2, orientation_bnd1,
sv.ref_chrom, pos_bnd1, ref_bnd2)
ctype_str = complex_type.upper().replace('.', '_')
info_list_bnd1 = [('MATE_ID', mateid_bnd1)]
info_list_bnd2 = [('MATE_ID', mateid_bnd2)]
if bp1_ci_str != '0,0':
info_list_bnd1.append(('CI_POS', bp1_ci_str))
if bp2_ci_str != '0,0':
info_list_bnd2.append(('CI_POS', bp2_ci_str))
if sv.bnd_ins > 0:
info_list_bnd1.append(('INS_LEN', sv.bnd_ins))
info_list_bnd2.append(('INS_LEN', sv.bnd_ins))
common_tags = [('SV_TYPE', svtype), ('COMPLEX_TYPE', ctype_str),
('EVENT_SPAN', total_span), ('EVENT_START', minbp + 1),
('EVENT_END', maxbp), ('EVENT_AFFECTED_LEN', len_affected),
('EVENT_NUM_SV', num_sv), ('SEGMENT_ENDPTS', block_bp_vcf),
('SEGMENT_ENDPTS_CIWIDTH', block_bp_uncertainty_joined),
('REF_STRUCTURE', ref_string), ('ALT_STRUCTURE', pathstring),
('AF', frac_str), ('SR', sr[i]), ('PE', pe[i]),
('SCORE_VS_REF', lhr), ('SCORE_VS_NEXT', lhr_next),
('NEXT_BEST_STRUCTURE', next_best_pathstring),
('NUM_PATHS', num_paths)]
info_list_bnd1.extend(common_tags)
info_list_bnd2.extend(common_tags)
info_list_bnd1.sort(key=lambda x: info_tags_ordering[x[0]])
info_list_bnd2.sort(key=lambda x: info_tags_ordering[x[0]])
info_bnd1 = ';'.join(['{0}={1}'.format(el[0], el[1])
for el in info_list_bnd1])
info_bnd2 = ';'.join(['{0}={1}'.format(el[0], el[1])
for el in info_list_bnd2])
line1 = template.format(chr=chrom, pos=pos_bnd1, id=id_bnd1,
ref=ref_bnd1, alt=alt_bnd1, qual=qual,
filter=filters, info=info_bnd1,
format_str=format_str, gt=gt_vcf)
line2 = template.format(chr=chrom, pos=pos_bnd2, id=id_bnd2,
ref=ref_bnd2, alt=alt_bnd2, qual=qual,
filter=filters, info=info_bnd2,
format_str=format_str, gt=gt_vcf)
vcflines.append(line1)
vcflines.append(line2)
if output_split_support:
split_line_list = []
bp_orientations = {'Del': ('-', '+'),
'Dup': ('+', '-'),
'InvL': ('-', '-'),
'InvR': ('+', '+')}
bp_idx = 1
for sv in svs:
bp1 = str(int(floor(np.median(sv.bp1))))
bp2 = str(int(floor(np.median(sv.bp2))))
for split in sv.supporting_splits:
orientation = bp_orientations[split.split_type[:-1]]
orientation = ','.join(orientation)
strand = split.split_type[-1]
qname = split.aln.qname
seq = split.aln.seq
mapq = str(split.aln.mapq)
if split.mate is not None:
mate_seq = split.mate.seq
mate_mapq = str(split.mate.mapq)
mate_has_split = str(split.mate_has_split)
else:
mate_seq = 'NA'
mate_mapq = 'NA'
mate_has_split = 'NA'
line = '\t'.join(str(x) for x in
(id, block_bp_joined, ref_string, pathstring,
sv_bp_joined, 'split', qname, bp_idx,
bp1, bp2, orientation,
qname, strand, seq, mapq,
mate_seq, mate_mapq, mate_has_split))
split_line_list.append(line)
bp_idx += 1
if len(split_line_list) > 0:
splitlines = splitlines + '\n'.join(split_line_list) + '\n'
return lines, vcflines, splitlines
def svout_header_line():
return '\t'.join(('chrom', 'minbp', 'maxbp', 'id',
'svtype', 'complextype', 'num_sv',
'bp', 'bp_uncertainty', 'reference', 'rearrangement', 'len_affected',
'filter', 'sv_bp', 'sv_bp_uncertainty',
'gt', 'af', 'inslen', 'sr_support', 'pe_support',
'score_vs_ref', 'score_vs_next', 'rearrangement_next', 'num_paths')) + \
'\n'
def splitout_header_line():
return '\t'.join(('sv_id', 'bp', 'reference', 'rearrangement', 'sv_bp',
'support_type', 'qname', 'bp_idx',
'bp1', 'bp2', 'bp_orientation',
'qname', 'strand', 'seq', 'mapq',
'mate_seq', 'mate_mapq', 'mate_has_split')) + \
'\n'
|
11477241
|
import pytest
from hooks.po_location_format import main
from hooks.utils import get_current_branch
INPUT_PO_DATA = """
#: foo/bar.py:123 foo/bar.py:200
#: foo/foo.py:123
msgid "Foo"
msgstr "Bar"
#: foo/bar.py:123 foo/bar.py:200
#: foo/foo.py:123
msgid "Bar"
msgstr "Foo"
"""
FILE_PO_DATA = """
#: foo/bar.py
#: foo/foo.py
msgid "Foo"
msgstr "Bar"
#: foo/bar.py
#: foo/foo.py
msgid "Bar"
msgstr "Foo"
"""
NEVER_PO_DATA = """
msgid "Foo"
msgstr "Bar"
msgid "Bar"
msgstr "Foo"
"""
@pytest.mark.parametrize(
"input_data,output_data,add_location",
[(INPUT_PO_DATA, FILE_PO_DATA, "file"), (INPUT_PO_DATA, NEVER_PO_DATA, "never")],
)
def test_output_is_correct(input_data, output_data, add_location, tmpdir):
with tmpdir.as_cwd():
in_file = tmpdir.join(f"in_{add_location}.po")
        in_file.write_text(input_data, encoding="utf-8")
assert main([str(in_file), "--add-location", add_location]) == 1
with in_file.open() as f:
assert output_data == f.read()
|
11477260
|
import datetime
import logging
import boto3
from django.conf import settings
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.contrib.gis.db import models as gis_models
from django.core.exceptions import FieldError
from django.db import IntegrityError, models
from django.utils import timezone
from localflavor.us.models import USStateField, USZipCodeField
from localflavor.us.us_states import STATE_CHOICES
from phonenumber_field.modelfields import PhoneNumberField
from supportal.app.models import EmailSend, Person
from supportal.app.models.base_model_mixin import BaseModelMixin
from supportal.services.email_service import get_email_service
_cognito_client = None
ASSIGNMENT_COUNT_TO_INVITE = 10
DAILY_INVITES = 3
def _get_cognito_client():
global _cognito_client
if _cognito_client is None:
_cognito_client = boto3.client("cognito-idp")
return _cognito_client
class UserManager(BaseUserManager):
use_in_migrations = True
@classmethod
def normalize_email(cls, email):
"""Lower case email addresses, overrides BaseUserManager#normalize_email.
We do this to make email log-in case insensitive.
This is technically not RFC compliant and could create problems for users
with case-sensitive email servers. We aren't likely to be affected by this but,
if we are, normalization can be bypassed by calling User.change_email on an
existing user.
"""
return email.strip().lower()
def create_cognito_user(self, email):
if not email:
raise FieldError("email field is required")
response = _get_cognito_client().admin_create_user(
UserPoolId=settings.COGNITO_USER_POOL,
# Cognito quirk: when the pool is set to use email as username, Cognito
# *does not* use the email as the username... it generates a uuid.
# We need to read the "real" Cognito username, which we use
# to associate id tokens to our users in the authentication backend,
# from the response.
Username=email,
UserAttributes=[
{"Name": "email", "Value": email},
# No need to verify email as it is is effectively verified by
# our custom auth flow.
{"Name": "email_verified", "Value": "True"},
],
MessageAction="SUPPRESS",
# Even though we don't use it, set email as the desired delivery medium
# in case we want to use it in the future.
DesiredDeliveryMediums=["EMAIL"],
# Fail if email already exists
ForceAliasCreation=False,
)
return response
def _email_new_user(self, email):
payload = {
"email": email,
"switchboard_signup_url": settings.SUPPORTAL_BASE_URL,
"transactional": True,
}
email_service = get_email_service()
email_service.send_email(
template_name=EmailSend.INVITE_EMAIL,
from_email=settings.FROM_EMAIL,
recipient=email,
reply_to_email=settings.REPLY_TO_EMAIL,
configuration_set_name=settings.CONFIGURATION_SET_NAME,
payload=payload,
application_name="supportal",
)
def _create_user(
self,
username,
email,
password,
skip_cognito,
should_send_invite_email,
**extra_fields,
):
"""
Create and save a user with the given username, email, and password.
        This sets up the user in Cognito unless skip_cognito=True is passed.
This option is intended for testing or to add users that have already been
created in Cognito.
"""
email = self.normalize_email(email)
if not skip_cognito:
cognito_response = self.create_cognito_user(email)
logging.info(f"Create user response from Cognito {cognito_response}")
username = cognito_response["User"]["Username"]
logging.info(f"Created Cognito user {username} for email {email}")
if not username:
raise ValueError("The given username must be set")
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
if should_send_invite_email:
self._email_new_user(email)
return user
def create_user(
self,
username,
email,
should_send_invite_email=False,
skip_cognito=False,
**extra_fields,
):
"""Create and save a regular User (password not allowed)."""
is_staff = email.endswith(
"@<EMAIL>"
) # staff get added as admins and staff
extra_fields.setdefault("is_staff", is_staff)
extra_fields.setdefault("is_admin", is_staff)
extra_fields.setdefault("is_superuser", False)
return self._create_user(
username,
email,
None,
skip_cognito,
should_send_invite_email,
**extra_fields,
)
def create_superuser(
self, username, email, password, skip_cognito=False, **extra_fields
):
"""Create and save a super User (password required).
We keep the "username" param for compatibility with the createsuperuser
manage command, but it is never used.
TODO: update createsuperuser command
"""
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_admin", True)
extra_fields.setdefault("is_superuser", True)
if password is None:
raise ValueError("Superuser must have a usable password.")
if extra_fields.get("is_staff") is not True:
raise ValueError("Superuser must have is_staff=True.")
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return self._create_user(
username,
email,
password,
should_send_invite_email=False,
skip_cognito=skip_cognito,
**extra_fields,
)
def get_user_by_email(self, email):
"""Get a user by email
:raises User.DoesNotExist
:raises User.MultipleObjectsReturned if email matches more than one User,
which should be impossible.
"""
return self.get(email=self.normalize_email(email))
class User(AbstractUser, BaseModelMixin):
"""
Custom User class for the Supportal
We inherit 'password' from AbstractUser in order to use the admin interface.
It's not actually necessary when using Cognito, so the user manager sets
unusable passwords for all non-admin users. In the future, we may want to
integrate Cognito auth with the admin interface, but it's not trivial.
"""
objects = UserManager()
person = models.ForeignKey(Person, on_delete=models.SET_NULL, null=True)
added_by = models.ForeignKey(
"self", on_delete=models.SET_NULL, blank=True, null=True, related_name="invites"
)
is_admin = models.BooleanField(default=False)
email = models.EmailField(blank=False, unique=True, db_index=True)
phone = PhoneNumberField(blank=True)
address = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
state = USStateField(choices=STATE_CHOICES, blank=True)
zip5 = USZipCodeField(blank=True)
coordinates = gis_models.PointField(
geography=True, srid=4326, null=True, blank=True
)
unsubscribed_at = models.DateTimeField(null=True, db_index=True)
self_reported_team_name = models.CharField(max_length=255, blank=True)
verified_at = models.DateTimeField(null=True)
impersonated_user = models.ForeignKey(
"self", on_delete=models.DO_NOTHING, null=True, blank=True, default=None
)
@property
def latest_invite(self):
try:
return self.invites.latest(field_name="created_at")
except User.DoesNotExist:
return None
@property
def has_invite(self):
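        # Invite eligibility: the user must have contacted at least
        # ASSIGNMENT_COUNT_TO_INVITE prospects; if they have invited someone before,
        # that latest invitee must also have reached the contact threshold and the
        # user must be under the DAILY_INVITES cap for the last 24 hours.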
latest_invite = self.latest_invite
has_reached_contact_count = (
self.assignment_contacts_count >= ASSIGNMENT_COUNT_TO_INVITE
)
if latest_invite:
invite_has_reached_contact_count = (
latest_invite.assignment_contacts_count >= ASSIGNMENT_COUNT_TO_INVITE
)
has_not_maxed_daily_invites = (
self.invites.filter(
created_at__gte=timezone.now() - datetime.timedelta(days=1)
).count()
< DAILY_INVITES
)
return (
invite_has_reached_contact_count
and has_not_maxed_daily_invites
and has_reached_contact_count
)
return has_reached_contact_count
@property
def assignment_contacts_count(self):
return (
self.vol_prospect_assignments.filter(
vol_prospect_contact_events__isnull=False
)
.distinct("person")
.count()
)
@property
def remaining_contacts_count(self):
remaining = ASSIGNMENT_COUNT_TO_INVITE - self.assignment_contacts_count
return remaining if remaining > 0 else 0
def change_email(self, new_email):
logging.info(
f"Changing {self.username}'s email from {self.email} to {new_email}"
)
self.email = new_email
self.save()
_get_cognito_client().admin_update_user_attributes(
UserPoolId=settings.COGNITO_USER_POOL,
Username=self.username,
UserAttributes=[
{"Name": "email", "Value": new_email},
# If not set to true, this would trigger Cognito's internal
# email verification flow, which we don't support.
{"Name": "email_verified", "Value": "True"},
],
)
def normalize_email(self):
normalized = UserManager.normalize_email(self.email)
if self.email != normalized:
try:
self.change_email(normalized)
except IntegrityError as e:
# Skip users that have already worked around case-sensitivity by creating
# a second user with a lower-case email.
# TODO: we may want to delete these Users at some point since it is
# no longer possible for them to log in.
logging.warning(f"Skipping user {self.id}. IntegrityError: {e}")
|
11477270
|
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data
from fightchurn.listings.chap9.listing_9_1_regression_auc import reload_regression
import numpy
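# Top-decile lift: churn rate among the 10% of accounts with the highest
# predicted churn probability, divided by the overall churn rate.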
def calc_lift(y_true, y_pred):
if numpy.unique(y_pred).size < 10:
return 1.0
sort_by_pred=[(p,t) for p,t in sorted(zip(y_pred, y_true))]
overall_churn = sum(y_true)/len(y_true)
i90=int(round(len(y_true)*0.9))
top_decile_count=sum([p[1] for p in sort_by_pred[i90:]])
top_decile_churn = top_decile_count/(len(y_true)-i90)
lift = top_decile_churn/overall_churn
return lift
def top_decile_lift(data_set_path):
logreg_model = reload_regression(data_set_path)
X,y = prepare_data(data_set_path,as_retention=False)
predictions = logreg_model.predict_proba(X)
lift = calc_lift(y,predictions[:,0])
print('Regression Lift score={:.3f}'.format(lift))
|
11477281
|
import math
# print resource usage estimates?
estimate_resources = 1
# General Network Parameters
INPUT_SIZE = 28 # dimension of square input image
NUM_KERNELS = 8
KERNEL_SIZE = 7 # square kernel
#NUM_KERNELS = 2
#KERNEL_SIZE = 3 # square kernel
KERNEL_SIZE_SQ = KERNEL_SIZE**2
NEIGHBORHOOD_SIZE = 4
FEATURE_SIZE = int((INPUT_SIZE - KERNEL_SIZE + 1) / math.sqrt(NEIGHBORHOOD_SIZE)) # The dimension of the convolved image
# Screen resolution
X_RES = 800
Y_RES = 600
# Shift window
CAMERA_PIXEL_WIDTH = 9
CAMERA_PIXEL_BITWIDTH = CAMERA_PIXEL_WIDTH - 1
BUFFER_W = INPUT_SIZE
BUFFER_BW = BUFFER_W - 1
BUFFER_H = INPUT_SIZE
BUFFER_BH = BUFFER_H - 1
BUFFER_SIZE = BUFFER_W * BUFFER_H
BUFFER_OUT_VECTOR_WIDTH = BUFFER_W * BUFFER_H * CAMERA_PIXEL_WIDTH
BUFFER_OUT_VECTOR_BITWIDTH = BUFFER_OUT_VECTOR_WIDTH - 1
WINDOW_VECTOR_WIDTH = KERNEL_SIZE * KERNEL_SIZE * CAMERA_PIXEL_WIDTH
WINDOW_VECTOR_BITWIDTH = WINDOW_VECTOR_WIDTH - 1
# Window selector
BUFFER_VECTOR_WIDTH = BUFFER_W * BUFFER_H * CAMERA_PIXEL_WIDTH
BUFFER_VECTOR_BITWIDTH = BUFFER_VECTOR_WIDTH - 1
X_COORD_WIDTH = int(math.ceil(math.log(BUFFER_W,2)))
X_COORD_BITWIDTH = X_COORD_WIDTH - 1
Y_COORD_WIDTH = int(math.ceil(math.log(BUFFER_H,2)))
Y_COORD_BITWIDTH = Y_COORD_WIDTH - 1
X_COORD_MAX = INPUT_SIZE - KERNEL_SIZE + 1
Y_COORD_MAX = INPUT_SIZE - KERNEL_SIZE + 1
SCREEN_X_WIDTH = int(math.ceil(math.log(X_RES,2)))
SCREEN_X_BITWIDTH = SCREEN_X_WIDTH - 1
SCREEN_Y_WIDTH = int(math.ceil(math.log(Y_RES,2)))
SCREEN_Y_BITWIDTH = SCREEN_Y_WIDTH - 1
# Shift window control (window_ctrl)
BUFFER_X_POS = 0 # the X/Y position of the shifting window/ buffer on the screen
BUFFER_Y_POS = 0
# Multiply Adder Tree
MA_TREE_SIZE = 2**int(math.ceil(math.log(KERNEL_SIZE_SQ,2))) # the number of elements in the base of the tree, equivalent to the number of multipliers needed in each tree
CONV_MULT_WIDTH = 9
CONV_MULT_BITWIDTH = CONV_MULT_WIDTH - 1
CONV_PRODUCT_WIDTH = CONV_MULT_WIDTH * 2 # the width of the product
CONV_PRODUCT_BITWIDTH = CONV_PRODUCT_WIDTH - 1
CONV_ADD_WIDTH = CONV_PRODUCT_WIDTH + int(math.ceil(math.log(MA_TREE_SIZE,2)))
CONV_ADD_BITWIDTH = CONV_ADD_WIDTH - 1
CARRY_VECTOR_WIDTH = (KERNEL_SIZE**2) - 1;
RDY_SHIFT_REG_SIZE = int(math.ceil(math.log(MA_TREE_SIZE,2))) + 1 + 1 # +1 for rect linear and multipliers, -1 to set wren early
FM_COORD_SR_DEPTH = RDY_SHIFT_REG_SIZE #+ INPUT_SIZE - KERNEL_SIZE + 1 + int(math.sqrt(NEIGHBORHOOD_SIZE))
WINDOW_PAD_WIDTH = (MA_TREE_SIZE - KERNEL_SIZE_SQ) * CONV_MULT_WIDTH
WINDOW_PAD_BITWIDTH = WINDOW_PAD_WIDTH - 1
MULT_PAD_WIDTH = int(math.ceil(math.log(KERNEL_SIZE_SQ,2)))
MULT_ADDER_IN_WIDTH = MA_TREE_SIZE * CONV_MULT_WIDTH
MULT_ADDER_IN_BITWIDTH = MULT_ADDER_IN_WIDTH - 1
# General Bitwidths
#NN_WIDTH = CONV_ADD_WIDTH
#NN_BITWIDTH = NN_WIDTH - 1
# Rect Linear
RECT_IN_WIDTH = CONV_ADD_WIDTH
RECT_IN_BITWIDTH = RECT_IN_WIDTH - 1
RECT_OUT_WIDTH = RECT_IN_WIDTH
RECT_OUT_BITWIDTH = RECT_OUT_WIDTH - 1
# Sub sampling
#NH_DIM = int(math.sqrt(NEIGHBORHOOD_SIZE))
#NH_VECTOR_WIDTH = NEIGHBORHOOD_SIZE*NN_WIDTH
#NH_VECTOR_BITWIDTH = NH_VECTOR_WIDTH - 1
#NUM_NH_LAYERS = int(math.ceil(math.log(NEIGHBORHOOD_SIZE,2)))
#NUM_NH_LAYERS PNUM_NH_LAYERS
#POOL_OUT_WIDTH = NN_WIDTH + NUM_NH_LAYERS
#POOL_OUT_BITWIDTH = POOL_OUT_WIDTH - 1
#MEAN_DIVSION_CONSTANT = str(POOL_OUT_WIDTH) + "'d" + str(NEIGHBORHOOD_SIZE)
# POOL_RESET= 1 # uncomment to add reset signal to sub sampling/pooling adder tree
#POOL_TREE_PAD = POOL_OUT_WIDTH - NN_WIDTH
# Sub Sampling control (nh_shift_reg_ctrl)
NH_WIDTH = CONV_ADD_WIDTH
NH_BITWIDTH = NH_WIDTH - 1
NH_SIZE = NEIGHBORHOOD_SIZE
NH_DIM = int(math.sqrt(NH_SIZE))
NH_SR_DEPTH = INPUT_SIZE - KERNEL_SIZE + 1 - NH_DIM #NH_SIZE
# Feature Map Buffer Control module
FM_ADDR_WIDTH = int(math.ceil(math.log(FEATURE_SIZE**2,2)))
FM_ADDR_BITWIDTH = FM_ADDR_WIDTH - 1
FM_WIDTH = FEATURE_SIZE # the size of the y dimension of the feature map
ADDR_MAX = FEATURE_SIZE**2
NP_MAX_COUNT = ADDR_MAX # same variable, different name for inside matrix_mult.v
NP_COUNT_WIDTH = FM_ADDR_WIDTH
NP_COUNT_BITWIDTH = FM_ADDR_BITWIDTH
RAM_SELECT_WIDTH = int(math.ceil(math.log(NUM_KERNELS,2)))
RAM_SELECT_BITWIDTH = RAM_SELECT_WIDTH - 1
# Softmax
SOFTMAX_IN_VECTOR_LENGTH = ((FEATURE_SIZE * FEATURE_SIZE) / NEIGHBORHOOD_SIZE ) * NUM_KERNELS # the number of inputs to the softmax layer
NUM_CLASSES = 10 # number of output classes for the entire nn, MUST BE A POWER OF 2!!! set unneeded class inputs to 0
# Matrix multiply (for Softmax)
NUM_INPUT_IM = 1 # The number of images input to the layer at a time
NUM_INPUT_N = (NUM_KERNELS * FEATURE_SIZE * FEATURE_SIZE )# The number of input neurons to the layer
NUM_OUTPUT_N = NUM_CLASSES
FFN_IN_WIDTH = CONV_ADD_WIDTH # The width of the inputs to the feed forward network. Should be the same as the output width of the softmax layer.
FFN_IN_BITWIDTH = (FFN_IN_WIDTH - 1)
FFN_OUT_WIDTH = (FFN_IN_WIDTH * 2) + int(math.ceil(math.log(NUM_INPUT_N,2))) # The width of the outputs of the feed forward network
FFN_OUT_BITWIDTH = (FFN_OUT_WIDTH - 1)
SUM_WIRE_LEN = ( NUM_INPUT_N * 2 ) - 1 # The number of indexes in the adder tree vector
# Normalization (for Softmax)
NORM_IN_WIDTH = FFN_OUT_WIDTH
NORM_IN_BITWIDTH = NORM_IN_WIDTH - 1
NUM_NORM_LAYERS = int(math.ceil(math.log(NUM_CLASSES,2)))
NORM_OUT_WIDTH = NORM_IN_WIDTH + NUM_NORM_LAYERS
NORM_OUT_BITWIDTH = NORM_OUT_WIDTH - 1
# NORM_RESET = 1 # uncomment to add reset signal to normalization adder tree
ADDER_TREE_PAD = NORM_OUT_WIDTH - NORM_IN_WIDTH
"""
LOG2 = "LOG2(x) \
(x <= 2) ? 1 : \
(x <= 4) ? 2 : \
(x <= 8) ? 3 : \
(x <= 16) ? 4 : \
(x <= 32) ? 5 : \
(x <= 64) ? 6 : \
(x <= 128) ? 7 : \
(x <= 256) ? 8 : \
((x) <= 512) ? 9 : \
(x <= 1024) ? 10 : \
(x <= 2048) ? 11 : \
(x <= 4096) ? 12 : \
(x <= 8192) ? 13 : \
(x <= 16384) ? 14 : \
(x <= 32768) ? 15 : \
-100000"
"""
if __name__ == "__main__":
macroList = []
blacklist = ['__', 'math', 'macroList','blacklist']
    for k, v in list(locals().items()):
if not any(substring in k for substring in blacklist):
macroList.append((k,v))
with open("../Hardware/network_params.h", 'w') as f:
for macro in macroList:
f.write("`define " + str(macro[0]) + ' ' + str(macro[1]) + '\n')
if estimate_resources:
le = 0;
mult = 0;
memory_bits = 0;
# Shift Window usage
le = le + (BUFFER_SIZE * CAMERA_PIXEL_WIDTH)
# window xy lookup
lookup_size = 350 # a guess
le = le + (lookup_size * KERNEL_SIZE**2)
# mult-adder tree usage
for i in range(0,NUM_KERNELS):
mult = mult + (2**math.ceil(math.log(KERNEL_SIZE**2,2)))
x = KERNEL_SIZE**2
"""
# optimized tree
if x % 2:
le = le + CONV_ADD_WIDTH + 1
x = x - 1
while x > 0:
le = le + (x* (CONV_ADD_WIDTH + 1))
x = x/2
"""
# unoptimized tree
le = le + ((CONV_ADD_WIDTH)*((2**math.ceil(math.log(x,2))*2)-1))
# rect-linear usage
le = le + (CONV_ADD_WIDTH)
# buffer 1 usage
memory_bits = memory_bits + (FEATURE_SIZE*NUM_KERNELS*CONV_ADD_WIDTH)
# pooling usage
for i in range(0,NUM_KERNELS):
le = le + ( (NEIGHBORHOOD_SIZE *2)-1)* NH_WIDTH
#le = le + NN_WIDTH # a guess about division's area
# buffer 2 usage
memory_bits = memory_bits + (FEATURE_SIZE*NUM_KERNELS*CONV_ADD_WIDTH)/4
# matrix mult usage
mult = mult + NUM_CLASSES
le = le + (NUM_CLASSES* FFN_OUT_WIDTH)
        # softmax / final activation usage
#le = le + (NN_WIDTH *2 * NUM_CLASSES)
print "Estimated number of Logic elements: " + str(le)
print "Estimated number of 9 bit multipliers: " + str(mult)
print "Estimated number of memory bits: " + str(memory_bits)
|
11477332
|
import os
import pytest
import subprocess
import tempfile
import configparser
@pytest.mark.parametrize("config_fname", ["./tests/_local_test_config.conf"])
@pytest.mark.parametrize("cleanup", [False, True])
@pytest.mark.parametrize("print_all", [False, True])
@pytest.mark.parametrize("force_pass", [False, True])
@pytest.mark.parametrize("rcount", [1, 3])
@pytest.mark.parametrize("timeout", [3, 5])
def test_client_general(config_fname, cleanup, print_all, force_pass, rcount, timeout):
# init config parser
config = configparser.ConfigParser()
config.read(config_fname)
# init env variables
path = config["DEFAULT"]["git_path_test_value"]
file_types = config["DEFAULT"]["file_types_test_values"]
exclude_urls = config["DEFAULT"]["exclude_test_urls"]
exclude_patterns = config["DEFAULT"]["exclude_test_patterns"]
# Generate command
cmd = [
"urlchecker",
"check",
"--subfolder",
"test_files",
"--file-types",
file_types,
"--exclude-files",
"conf.py",
"--exclude-urls",
exclude_urls,
"--exclude_patterns",
exclude_patterns,
"--retry-count",
str(rcount),
"--timeout",
str(timeout),
]
# Add boolean arguments
if cleanup:
cmd.append("--cleanup")
if print_all:
cmd.append("--print-all")
if force_pass:
cmd.append("--force-pass")
# Add final path
cmd.append(path)
    # execute script
pipe = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
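    # Smoke test: only checks that the CLI invocation runs; the exit status in
    # `pipe` is not asserted here.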
@pytest.mark.parametrize("save", [True])
def test_client_save(save):
# init config parser
config = configparser.ConfigParser()
config.read("./tests/_local_test_config.conf")
# init env variables
path = config["DEFAULT"]["git_path_test_value"]
file_types = config["DEFAULT"]["file_types_test_values"]
exclude_urls = config["DEFAULT"]["exclude_test_urls"]
exclude_patterns = config["DEFAULT"]["exclude_test_patterns"]
# Generate command
cmd = [
"urlchecker",
"check",
"--subfolder",
"test_files",
"--file-types",
file_types,
"--exclude-files",
"conf.py",
"--exclude-urls",
exclude_urls,
"--exclude_patterns",
exclude_patterns,
]
# Write to file
if save:
output_csv = tempfile.NamedTemporaryFile(suffix=".csv", prefix="urlchecker-")
cmd += ["--save", output_csv.name]
# Add final path
cmd.append(path)
print(" ".join(cmd))
    # execute script
pipe = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if save:
if not os.path.exists(output_csv.name):
raise AssertionError
|
11477336
|
import argparse
import numpy as np
import sys
SCALE_OPTIONS = ['log', 'linear']
def check_both_int(a, b):
if a.is_integer() and b.is_integer():
return True
else:
return False
def process_args(args):
""" Prints grid to standard error. """
min_val, max_val, num_points, scale = args.min, args.max, args.num_points, args.scale
if scale == 'linear':
dtype = int if check_both_int(min_val, max_val) else float
grid = np.linspace(min_val, max_val, num=num_points, dtype=dtype)
elif scale == 'log':
grid = np.logspace(min_val, max_val, num=num_points)
else:
raise ValueError(f"Option not in {SCALE_OPTIONS}")
grid = np.unique(grid)
for elem in grid:
sys.stdout.write(str(elem)+'\n')
def parse_cli():
parser = argparse.ArgumentParser(description='Generates a 1D grid of points from min to max')
parser.add_argument('--min', type=float, required=True,
help='Minimum value.')
parser.add_argument('--max', type=float, required=True,
help='Maximum value.')
parser.add_argument('--num-points', type=int, required=True,
help='Number of grid points.')
parser.add_argument('--scale', type=str, default='linear', choices=SCALE_OPTIONS,
help='Scaling of grid points. Note if "log" then min and max are interpreted '
'as 10 ** min and 10 ** max respectively.')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_cli()
process_args(args)
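# Example (hypothetical invocation): with --min 0 --max 3 --num-points 4 --scale log
# the script prints 1.0, 10.0, 100.0 and 1000.0, one value per line.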
|
11477341
|
import re
from janome.tokenizer import Tokenizer
class Preprocessor():
def __init__(self):
self.tokenizer = Tokenizer()
self._symbol_replace = re.compile(r"[^ぁ-んァ-ン一-龥ーa-zA-Za-zA-Z0-90-9]")
self._numbers = re.compile(r"[0-90-9一二三四五六七八九十百千万億兆]")
def tokenize(self, text, join=False):
_txt = self._symbol_replace.sub(" ", text)
words = self.tokenizer.tokenize(_txt, wakati=True)
words = [w.strip() for w in words]
words = [w for w in words if w not in stop_words]
words = [w for w in words if not self._numbers.match(w)]
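        # Append a direction marker: scanning tokens from last to first, the first
        # one containing 増 (increase) or 減 (decrease) decides whether '増増増' or
        # '減減減' is appended; an empty string is appended if neither occurs.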
if len(words) > 0:
index_character = ""
_words = list(words)
_words.reverse()
for w in _words:
if "増" in w:
index_character = "増増増"
break
elif "減" in w:
index_character = "減減減"
break
words += [index_character]
if join:
return " ".join(words)
else:
return words
stop_words = ["あそこ",
"あたり",
"あちら",
"あっち",
"あと",
"あな",
"あなた",
"あれ",
"いくつ",
"いつ",
"いま",
"いや",
"いろいろ",
"うち",
"おおまか",
"おまえ",
"おれ",
"がい",
"かく",
"かたち",
"かやの",
"から",
"がら",
"きた",
"くせ",
"ここ",
"こっち",
"こと",
"ごと",
"こちら",
"ごっちゃ",
"これ",
"これら",
"ごろ",
"さまざま",
"さらい",
"さん",
"しかた",
"しよう",
"すか",
"ずつ",
"すね",
"すべて",
"ぜんぶ",
"そう",
"そこ",
"そちら",
"そっち",
"そで",
"それ",
"それぞれ",
"それなり",
"たくさん",
"たち",
"たび",
"ため",
"だめ",
"ちゃ",
"ちゃん",
"てん",
"とおり",
"とき",
"どこ",
"どこか",
"ところ",
"どちら",
"どっか",
"どっち",
"どれ",
"なか",
"なかば",
"なに",
"など",
"なん",
"はじめ",
"はず",
"はるか",
"ひと",
"ひとつ",
"ふく",
"ぶり",
"べつ",
"へん",
"ぺん",
"ほう",
"ほか",
"まさ",
"まし",
"まとも",
"まま",
"みたい",
"みつ",
"みなさん",
"みんな",
"もと",
"もの",
"もん",
"やつ",
"よう",
"よそ",
"わけ",
"わたし",
"ハイ",
"上",
"中",
"下",
"字",
"年",
"月",
"日",
"時",
"分",
"秒",
"週",
"火",
"水",
"木",
"金",
"土",
"国",
"都",
"道",
"府",
"県",
"市",
"区",
"町",
"村",
"各",
"第",
"方",
"何",
"的",
"度",
"文",
"者",
"性",
"体",
"人",
"他",
"今",
"部",
"課",
"係",
"外",
"類",
"達",
"気",
"室",
"口",
"誰",
"用",
"界",
"会",
"首",
"男",
"女",
"別",
"話",
"私",
"屋",
"店",
"家",
"場",
"等",
"見",
"際",
"観",
"段",
"略",
"例",
"系",
"論",
"形",
"間",
"地",
"員",
"線",
"点",
"書",
"品",
"力",
"法",
"感",
"作",
"元",
"手",
"数",
"彼",
"彼女",
"子",
"内",
"楽",
"喜",
"怒",
"哀",
"輪",
"頃",
"化",
"境",
"俺",
"奴",
"高",
"校",
"婦",
"伸",
"紀",
"誌",
"レ",
"行",
"列",
"事",
"士",
"台",
"集",
"様",
"所",
"歴",
"器",
"名",
"情",
"連",
"毎",
"式",
"簿",
"回",
"匹",
"個",
"席",
"束",
"歳",
"目",
"通",
"面",
"円",
"玉",
"枚",
"前",
"後",
"左",
"右",
"次",
"先",
"春",
"夏",
"秋",
"冬",
"一",
"二",
"三",
"四",
"五",
"六",
"七",
"八",
"九",
"十",
"百",
"千",
"万",
"億",
"兆",
"下記",
"上記",
"時間",
"今回",
"前回",
"場合",
"一つ",
"年生",
"自分",
"ヶ所",
"ヵ所",
"カ所",
"箇所",
"ヶ月",
"ヵ月",
"カ月",
"箇月",
"名前",
"本当",
"確か",
"時点",
"全部",
"関係",
"近く",
"方法",
"我々",
"違い",
"多く",
"扱い",
"新た",
"その後",
"半ば",
"結局",
"様々",
"以前",
"以後",
"以降",
"未満",
"以上",
"以下",
"幾つ",
"毎日",
"自体",
"向こう",
"何人",
"手段",
"同じ",
"感じ"]
|
11477357
|
from decimal import Decimal
from django.db.models import Count, Sum
from django.conf import settings
from rest_framework import viewsets, mixins, decorators
from rest_framework.response import Response
from rest_framework.views import APIView, status
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.reverse import reverse
from wxapp.models import WxUser
from wxapp.permissions import OnlyWxUser
from wx_pay.unified import WxPayOrderClient
from qapi.mixins import RelationMethod
from apps.config.models import BoolConfig
from .serializers import WxUserSerializer, WxUserSavePhoneSerializer, WxUserAccountLogSerializer, \
WithdrawSerializer, WithdrawCreateSerializer, WithdrawOperationLogSerializer, RechargeCreateSerializer, \
RechargeRecordSerializer, WxUserCreditLogSerializer
from .models import WxUserAccountLog, Withdraw, WithdrawOperationLog, RechargeRecord, WxUserCreditLog
from .filters import WxUserFilter, WxUserAccountLogFilter, WithdrawFilter, RechargeRecordFilter, WxUserCreditLogFilter
# Create your views here.
class WxUserInfoViewSet(viewsets.ReadOnlyModelViewSet, mixins.CreateModelMixin, RelationMethod):
"""
get: admin 获取微信用户列表
微信用户获取自己的信息
post: 保存更新微信用户的信息
get: /api/wxuserinfo/referrals/?order=xxxx 获取微信用户的帮手
默认按照时间用户的加入时间倒序排序
get:/api/wxuserinfo/<pk>/referrals_list/ 获取该微信用户的帮手
"""
serializer_class = WxUserSerializer
queryset = WxUser.objects.all().order_by('-date_joined')
permission_classes = (IsAuthenticated,)
filterset_class = WxUserFilter
def get_queryset(self):
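        # WeChat users only see their own record, staff see every user with a
        # nickname, and everyone else gets an empty queryset.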
user = self.request.user
queryset = super().get_queryset()
if getattr(user, 'is_wechat', False):
return queryset.filter(id=user.id)
if user.is_staff:
return queryset.exclude(nickname='')
return queryset.none()
def list(self, request, *args, **kwargs):
if getattr(request.user, 'is_wechat', False):
return Response(self.get_serializer(request.user).data)
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
""" 重写, 前端只需要 post """
if getattr(request.user, 'is_wechat', False):
serializer = self.get_serializer(request.user, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
else:
return Response(dict(code=403, message='仅限创建微信用户信息'), status=status.HTTP_403_FORBIDDEN)
@decorators.action(methods=['GET', ], detail=False)
def referrals(self, request, *args, **kwargs):
user = request.user
if getattr(request.user, 'is_wechat', False):
res = []
for r in request.user.relations.all():
res.append({'avatar_url': r.referral.avatar_url, 'nickname': r.referral.nickname,
'create_time': r.create_time})
return Response(res)
return Response(dict(code=403, message='仅限微信用户使用此接口'), status=status.HTTP_403_FORBIDDEN)
@decorators.action(methods=['GET', 'POST'], detail=False, permission_classes=(OnlyWxUser,))
def phone(self, request, *args, **kwargs):
if request.method == 'GET':
info = getattr(request.user, 'info', None)
has_phone = False
phone = ''
if info:
phone = info.phone
has_phone = bool(phone)
return Response(dict(has_phone=has_phone, phone=phone))
elif request.method == 'POST':
serializer = WxUserSavePhoneSerializer(data=request.data, context=dict(request=request))
serializer.is_valid(raise_exception=True)
phone_info = serializer.save()
return Response(phone_info)
else:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
@decorators.action(methods=['GET', ], detail=True)
def referrals_list(self, request, *args, **kwargs):
instance = self.get_object()
referral_ids = instance.relations.all().values_list('referral')
referrals = self.queryset.filter(pk__in=referral_ids) \
.annotate(referral_count=Count('relations'))
serializer = self.get_serializer(referrals, many=True)
return Response(serializer.data)
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser, ])
def testers(self, request, *args, **kwargs):
        '''
        Set whether the user is a tester\n
        post:
        {\n
        testers: true (or false; false = no, true = yes)
        }
        '''
instance = self.get_object()
testers = request.data.get('testers', 'false')
if testers == 'true':
instance.testers = True
elif testers == 'false':
instance.testers = False
instance.save()
return Response('修改完成')
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser])
def upload_perm(self, request, *args, **kwargs):
instance = self.get_object()
upload_perm = request.data.get('upload_perm', 'false')
if upload_perm == 'true':
instance.upload_perm = True
elif upload_perm == 'false':
instance.upload_perm = False
instance.save()
return Response('修改完成')
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser, ])
def rebate_right(self, request, *args, **kwargs):
        '''
        Set whether the user has the right to referral rebates\n
        post:
        {\n
        rebate_right: true (or false; false = no, true = yes)
        }
        '''
instance = self.get_object()
rebate_right = request.data.get('rebate_right', 'false')
instance.rebate_right = rebate_right
instance.save()
return Response('修改完成')
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser, ])
def bonus_right(self, request, *args, **kwargs):
        '''
        Set whether the user has the right to distribution commissions\n
        post:
        {\n
        bonus_right: true (or false; false = no, true = yes)
        }
        '''
instance = self.get_object()
bonus_right = request.data.get('bonus_right', 'false')
instance.bonus_right = bonus_right
instance.save()
return Response('修改完成')
@decorators.action(methods=['GET', ], detail=True)
def account_logs(self, request, *args, **kwargs):
return self.detail_route_view(request, 'account_logs', WxUserAccountLogSerializer, filter_class=None)
@decorators.action(methods=['GET', ], detail=True)
def credit_logs(self, request, *args, **kwargs):
return self.detail_route_view(request, 'credit_logs', WxUserCreditLogSerializer, filter_class=None)
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser, ])
def change_account(self, request, *args, **kwargs):
'''
post:
{\n
account: wallet (or credit)
operation: add (or subtract)
amount: "100" or (100)
}
'''
admin_user = request.user
if not admin_user.has_perm('account.change_account'):
return Response('您没有修改权限', status=status.HTTP_403_FORBIDDEN)
instance = self.get_object()
amount = request.data.get('amount', 0)
operation = request.data.get('operation', None)
if Decimal(amount) < 0 or not operation:
return Response('请输入正确的数值', status=status.HTTP_400_BAD_REQUEST)
if request.data.get('account') == 'wallet':
amount = Decimal(amount).quantize(Decimal('0.00'))
if operation == 'add':
WxUserAccountLog.record(instance, WxUserAccountLog.GIFT, balance=amount, remark='店铺赠送')
elif operation == 'subtract':
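                # Split the deduction: take up to the recharge balance first and
                # charge any remainder against the asset balance.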
if amount > instance.account.asset + instance.account.recharge:
return Response('扣减金额大于用户钱包余额', status=status.HTTP_400_BAD_REQUEST)
WxUserAccountLog.record(instance, WxUserAccountLog.DEDUCTION, remark='店铺扣减',
balance=amount if amount < instance.account.recharge else instance.account.recharge,
asset=0 if amount < instance.account.recharge else amount-instance.account.recharge)
elif request.data.get('account') == 'credit':
amount = int(amount)
if operation == 'add':
WxUserCreditLog.record(instance, WxUserCreditLog.GIFT, credit=amount, remark='店铺赠送')
elif operation == 'subtract':
if amount > instance.account.credit:
return Response('扣减积分大于用户积分', status=status.HTTP_400_BAD_REQUEST)
WxUserCreditLog.record(instance, WxUserCreditLog.DEDUCTION, credit=amount, remark='店铺扣减')
return Response('修改完成')
class AccountLogViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = WxUserAccountLogSerializer
queryset = WxUserAccountLog.objects.all()
permission_classes = (IsAdminUser,)
filterset_class = WxUserAccountLogFilter
    # WeChat users can only see their own account records; admins only see the records whose type is a rebate
def get_queryset(self):
if getattr(self.request.user, 'is_staff', False):
filter_list = []
if BoolConfig.get_bool('rebate_switch'):
filter_list.append(WxUserAccountLog.ASSET)
if BoolConfig.get_bool('bonus_switch'):
filter_list.append(WxUserAccountLog.BONUS)
return WxUserAccountLog.objects.filter(a_type__in=filter_list)
return WxUserAccountLog.objects.none()
class WxUserCreditLogViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = WxUserCreditLogSerializer
queryset = WxUserCreditLog.objects.all()
permission_classes = (IsAuthenticated,)
filterset_class = WxUserCreditLogFilter
def get_queryset(self):
if getattr(self.request.user, 'is_wechat', False):
return WxUserCreditLog.objects.filter(user=self.request.user)
if getattr(self.request.user, 'is_staff', False):
return WxUserCreditLog.objects.all()
return WxUserCreditLog.objects.none()
class WithdrawCreate(APIView):
"""
post:
{\n
"amount": 22.5
"wx_code": 'afeafeafe'
}
"""
serializer_class = WithdrawCreateSerializer
permission_classes = (OnlyWxUser,)
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data, context=dict(request=request))
serializer.is_valid(raise_exception=True)
instance = serializer.save()
return Response(instance)
class WithdrawViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
serializer_class = WithdrawSerializer
queryset = Withdraw.objects.all()
permission_classes = (IsAuthenticated,)
filterset_class = WithdrawFilter
def get_queryset(self):
if getattr(self.request.user, 'is_wechat', False):
return Withdraw.objects.filter(user=self.request.user)
if getattr(self.request.user, 'is_staff', False):
return Withdraw.objects.all()
return Withdraw.objects.none()
@decorators.action(methods=['POST', ], detail=True, permission_classes=[IsAdminUser, ])
def operation(self, request, *args, **kwargs):
"""
Post:
{\n
status: 1 或 2 (1 提现完成, 2 拒绝提现)
remark: '备注'
}
"""
instance = self.get_object()
if instance.status != instance.SUBMIT:
return Response('提现状态异常', status=status.HTTP_400_BAD_REQUEST)
status_ = request.data.get('status', '')
if status_ and status_ == 1:
instance.succ(admin=request.user)
return Response('提现完成')
if status_ and status_ == 2:
instance.fail(admin=request.user, remark=request.data.get('remark', ''))
return Response('提现以拒绝')
class WithdrawOperationLogViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = WithdrawOperationLogSerializer
permission_classes = [IsAdminUser, ]
queryset = WithdrawOperationLog.objects.all()
class Recharge(APIView):
serializer_class = RechargeCreateSerializer
permission_classes = (OnlyWxUser,)
def post(self, request, *args, **kwargs):
user = request.user
serializer = self.serializer_class(data=request.data, context=dict(request=request))
serializer.is_valid(raise_exception=True)
instance = serializer.save()
extra_data = {"openid": user.wx_app_openid}
order = WxPayOrderClient().create(
channel="wx_lite", # 小程序发起支付的标识
out_trade_no=instance.rchg_no,
            total_fee=int(instance.real_pay * 100), # amount in fen (1/100 yuan)
client_ip=request.META['REMOTE_ADDR'],
fee_type="CNY",
attach="recharge",
body='优惠充值',
notify_url=reverse('paycallback', request=request),
**extra_data
)
return Response(order)
class RechargeRecordViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = RechargeRecordSerializer
permission_classes = (IsAdminUser,)
queryset = RechargeRecord.objects.all()
filterset_class = RechargeRecordFilter
|
11477363
|
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma
class Park(object):
"""Class for fatigue life estimation using frequency domain
method by Tovo and Benasciutti[1, 2].
References
----------
[1] <NAME>, <NAME> and <NAME>. A new fatigue prediction model for marine
structures subject to wide band stress process. Ocean Engineering, 76: 144-151, 2014
[2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Vibration Fatigue by Spectral Methods, From Structural Dynamics to Fatigue Damage
– Theory and Experiments, ISBN: 9780128221907, Elsevier, 1st September 2020
Example
-------
Import modules, define time- and frequency-domain data
>>> import FLife
>>> import pyExSi as es
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> # time-domain data
>>> N = 2 ** 16 # number of data points of time signal
>>> fs = 2048 # sampling frequency [Hz]
>>> t = np.arange(0, N) / fs # time vector
>>> # frequency-domain data
>>> M = N // 2 + 1 # number of data points of frequency vector
>>> freq = np.arange(0, M, 1) * fs / N # frequency vector
>>> PSD_lower = es.get_psd(freq, 20, 60, variance = 5) # lower mode of random process
>>> PSD_higher = es.get_psd(freq, 100, 120, variance = 2) # higher mode of random process
>>> PSD = PSD_lower + PSD_higher # bimodal one-sided flat-shaped PSD
Get Gaussian stationary signal, instantiate SpectralData object and plot PSD
>>> rg = np.random.default_rng(123) # random generator seed
>>> x = es.random_gaussian(N, PSD, fs, rg) # Gaussian stationary signal
>>> sd = FLife.SpectralData(input=x, dt=1/fs) # SpectralData instance
>>> plt.plot(sd.psd[:,0], sd.psd[:,1])
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('PSD')
    Define S-N curve parameters and get fatigue-life estimate
>>> C = 1.8e+22 # S-N curve intercept [MPa**k]
>>> k = 7.3 # S-N curve inverse slope [/]
>>> park = FLife.Park(sd)
>>> print(f'Fatigue life: {park.get_life(C,k):.3e} s.')
Define stress vector and depict stress peak PDF
>>> s = np.arange(0,np.max(x),.01)
>>> plt.plot(s,park.get_PDF(s))
>>> plt.xlabel('Stress [MPa]')
>>> plt.ylabel('PDF')
"""
def __init__(self, spectral_data):
"""Get needed values from reference object.
:param spectral_data: Instance of class SpectralData
"""
self.spectral_data = spectral_data
def get_PDF(self, s):
"""Returns cycle PDF(Probability Density Function) as a function of stress s.
:param s: numpy.ndarray
Stress vector.
:return: function pdf(s)
"""
m0 = self.spectral_data.moments[0]
        # alphas are used for the n-th moment of the rainflow range distribution Mrr(n)
alpha2 = self.spectral_data.alpha2
alpha0_95 = self.spectral_data.get_bandwidth_estimator(self.spectral_data.PSD_splitting, i=0.95)[0]
alpha1_97 = self.spectral_data.get_bandwidth_estimator(self.spectral_data.PSD_splitting, i=1.97)[0]
alpha0_54 = self.spectral_data.get_bandwidth_estimator(self.spectral_data.PSD_splitting, i=0.54)[0]
alpha0_93 = self.spectral_data.get_bandwidth_estimator(self.spectral_data.PSD_splitting, i=0.93)[0]
alpha1_95 = self.spectral_data.get_bandwidth_estimator(self.spectral_data.PSD_splitting, i=1.95)[0]
#Mrr(n)
M_rr_1 = alpha2
M_rr_2 = alpha0_95*alpha1_97
M_rr_3 = alpha0_54*alpha0_93*alpha1_95
#distribution parameters
sigma_r1 = alpha2
C_r1 = (M_rr_2 - M_rr_3) / (sigma_r1**2 * (1 - sigma_r1))
C_r2 = (-sigma_r1*M_rr_2 + M_rr_3) / (1-sigma_r1)
C_g = 1 - C_r1 - C_r2
V_1 = 1/np.sqrt(np.pi) * gamma(1)/gamma(1.5)
sigma_g = 1/(V_1*C_g) * (M_rr_1 - C_r1*sigma_r1 - C_r2)
def park_pdf(s):
#PDF of stress amplitude normalized by standard deviation of process
#half-Gaussian
gauss_pdf = lambda s: 2/(np.sqrt(2*np.pi)*sigma_g)* np.exp(-s**2/(2*sigma_g**2))
#Rayleigh
rayleigh1_pdf = lambda s: s/sigma_r1**2 * np.exp(-s**2/(2*sigma_r1**2))
#Rayleigh with unit variance
rayleigh2_pdf = lambda s: s * np.exp(-s**2/2)
pdf_out = C_g*gauss_pdf(s) + C_r1*rayleigh1_pdf(s) + C_r2*rayleigh2_pdf(s)
return pdf_out
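        # Rescale from the normalized amplitude back to stress units: evaluate the
        # PDF at s/sqrt(m0) and divide by sqrt(m0).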
return 1/np.sqrt(m0) * park_pdf(s/np.sqrt(m0))
def get_life(self, C, k):
"""Calculate fatigue life with parameters C, k, as defined in [2].
:param C: [int,float]
S-N curve intercept [MPa**k].
:param k: [int,float]
S-N curve inverse slope [/].
:return:
Estimated fatigue life in seconds.
:rtype: float
"""
m_p = self.spectral_data.m_p
        d = m_p / C * quad(lambda s: s**k*self.get_PDF(s), a=0, b=np.inf)[0]
T = 1.0/d
return T
|
11477391
|
from tempfile import mkstemp
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_less
from sklearn.datasets import load_iris
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from pystruct.models import GridCRF, GraphCRF
from pystruct.datasets import generate_blocks_multinomial
from pystruct.learners import FrankWolfeSSVM
from pystruct.utils import SaveLogger
def test_multinomial_blocks_frankwolfe():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=50)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_multinomial_blocks_frankwolfe_batch():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.3, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=500, batch_mode=True)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_svm_as_crf_pickling_bcfw():
iris = load_iris()
X, y = iris.data, iris.target
    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
def test_svm_as_crf_pickling_batch():
iris = load_iris()
X, y = iris.data, iris.target
    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50, batch_mode=False)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
|
11477392
|
from mpas_analysis.shared.io.namelist_streams_interface import NameList, \
StreamsFile
from mpas_analysis.shared.io.utility import paths, decode_strings
from mpas_analysis.shared.io.write_netcdf import write_netcdf
from mpas_analysis.shared.io.mpas_reader import open_mpas_dataset
|
11477395
|
from django.apps import AppConfig
class ExampleBackendAppConfig(AppConfig):
name = 'example_backend_app'
|
11477398
|
import sys
import torch
import random
import numpy as np
import json
from torch.nn.utils import rnn
import progressbar
import random
import json
from torch import nn
import os
def map_bool(bool_status):
if bool_status == 'True':
return True
elif bool_status == 'False':
return False
else:
raise Exception('Wrong Bool Status')
format_mapping_dict = {
'metalwoz': {'nlu': False, 'bs': False, 'da': False, 'nlg': True},
'kvret': {'nlu': False, 'bs': True, 'da': False, 'nlg': True},
'woz': {'nlu': False, 'bs': True, 'da': False, 'nlg': True},
'camres676': {'nlu': False, 'bs': True, 'da': False, 'nlg': True},
'taskmaster': {'nlu': False, 'bs': True, 'da': False, 'nlg': True},
'e2e_ms': {'nlu': False, 'bs': True, 'da': True, 'nlg': True},
'frames': {'nlu': False, 'bs': True, 'da': True, 'nlg': True},
'schema_guided': {'nlu': False, 'bs': True, 'da': True, 'nlg': True}
}
dataset_name_list = ['e2e_ms', 'metalwoz', 'kvret', 'woz', 'camres676', 'taskmaster', 'frames', 'schema_guided']
class TOD_PRETRAINING_CORPUS:
def __init__(self, tokenizer, shuffle_mode, dataset_prefix_path, use_nlu, use_bs, use_da, use_nlg, max_tgt_len=128):
self.use_nlu, self.use_bs, self.use_da, self.use_nlg = \
map_bool(use_nlu), map_bool(use_bs), map_bool(use_da), map_bool(use_nlg)
print ('use NLU: {}, use DST: {}, use POL: {}, use NLG: {}'.format(use_nlu, use_bs, use_da, use_nlg))
print ('Tokenizer Size is %d' % len(tokenizer))
self.tokenizer = tokenizer
self.pad_token_id = self.tokenizer.convert_tokens_to_ids(['<_PAD_>'])[0]
self.sos_context_token_id = self.tokenizer.convert_tokens_to_ids(['<sos_context>'])[0]
self.eos_context_token_id = self.tokenizer.convert_tokens_to_ids(['<eos_context>'])[0]
self.eos_b_token_id = self.tokenizer.convert_tokens_to_ids(['<eos_b>'])[0]
self.eos_a_token_id = self.tokenizer.convert_tokens_to_ids(['<eos_a>'])[0]
self.eos_r_token_id = self.tokenizer.convert_tokens_to_ids(['<eos_r>'])[0]
self.eos_d_token_id = self.tokenizer.convert_tokens_to_ids(['<eos_d>'])[0]
self.shuffle_mode = shuffle_mode
self.max_tgt_len = max_tgt_len
# construct task-specific prefix
bs_prefix_text = 'translate dialogue to belief state:'
self.bs_prefix_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(bs_prefix_text))
da_prefix_text = 'translate dialogue to dialogue action:'
self.da_prefix_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(da_prefix_text))
nlg_prefix_text = 'translate dialogue to system response:'
self.nlg_prefix_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(nlg_prefix_text))
ic_prefix_text = 'translate dialogue to user intent:'
self.ic_prefix_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(ic_prefix_text))
self.train_all_dataset_list = self.load_data(dataset_prefix_path, train_test_mode='train', \
use_bs=self.use_bs, use_da=self.use_da, use_nlg=self.use_nlg)
self.dev_all_dataset_list = self.load_data(dataset_prefix_path, train_test_mode='test', \
use_bs=self.use_bs, use_da=self.use_da, use_nlg=self.use_nlg)
if self.use_nlu == True:
print ('Add Intent Classification for Pretraining.')
train_ic_sess_list, dev_ic_sess_list = self.load_train_dev_intent_classification_data(dataset_prefix_path)
elif self.use_nlu == False:
print ('Do not Add Intent Classification for Pretraining.')
train_ic_sess_list, dev_ic_sess_list = [], []
else:
raise Exception('Wrong use_Intent_Classification Mode!!!')
self.train_all_dataset_list += train_ic_sess_list
self.dev_all_dataset_list += dev_ic_sess_list
train_session_num, dev_session_num = len(self.train_all_dataset_list), len(self.dev_all_dataset_list)
self.train_data_list = self.shuffle_train_data()
self.dev_data_list = self.flatten_data(self.dev_all_dataset_list, mode='dev_set')
print ('train session number is {}, train turn number is {}, train turn number per session {}'.format(train_session_num, len(self.train_data_list), round(len(self.train_data_list)/train_session_num, 2)))
        print ('dev session number is {}, dev turn number is {}, dev turn number per session {}'.format(dev_session_num, len(self.dev_data_list), round(len(self.dev_data_list)/dev_session_num, 2)))
self.train_num, self.dev_num = len(self.train_data_list), len(self.dev_data_list)
def load_data(self, dataset_prefix_path, train_test_mode, use_bs, use_da, use_nlg):
all_dataset_list = []
for name in dataset_name_list:
one_dataset_list = self.parse_one_dataset(dataset_prefix_path, name, train_test_mode, use_bs, use_da, use_nlg)
if len(one_dataset_list) > 0:
all_dataset_list += one_dataset_list
return all_dataset_list
def parse_one_dataset(self, dataset_prefix_path, data_set_name, train_test_mode, use_bs, use_da, use_nlg):
assert train_test_mode in ['train', 'test']
# train_test_mode: 'train' or 'test'
bs_exist, da_exist, nlg_exist = format_mapping_dict[data_set_name]['bs'], \
format_mapping_dict[data_set_name]['da'], format_mapping_dict[data_set_name]['nlg']
dataset_path = dataset_prefix_path + '/' + data_set_name + '_' + train_test_mode + '.json'
print ('Loading data from {}'.format(dataset_path))
with open(dataset_path) as f:
data = json.load(f)
all_sess_list = []
for one_sess in data:
            dial_sess_list = one_sess["dialogue_session"] # this list contains all turns from one session
one_sess_list = []
# one_sess_list is a list of turns
# each turn is list of tuple pairs
previous_context = []
turn_num = len(dial_sess_list)
for turn_id in range(turn_num):
curr_turn = dial_sess_list[turn_id]
curr_turn_list = []
# this is a list of tuple pair (src, tgt)
# [(nlg_input, nlg_output), (bs_input, bs_output), (da_input, da_output)]
curr_user_input = curr_turn['user_id_list']
curr_sys_resp = curr_turn['resp_id_list']
# ----------------------------------------------------------- #
if use_nlg and nlg_exist: # adding nlg data into pre-training procedure
# construct nlg_input, nlg_output
nlg_input = previous_context + curr_user_input
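                        # task prefix + <sos_context> + last 900 ids of the dialogue
                        # context + <eos_context>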
nlg_input = self.nlg_prefix_id + [self.sos_context_token_id] + \
nlg_input[-900:] + [self.eos_context_token_id]
nlg_output = curr_sys_resp[:-1][:self.max_tgt_len] + [self.eos_r_token_id] # constrain the maximum tgt len
curr_turn_list.append((nlg_input, nlg_output))
if use_bs and bs_exist:
bs_input = previous_context + curr_user_input
bs_input = self.bs_prefix_id + [self.sos_context_token_id] + bs_input[-900:] + \
[self.eos_context_token_id]
curr_bspn = curr_turn['bspn_id_list']
bs_output = curr_bspn[:-1][:self.max_tgt_len] + [self.eos_b_token_id]
curr_turn_list.append((bs_input, bs_output))
if use_da and da_exist:
da_input = previous_context + curr_user_input
da_input = self.da_prefix_id + [self.sos_context_token_id] + da_input[-900:] + \
[self.eos_context_token_id]
curr_aspn = curr_turn['aspn_id_list']
da_output = curr_aspn[:-1][:self.max_tgt_len] + [self.eos_a_token_id]
curr_turn_list.append((da_input, da_output))
if len(curr_turn_list) > 0:
one_sess_list.append(curr_turn_list)
# update previous context
previous_context = previous_context + curr_user_input + curr_sys_resp
if len(one_sess_list) > 0:
all_sess_list.append(one_sess_list)
return all_sess_list
def load_intent_classification_data(self, path):
'''
        each data instance is treated as one session containing a single turn,
        so all instances have the following format:
[
[
[(ic_src, ic_tgt)]
],
[
[(ic_src, ic_tgt)]
],
...
]
'''
print ('Loading data from {}'.format(path))
all_sess_list = []
with open(path) as f:
data = json.load(f)
for one_dict in data:
one_intent_input = self.ic_prefix_id + [self.sos_context_token_id] + \
one_dict['user_id_list'][-900:] + [self.eos_context_token_id]
one_intent_output = one_dict['intent_id_list']
one_turn = [(one_intent_input, one_intent_output)]
one_sess = [one_turn]
all_sess_list.append(one_sess)
return all_sess_list
def load_train_dev_intent_classification_data(self, dataset_prefix_path):
train_path = dataset_prefix_path + '/train_intent_classification.json'
train_ic_sess_list = self.load_intent_classification_data(train_path)
dev_path = dataset_prefix_path + '/test_intent_classification.json'
dev_ic_sess_list = self.load_intent_classification_data(dev_path)
return train_ic_sess_list, dev_ic_sess_list
def shuffle_train_data(self):
return self.flatten_data(self.train_all_dataset_list, mode='train_set')
def flatten_data(self, all_dataset_session_list, mode):
'''
all_dataset_session_list:
contains all sessions from all datasets
each session contains multiple turns
each turn is a list which has a format ranging from
[(nlg_input, nlg_output)],
[(nlg_input, nlg_output), (bs_input, bs_output)],
[(nlg_input, nlg_output), (bs_input, bs_output), (da_input, da_output)]
'''
flatten_data_list = []
if mode == 'train_set':
if self.shuffle_mode == 'session_level':
tmp_session_list = all_dataset_session_list.copy()
random.shuffle(tmp_session_list)
for one_session in tmp_session_list:
for one_turn in one_session:
for one_tuple in one_turn:
flatten_data_list.append(one_tuple)
elif self.shuffle_mode == 'turn_level':
for one_session in all_dataset_session_list:
for one_turn in one_session:
for one_tuple in one_turn:
flatten_data_list.append(one_tuple)
random.shuffle(flatten_data_list)
else:
raise Exception('Wrong Shuffle Mode!!!')
elif mode == 'dev_set':
for one_session in all_dataset_session_list:
for one_turn in one_session:
for one_tuple in one_turn:
flatten_data_list.append(one_tuple)
else:
raise Exception()
return flatten_data_list
def get_batches(self, batch_size, mode):
#batch_size = self.cfg.batch_size
batch_list = []
if mode == 'train':
self.train_data_list = self.shuffle_train_data()
all_data_list = self.train_data_list
elif mode == 'dev':
all_data_list = self.dev_data_list
else:
raise Exception('Wrong Mode!!!')
all_input_data_list, all_output_data_list = [], []
for inp, oup in all_data_list:
all_input_data_list.append(inp)
all_output_data_list.append(oup)
data_num = len(all_input_data_list)
batch_num = int(data_num/batch_size) + 1
for i in range(batch_num):
start_idx, end_idx = i*batch_size, (i+1)*batch_size
if start_idx > data_num - 1:
break
            end_idx = min(end_idx, data_num)  # do not drop the final datapoint from the last batch
one_input_batch_list, one_output_batch_list = [], []
for idx in range(start_idx, end_idx):
one_input_batch_list.append(all_input_data_list[idx])
one_output_batch_list.append(all_output_data_list[idx])
one_batch = [one_input_batch_list, one_output_batch_list]
batch_list.append(one_batch)
out_str = 'Overall Number of datapoints is ' + str(data_num) + \
' Number of ' + mode + ' batches is ' + str(len(batch_list))
print (out_str)
return batch_list
def build_iterator(self, batch_size, mode):
batch_list = self.get_batches(batch_size, mode)
for i, batch in enumerate(batch_list):
yield batch
def pad_batch(self, batch_id_list):
batch_id_list = [torch.LongTensor(item) for item in batch_id_list]
batch_tensor = rnn.pad_sequence(batch_id_list, batch_first=True, padding_value=self.pad_token_id)
batch_mask = torch.ones_like(batch_tensor)
batch_mask = batch_mask.masked_fill(batch_tensor.eq(self.pad_token_id), 0.0).type(torch.FloatTensor)
return batch_tensor, batch_mask
def process_output(self, batch_tgt_id_list):
batch_tgt_id_list = [torch.LongTensor(item) for item in batch_tgt_id_list]
batch_tgt_tensor, _ = self.pad_batch(batch_tgt_id_list)
batch_tgt_input_tensor = batch_tgt_tensor[:, :-1].clone()
batch_tgt_output_tensor = batch_tgt_tensor[:, 1:].clone()
return batch_tgt_input_tensor, batch_tgt_output_tensor
def parse_batch_tensor(self, batch):
batch_input_id_list, batch_output_id_list = batch
batch_src_tensor, batch_src_mask = self.pad_batch(batch_input_id_list)
batch_input, batch_labels = self.process_output(batch_output_id_list)
batch_labels[batch_labels[:, :] == self.pad_token_id] = -100
return batch_src_tensor, batch_src_mask, batch_input, batch_labels
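# Hedged usage sketch (not part of the original file): assuming the class above is
# instantiated as `reader` (name hypothetical), the batching pipeline would roughly be:
#
#   for train_batch in reader.build_iterator(batch_size=32, mode='train'):
#       src, src_mask, tgt_input, tgt_labels = reader.parse_batch_tensor(train_batch)
#       # src / src_mask feed the encoder; tgt_labels are set to -100 at padded
#       # positions so they are ignored by the cross-entropy loss.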
|
11477411
|
import keras.backend as K
from keras.metrics import get
from keras import Model
from keras.layers import Dense, Dropout, Input, Flatten, Add, BatchNormalization, Concatenate
from keras import regularizers, initializers, constraints, activations
import numpy as np
class FFNN:
def __init__(self,
layers,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
dropout=0.0,
recursive_forecast=False,
):
"""
Base FeedForward Neural Network.
:param layers: list of integers. The i-th elem. of the list is the number of units of the i-th layer.
        :param dropout: float in [0, 1). Dropout rate applied after the hidden Dense layers.
        :param recursive_forecast: bool. If True, the model outputs a single step and is applied
            recursively over the forecasting horizon at prediction time; if False, it outputs
            the whole horizon at once.
        For all the other parameters see keras.layers.Dense.
"""
self.layers = layers
self.dropout = dropout
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.recursive_forecast = recursive_forecast
self.model = None
self.horizon = None
def model_inputs(self, input_shape, conditions_shape=None):
"""
:param input_shape: np.array
(window_size, n_features)
:param conditions_shape: np.array
(horizon, n_features)
:return: a tuple containing:
- a list containing all the Input Layers needed by the model
            - the tensor that has to be fed to the subsequent layers of the architecture
"""
inputs = Input(shape=input_shape, name='input')
if conditions_shape is not None:
conditions = Input(shape=conditions_shape, name='exogenous')
            # pass through different dense layers so that both branches have the same number of channels
out = Concatenate(axis=1)(
[Dense(units=128, activation='sigmoid')(inputs),
Dense(units=128, activation='tanh')(conditions)]
) # concatenate over temporal axis
return [inputs, conditions], out
return inputs, inputs
def build_model(self, input_shape, horizon, conditions_shape=None):
"""
Create a Model that takes as inputs:
        - a 3D tensor of shape (batch_size, window_size, n_features)
        - optionally a 3D tensor of shape (batch_size, horizon, n_features) with the exogenous conditions
and outputs:
- 2D tensor of shape (batch_size, 1) or (batch_size, horizon), depending on the value of
recursive_forecast.
:param input_shape: np.array
(window_size, n_features)
:param horizon: int
the forecasting horizon
:param conditions_shape: np.array
(horizon, n_features)
:return: a keras Model
"""
pass
def predict(self, inputs):
if self.recursive_forecast:
return self._predict_rec(inputs)
else:
return self.model.predict(inputs)
def _predict_rec(self, inputs):
"""
Perform prediction when the model's recursive_forecast flag is set to True.
        The network input at time t+1 is fed with the prediction at time t; this is repeated
        'horizon' times. If exogenous features are available they are integrated into the
        forecasting process.
:param inputs:
np.array with shape: `(batch, window_size, n_features)`
or list of tensor having shape:
- np.array with shape: `(batch, window_size, n_features)`
- np.array with shape: `(batch, horizon, n_features)`
:return: np.array
(batch, horizon)
"""
try:
inputs, conditions = inputs
except ValueError:
conditions = None
outputs = np.zeros((inputs.shape[0], self.horizon)) # (batch_size, pred_steps)
for i in range(self.horizon):
if conditions is not None:
next_exog = conditions[:, i:i + 1, :] # exog at time i
out = self.model.predict([inputs, next_exog]) # output at time i
inputs = np.concatenate([inputs[:, 1:, :],
np.concatenate([np.expand_dims(out,-1), next_exog], -1)],
1) # shift input and concat exog
else:
out = self.model.predict(inputs) # [batch, 1]
inputs = np.concatenate([inputs[:, 1:, :],
np.expand_dims(out, -1)],
1)
outputs[:, i] = out[:, 0]
return outputs
def evaluate(self, inputs, fn_inverse=None, fn_plot=None):
try:
X, y = inputs
inputs = X
except:
X, conditions, y = inputs
inputs = [X, conditions]
y_hat = self.predict(inputs)
if fn_inverse is not None:
y_hat = fn_inverse(y_hat)
y = fn_inverse(y)
if fn_plot is not None:
fn_plot([y, y_hat])
results = []
for m in self.model.metrics:
if isinstance(m, str):
results.append(K.eval(K.mean(get(m)(y, y_hat))))
else:
results.append(K.eval(K.mean(m(y, y_hat))))
return results
class SimpleNet(FFNN):
def build_model(self, input_shape, horizon, conditions_shape=None):
self.horizon = horizon
model_inputs, inputs = self.model_inputs(input_shape, conditions_shape)
out = Flatten()(inputs)
for units in self.layers:
out = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(out)
out = Dropout(self.dropout)(out)
if self.recursive_forecast:
out = Dense(units=1, activation='linear')(out)
else:
out = Dense(units=self.horizon, activation='linear')(out)
self.model = Model(model_inputs, out)
self.model.summary()
return self.model
class ResNet(FFNN):
def _residual_block(self, units, inputs):
out = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(inputs)
out = Dropout(self.dropout)(out)
out = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(out)
out = BatchNormalization(trainable=True)(out)
if K.int_shape(inputs)[-1] != K.int_shape(out)[-1]:
inputs = Dense(units=units, kernel_regularizer=self.kernel_regularizer, activation=self.activation,
kernel_initializer=self.kernel_initializer, kernel_constraint=self.kernel_constraint,
use_bias=self.use_bias, bias_regularizer=self.bias_regularizer,
bias_initializer=self.bias_initializer, bias_constraint=self.bias_constraint)(inputs)
out = Add()([inputs, out])
return out
def build_model(self, input_shape, horizon, conditions_shape=None):
self.horizon = horizon
model_inputs, inputs = self.model_inputs(input_shape, conditions_shape)
out = Flatten()(inputs)
for units in self.layers:
out = self._residual_block(units, out)
if self.recursive_forecast:
out = Dense(units=1, activation='linear')(out)
else:
out = Dense(units=self.horizon, activation='linear')(out)
self.model = Model(model_inputs, out)
self.model.summary()
return self.model
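# Hedged usage sketch (not part of the original module). The window size, feature count
# and horizon below are illustrative values, not taken from the source.
if __name__ == "__main__":
    net = SimpleNet(layers=[64, 32], activation="relu", dropout=0.1, recursive_forecast=False)
    model = net.build_model(input_shape=(24, 3), horizon=6)  # 24 past steps, 3 features
    model.compile(optimizer="adam", loss="mse", metrics=["mae"])
    dummy_x = np.random.rand(8, 24, 3).astype("float32")
    # recursive_forecast=False, so the model emits the whole horizon at once
    print(net.predict(dummy_x).shape)  # expected: (8, 6)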
|
11477449
|
import os
import logging
import torch
from torch.utils.data import TensorDataset
from src.pequod.data.utils_squad import (read_squad_examples,
convert_examples_to_features)
logger = logging.getLogger(__name__)
def load_and_cache_examples(args, split, lang, tokenizer, key="", evaluate=False):
cache_filename = os.path.join(
args.data_dir, "cached_%s_%s_%s" % (split, lang, key))
input_file = os.path.join(args.data_dir, "%s-%s.json" % (split, lang))
if os.path.exists(cache_filename):
logger.info("Loading features from cached file %s", cache_filename)
features = torch.load(cache_filename)
if evaluate:
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
else: examples = None
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, max_query_length=args.max_query_length,
is_training=not evaluate, cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token)
logger.info("Saving features into cached file %s", cache_filename)
torch.save(features, cache_filename)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor(
[f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor(
[f.p_mask for f in features], dtype=torch.float)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor(
[f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor(
[f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions, all_cls_index, all_p_mask)
return dataset, examples, features
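# Hedged usage sketch (illustrative only): `args` must carry the attributes referenced
# above (data_dir, version_2_with_negative, max_seq_length, doc_stride, max_query_length);
# the concrete values and the tokenizer object are assumptions, not taken from the source.
#
#   from argparse import Namespace
#   args = Namespace(data_dir="data/", version_2_with_negative=False,
#                    max_seq_length=384, doc_stride=128, max_query_length=64)
#   dataset, examples, features = load_and_cache_examples(
#       args, split="dev", lang="en", tokenizer=tokenizer, key="v1", evaluate=True)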
|
11477468
|
import torch.nn as nn
from .configuration_rcan import RcanConfig
from ...modeling_utils import (
default_conv,
BamBlock,
MeanShift,
Upsampler,
PreTrainedModel
)
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
class RCAB(nn.Module):
def __init__(
self, bam, conv, n_feat, kernel_size, reduction,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
if bam:
modules_body.append(BamBlock(n_feat, reduction))
else:
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
# res = self.body(x).mul(self.res_scale)
res += x
return res
class ResidualGroup(nn.Module):
def __init__(self, bam, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):
super(ResidualGroup, self).__init__()
modules_body = [
RCAB(bam, conv, n_feat, kernel_size, reduction, bias=True, bn=False,
act=nn.ReLU(True), res_scale=1) for _ in range(n_resblocks)]
modules_body.append(conv(n_feat, n_feat, kernel_size))
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res += x
return res
class RcanModel(PreTrainedModel):
config_class = RcanConfig
def __init__(self, args, conv=default_conv):
super(RcanModel, self).__init__(args)
n_resgroups = args.n_resgroups
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
reduction = args.reduction
scale = args.scale
act = nn.ReLU(True)
rgb_mean = args.rgb_mean
rgb_std = args.rgb_std
self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)
# define head module
modules_head = [conv(args.n_colors, n_feats, kernel_size)]
# define body module
modules_body = [
ResidualGroup(
args.bam, conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale,
n_resblocks=n_resblocks) for _ in range(n_resgroups)]
modules_body.append(conv(n_feats, n_feats, kernel_size))
# define tail module
modules_tail = [
Upsampler(conv, scale, n_feats, act=False),
conv(n_feats, args.n_colors, kernel_size)]
self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
self.head = nn.Sequential(*modules_head)
self.body = nn.Sequential(*modules_body)
self.tail = nn.Sequential(*modules_tail)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError(f'While copying the parameter named {name}, '
f'whose dimensions in the model are {own_state[name].size()} and '
f'whose dimensions in the checkpoint are {param.size()}.')
elif strict:
if name.find('tail') == -1:
raise KeyError(f'unexpected key "{name}" in state_dict')
|
11477489
|
import tensorflow as tf
import numpy as np
from scipy.interpolate import interp1d
def weight_variable(shape, name=None):
return tf.get_variable(name=name, shape=shape, dtype=tf.float32, initializer=tf.truncated_normal_initializer(stddev=0.001))
def bias_variable(shape, name=None):
return tf.get_variable(name=name, shape=shape, dtype=tf.float32, initializer=tf.constant_initializer(0))
def conv2d(x, w, strides=1, name=None):
return tf.nn.conv2d(x, w, strides=[1, 1, strides, 1], padding="SAME", name=name)
def lrelu(x, leak=0.2):
return tf.maximum(x, leak*x)
def prelu(x, scope=None):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu", shape=1,
dtype=x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, x) + _alpha * tf.minimum(0.0, x), _alpha
def deconv(x, w, output_shape, strides, name=None):
dyn_input_shape = tf.shape(x)
batch_size = dyn_input_shape[0]
output_shape = tf.stack([batch_size, output_shape[1], output_shape[2], output_shape[3]])
output = tf.nn.conv2d_transpose(x, w, output_shape, strides, padding="SAME", name=name)
return output
def prefilter(k_size, channel_in, channel_out, name=None):
x = np.linspace(0, 80, num=k_size)
filters = np.zeros([k_size, 1])
filters[int((k_size - 1) / 2), 0] = 1
for chn in range(channel_out - 1):
y = np.exp(-np.square(x - 40) / (200 / ((channel_out - 1) * 5 + 1) * (chn * 5 + 1)))
value = interp1d(x, y, kind='cubic')
value = value(x)
value = value / np.sum(value)
filters = np.concatenate((filters, np.expand_dims(value, axis=1)), axis=1)
filters = np.tile(filters, [1, channel_in, 1, 1])
filters = np.transpose(filters, (0, 2, 1, 3))
return tf.get_variable(name=name, shape=[1, k_size, channel_in, channel_out], dtype=tf.float32, initializer=tf.constant_initializer(filters))
def shear(x, scale):
global y
input_shape = x.get_shape().as_list()
hei = input_shape[1]
wid = input_shape[2]
shift_max = np.ceil((hei - 1) / 2 * abs(scale))
base_shift = shift_max - (hei - 1) / 2 * abs(scale)
paddings = [[0, 0], [0, 0], [int(shift_max), int(shift_max)], [0, 0]]
x = tf.pad(x, paddings)
for i in range(hei):
if scale > 0:
shift = i * scale + base_shift
else:
shift = (hei - i - 1) * abs(scale) + base_shift
if shift == int(shift):
cur_y = tf.slice(x, [0, i, int(shift), 0], [-1, 1, wid, -1])
else:
cur_y = tf.add((shift - np.floor(shift)) * tf.slice(x, [0, i, int(np.ceil(shift)), 0], [-1, 1, wid, -1]),
(np.ceil(shift) - shift) * tf.slice(x, [0, i, int(np.floor(shift)), 0], [-1, 1, wid, -1]))
if i == 0:
y = cur_y
else:
y = tf.concat([y, cur_y], axis=1)
return y
def reconstructor(up_scale, x, shear_value=0, chn=27):
with tf.variable_scope('SR', reuse=tf.AUTO_REUSE):
input_shape = x.get_shape().as_list()
size_wid = [int(input_shape[2] / 4), int(input_shape[2] / 2), input_shape[2]]
ang_in = input_shape[1]
chn_in = input_shape[3]
ang_out = (ang_in - 1) * up_scale + 1
chn_Laplacian = 10
num_prefilter = 20
# Shear feature maps
s0 = shear(x, shear_value)
"""Decomposition"""
# Layer 1
w = weight_variable([5, 5, chn_in, chn_Laplacian], 'w1')
b = bias_variable([chn_Laplacian], 'b1')
s1 = lrelu(conv2d(s0, w, 4) + b)
w = weight_variable([3, 3, chn_Laplacian, chn_Laplacian], 'Dw1_1')
b = bias_variable([chn_Laplacian], 'Db1_1')
s1_2 = lrelu(deconv(s1, w, [-1, ang_in, size_wid[1], chn_Laplacian], [1, 1, 2, 1]) + b)
# Layer 2
w = weight_variable([5, 5, chn_in, chn_Laplacian], 'w2')
b = bias_variable([chn_Laplacian], 'b2')
s2 = lrelu(conv2d(s0, w, 2) + b)
w = weight_variable([5, 5, chn_Laplacian, chn_Laplacian], 'Dw2_1')
b = bias_variable([chn_Laplacian], 'Db2_1')
s2_2 = lrelu(deconv(s2, w, [-1, ang_in, size_wid[2], chn_Laplacian], [1, 1, 2, 1]) + b)
s2 = tf.subtract(s2, s1_2)
# Layer 3
w = weight_variable([5, 5, chn_in, chn_Laplacian], 'w3')
b = bias_variable([chn_Laplacian], 'b3')
s3 = lrelu(conv2d(s0, w, 1) + b)
s3 = tf.subtract(s3, s2_2)
"""Pre-filter"""
w = prefilter(k_size=5, channel_in=chn_Laplacian, channel_out=num_prefilter, name='Prefilter1')
s1 = conv2d(s1, w, 1)
w = prefilter(k_size=11, channel_in=chn_Laplacian, channel_out=num_prefilter, name='Prefilter2')
s2 = conv2d(s2, w, 1)
w = prefilter(k_size=21, channel_in=chn_Laplacian, channel_out=num_prefilter, name='Prefilter3')
s3 = conv2d(s3, w, 1)
"""Feature extraction"""
w = weight_variable([3, 3, num_prefilter, chn], 'w4')
b = bias_variable([chn], 'b4')
s1 = lrelu(conv2d(s1, w, 1) + b)
w = weight_variable([3, 3, num_prefilter, chn], 'w5')
b = bias_variable([chn], 'b5')
s2 = lrelu(conv2d(s2, w, 1) + b)
w = weight_variable([3, 3, num_prefilter, chn], 'w6')
b = bias_variable([chn], 'b6')
s3 = lrelu(conv2d(s3, w, 1) + b)
"""Concatenation"""
w = weight_variable([5, 5, chn, chn], 'Dw3')
b = bias_variable([chn], 'Db3')
s1 = lrelu(deconv(s1, w, [-1, ang_in, size_wid[2], chn], [1, 1, 4, 1]) + b)
w = weight_variable([5, 5, chn, chn], 'Dw4')
b = bias_variable([chn], 'Db4')
s2 = lrelu(deconv(s2, w, [-1, ang_in, size_wid[2], chn], [1, 1, 2, 1]) + b)
s = tf.concat([s1, s2, s3], -1)
"""Mapping"""
w = weight_variable([3, 3, chn * 3, chn * 3], 'w7')
b = bias_variable([chn * 3], 'b7')
s = lrelu(tf.layers.batch_normalization(conv2d(s, w, 1) + b))
"""Angular reconstruction & inverse shear"""
w = weight_variable([9, 9, chn, chn * 3], 'Dw5')
b = bias_variable([chn], 'Db5')
s = deconv(s, w, [-1, ang_out, size_wid[2], chn], [1, up_scale, 1, 1]) + b
h = shear(s, -shear_value / up_scale)
return h
def blender(x, chn=27):
with tf.variable_scope('Blender'):
input_shape = x.get_shape().as_list()
size_wid = [int(input_shape[2] / 4), int(input_shape[2] / 2), input_shape[2]]
chn_in = input_shape[3]
ang_in = input_shape[1]
# Blending
w = weight_variable([1, 1, chn_in, chn], 'w0')
b = bias_variable([chn], 'b0')
h0 = lrelu(conv2d(x, w, 1) + b)
# Encoder: Stride 2
w = weight_variable([3, 3, chn, chn * 2], 'w1')
b = bias_variable([chn * 2], 'b1')
h1 = lrelu(conv2d(h0, w, 2) + b)
w = weight_variable([3, 3, chn * 2, chn * 2], 'w2')
b = bias_variable([chn * 2], 'b2')
h1 = lrelu(conv2d(h1, w, 1) + b)
# Encoder: Stride 2
w = weight_variable([3, 3, chn * 2, chn * 2], 'w3')
b = bias_variable([chn * 2], 'b3')
h2 = lrelu(conv2d(h1, w, 2) + b)
w = weight_variable([3, 3, chn * 2, chn * 2], 'w4')
b = bias_variable([chn * 2], 'b4')
h2 = lrelu(conv2d(h2, w, 1) + b)
# Mapping
w = weight_variable([3, 3, chn * 2, chn * 2], 'w5')
b = bias_variable([chn * 2], 'b5')
h2 = lrelu(tf.layers.batch_normalization(conv2d(h2, w, 1) + b))
# Decoder: Stride 2
w = weight_variable([5, 5, chn * 2, chn * 2], 'Dw1')
b = bias_variable([chn * 2], 'Db1')
h2 = deconv(h2, w, [-1, ang_in, size_wid[1], chn * 2], [1, 1, 2, 1]) + b
h3 = tf.concat([lrelu(h2), h1], 3)
w = weight_variable([1, 1, chn * 4, chn * 2], 'Dw2')
b = bias_variable([chn * 2], 'Db2')
h3 = lrelu(conv2d(h3, w, 1) + b)
# Decoder: Stride 2
w = weight_variable([5, 5, chn, chn * 2], 'Dw3')
b = bias_variable([chn], 'Db3')
h4 = deconv(h3, w, [-1, ang_in, size_wid[2], chn], [1, 1, 2, 1]) + b
h4 = tf.concat([lrelu(h4), h0], 3)
w = weight_variable([1, 1, chn * 2, chn], 'Dw4')
b = bias_variable([chn], 'Db4')
h4 = lrelu(conv2d(h4, w, 1) + b)
w = weight_variable([9, 9, chn, 1], 'w6') # The difference with old model
b = bias_variable([1], 'b6')
h = conv2d(h4, w, 1) + b
return h
def model(up_scale, x):
input_shape = x.get_shape().as_list()
size_wid = [int(input_shape[2] / 4), int(input_shape[2] / 2), input_shape[2]]
ang_in = input_shape[1]
ang_out = (ang_in - 1) * up_scale + 1
chn_base = 27
# Shear reconstructor
s1 = reconstructor(up_scale, x, shear_value=-9, chn=chn_base)
s2 = reconstructor(up_scale, x, shear_value=-6, chn=chn_base)
s3 = reconstructor(up_scale, x, shear_value=-3, chn=chn_base)
s4 = reconstructor(up_scale, x, shear_value=0, chn=chn_base)
s5 = reconstructor(up_scale, x, shear_value=3, chn=chn_base)
s6 = reconstructor(up_scale, x, shear_value=6, chn=chn_base)
s7 = reconstructor(up_scale, x, shear_value=9, chn=chn_base)
s = tf.concat([s1, s2, s3, s4, s5, s6, s7], axis=-1)
# Shear blender
y_out = blender(s, chn=chn_base)
return y_out
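# Hedged usage sketch (comments only; the shapes are assumptions, not taken from the source).
# The spatial width should be a multiple of 4 because of the stride-4 analysis branch, and
# the output angular dimension is (ang_in - 1) * up_scale + 1.
#
#   x = tf.placeholder(tf.float32, [None, 3, 64, 1])  # (batch, ang_in, width, 1)
#   y = model(up_scale=2, x=x)                        # -> (batch, 5, 64, 1)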
|
11477491
|
import math
class Solution:
def maxScore(self, s: str) -> int:
count0 = count1 = 0
maxDiff = -math.inf
for i, c in enumerate(s):
if c == '0':
count0 += 1
else:
count1 += 1
if i != len(s) - 1:
maxDiff = max(maxDiff, count0 - count1)
return maxDiff + count1
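# Hedged illustration (not part of the original snippet):
#   Solution().maxScore("011101") == 5
# The best split is "0" | "11101": one zero on the left plus four ones on the right.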
|
11477498
|
import numpy as np
# grid
spacing = 0.01
length = 1.5
x = np.arange(0, length, spacing)
# velocity
v = 1.0
# time
start = 0.0
end = 1.0
step = 0.01
# initial gauss profile
loc = 0.3
scale = 0.1
u = np.exp(-1 / scale ** 2 * (x - loc) ** 2)
u0 = u.copy()
# time loop - Lax method
factor = (v * step) / (2 * spacing)
for t in np.arange(start, end, step):
u_left = np.roll(u, 1)
u_right = np.roll(u, -1)
u1 = 0.5 * (u_right + u_left) - factor * (u_right - u_left)
u = u1.copy()
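# Note (added, hedged): with v = 1.0, step = 0.01 and spacing = 0.01 the Courant number
# v * step / spacing equals 1.0, the stability limit of the Lax scheme; at this value the
# update reduces to a pure shift, so the Gaussian pulse is advected without damping.
# Minimal visualization sketch, assuming matplotlib is available:
#   import matplotlib.pyplot as plt
#   plt.plot(x, u0, label="initial")
#   plt.plot(x, u, label="after advection")
#   plt.legend()
#   plt.show()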
|
11477515
|
from vizh.ir import *
import tests.backend_test
class HelloPutstr(tests.backend_test.BackendTest):
def __init__(self):
super().__init__("putstr")
def get_function(self):
return Function(FunctionSignature("main", 1, False), self.to_instructions(
[InstructionType.INC]*72 +
[InstructionType.READ,
InstructionType.RIGHT,
InstructionType.WRITE] +
[InstructionType.INC]*29 +
[InstructionType.READ,
InstructionType.RIGHT,
InstructionType.WRITE] +
[InstructionType.INC]*7 +
[InstructionType.READ,
InstructionType.RIGHT,
InstructionType.WRITE,
InstructionType.RIGHT,
InstructionType.WRITE] +
[InstructionType.INC]*3 +
[InstructionType.LEFT] * 4 +
[(InstructionType.CALL, "putstr")]))
|
11477523
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import button
from esphome.components.ota import OTAComponent
from esphome.const import (
CONF_ID,
CONF_OTA,
DEVICE_CLASS_RESTART,
ENTITY_CATEGORY_CONFIG,
ICON_RESTART_ALERT,
)
DEPENDENCIES = ["ota"]
safe_mode_ns = cg.esphome_ns.namespace("safe_mode")
SafeModeButton = safe_mode_ns.class_("SafeModeButton", button.Button, cg.Component)
CONFIG_SCHEMA = (
button.button_schema(
device_class=DEVICE_CLASS_RESTART,
entity_category=ENTITY_CATEGORY_CONFIG,
icon=ICON_RESTART_ALERT,
)
.extend({cv.GenerateID(): cv.declare_id(SafeModeButton)})
.extend({cv.GenerateID(CONF_OTA): cv.use_id(OTAComponent)})
.extend(cv.COMPONENT_SCHEMA)
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await button.register_button(var, config)
ota = await cg.get_variable(config[CONF_OTA])
cg.add(var.set_ota(ota))
|
11477536
|
from sys import platform
import argparse
import os
from dotaservice.dotaservice import main
from dotaservice.dotaservice import verify_game_path
def get_default_game_path():
game_path = None
if platform == "linux" or platform == "linux2":
game_path = os.path.expanduser("~/Steam/steamapps/common/dota 2 beta/game")
elif platform == "darwin":
game_path = os.path.expanduser(
"~/Library/Application Support/Steam/SteamApps/common/dota 2 beta/game")
return game_path
def get_default_action_path():
action_path = None
if platform == "linux" or platform == "linux2":
action_path = "/tmp/"
elif platform == "darwin":
#action_path = "/Volumes/ramdisk/"
action_path = "/tmp/"
return action_path
if platform not in ["linux", "linux2", "darwin"]:
raise EnvironmentError("Platform {} not supported.".format(platform))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--ip", type=str, help="gRPC host ip", default="")
parser.add_argument("-p", "--port", type=int, help="gRPC port", default=13337)
parser.add_argument("--remove-logs", type=bool, help="Removes logs after each game", default=True)
parser.add_argument("-l", "--log", dest="log_level", help="Set the logging level",
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='INFO')
parser.add_argument(
"--game-path",
type=str,
default=get_default_game_path(),
help="Path to the dota entrypoint (dota.sh).")
parser.add_argument(
"--action-path",
type=str,
default=get_default_action_path(),
help="Path to the root folder in which the game logs will be saved.")
args = parser.parse_args()
main(
grpc_host=args.ip,
grpc_port=args.port,
dota_path=args.game_path,
action_folder=args.action_path,
remove_logs=args.remove_logs,
log_level=args.log_level,
)
|
11477555
|
from typing import List
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
n = len(matrix)
lo = matrix[0][0]
hi = matrix[n - 1][n - 1]
def countNotGreater(target: int) -> int:
i, j = 0, n - 1
cnt = 0
while i < n and j >= 0:
if matrix[i][j] <= target:
cnt += j + 1
i += 1
else:
j -= 1
return cnt
while lo < hi:
mid = (lo + hi) // 2
cnt = countNotGreater(mid)
if cnt < k:
lo = mid + 1
else:
hi = mid
return lo
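# Hedged illustration (not part of the original snippet):
#   Solution().kthSmallest([[1, 5, 9], [10, 11, 13], [12, 13, 15]], k=8) == 13
# The binary search runs over the value range [1, 15]; countNotGreater(mid) counts the
# entries <= mid in O(n) per call by walking from the top-right corner of the matrix.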
|
11477599
|
import argparse
import numpy as np
import pandas as pd
import time
import random
import os
import errno
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch import nn, optim, autograd
from torch.autograd import Variable
from torchvision.utils import save_image
from sklearn.linear_model import LogisticRegression, Ridge
from utils import cov, compute_prob, mean_nll, mean_accuracy, mean_accuracy_np
from vae import VAE, vae_loss_function, train_vae, test_vae
randseed = int(time.time()*1e7%1e8)
print("random seed: ", randseed)
random.seed(randseed)
np.random.seed(randseed)
torch.manual_seed(randseed)
out_dir = 'supervised_out'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
parser = argparse.ArgumentParser(description='Colored MNIST Supervised')
parser.add_argument('--hidden_dim', type=int, default=256)
parser.add_argument('--l2_reg', type=float, default=1.)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--n_restarts', type=int, default=1)
parser.add_argument('--steps', type=int, default=1001)
parser.add_argument('--grayscale_model', action='store_true')
parser.add_argument('--mode', type=str, default="linear", choices=["linear", "logistic"])
parser.add_argument('--z_dim', type=int, default=64)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--num_features', type=int, default=20)
parser.add_argument('--input_dim', type=int, default=2*14*14)
parser.add_argument('--vae_epochs', type=int, default=101)
parser.add_argument('--spurious_corr', type=float, default=0.8)
parser.add_argument('--alter_freq', type=int, default=100)
flags = parser.parse_args()
print('Flags:')
for k,v in sorted(vars(flags).items()):
print("\t{}: {}".format(k, v))
final_train_accs = []
final_test_accs = []
final_train_baselineaccs = []
final_test_baselineaccs = []
final_train_baselinevaeaccs = []
final_test_baselinevaeaccs = []
for restart in range(flags.n_restarts):
print("Restart", restart)
# Load MNIST, make train/val splits, and shuffle train set examples
mnist = datasets.MNIST('~/datasets/mnist', train=True, download=True)
size = 50000
mnist_train = (mnist.data[:size], mnist.targets[:size])
mnist_val = (mnist.data[size:], mnist.targets[size:])
def make_environment(images, labels, e):
def torch_bernoulli(p, size):
return (torch.rand(size) < p).float()
def torch_xor(a, b):
return (a-b).abs() # Assumes both inputs are either 0 or 1
# 2x subsample for computational convenience
        images = images.reshape((-1, 28, 28))[:, ::2, ::2]
subset = [(labels==1) | (labels==8)]
images = images[subset]
labels = labels[subset]
# Assign a binary label based on the digit; flip label with probability 0.25
labels = (labels == 1).float()
labels = torch_xor(labels, torch_bernoulli(0.25, len(labels)))
# Assign a color based on the label; flip the color with probability e
colors = torch_xor(labels, torch_bernoulli(e, len(labels)))
# Apply the color to the image by zeroing out the other color channel
images = torch.stack([images, images], dim=1)
images[torch.tensor(range(len(images))), (1-colors).long(), :, :] *= 0
images = images.view(-1, flags.input_dim)
return {
'images': (images.float() / 255.).cuda(),
'labels': labels[:, None].cuda(),
'colors': colors[:, None].cuda()
}
envs = [
make_environment(mnist_train[0], mnist_train[1], 1-flags.spurious_corr),
make_environment(mnist_val[0], mnist_val[1], 0.9)
]
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.num_features = flags.num_features
lin1 = nn.Linear(flags.input_dim, flags.hidden_dim)
lin2 = nn.Linear(flags.hidden_dim, flags.hidden_dim)
lin3 = nn.Linear(flags.hidden_dim, self.num_features)
lin4 = nn.Linear(flags.z_dim, flags.hidden_dim)
lin5 = nn.Linear(flags.hidden_dim, 1)
for i,lin in enumerate([lin1, lin2, lin3, lin4, lin5]):
nn.init.xavier_uniform_(lin.weight, 1.)
nn.init.zeros_(lin.bias)
print("layer", i, lin.weight.abs().mean())
                # initializing the weights with too large values can create optimization problems
while lin.weight.abs().mean() > 0.1:
nn.init.xavier_uniform_(lin.weight, 1.)
print("layer", i, lin.weight.abs().mean())
self._main = nn.Sequential(lin1, nn.ReLU(True), lin2, nn.ReLU(True), lin3, nn.ReLU(True), nn.BatchNorm1d(flags.num_features, affine=False))
# _tvaez maps the VAE latent to one-dimensional outcome
# for classification
self._tvaez = nn.Sequential(lin4, nn.ReLU(True), lin5, nn.ReLU(True))
# self.betas = torch.zeros([self.num_features+1, 1]).cuda()
self.finallayer = nn.Linear(flags.num_features + 1, 1)
def forward(self, input, vaez):
features = self._main(input)
logits = self.finallayer(torch.cat([features, self._tvaez(vaez)],dim=1))
probs = compute_prob(logits, mode=flags.mode)
return features, logits, probs
class baselineMLP(nn.Module):
def __init__(self):
super().__init__()
self.num_features = flags.num_features
lin1 = nn.Linear(flags.input_dim, flags.hidden_dim)
lin2 = nn.Linear(flags.hidden_dim, flags.hidden_dim)
lin3 = nn.Linear(flags.hidden_dim, self.num_features)
for i,lin in enumerate([lin1, lin2, lin3]):
nn.init.xavier_uniform_(lin.weight, 1.)
nn.init.zeros_(lin.bias)
print("baseline layer", i, lin.weight.abs().mean())
                # initializing the weights with too large values can create optimization problems
while lin.weight.abs().mean() > 0.1:
nn.init.xavier_uniform_(lin.weight, 1.)
print("layer", i, lin.weight.abs().mean())
self._main = nn.Sequential(lin1, nn.ReLU(True), lin2, nn.ReLU(True), lin3, nn.ReLU(True), nn.BatchNorm1d(flags.num_features, affine=False))
self.finallayer = nn.Linear(flags.num_features, 1)
def forward(self, input):
features = self._main(input)
logits = self.finallayer(features)
probs = compute_prob(logits, mode="logistic")
return features, logits, probs
class baselinevaeMLP(nn.Module):
def __init__(self):
super().__init__()
self.num_features = flags.num_features
lin4 = nn.Linear(flags.z_dim, flags.hidden_dim)
lin5 = nn.Linear(flags.hidden_dim, 1)
for i,lin in enumerate([lin4, lin5]):
nn.init.xavier_uniform_(lin.weight, 1.)
nn.init.zeros_(lin.bias)
print("baseline layer", i, lin.weight.abs().mean())
                # initializing the weights with too large values can create optimization problems
while lin.weight.abs().mean() > 0.1:
nn.init.xavier_uniform_(lin.weight, 1.)
print("layer", i, lin.weight.abs().mean())
self._tvaez = nn.Sequential(lin4, nn.ReLU(True), lin5, nn.ReLU(True))
self.finallayer = nn.Linear(1, 1)
def forward(self, vaez):
features = self._tvaez(vaez)
logits = self.finallayer(features)
probs = compute_prob(logits, mode="logistic")
return features, logits, probs
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(flags.num_features, 1)
def forward(self, x):
return self.fc(x)
def initNet(layer):
nn.init.xavier_uniform_(layer.weight)
nn.init.zeros_(layer.bias)
baselinemlp = baselineMLP().cuda()
optimizer_baselinenll = optim.Adam(baselinemlp.parameters(), lr=flags.lr, weight_decay=0.1*flags.l2_reg)
# baseline
for step in range(flags.steps):
for i in range(len(envs)):
env = envs[i]
baselinefeatures, baselinelogits, baselineprobs = baselinemlp(env['images'])
labels = env['labels']
env['baselinenll'] = mean_nll(baselineprobs, env['labels'], mode="logistic")
env['baselineacc'] = mean_accuracy(baselineprobs, env['labels'])
train_baselinenll = torch.stack([envs[0]['baselinenll']]).mean()
test_baselinenll = torch.stack([envs[1]['baselinenll']]).mean()
train_baselineacc = torch.stack([envs[0]['baselineacc']]).mean()
test_baselineacc = torch.stack([envs[1]['baselineacc']]).mean()
baselinenll_loss = train_baselinenll.clone()
# + train_l2penalty.clone()
optimizer_baselinenll.zero_grad()
baselinenll_loss.backward(retain_graph=True)
optimizer_baselinenll.step()
if step % 10 == 0:
print("itr", np.int32(step),
"train_baselinenll", train_baselinenll.detach().cpu().numpy(),
"train_baselineacc", train_baselineacc.detach().cpu().numpy(),
"test_baselinenll", test_baselinenll.detach().cpu().numpy(),
"test_baselineacc", test_baselineacc.detach().cpu().numpy())
final_train_baselineaccs.append(train_baselineacc.detach().cpu().numpy().item())
final_test_baselineaccs.append(test_baselineacc.detach().cpu().numpy().item())
# fit VAE
train_loader = torch.utils.data.DataLoader(dataset=envs[0]['images'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=envs[1]['images'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
# build model
vae = VAE(x_dim=flags.input_dim, h_dim1=flags.hidden_dim, h_dim2=flags.hidden_dim, z_dim=flags.z_dim)
if torch.cuda.is_available():
vae.cuda()
optimizer_vae = optim.Adam(vae.parameters())
for epoch in range(1, flags.vae_epochs):
train_vae(vae, train_loader, optimizer_vae, epoch)
test_vae(vae, test_loader)
train_vae_recon, train_vae_mu, train_vae_logvar = vae(envs[0]['images'].view(-1, flags.input_dim))
test_vae_recon, test_vae_mu, test_vae_logvar = vae(envs[1]['images'].view(-1, flags.input_dim))
envs[0]['vaez'] = train_vae_mu.detach()
envs[1]['vaez'] = test_vae_mu.detach()
# baselinevae (use vae features only)
baselinevaemlp = baselinevaeMLP().cuda()
optimizer_baselinevaenll = optim.Adam(baselinevaemlp.parameters(), lr=flags.lr, weight_decay=0.1*flags.l2_reg)
for step in range(flags.steps):
for i in range(len(envs)):
env = envs[i]
baselinevaefeatures, baselinevaelogits, baselinevaeprobs = baselinevaemlp(env['vaez'])
labels = env['labels']
env['baselinevaenll'] = mean_nll(baselinevaeprobs, env['labels'], mode="logistic")
env['baselinevaeacc'] = mean_accuracy(baselinevaeprobs, env['labels'])
train_baselinevaenll = torch.stack([envs[0]['baselinevaenll']]).mean()
test_baselinevaenll = torch.stack([envs[1]['baselinevaenll']]).mean()
train_baselinevaeacc = torch.stack([envs[0]['baselinevaeacc']]).mean()
test_baselinevaeacc = torch.stack([envs[1]['baselinevaeacc']]).mean()
baselinevaenll_loss = train_baselinevaenll.clone()
# + train_l2penalty.clone()
optimizer_baselinevaenll.zero_grad()
baselinevaenll_loss.backward(retain_graph=True)
optimizer_baselinevaenll.step()
if step % 10 == 0:
            print("itr", np.int32(step),
                  "train_baselinevaenll", train_baselinevaenll.detach().cpu().numpy(),
                  "train_baselinevaeacc", train_baselinevaeacc.detach().cpu().numpy(),
                  "test_baselinevaenll", test_baselinevaenll.detach().cpu().numpy(),
                  "test_baselinevaeacc", test_baselinevaeacc.detach().cpu().numpy())
final_train_baselinevaeaccs.append(train_baselinevaeacc.detach().cpu().numpy().item())
final_test_baselinevaeaccs.append(test_baselinevaeacc.detach().cpu().numpy().item())
# causal_REP
mlp = MLP().cuda()
net = Net().cuda() # final classification net
optimizer_net = optim.Adam(net.parameters(), lr=flags.lr, weight_decay=flags.l2_reg)
optimizer_nll = optim.Adam(list(mlp._tvaez.parameters()) + list(mlp.finallayer.parameters()), lr=10*flags.lr, weight_decay=flags.l2_reg)
optimizer_causalrep = optim.Adam(mlp._main.parameters(), lr=flags.lr, weight_decay=1e-6)
pred = LogisticRegression(C=0.01)
print("weight mean", mlp._main[0].weight.abs().mean())
for step in range(flags.steps):
# if step % 10 == 0:
# print('\n', step)
for i in range(len(envs)):
env = envs[i]
features, logits, probs = mlp(env['images'], env['vaez'])
labels = env['labels']
vaez = env['vaez']
env['nll'] = mean_nll(probs, env['labels'], mode=flags.mode)
env['acc'] = mean_accuracy(probs, env['labels'])
env['covs'] = cov( torch.cat([features, mlp._tvaez(env['vaez'])], dim=1))[-1][:-1]
env['causalrep'] = ((features.std(dim=0) * mlp.finallayer.weight[0][:flags.num_features])**2).sum()
# + 2 * mlp.finallayer.weight[0][flags.num_features:] * mlp.finallayer.weight[0][:flags.num_features] * env['covs']).sum()
# one can learn highly correlated features in the representations too, which can be an issue.
y = features - features.mean(dim=0)
X = mlp._tvaez(env['vaez']) - mlp._tvaez(env['vaez']).mean()
beta = [torch.matmul(
torch.matmul(
torch.inverse(1e-8*torch.eye(X.shape[1]).cuda()+
torch.matmul(
torch.transpose(X, 0, 1),
X)),
torch.transpose(X, 0, 1)),
y[:,j]) for j in range(y.shape[1])]
r2s = torch.Tensor([1 - (((X*(beta[j])).T[0] - y[:,j])**2).sum() / (y[:,j]**2 + 1e-8).sum() for j in range(y.shape[1])]).mean()
env['featureZr2'] = r2s.cuda()
weight_norm = torch.tensor(0.).cuda()
for w in mlp.parameters():
weight_norm += w.norm().pow(2)
env['l2penalty'] = flags.l2_reg * weight_norm
if i == 0: # training data
pred.fit(features.detach().cpu().numpy(), labels.detach().cpu().numpy().T[0])
if step % 100 == 0:
if i == 0: # training data
initNet(net.fc)
running_loss = 0.0
loss_net = nn.BCELoss()(nn.Sigmoid()(net(features)), labels)
loss_net.backward(retain_graph=True)
for _ in range(1000):
optimizer_net.step()
env['predacc'] = mean_accuracy(nn.Sigmoid()(net(features)), labels)
env['skl_predacc'] = mean_accuracy_np(pred.predict(features.detach().cpu().numpy()), labels.detach().cpu().numpy().T[0])
if step % flags.alter_freq == 0:
print("\nnll", env['nll'],
"\nl2", env['l2penalty'],
"\ncausalrep", env['causalrep'],
"\nfeatureZr2", env['featureZr2'])
train_l2penalty = torch.stack([envs[0]['l2penalty']])
train_causalrep = torch.stack([envs[0]['causalrep']])
train_featureZr2 = torch.stack([envs[0]['featureZr2']])
train_nll = torch.stack([envs[0]['nll']]).mean()
train_acc = torch.stack([envs[0]['acc']]).mean()
train_predacc = torch.stack([envs[0]['predacc']]).mean()
train_skl_predacc = envs[0]['skl_predacc']
test_nll = torch.stack([envs[1]['nll']]).mean()
test_acc = torch.stack([envs[1]['acc']]).mean()
test_featureZr2 = torch.stack([envs[1]['featureZr2']])
test_predacc = torch.stack([envs[1]['predacc']]).mean()
test_skl_predacc = envs[1]['skl_predacc']
nll_loss = train_nll.clone()
# + train_l2penalty.clone()
optimizer_nll.zero_grad()
nll_loss.backward(retain_graph=True)
optimizer_nll.step()
# print(nll_loss)
test_acc = envs[1]['acc']
if step % flags.alter_freq == 0:
train_causalrep_loss = -train_causalrep.clone() - 1e-1* torch.log(1 - train_featureZr2)
optimizer_causalrep.zero_grad()
train_causalrep_loss.backward()
optimizer_causalrep.step()
if step % 10 == 0:
print("itr", np.int32(step),
"train_causalrep", train_causalrep.detach().cpu().numpy(),
"nll_loss", nll_loss.detach().cpu().numpy(),
"train_nll", train_nll.detach().cpu().numpy(),
"train_acc", train_acc.detach().cpu().numpy(),
"test_acc", test_acc.detach().cpu().numpy(),
"train_predacc", train_predacc.detach().cpu().numpy(),
"test_predacc", test_predacc.detach().cpu().numpy(),
"train_skl_predacc", train_skl_predacc,
"test_skl_predacc", test_skl_predacc,
"train_featureZr2", train_featureZr2.detach().cpu().numpy(),
"test_featureZr2", test_featureZr2.detach().cpu().numpy())
final_train_accs.append(train_skl_predacc)
final_test_accs.append(test_skl_predacc)
print('Final train baseline acc (mean/std across restarts so far):')
print(np.mean(final_train_baselineaccs), np.std(final_train_baselineaccs))
print('Final test baseline acc (mean/std across restarts so far):')
print(np.mean(final_test_baselineaccs), np.std(final_test_baselineaccs))
print('Final train baselinevae acc (mean/std across restarts so far):')
print(np.mean(final_train_baselinevaeaccs), np.std(final_train_baselinevaeaccs))
print('Final test baselinevae acc (mean/std across restarts so far):')
print(np.mean(final_test_baselinevaeaccs), np.std(final_test_baselinevaeaccs))
print('Final train acc (mean/std across restarts so far):')
print(np.mean(final_train_accs), np.std(final_train_accs))
print('Final test acc (mean/std across restarts so far):')
print(np.mean(final_test_accs), np.std(final_test_accs))
print('final_train_baselineaccs', final_train_baselineaccs)
print('final_test_baselineaccs', final_test_baselineaccs)
print('final_train_baselinevaeaccs', final_train_baselinevaeaccs)
print('final_test_baselinevaeaccs', final_test_baselinevaeaccs)
print('final_train_accs', final_train_accs)
print('final_test_accs', final_test_accs)
if not os.path.exists("./res"):
try:
os.makedirs("./res", 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise
outfile = out_dir + '/supervised_CMNIST' + str(int(time.time()*1e6)) + '.csv'
result = pd.DataFrame({
'causalrep_train_accs': np.array(final_train_accs),
'causalrep_test_accs': np.array(final_test_accs),
'naive_train_accs': np.array(final_train_baselineaccs),
'naive_test_accs': np.array(final_test_baselineaccs),
'naive_vae_train_accs': np.array(final_train_baselinevaeaccs),
'naive_vae_test_accs': np.array(final_test_baselinevaeaccs),
'hidden_dim': np.repeat(flags.hidden_dim, flags.n_restarts),
'l2_reg': np.repeat(flags.l2_reg, flags.n_restarts),
'lr': np.repeat(flags.lr, flags.n_restarts),
'n_restarts': np.repeat(flags.n_restarts, flags.n_restarts),
'mode': np.repeat(flags.mode, flags.n_restarts),
'steps': np.repeat(flags.steps, flags.n_restarts),
'z_dim': np.repeat(flags.z_dim, flags.n_restarts),
'batch_size': np.repeat(flags.batch_size, flags.n_restarts),
'num_features': np.repeat(flags.num_features, flags.n_restarts),
'input_dim': np.repeat(flags.input_dim, flags.n_restarts),
'vae_epochs': np.repeat(flags.vae_epochs, flags.n_restarts),
'spurious_corr': np.repeat(flags.spurious_corr, flags.n_restarts),
'alter_freq': np.repeat(flags.alter_freq, flags.n_restarts),
'randseed': np.repeat(randseed, flags.n_restarts),
})
result.to_csv(outfile)
|
11477609
|
import uuid
from ichnaea.models.config import ExportConfig
class TestExportConfig(object):
def test_fields(self, session):
name = uuid.uuid4().hex
        skip_keys = [uuid.uuid4().hex for i in range(3)]
skip_sources = ["query", "fused"]
session.add(
ExportConfig(
name=name,
batch=100,
schema="internal",
url="internal://",
skip_keys=skip_keys,
skip_sources=skip_sources,
)
)
session.flush()
result = session.query(ExportConfig).get(name)
assert result.name == name
assert result.batch == 100
assert result.schema == "internal"
assert result.url == "internal://"
assert result.skip_keys == frozenset(skip_keys)
assert result.skip_sources == frozenset(skip_sources)
def test_allowed(self, session):
configs = [
ExportConfig(name="none", skip_keys=None, skip_sources=None),
ExportConfig(name="test", skip_keys=["test"], skip_sources=None),
ExportConfig(name="gnss", skip_keys=None, skip_sources=["gnss"]),
ExportConfig(
name="query", skip_keys=["test", "test2"], skip_sources=["query"]
),
]
session.add_all(configs)
session.commit()
def test(name, api_key, source, expected):
row = (
session.query(ExportConfig).filter(ExportConfig.name == name)
).first()
assert row.allowed(api_key, source) == expected
test("none", None, None, True)
test("none", None, "gnss", True)
test("none", "test", None, True)
test("none", "test", "gnss", True)
test("test", None, None, True)
test("test", None, "gnss", True)
test("test", "test", None, False)
test("test", "test", "gnss", False)
test("test", "test2", "gnss", True)
test("gnss", None, None, True)
test("gnss", None, "gnss", False)
test("gnss", None, "query", True)
test("gnss", "test", None, True)
test("gnss", "test", "gnss", False)
test("gnss", "test", "query", True)
test("query", None, None, True)
test("query", None, "gnss", True)
test("query", None, "query", False)
test("query", "test", None, False)
test("query", "test", "gnss", False)
test("query", "test", "query", False)
test("query", "test2", None, False)
def test_skip_keys(self, session):
non_ascii = b"\xc3\xa4".decode("utf-8")
configs = [
ExportConfig(name="none", skip_keys=None),
ExportConfig(name="list", skip_keys=[]),
ExportConfig(name="set", skip_keys=set()),
ExportConfig(name="one", skip_keys=["ab"]),
ExportConfig(name="two", skip_keys=["ab", "cd"]),
ExportConfig(name="unicode", skip_keys=["ab", non_ascii]),
]
session.add_all(configs)
session.commit()
def test(name, expected):
row = (
session.query(ExportConfig).filter(ExportConfig.name == name)
).first()
assert row.skip_keys == expected
test("none", None)
test("list", frozenset())
test("set", frozenset())
test("one", frozenset(["ab"]))
test("two", frozenset(["ab", "cd"]))
test("unicode", frozenset(["ab", non_ascii]))
|
11477679
|
from renormalizer.mps import Mpo
from renormalizer.utils import Quantity
from renormalizer.model.op import Op
from renormalizer.model import HolsteinModel, Model
import numpy as np
def e_ph_static_correlation(model: HolsteinModel, imol:int =0, jph:int =0,
periodic:bool =False, name:str="S"):
'''
construct the electron-phonon static correlation operator in polaron problem
The details of the definition, see
<NAME> et al. J. Chem. Phys. 142, 174103 (2015) or
Romero et al. Journal of Luminescence 83-84 (1999) 147-153
if periodic:
# D is the displacement between different PES of each mode
        S_(m, jph) = \frac{1}{D_{m+n,jph}} \sum_n \langle x_{m+n,jph} a_n^\dagger a_n \rangle
operator name = "_".join([name, str(m), str(jph)])
m stands for distance if periodic
else:
        S_(n,m,jph) = \frac{1}{D_{m,jph}} \langle x_{m, jph} a_n^\dagger a_n \rangle
operator name: "_".join([name, str(n), str(m), str(jph)])
Parameters:
model : HolsteinModel
the molecular information
imol : int
electron site index (default:0)
if periodic is True, imol is omitted.
jph : int
phonon site index
periodic : bool
        whether the system is a homogeneous periodic system
name: str
the name of the operator
    Note: only the one-mode Holstein model has been tested
'''
if model.scheme == 4:
raise NotImplementedError
prop_mpos = {}
nmols = model.mol_num
if not periodic:
# each jmol site is calculated separately
for jmol in range(nmols):
op_name = "_".join([name, str(imol), str(jmol), str(jph)])
ph = model[jmol].ph_list[jph]
prop_mpos[op_name] = Mpo.intersite(model, {imol: r"a^\dagger a"}, {(jmol, jph): r"b^\dagger + b"},
scale=Quantity(np.sqrt(1./2.0/ph.omega[0])/ph.dis[1]))
# normalized by the displacement D
else:
        # each distance is calculated separately
for dis in range(nmols):
dis_list = []
for jmol in range(nmols):
kmol = (jmol+dis) % nmols
ph = model[kmol].ph_list[jph]
dis_list.append(Mpo.intersite(model, {jmol: r"a^\dagger a"}, {(kmol, jph): r"b^\dagger + b"},
scale=Quantity(np.sqrt(1./2.0/ph.omega[0])/ph.dis[1])))
for item in dis_list[1:]:
dis_list[0] = dis_list[0].add(item)
op_name = "_".join([name, str(dis), str(jph)])
prop_mpos[op_name] = dis_list[0]
return prop_mpos
def x_average(model: Model):
"""
<x> of vibrational DoF
"""
return {"x": [Mpo(model, Op("x", v_dof)) for v_dof in model.v_dofs]}
def x_square_average(model: Model):
"""
<x^2> of vibrational DoF
"""
assert isinstance(model, Model)
    return {r"x^2": [Mpo(model, Op("x^2", v_dof)) for v_dof in model.v_dofs]}
|
11477692
|
from paraview.simple import *
from paraview.vtk.util.misc import vtkGetTempDir
from os.path import join
import shutil
Sphere()
UpdatePipeline()
e = Elevation()
UpdatePipeline()
dirname = join(vtkGetTempDir(), "savedatawitharrayselection")
shutil.rmtree(dirname, ignore_errors=True)
filename = join(dirname, "data.pvd")
SaveData(filename, ChooseArraysToWrite=1, PointDataArrays=["Normals"])
r = OpenDataFile(filename)
assert r.PointArrays.GetAvailable() == ["Normals"]
Delete(r)
shutil.rmtree(dirname, ignore_errors=True)
SetActiveSource(e)
SaveData(filename, ChooseArraysToWrite=0, PointDataArrays=["Normals"])
r = OpenDataFile(filename)
assert r.PointArrays.GetAvailable() == ["Normals", "Elevation"]
shutil.rmtree(dirname, ignore_errors=True)
|
11477719
|
import sys
from string import ascii_uppercase, ascii_lowercase, digits
class MaxLengthException(Exception):
pass
class WasNotFoundException(Exception):
pass
class Pattern:
MAX_LENGTH = 20280
@staticmethod
def gen(length):
"""
Generate a pattern of a given length up to a maximum
of 20280 - after this the pattern would repeat
"""
if length >= Pattern.MAX_LENGTH:
raise MaxLengthException('ERROR: Pattern length exceeds maximum of %d' % Pattern.MAX_LENGTH)
pattern = ''
for upper in ascii_uppercase:
for lower in ascii_lowercase:
for digit in digits:
if len(pattern) < length:
pattern += upper + lower + digit
else:
out = pattern[:length]
return out
@staticmethod
def search(search_pattern):
"""
Search for search_pattern in pattern. Convert from hex if needed
Looking for needle in haystack
"""
needle = search_pattern
try:
if needle.startswith('0x'):
# Strip off '0x', convert to ASCII and reverse
needle = needle[2:]
needle = bytes.fromhex(needle).decode('ascii')
needle = needle[::-1]
except TypeError as e:
print('Unable to convert hex input:', e)
sys.exit(1)
haystack = ''
for upper in ascii_uppercase:
for lower in ascii_lowercase:
for digit in digits:
haystack += upper + lower + digit
found_at = haystack.find(needle)
if found_at > -1:
return found_at
        raise WasNotFoundException("Couldn't find %s (%s) anywhere in the pattern." % (search_pattern, needle))
def print_help():
print('Usage: %s LENGTH|PATTERN' % sys.argv[0])
print()
print('Generate a pattern of length LENGTH or search for PATTERN and ')
print('return its position in the pattern.')
print()
sys.exit(0)
if __name__ == '__main__':
if len(sys.argv) < 2:
print_help()
if sys.argv[1].isdigit():
pat = Pattern.gen(int(sys.argv[1]))
print(pat)
else:
found = Pattern.search(sys.argv[1])
print('Pattern %s first occurrence at position %d in pattern.' %
(sys.argv[1], found))
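# Hedged usage examples (illustrative; the script name is assumed):
#   $ python pattern.py 12          ->  Aa0Aa1Aa2Aa3
#   $ python pattern.py 2Aa3        ->  Pattern 2Aa3 first occurrence at position 8 in pattern.
#   $ python pattern.py 0x33614132  ->  the hex is decoded to "3aA2", reversed to "2Aa3", found at 8.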
|
11477722
|
import unittest
from minos.common import (
Config,
)
from minos.networks import (
HttpAdapter,
HttpRouter,
MinosRedefinedEnrouteDecoratorException,
)
from tests.test_networks.test_routers import (
_Router,
)
from tests.utils import (
CONFIG_FILE_PATH,
)
class TestHttpAdapter(unittest.TestCase):
def setUp(self) -> None:
self.config = Config(CONFIG_FILE_PATH)
self.adapter = HttpAdapter.from_config(self.config)
def test_routers(self):
self.assertEqual(1, len(self.adapter.routers))
for router in self.adapter.routers:
self.assertIsInstance(router, HttpRouter)
def test_routes(self):
expected = dict()
for router in self.adapter.routers:
expected |= router.routes
observed = self.adapter.routes
self.assertEqual(expected, observed)
def test_routes_raises(self):
router = self.adapter.routers[0]
adapter = HttpAdapter([router, router])
with self.assertRaises(MinosRedefinedEnrouteDecoratorException):
adapter.routes
def test_eq(self):
another_eq = HttpAdapter.from_config(self.config)
another_ne = HttpAdapter([_Router.from_config(self.config)])
self.assertEqual(another_eq, self.adapter)
self.assertNotEqual(another_ne, self.adapter)
if __name__ == "__main__":
unittest.main()
|
11477762
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
requirements = [
'lmdb~=0.98',
'pycapnp~=0.6.4',
]
setuptools.setup(
name='osmx',
version='0.0.4',
author="<NAME>",
author_email='<EMAIL>',
description='Read OSM Express (.osmx) database files.',
license="BSD-2-Clause",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/protomaps/OSMExpress",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
    install_requires=requirements,
    python_requires='>=3.0',
package_data={'osmx':['messages.capnp']}
)
|
11477814
|
import datetime
import unittest
from peerplays import PeerPlays
from peerplays.utils import parse_time
from peerplays.exceptions import ObjectNotInProposalBuffer
from peerplaysbase.operationids import getOperationNameForId
from peerplays.instance import set_shared_peerplays_instance
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ppy = PeerPlays(
nobroadcast=True,
# Overwrite wallet to use this list of wifs only
wif=[wif]
)
set_shared_peerplays_instance(self.ppy)
self.ppy.set_default_account("init0")
def test_event_create(self):
ev = [["de", "1. Bundesliga"], ["en", "First Country League"]]
desc = [["de", "Bundesliga"], ["en", "Germany Scoccer Championship"]]
season = [["de", "Januar 2016"], ["en", "January 2016"]]
start = datetime.datetime(2016, 1, 1, 0, 0, 0)
rule_name = [["en", "NHL Rules v1.0"]]
rule = [["en", "The winner will be the team with the most points ..."]]
bmg_name = [["de", "Meine Market Group"], ["en", "My betting market group"]]
bm_name = [["de", "Nuernberg gewinnt"], ["en", "Nuremberg wins"]]
cond = [["de", "Description: Fuerth gewinnt"],
["en", "Description: Fuerth wins"]]
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.event_group_create(ev, sport_id="0.0.0")
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.event_create(desc, season, start, event_group_id="0.0.0")
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.betting_market_group_create(bmg_name, event_id="0.0.2", rules_id="0.0.3")
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.betting_market_group_create(bmg_name, event_id="0.0.3", rules_id="0.0.4")
proposal = self.ppy.proposal()
# Sport (0)
self.ppy.sport_create(["en", "testsport"], append_to=proposal)
# Eventgroup (1)
self.ppy.event_group_create(ev, sport_id="0.0.0", append_to=proposal)
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.event_group_create(ev, sport_id="0.0.1", append_to=proposal)
# Event (2)
self.ppy.event_create(desc, season, start, event_group_id="0.0.1", append_to=proposal)
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.event_create(desc, season, start, event_group_id="0.0.2")
self.ppy.event_create(desc, season, start, event_group_id="0.0.0")
# Rule (3)
self.ppy.betting_market_rules_create(rule_name, rule, append_to=proposal)
# BMG (4)
self.ppy.betting_market_group_create(bmg_name, event_id="0.0.2", rules_id="0.0.3", append_to=proposal)
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.betting_market_group_create(bmg_name, event_id="0.0.3", rules_id="0.0.4", append_to=proposal)
self.ppy.betting_market_group_create(bmg_name, event_id="0.0.1", rules_id="0.0.4", append_to=proposal)
# BM (5)
self.ppy.betting_market_create(cond, bm_name, group_id="0.0.4", append_to=proposal)
with self.assertRaises(ObjectNotInProposalBuffer):
self.ppy.betting_market_create(cond, bm_name, group_id="0.0.3", append_to=proposal)
self.ppy.betting_market_create(cond, bm_name, group_id="0.0.5", append_to=proposal)
|
11477863
|
import config
import json
from multiprocessing import Process
import os
from pprint import pprint
import re
import requests
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from subprocess import call
from urllib import parse
from webcocktail.crawler.items import ResponseItem
from webcocktail.crawler.spiders.explore import ExploreSpider
from webcocktail.log import get_log
from webcocktail.log import print_response
from webcocktail.scanner import Scanner
import webcocktail.utils as utils
class WebCocktail(object):
CATEGORY = ['active', 'other']
def __init__(self, url='', extra_url=[], extra_domain=[], debug=False,
crawl=True, scan=True):
self.log = get_log(self.__class__.__name__)
self.target = utils.check_url(url)
self.extra_domain = extra_domain
self.extra_url = extra_url
self.active_pages = []
self.active_hashes = dict()
self.other_pages = []
self.other_hashes = dict()
self.scanner = Scanner(self, debug)
self.extra_url.extend(self.get_robots_disallow(self.target))
if crawl:
self.crawl(self.target, self.extra_url, self.extra_domain)
else:
self.add_page(utils.send_url(url=self.target))
if scan:
self.default_scan()
else:
self._scan_index()
def _add_hash(self, category, response):
hashes = self.__dict__[category + '_hashes']
url_path, new_hash = utils.get_path_hash(response)
if url_path not in hashes:
hashes[url_path] = []
hashes[url_path].append(new_hash)
def _add_crawled_page(self, response, page=None, status_code=None):
response.wct_found_by = 'crawler'
if page:
response.wct_comments = page['comments']
response.wct_hidden_inputs = page['hidden_inputs']
for r in response.history:
r.wct_found_by = 'crawler'
if (page and
utils.hash(response.content) !=
utils.hash(page['content'].encode())):
self.log.warning(
'Different request %s content '
'between crawler and requests. '
'The url may be dynamic page.' % response.url)
self.log.debug(response.request.headers)
self.log.debug(page['request']['headers'])
        if (status_code and status_code != 302
                and response.status_code != status_code):
self.log.warning(
'Different request between crawler and requests: '
'%s should be %d but got %d' % (
response.url, status_code, response.status_code)
)
self.add_page(response)
def _scan_index(self):
index_request = self.active_pages[0].request
self.scanner.use('ScanFile')
results = self.scanner.scan(index_request)
for result in results:
self.add_page(result)
def filter_page(self, category, response):
hashes = self.__dict__[category + '_hashes']
url_path, new_hash = utils.get_path_hash(response)
if url_path in hashes and new_hash in hashes[url_path]:
self.log.info('%s has been in %s_pages' %
(response.url, category))
return None
return response
def add_page(self, response):
# check history response (302)
if response.history:
for r in response.history:
self.add_page(r)
if response.status_code == 200:
category = 'active'
else:
category = 'other'
response = self.filter_page(category, response)
if response is not None:
self.__dict__[category + '_pages'].append(response)
self._add_hash(category, response)
self.log.info(
'Found a new response: {r} {r.url}'.format(r=response))
def get_robots_disallow(self, url):
self.log.info('===== Checking robots.txt disallow =====')
ret_urls = []
if not url.endswith('robots.txt'):
url += 'robots.txt'
response = utils.send_url(url=url)
pages = re.findall('Disallow: (.*)', response.text)
for page in pages:
page = page[1:] if page[0] == '/' else page
self.log.info('Found %s' % self.target + page)
ret_urls.append(self.target + page)
return ret_urls
def crawl(self, target, extra_url=[], extra_domain=[]):
self.log.info('===== Crawling =====')
urls = [target] + extra_url
domains = [parse.urlparse(target).hostname] + extra_domain
kwargs = {'urls': urls, 'allowed_domains': domains}
if os.path.isfile(config.CRAWLER_LOG):
os.remove(config.CRAWLER_LOG)
if os.path.isfile(config.CRAWLER_RESULT):
os.remove(config.CRAWLER_RESULT)
def _crawl():
process = CrawlerProcess(get_project_settings())
process.crawl(ExploreSpider, **kwargs)
process.start()
process.stop()
p = Process(target=_crawl)
p.start()
p.join()
self.log.info('Parsing crawler log')
f = open(config.CRAWLER_LOG, 'r')
for log in f:
if 'Error: ' in log:
self.log.critical('There are some errors in crawler. '
'Please check up %s' % config.CRAWLER_LOG)
self.log.critical(log)
exit()
# load other status code 302, 404,.. response in config.CRAWLER_LOG
parsed_other = re.findall(
                r'(?!.*\(200\).*).*DEBUG:.*\((\d*)\).*<(.*) (.*)> (.*)', log)
for parsed in parsed_other:
if parsed[0] == '302':
status_code = parsed[0]
method, url = re.findall('.*<(.*) (.*)>.*', parsed[-1])[0]
else:
status_code, method, url, _ = parsed
status_code = int(status_code)
response = utils.send_url(method=method, url=url)
self._add_crawled_page(response, status_code=status_code)
f.close()
# load status code 200 page
self.log.info('Parsing crawler result')
try:
with open(config.CRAWLER_RESULT, 'r') as f:
crawled_pages = json.load(f)
except json.decoder.JSONDecodeError:
self.log.error('Parse json result error')
crawled_pages = {}
for page in crawled_pages:
page['request'].update(config.REQUEST)
if 'Cookie' in page['request']['headers']:
cookies = page['request']['headers']['Cookie']
cookies = dict(parse.parse_qsl(cookies))
page['request']['cookies'] = cookies
response = requests.request(**page['request'])
self._add_crawled_page(response, page=page)
def default_scan(self):
self.log.info('===== Default Scan =====')
self._scan_index()
self.scanner.use('default')
# scan active pages
requests = [p.request for p in self.active_pages]
results = self.scanner.scan_all(requests)
for result in results:
self.add_page(result)
# scan 302 pages
pages_302 = [p for p in self.other_pages if p.status_code == 302]
requests = [p.request for p in pages_302]
results = self.scanner.scan_all(requests)
for result in results:
self.add_page(result)
def nmap(self, url):
# TODO: create a plugin
uri = parse.urlparse(url)
url = uri.hostname
print('===== nmap %s =====' % url)
try:
call(['nmap', '-v', '-A', '-Pn', url])
except:
pass
print()
def show_pages(self, category='all', filter_function=None, **kwargs):
if not filter_function:
def filter_function(response):
return response
ret_pages = []
i = 0
for define_category in WebCocktail.CATEGORY:
if category == define_category or category == 'all':
print('===== %s pages =====' % define_category)
pages = self.__dict__[define_category + '_pages']
for response in pages:
if filter_function(response) is not None:
ret_pages.append(response)
print_response(i, response, pages, **kwargs)
i += 1
print()
return ret_pages
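# Illustrative usage sketch (hypothetical target URL; kept as a comment so the
# module's behaviour is unchanged):
#   wct = WebCocktail(url='http://target.example/')   # crawl, then default scan
#   wct.show_pages('active')                          # list pages that returned 200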
|
11477888
|
import chainer
import chainer.functions as F
import chainer.links as L
from tgan2.models.resblocks import DisBlock
from tgan2.models.resblocks import OptimizedDisBlock
class ResNetVideoDiscriminator(chainer.Chain):
def __init__(self, in_channels, mid_ch=64, n_classes=0, activation='relu'):
super(ResNetVideoDiscriminator, self).__init__()
self.activation = getattr(F, activation)
initializer = chainer.initializers.GlorotUniform()
with self.init_scope():
self.block1 = OptimizedDisBlock(in_channels, mid_ch)
kwargs = {'activation': activation, 'downsample': True}
self.block2 = DisBlock(mid_ch, mid_ch * 2, **kwargs)
self.block3 = DisBlock(mid_ch * 2, mid_ch * 4, **kwargs)
self.block4 = DisBlock(mid_ch * 4, mid_ch * 8, **kwargs)
self.block5 = DisBlock(mid_ch * 8, mid_ch * 16, **kwargs)
self.l6 = L.Linear(mid_ch * 16, 1, initialW=initializer)
if n_classes > 0:
self.l_c = L.EmbedID(n_classes, mid_ch * 16, initialW=initializer)
def extract_feature(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.block5(h)
h = self.activation(h)
h = F.sum(h, axis=(2, 3, 4)) # Global pooling
return h
def __call__(self, x, c=None):
h = self.extract_feature(x)
output = self.l6(h)
if c is not None:
w_c = self.l_c(c)
output += F.sum(w_c * h, axis=1, keepdims=True)
return output
|
11477965
|
from trafaret.utils import fold, split, unfold
class TestUtils:
def test_split(self):
data = 'leads[delete][0][id]'
split_data = split(data, ('[]', '[', ']'))
assert split_data == ['leads', 'delete', '0', 'id']
def test_fold(self):
data = {'leads[delete][0][id]': '42', 'account[subdomain]': 'murmurzet'}
folded = fold(data, delimeter=('[', ']'))
assert folded == {
'leads': {
'delete': [
{'id': '42'}
]
},
'account': {
'subdomain': 'murmurzet'
}
}
def test_fold_underscored(self):
assert fold({'a__a': 4}) == {'a': {'a': 4}}
assert fold({'a__a': 4, 'a__b': 5}) == {'a': {'a': 4, 'b': 5}}
assert fold({'a__1': 2, 'a__0': 1, 'a__2': 3}) == {'a': [1, 2, 3]}
assert fold({'form__a__b': 5, 'form__a__a': 4}, 'form') == {'a': {'a': 4, 'b': 5}}
assert fold({'form__a__b': 5, 'form__a__a__0': 4, 'form__a__a__1': 7}, 'form') == {'a': {'a': [4, 7], 'b': 5}}
assert fold({'form__1__b': 5, 'form__0__a__0': 4, 'form__0__a__1': 7}, 'form') == [{'a': [4, 7]}, {'b': 5}]
def test_unfold(self):
assert unfold({'a': 4, 'b': 5}) == {'a': 4, 'b': 5}
assert unfold({'a': [1, 2, 3]}) == {'a__0': 1, 'a__1': 2, 'a__2': 3}
assert unfold({'a': {'a': 4, 'b': 5}}) == {'a__a': 4, 'a__b': 5}
assert unfold({'a': {'a': 4, 'b': 5}}, 'form') == {'form__a__a': 4, 'form__a__b': 5}
|
11477966
|
from six import StringIO
import random
import string
import pytest
import numpy as np
from eight_mile.utils import read_label_first_data, write_label_first_data
def random_str(len_=None, min_=5, max_=21):
if len_ is None:
len_ = np.random.randint(min_, max_)
choices = list(string.ascii_letters + string.digits)
return "".join([np.random.choice(choices) for _ in range(len_)])
def generate_data():
labels = [random_str() for _ in range(random.randint(5, 50))]
texts = [[random_str() for _ in range(random.randint(1, 20))] for _ in range(len(labels))]
return labels, texts
def test_label_first_data_round_trip():
data = StringIO()
labels, texts = generate_data()
write_label_first_data(data, labels, texts)
data.seek(0)
    l, ts = read_label_first_data(data)
    assert l == labels
    assert ts == texts
def test_label_first_data_read_tabs():
data = StringIO()
data.write(
"""
1\tdata
2\tdata data
3\tdata\tdata\tdata
""".lstrip()
)
gold_labels = list("123")
gold_texts = [["data"] * i for i in range(1, 4)]
data.seek(0)
l, t = read_label_first_data(data)
assert l == gold_labels
assert t == gold_texts
def test_label_first_data_read_space_at_end():
data = StringIO()
# Note: The end of the first line in this example has a space after it
# This is needed for this test
data.write(
"""
1 data
2 data data\t
3 data data\tdata\t\t\t
""".lstrip()
)
gold_labels = list("123")
gold_texts = [["data"] * i for i in range(1, 4)]
data.seek(0)
l, t = read_label_first_data(data)
assert l == gold_labels
assert t == gold_texts
def test_label_first_data_read_empty_row():
data = StringIO()
data.write(
"""
1\tdata
2\tdata data
""".lstrip()
)
gold_labels = list("12")
gold_texts = [["data"] * i for i in range(1, 3)]
data.seek(0)
l, t = read_label_first_data(data)
assert l == gold_labels
assert t == gold_texts
def test_label_first_data_read_empty_example():
data = StringIO()
data.write(
"""
1\tdata data
2
3 data
""".lstrip()
)
data.seek(0)
with pytest.raises(ValueError):
l, t = read_label_first_data(data)
def test_label_first_data_read_single_token():
data = StringIO()
data.write(
"""
1 1
2 2
3 3
""".lstrip()
)
data.seek(0)
l, t = read_label_first_data(data)
assert l == list("123")
assert t == [[item] for item in "123"]
def test_write_label_first_data():
gold = """
1 data
2 data data
3 data data data
5 data data data data data
4 data data data data
""".strip()
labels = list("12354")
texts = [["data"] * int(l) for l in labels]
data = StringIO()
write_label_first_data(data, labels, texts)
data.seek(0)
assert data.read() == gold
|
11477972
|
from agent import BaseAgent, Observer
from malmo_rl.agents.random_agent import Random
from malmo_rl.agents.qlearner import QLearner
from malmo_rl.agents.ddpglearner import DDPGLearner
class AbstractAgent(BaseAgent):
def __init__(self, name, env, agent_type, **kwargs):
if agent_type == 'random':
self.agent = Random(name, env)
elif agent_type == 'dqn':
self.agent = QLearner(name, env, kwargs['grayscale'], kwargs['width'], kwargs['height'])
elif agent_type == 'ddpg':
self.agent = DDPGLearner(name, env, kwargs['grayscale'], kwargs['width'], kwargs['height'])
elif agent_type == 'observer':
self.agent = Observer(name, env)
else:
            raise RuntimeError('Unknown agent type')
super(AbstractAgent, self).__init__(name, env)
def fit(self, env, nb_steps):
self.agent.fit(env, nb_steps)
def test(self, env, nb_episodes):
return self.agent.test(env, nb_episodes)
def save(self, out_dir):
self.agent.save(out_dir)
def load(self, out_dir):
self.agent.load(out_dir)
|
11477988
|
import imp
import logging
import os
import re
from datetime import datetime
logger = logging.getLogger(__name__)
class MigrationFile(object):
    PATTERN = r'^(?P<id>[0-9]+)_[a-z0-9_]+\.py$'
def __init__(self, id, filename):
self.id = int(id)
self.filename = filename
def __str__(self):
return '{0.id}: {0.filename}'.format(self)
def __eq__(self, other):
return self.id == other.id and self.filename == other.filename
@staticmethod
def normalize_name(name):
return re.sub('[^a-z0-9_]', '_', name)
@staticmethod
def validate_id(migration_id):
try:
return int(migration_id)
except ValueError:
logger.error('Invalid migration_id %s ' % migration_id)
def as_dict(self):
return {
'id': self.id,
'filename': self.filename
}
class Migrations(object):
"""Manage MongoDB migrations."""
MIGRATIONS_COLLECTION = 'db_migrations'
MIGRATIONS_DIRECTORY = 'migrations'
NO_MIGRATIONS_MSG = 'All migrations registered, nothing to execute'
def __init__(self, path, db):
self.path = path
self.db = db
self.directory = os.path.join(path, self.MIGRATIONS_DIRECTORY)
self.collection = self.db[self.MIGRATIONS_COLLECTION]
def get_migration_files(self):
"""Find migrations files."""
migrations = (re.match(MigrationFile.PATTERN, filename)
for filename in os.listdir(self.directory))
migrations = (MigrationFile(m.group('id'), m.group(0))
for m in migrations if m)
return sorted(migrations, key=lambda m: m.id)
def get_unregistered_migrations(self):
"""Find unregistered migrations."""
return [m for m in self.get_migration_files()
if not self.collection.find_one({'filename': m.filename})]
def check_directory(self):
"""Check if migrations directory exists."""
exists = os.path.exists(self.directory)
if not exists:
logger.error("No migrations directory found. Check your path or create a migration first.")
logger.error("Directory: %s" % self.directory)
return exists
def show_status(self):
"""Show status of unregistered migrations"""
if not self.check_directory():
return
migrations = self.get_unregistered_migrations()
if migrations:
logger.info('Unregistered migrations:')
for migration in migrations:
logger.info(migration.filename)
else:
logger.info(self.NO_MIGRATIONS_MSG)
def get_new_filename(self, name):
"""Generate filename for new migration."""
name = MigrationFile.normalize_name(name)
migrations = self.get_migration_files()
migration_id = migrations[-1].id if migrations else 0
migration_id += 1
return '{:04}_{}.py'.format(migration_id, name)
def create(self, name):
"""Create a new empty migration."""
if not os.path.exists(self.directory):
os.makedirs(self.directory)
filename = self.get_new_filename(name)
with open(os.path.join(self.directory, filename), 'w') as fp:
fp.write("def up(db): pass\n\n\n")
fp.write("def down(db): pass\n")
logger.info(filename)
def load_migration_file(self, filename):
"""Load migration file as module."""
path = os.path.join(self.directory, filename)
# spec = spec_from_file_location("migration", path)
# module = module_from_spec(spec)
# spec.loader.exec_module(module)
module = imp.load_source("migration", path)
return module
def get_migrations_to_up(self, migration_id=None):
"""Find migrations to execute."""
if migration_id is not None:
migration_id = MigrationFile.validate_id(migration_id)
if not migration_id:
return []
migrations = self.get_unregistered_migrations()
if not migrations:
logger.info(self.NO_MIGRATIONS_MSG)
return []
if migration_id:
try:
last_migration = [m for m in migrations
if m.id == migration_id][0]
except IndexError:
logger.error('Migration is not in unregistered list: %s'
% migration_id)
self.show_status()
return []
else:
last_migration = list(migrations)[-1]
return [m for m in migrations if m.id <= last_migration.id]
def up(self, migration_id=None, fake=False):
"""Executes migrations."""
if not self.check_directory():
return
for migration in self.get_migrations_to_up(migration_id):
logger.info('Executing migration: %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if not fake:
if hasattr(migration_module, 'up'):
migration_module.up(self.db)
else:
logger.error('No up method on migration %s' % migration.filename)
record = migration.as_dict()
record['date'] = datetime.utcnow()
self.collection.insert(record)
def get_last_migrated_id(self):
"""Find id of last applied migration."""
return self.collection.find().sort('date', -1)[0]['id']
def get_migrations_to_down(self, migration_id):
"""Find migrations to rollback."""
migration_id = MigrationFile.validate_id(migration_id)
if not migration_id:
return []
migrations = self.get_migration_files()
last_migration_id = self.get_last_migrated_id()
if migration_id in (m.id for m in self.get_unregistered_migrations()):
logger.error('Migration is not applied %s' % migration_id)
return []
try:
migration = [m for m in migrations if m.id == migration_id][0]
except IndexError:
logger.error('Migration does not exists %s' % migration_id)
return []
return list(reversed([m for m in migrations
if migration.id <= m.id <= last_migration_id]))
def down(self, migration_id):
"""Rollback to migration."""
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
logger.info('Rollback migration %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if hasattr(migration_module, 'down'):
migration_module.down(self.db)
else:
logger.info('No down method on %s' % migration.filename)
self.collection.remove({'filename': migration.filename})
|
11478031
|
from airflow import DAG
import datetime
from airflow.hooks.S3_hook import S3Hook
from elasticsearch import Elasticsearch
import json
import gzip
import io
import logging
import base64
from elasticsearch.helpers import bulk
from airflow.operators.python_operator import PythonOperator
from airflow.models import Variable
from airflow.contrib.operators.s3_list_operator import S3ListOperator
from airflow.models.xcom import XCom
import itertools
from airflow.operators.python_operator import ShortCircuitOperator
from jinja2 import PackageLoader
import time
from kite_airflow.plugins.google import GoogleSheetsRangeOperator
import kite_metrics
from kite_airflow.slack_alerts import task_fail_slack_alert
logger = logging.getLogger(__name__)
INDEX_GRANULARITY = datetime.timedelta(days=10)
BUCKET = 'kite-metrics'
KS_INDEX_PREFIX = 'kite_status'
def resolve_dotted_path(doc, path):
container = doc
field_name = path
while '.' in field_name:
        container_name, field_name = field_name.split('.', 1)
if container_name not in container:
return None, None
container = container[container_name]
if field_name in container:
return container, field_name
return None, None
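# Illustrative examples of the helper above:
#   resolve_dotted_path({'a': {'b': 1}}, 'a.b') -> ({'b': 1}, 'b')
#   resolve_dotted_path({'a': {'b': 1}}, 'a.c') -> (None, None)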
def get_index_shard(dt, granularity, epoch=datetime.date(1970, 1, 1)):
date = datetime.date(dt.year, dt.month, dt.day)
rounded = epoch + (date - epoch) // granularity * granularity
return rounded.isoformat()
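# Illustrative example: dates are floored to granularity-sized buckets counted
# from the epoch, e.g. with the 10-day INDEX_GRANULARITY above
# get_index_shard(datetime.datetime(2020, 10, 18), INDEX_GRANULARITY) == '2020-10-15'.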
def iter_s3_file(s3_hook, bucket, key):
json_file = s3_hook.get_key(key, BUCKET)
for line in gzip.open(json_file.get()['Body']):
yield json.loads(line)
def client_event_convert_fn(docs, index_date_suffix, deployments):
for doc in docs:
if 'messageId' not in doc:
continue
if 'properties' not in doc:
continue
event = doc.get('event')
if event == 'Index Build':
index_prefix = 'index_build'
elif event == 'Completion Stats':
index_prefix = 'completions_selected'
else:
continue
index_name = '{}_{}'.format(index_prefix, index_date_suffix)
for field in ['originalTimestamp']:
if field in doc:
del doc[field]
for field in ['repo_stats', 'receivedAt', 'sentAt', 'sent_at', 'parse_info.parse_errors']:
container, field_name = resolve_dotted_path(doc['properties'], field)
if container:
del container[field_name]
for field in ['cpu_info.sum', 'lexical_metrics.score']:
container, field_name = resolve_dotted_path(doc['properties'], field)
if container:
container[field_name] = float(container[field_name])
for field in ['completion_stats']:
if field in doc['properties']:
# completions_stats is an encoded list
data = doc['properties'][field]
data = base64.b64decode(data)
data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
data = json.loads(data)
del doc['properties'][field]
# create one document per completion stat
i = 0
for stat in data:
i += 1
elem = doc
for key in stat:
elem['properties'][key] = stat[key]
yield {
'_index': index_name,
'_id': doc['messageId'] + "-" + str(i),
'_source': elem
}
else:
yield {
'_index': index_name,
'_id': doc['messageId'],
'_source': doc
}
def scrub(a_dict, schema):
res = {}
for k, v in schema['properties'].items():
if k not in a_dict:
continue
a_val = a_dict[k]
elastic = v.get('elastic', False)
if isinstance(a_val, dict):
if elastic:
res[k] = {k1: v1 for k1, v1 in a_val.items() if k1}
elif 'properties' in v:
res[k] = scrub(a_val, v)
continue
if elastic:
res[k] = a_val
return res
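# Illustrative example of scrub(): only keys whose schema entry sets 'elastic'
# (or that have a nested 'properties' block) survive, e.g.
#   scrub({'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4},
#         {'properties': {'a': {'elastic': True},
#                         'b': {'properties': {'c': {'elastic': True}}}}})
#   -> {'a': 1, 'b': {'c': 2}}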
kite_status_config = kite_metrics.load_context('kite_status')
kite_status_schema = kite_metrics.load_schema('kite_status')
def kite_status_convert_fn(docs, index_date_suffix, deployments):
total_time = 0
for i, doc in enumerate(docs):
if i and i % 10000 == 0:
logger.info('Done {} records, avg time / record={}'.format(i, total_time / i))
start_time = time.perf_counter()
if doc.get('event') != 'kite_status':
total_time += (time.perf_counter() - start_time)
continue
if not doc.get('messageId'):
total_time += (time.perf_counter() - start_time)
continue
if 'properties' not in doc:
total_time += (time.perf_counter() - start_time)
continue
if sum(doc['properties'].get('{}_events'.format(lang), 0) for lang in kite_status_config['languages']) == 0:
total_time += (time.perf_counter() - start_time)
continue
index_name = '{}_active_{}'.format(KS_INDEX_PREFIX, index_date_suffix)
doc = scrub(doc, kite_status_schema)
for field in ['cpu_samples_list', 'active_cpu_samples_list']:
if not doc['properties'].get(field):
continue
p = field.split('_')[:-2]
new_field = '_'.join(['max'] + p)
doc['properties'][new_field] = max(map(float, doc['properties'][field]))
# We got some bogus timestamps, TODO: validate and cleanup data
for field in ['license_expire', 'plan_end']:
if isinstance(doc['properties'].get(field), int):
if 0 < doc['properties'][field] < 2524636800:
doc['properties'][field] = datetime.datetime.fromtimestamp(doc['properties'][field])
else:
del doc['properties'][field]
# Next block is for backcompatibilty only
# can be removed once the content of the PR https://github.com/kiteco/kiteco/pull/10638/ has been released to
# most of our users
for field in ['cpu_samples', 'active_cpu_samples']:
if field in doc['properties']:
samples_str = doc['properties'].pop(field)
if len(samples_str) == 0:
continue
p = field.split('_')[:-1]
new_field = '_'.join(['max'] + p)
doc['properties'][new_field] = max(map(float, samples_str.split(',')))
deployment_id = doc['properties'].get('server_deployment_id')
if deployment_id and deployment_id in deployments:
doc['properties']['server_deployment_name'] = deployments[deployment_id]
doc['payload_size'] = len(doc)
total_time += (time.perf_counter() - start_time)
yield {'_index': index_name, '_id': doc['messageId'], '_source': doc}
kite_status_dag = DAG(
'elastic_load_kite_status',
description='Load kite_status to Kibana.',
default_args={
'retries': 1,
'retry_delay': datetime.timedelta(minutes=5),
'start_date': datetime.datetime(2020, 10, 15),
'on_failure_callback': task_fail_slack_alert,
},
schedule_interval='*/10 * * * *',
jinja_environment_kwargs={
'loader': PackageLoader('kite_airflow', 'templates')
},
)
client_events_dag = DAG(
'elastic_load_client_events',
description='Load client_events to Kibana.',
default_args={
'retries': 1,
'retry_delay': datetime.timedelta(minutes=5),
'start_date': datetime.datetime(2020, 10, 15),
'on_failure_callback': task_fail_slack_alert,
},
schedule_interval='*/10 * * * *',
jinja_environment_kwargs={
'loader': PackageLoader('kite_airflow', 'templates')
},
)
convert_fns = {'kite_status': kite_status_convert_fn, 'client_events': client_event_convert_fn}
def bulk_index_metrics(bucket, s3_keys, granularity, key, deployments):
s3_hook = S3Hook('aws_us_east_1')
es = Elasticsearch(
cloud_id="metrics:XXXXXXX",
http_auth=("elastic", Variable.get('elastic_password')),
)
def iter():
for s3_key in s3_keys:
dt = datetime.date(*map(int, s3_key.split('/')[2:5]))
index_date_suffix = get_index_shard(dt, granularity)
for rec in convert_fns[key](iter_s3_file(s3_hook, bucket, s3_key), index_date_suffix, deployments):
yield rec
bulk(es, iter())
def skip_no_new_files(ti, **kwargs):
prev_files = set(itertools.chain(*[result.value for result in XCom.get_many(
execution_date=ti.execution_date,
dag_ids=ti.dag_id,
task_ids=ti.task_id,
include_prior_dates=True,
limit=100
)]))
all_files = set(ti.xcom_pull(task_ids='list_prev_json_files') + (ti.xcom_pull(task_ids='list_next_json_files') or []))
curr_files = list(all_files - prev_files)
ti.xcom_push(key='curr_files', value=curr_files)
return len(curr_files) > 0
for key, dag in [('kite_status', kite_status_dag), ('client_events', client_events_dag)]:
list_ops = [
S3ListOperator(
aws_conn_id='aws_us_east_1',
task_id='list_{}_json_files'.format(k),
bucket='kite-metrics',
prefix="firehose/{}/{{{{ (execution_date + macros.timedelta(hours={})).format('%Y/%m/%d/%H') }}}}/".format(key, diff),
delimiter='/',
dag=dag,
) for k, diff in [('prev', 0), ('next', 1)]
]
def load_fn(ti, params, **kwargs):
s3_keys = ti.xcom_pull(task_ids=skip_no_new_files.__name__, key='curr_files')
logger.info("Loading files {}".format(', '.join(s3_keys)))
deployments_data = ti.xcom_pull(task_ids='copy_server_deployments')['values']
id_col = deployments_data[1].index('Deployment ID')
name_col = deployments_data[1].index('Name')
deployments = {d[id_col]: d[name_col] for d in deployments_data[2:] if len(d) > max(id_col, name_col) and d[name_col].strip()}
bulk_index_metrics(BUCKET, s3_keys, INDEX_GRANULARITY, params['key'], deployments)
return s3_keys
list_ops >> ShortCircuitOperator(
task_id=skip_no_new_files.__name__,
python_callable=skip_no_new_files,
dag=dag,
provide_context=True,
depends_on_past=True,
) >> GoogleSheetsRangeOperator(
gcp_conn_id='google_cloud_kite_dev',
spreadsheet_id='1-XXXXXXX',
range='A:D',
task_id='copy_server_deployments',
dag=dag,
provide_context=True,
) >> PythonOperator(
python_callable=load_fn,
task_id='load_{}'.format(key),
dag=dag,
provide_context=True,
params={'key': key}
)
|
11478063
|
import sys
import json
def init(config_filename = 'config.json'):
infile = open(config_filename, "rt")
#json python module doesn't honor comment lines.
#so we are going to strip them out.
json_lines = []
for line in infile:
comment = line.find('//')
if comment == -1:
json_lines.append(line)
elif comment > 0:
remainder = line[:comment]
json_lines.append(remainder)
infile.close()
settings_json = json.loads(''.join(json_lines))
module = sys.modules[__name__]
    for name, value in settings_json.items():
setattr(module, name, value)
|
11478076
|
import FWCore.ParameterSet.Config as cms
hcaltbfilter_beam = cms.EDFilter("HcalTBTriggerFilter",
AllowLED = cms.bool(False),
AllowPedestalOutSpill = cms.bool(False),
AllowLaser = cms.bool(False),
AllowPedestal = cms.bool(False),
AllowBeam = cms.bool(True),
AllowPedestalInSpill = cms.bool(False),
hcalTBTriggerDataTag = cms.InputTag("tbunpack")
)
|
11478083
|
from collections import defaultdict
from typing import TYPE_CHECKING, DefaultDict, Dict, List, Optional
from beagle.nodes.node import Node
from beagle.edges import FileOf, CopiedTo
# mypy type hinting
if TYPE_CHECKING:
from beagle.nodes import Process # noqa: F401
class File(Node):
__name__ = "File"
__color__ = "#3CB371"
host: Optional[str]
full_path: Optional[str]
file_path: Optional[str]
file_name: Optional[str]
extension: Optional[str]
timestamp: Optional[int]
hashes: Optional[Dict[str, str]] = {}
file_of: DefaultDict["Process", FileOf]
copied_to: DefaultDict["File", CopiedTo]
key_fields: List[str] = ["host", "full_path"]
def __init__(
self,
host: str = None,
file_path: str = None,
file_name: str = None,
full_path: str = None,
extension: str = None,
hashes: Optional[Dict[str, str]] = {},
) -> None:
self.host = host
self.file_path = file_path
self.file_name = file_name
if full_path:
self.full_path = full_path
elif file_path and file_name:
if file_path[-1] == "\\":
self.full_path = f"{file_path}{file_name}"
else:
self.full_path = f"{file_path}\\{file_name}"
else:
# Fixes bug where we don't know the path of a process
self.full_path = ""
self.extension = extension
self.hashes = hashes
self.file_of = defaultdict(FileOf)
self.copied_to = defaultdict(CopiedTo)
def set_extension(self) -> None:
if self.full_path:
self.extension = self.full_path.split(".")[-1]
@property
def edges(self) -> List[DefaultDict]:
return [self.file_of, self.copied_to]
@property
def _display(self) -> str:
return self.file_name or super()._display
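# Illustrative usage sketch (hypothetical values), kept as a comment:
#   f = File(host="workstation01", file_path="C:\\Windows", file_name="cmd.exe")
#   f.full_path                      -> "C:\\Windows\\cmd.exe"  (joined with a backslash)
#   f.set_extension(); f.extension   -> "exe"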
|
11478100
|
import os
import sys
sys.path.insert(0, './scripts/')
import numpy as np
import tensorflow as tf
import random
from glob import glob
from utils import *
from models import *
import argparse
parser = argparse.ArgumentParser(description='Auto Encoder for 3D object reconstruction from images')
parser.add_argument('-o','--object', default='chair', help='The name of the object to train')
parser.add_argument('-ensemble', default='0', help='The ensemble experiment number being performed; you should do up to five')
parser.add_argument('-e','--epochs', default=250,
help ='The number of epochs to run for.', type=int)
parser.add_argument('-b','--batchsize', default=256, help ='The batch size.', type=int)
parser.add_argument('-l', '--load', default= False, help='Indicates if a previously loaded model should be loaded.', action = 'store_true')
parser.add_argument('-le', '--load_epoch', default= 'best', help='The epoch to number to be loaded from, if you just want the best, leave as default.', type=str)
args = parser.parse_args()
checkpoint_dir = "checkpoint/" + args.object +'/'
save_dir = "plots/" + args.object +'/'
data_dir = 'data/voxels/' + args.object+ '/train'
valid_dir = 'data/voxels/' + args.object+ '/valid'
img_data_dir = 'data/images/' + args.object+ '/train'
img_valid_dir = 'data/images/' + args.object+ '/valid'
random.seed(0)
batchsize = args.batchsize
valid_length = 3 # number of batches to use in validation set
######### make directories ############################
make_directories(checkpoint_dir,save_dir)
####### inputs ###################
scope = 'reconstruction'
images = tf.placeholder(tf.float32, [args.batchsize, 128, 128, 3], name='images')
models = tf.placeholder(tf.float32, [args.batchsize, 32, 32, 32] , name='real_models')
########## network computations #######################
net, pred = auto_encoder(images, scope=scope, is_train=True, reuse = False)
_, pred_valid = auto_encoder(images, scope=scope, is_train=False, reuse = True)
mse = tf.reduce_mean(tf.square(models-pred))
mae = tf.reduce_mean(tf.abs(models-pred))
loss = mse + .001*mae
real_loss = tf.reduce_mean(tf.square(models-pred_valid))
############ Optimization #############
vars = tl.layers.get_variables_with_name(scope, True, True)
net.print_params(False)
optim = tf.train.RMSPropOptimizer(learning_rate = 1e-3).minimize(loss, var_list=vars)
####### Training ################
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess=tf.Session()
sess.run(tf.global_variables_initializer())
##### load checkpoints ####################
if args.load:
load_networks(checkpoint_dir, sess, net, args.load_epoch, name = (scope + args.ensemble))
recon_loss, valid_IoU, valid_loss, max_IoU = [],[],[], 0
######## make files and models ##################
files= grab_images(img_data_dir, data_dir)
valid = grab_images(img_valid_dir, valid_dir)
random.shuffle(valid)
valid = valid[:3*batchsize]
valid_models, valid_images, _ = make_batch_images(valid, valid_dir)
if args.load:
try:
start = int(args.load_epoch) + 1
except:
start = 0
else:
start = 0
########### train #################
for epoch in range(start, args.epochs):
random.shuffle(files)
for idx in xrange(0, len(files)/args.batchsize/10):
batch = random.sample(files, args.batchsize)
batch_models, batch_images, start_time = make_batch_images(batch, data_dir)
batch_loss,_ = sess.run([mse, optim], feed_dict={images: batch_images, models:batch_models })
recon_loss.append(batch_loss)
print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, loss: %.4f, VALID: %.4f" % (epoch,
args.epochs, idx, len(files)/batchsize/10, time.time() - start_time, batch_loss, max_IoU))
########## check validation #############
valid_losses = 0.
IoU = 0.
for i in range(int(len(valid)/args.batchsize)):
v_images = valid_images[i*args.batchsize: (i+1)*args.batchsize]
gt_models = valid_models[i*args.batchsize: (i+1)*args.batchsize]
v_models, temp_loss = sess.run([pred, real_loss], feed_dict={images:v_images, models: gt_models})
valid_losses += temp_loss/float(valid_length)
v_models[np.where(v_models >.45)] = 1
v_models[np.where(v_models<.45)] = 0
for m, gt in zip(v_models,gt_models):
IoU += evaluate_voxel_prediction(m,gt)
valid_loss.append(valid_losses)
IoU = IoU / float(valid_length * batchsize)
valid_IoU.append(IoU)
test_valid = max_IoU
max_IoU = max(IoU, max_IoU)
if test_valid != max_IoU:
save_networks(checkpoint_dir, sess, net, name=(scope + args.ensemble ), epoch = str(epoch))
######### save graphs ###########
render_graphs(save_dir, epoch, recon_loss, valid_loss, valid_IoU, name = scope )
|
11478104
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from davg.lanefinding.ImgMgr import ImgMgr
from davg.lanefinding.BirdsEyeTransform import BirdsEyeTransform
from davg.lanefinding.Thresholds import Thresholds
from davg.lanefinding.DiagnosticScreen import DiagnosticScreen
img_mgr = ImgMgr()
birdseye = BirdsEyeTransform()
def demonstrate_gradient_threshold_comparison(fname,
grad_ksize=27, mag_ksize=27, dir_ksize=15,
gradx_min=30, gradx_max=120,
grady_min=30, grady_max=120,
mag_min=25, mag_max=120,
dir_min=0.7, dir_max=np.pi/2):
global img_mgr, birdseye
img = mpimg.imread(fname)
img = img_mgr.undistort(img)
masked = birdseye.apply_cropping_mask(img)
img = birdseye.warp(masked)
gry = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
gry_rgb = cv2.cvtColor(gry, cv2.COLOR_GRAY2RGB)
gradx = Thresholds.abs_sobel_thresh(gry, orient='x', sobel_kernel=grad_ksize, thresh=(gradx_min, gradx_max))
grady = Thresholds.abs_sobel_thresh(gry, orient='y', sobel_kernel=grad_ksize, thresh=(grady_min, grady_max))
mag_binary = Thresholds.grad_magnitude_thresh(gry, sobel_kernel=mag_ksize, thresh=(mag_min, mag_max))
dir_binary = Thresholds.grad_direction_thresh(gry, sobel_kernel=dir_ksize, thresh=(dir_min, dir_max))
screen = DiagnosticScreen.compose_2x3_screen(diag1=img, diag2=gradx, diag3=grady,
diag4=gry_rgb, diag5=mag_binary, diag6=dir_binary,
title1="Original", title2="Sobel X", title3="Sobel Y",
title4="Grayscale", title5="Magnitude", title6="Direction")
plt.figure(figsize=(12,4))
plt.imshow(screen)
plt.axis('off')
plt.show()
# UNCOMMENT TO RUN
test_images = glob.glob('test_images_s1_1296x972/*.jpg')
demonstrate_gradient_threshold_comparison(test_images[0])
|
11478133
|
from uuid import uuid4
from flask import Blueprint, Response, abort, request
from flask_restful import Api
from flasgger import swag_from
from app.docs.v2.admin.account.account_management import *
from app.models.account import AdminModel, StudentModel, SignupWaitingModel
from app.views.v2 import BaseResource, auth_required, json_required
api = Api(Blueprint(__name__, __name__))
api.prefix = '/admin/account-management'
@api.resource('/student')
class StudentAccount(BaseResource):
@auth_required(AdminModel)
@json_required({'number': int})
@swag_from(STUDENT_ACCOUNT_DELETE)
def delete(self):
"""
        Delete a student account
"""
def generate_new_signup_waiting_student_account():
while True:
new_uuid = str(uuid4())[:4]
if not SignupWaitingModel.objects(uuid=new_uuid):
break
SignupWaitingModel(uuid=new_uuid, name=student.name, number=student.number).save()
return new_uuid
payload = request.json
student_number = payload['number']
student = StudentModel.objects(number=student_number).first()
if not student:
signup_waiting = SignupWaitingModel.objects(number=student_number).first()
return {
'uuid': signup_waiting.uuid
} if signup_waiting else Response('', 204)
else:
student.delete()
return {
'uuid': generate_new_signup_waiting_student_account()
}, 201
@api.resource('/admin')
class AdminAccount(BaseResource):
@auth_required(AdminModel)
@json_required({'id': str, 'password': str, 'name': str})
@swag_from(ADMIN_ACCOUNT_POST)
def post(self):
"""
        Create a new admin account
"""
payload = request.json
id = payload['id']
if AdminModel.objects(id=id):
abort(409)
AdminModel(id=id, pw=self.encrypt_password(payload['password']), name=payload['name']).save()
return Response('', 201)
@auth_required(AdminModel)
@json_required({'id': str})
@swag_from(ADMIN_ACCOUNT_DELETE)
def delete(self):
"""
        Delete an admin account
"""
payload = request.json
admin = AdminModel.objects(id=payload['id']).first()
if not admin:
return Response('', 204)
else:
admin.delete()
return Response('', 200)
|
11478167
|
import sys
# import from official repo
sys.path.append('tensorflow_models')
from official.nlp.bert.tf1_checkpoint_converter_lib import convert, BERT_V2_NAME_REPLACEMENTS, BERT_NAME_REPLACEMENTS, BERT_PERMUTATIONS, BERT_V2_PERMUTATIONS
from utils.misc import ArgParseDefault
def main(args):
convert(args.input_checkpoint, args.output_checkpoint, args.num_heads, args.name_replacements, args.name_permutations)
def parse_args():
# Parse commandline
parser = ArgParseDefault()
parser.add_argument('--input_checkpoint', required=True, help='Path to v1 checkpoint')
parser.add_argument('--output_checkpoint', required=True, help='Path to checkpoint to be written out.')
    parser.add_argument('--num_heads', default=16, type=int, help='Number of attention heads in the checkpoint')
    parser.add_argument('--name_replacements', default=BERT_NAME_REPLACEMENTS, help='Name replacements')
    parser.add_argument('--name_permutations', default=BERT_PERMUTATIONS, help='Name permutations')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
11478172
|
import typing
import torch
import torch.nn as nn
from pyraug.models.nn import *
class Encoder_Conv(BaseEncoder):
def __init__(self, args):
BaseEncoder.__init__(self)
self.input_dim = args.input_dim
self.latent_dim = args.latent_dim
self.n_channels = 1
self.layers = nn.Sequential(
nn.Conv2d(
self.n_channels, out_channels=32, kernel_size=3, stride=2, padding=1
),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, out_channels=32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, out_channels=32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
)
self.fc1 = nn.Sequential(nn.Linear(512, 400), nn.ReLU())
self.mu = nn.Linear(400, self.latent_dim)
self.std = nn.Linear(400, self.latent_dim)
def forward(self, x):
out = self.layers(
x.reshape(
-1, self.n_channels, int(x.shape[-1] ** 0.5), int(x.shape[-1] ** 0.5)
)
)
out = self.fc1(out.reshape(x.shape[0], -1))
return self.mu(out), self.std(out)
class Decoder_Conv(BaseDecoder):
def __init__(self, args):
BaseDecoder.__init__(self)
self.input_dim = args.input_dim
self.latent_dim = args.latent_dim
self.n_channels = 1
self.fc1 = nn.Sequential(
nn.Linear(self.latent_dim, 400), nn.ReLU(), nn.Linear(400, 512), nn.ReLU()
)
self.layers = nn.Sequential(
nn.ConvTranspose2d(32, out_channels=32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(
32,
out_channels=32,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.ConvTranspose2d(
32,
out_channels=self.n_channels,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(self.n_channels),
nn.Sigmoid(),
)
def forward(self, z):
out = self.fc1(z)
out = self.layers(out.reshape(z.shape[0], 32, 4, 4))
return out.reshape(z.shape[0], -1)
class Metric_Custom(BaseMetric):
def __init__(self):
BaseMetric.__init__(self)
class Encoder_MLP_Custom(BaseEncoder):
def __init__(self, args: dict):
BaseEncoder.__init__(self)
if args.input_dim is None:
raise AttributeError(
"No input dimension provided !"
"'input_dim' parameter of ModelConfig instance must be set to 'data_shape' where"
"the shape of the data is [mini_batch x data_shape]. Unable to build encoder"
"automatically"
)
self.input_dim = args.input_dim
self.latent_dim = args.latent_dim
self.layers = nn.Sequential(nn.Linear(args.input_dim, 10), nn.ReLU())
self.mu = nn.Linear(10, self.latent_dim)
self.std = nn.Linear(10, self.latent_dim)
def forward(self, x):
out = self.layers(x.reshape(-1, self.input_dim))
return self.mu(out), self.std(out)
class Decoder_MLP_Custom(BaseDecoder):
def __init__(self, args: dict):
BaseDecoder.__init__(self)
if args.input_dim is None:
raise AttributeError(
"No input dimension provided !"
"'input_dim' parameter of ModelConfig instance must be set to 'data_shape' where"
"the shape of the data is [mini_batch x data_shape]. Unable to build decoder"
"automatically"
)
self.layers = nn.Sequential(
nn.Linear(args.latent_dim, 10),
nn.ReLU(),
nn.Linear(10, args.input_dim),
nn.Sigmoid(),
)
def forward(self, z):
return self.layers(z)
class Metric_MLP_Custom(BaseMetric):
def __init__(self, args: dict):
BaseMetric.__init__(self)
if args.input_dim is None:
raise AttributeError(
"No input dimension provided !"
"'input_dim' parameter of ModelConfig instance must be set to 'data_shape' where"
"the shape of the data is [mini_batch x data_shape]. Unable to build metric"
"automatically"
)
self.input_dim = args.input_dim
self.latent_dim = args.latent_dim
self.layers = nn.Sequential(nn.Linear(self.input_dim, 10), nn.ReLU())
self.diag = nn.Linear(10, self.latent_dim)
k = int(self.latent_dim * (self.latent_dim - 1) / 2)
self.lower = nn.Linear(10, k)
def forward(self, x):
h1 = self.layers(x.reshape(-1, self.input_dim))
h21, h22 = self.diag(h1), self.lower(h1)
L = torch.zeros((x.shape[0], self.latent_dim, self.latent_dim)).to(x.device)
indices = torch.tril_indices(
row=self.latent_dim, col=self.latent_dim, offset=-1
)
# get non-diagonal coefficients
L[:, indices[0], indices[1]] = h22
# add diagonal coefficients
L = L + torch.diag_embed(h21.exp())
return L
class EncoderWrongInputDim(BaseEncoder):
def __init__(self, args):
BaseEncoder.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim - 1, args.latent_dim)
def forward(self, x):
return self.fc(x.reshape(-1, self.input_dim))
class DecoderWrongInputDim(BaseDecoder):
def __init__(self, args):
BaseDecoder.__init__(self)
self.latent_dim = args.latent_dim
self.fc = nn.Linear(args.latent_dim - 1, args.input_dim)
def forward(self, z):
return self.fc(z.reshape(-1, self.latent_dim))
class MetricWrongInputDim(BaseMetric):
def __init__(self, args):
BaseMetric.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim - 1, args.latent_dim)
def forward(self, x):
return self.fc(x.reshape(-1, self.input_dim))
class EncoderWrongOutputDim(BaseEncoder):
def __init__(self, args):
BaseEncoder.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim, args.latent_dim - 1)
def forward(self, x):
return self.fc(x.reshape(-1, self.input_dim))
class DecoderWrongOutputDim(BaseDecoder):
def __init__(self, args):
BaseDecoder.__init__(self)
self.latent_dim = args.latent_dim
self.fc = nn.Linear(args.latent_dim, args.input_dim - 1)
def forward(self, z):
return self.fc(z.reshape(-1, self.latent_dim))
class MetricWrongOutputDim(BaseMetric):
def __init__(self, args):
BaseMetric.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim, args.latent_dim - 1)
def forward(self, x):
return self.fc(x.reshape(-1, self.input_dim))
class EncoderWrongOutput(BaseEncoder):
def __init__(self, args):
BaseEncoder.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim, args.latent_dim)
def forward(self, x):
return self.fc(x.reshape(-1, self.input_dim))
class DecoderWrongOutput(BaseDecoder):
def __init__(self, args):
BaseDecoder.__init__(self)
self.latent_dim = args.latent_dim
self.fc = nn.Linear(args.latent_dim, args.input_dim)
def forward(self, z):
out = self.fc(z.reshape(-1, self.latent_dim))
return out, out, out
class MetricWrongOutput(BaseMetric):
def __init__(self, args):
BaseMetric.__init__(self)
self.input_dim = args.input_dim
self.fc = nn.Linear(args.input_dim, args.latent_dim)
def forward(self, x):
out = self.fc(x.reshape(-1, self.input_dim))
return out, out
class MetricWrongOutputDimBis(BaseMetric):
def __init__(self, args):
BaseMetric.__init__(self)
self.latent_dim = args.latent_dim
self.fc = nn.Linear(args.input_dim, args.latent_dim)
def forward(self, x):
# out = self.fc(x.reshape(-1, self.input_dim))
return torch.randn(x.shape[0], self.latent_dim, self.latent_dim - 1)
class NetBadInheritance(nn.Module):
def __init__(self):
nn.Module.__init__(self)
def forward(self, x):
return 0
|
11478189
|
import dash
import dash_html_components as html
app = dash.Dash(meta_tags=[
# A description of the app, used by e.g.
# search engines when displaying search results.
{
'name': 'description',
'content': 'My description'
},
# A tag that tells Internet Explorer (IE)
# to use the latest renderer version available
# to that browser (e.g. Edge)
{
'http-equiv': 'X-UA-Compatible',
'content': 'IE=edge'
},
# A tag that tells the browser not to scale
# desktop widths to fit mobile screens.
# Sets the width of the viewport (browser)
# to the width of the device, and the zoom level
# (initial scale) to 1.
#
# Necessary for "true" mobile support.
{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0'
}
])
app.layout = html.Div('Simple Dash App')
if __name__ == '__main__':
app.run_server(debug=True)
|
11478205
|
from django.conf.urls import include
from django.urls import path
from django.contrib import admin
urlpatterns = (
path('', include('landing.urls', namespace='landing')),
path('', include('registration.urls', namespace='registration')),
path('slack/', include('slack.urls', namespace='slack')),
path('jobs/', include('jobs.urls', namespace='jobs')),
path('admin/', admin.site.urls, name='admin'),
path('markdownx/', include('markdownx.urls')),
)
|
11478212
|
import math
from typing import Optional
import numpy as np
from paramak import ExtrudeStraightShape
class InnerTfCoilsFlat(ExtrudeStraightShape):
"""A tf coil volume with straight inner and outer profiles and
constant gaps between each coil. Note: the inner / outer surface is not
equal distance to the center point everywhere as the corners are further
than the straight sections.
Args:
height: height of tf coils.
inner_radius: Distance between center point and the inner surface of
the tf coils.
outer_radius: Distance between center point and the outer surface of
the tf coils.
number_of_coils: number of tf coils.
gap_size: gap between adjacent tf coils.
radius_type: Controls the part of the inner surface used when
defining the inner_radius and outer_radius. Can be set to either
'corner' or 'straight'.
azimuth_start_angle: defaults to 0.0.
        workplane: defaults to "XY".
rotation_axis: Defaults to "Z".
"""
def __init__(
self,
height: float,
inner_radius: float,
outer_radius: float,
number_of_coils: int,
gap_size: float,
radius_type: Optional[str] = "corner",
azimuth_start_angle: Optional[float] = 0.0,
workplane: Optional[str] = "XY",
rotation_axis: Optional[str] = "Z",
**kwargs,
) -> None:
super().__init__(
distance=height, workplane=workplane, rotation_axis=rotation_axis, **kwargs
)
self.azimuth_start_angle = azimuth_start_angle
self.height = height
self.inner_radius = inner_radius
self.outer_radius = outer_radius
self.radius_type = radius_type
self.number_of_coils = number_of_coils
self.gap_size = gap_size
self.distance = height
@property
def azimuth_start_angle(self):
return self._azimuth_start_angle
@azimuth_start_angle.setter
def azimuth_start_angle(self, value):
self._azimuth_start_angle = value
@property
def radius_type(self):
return self._radius_type
@radius_type.setter
def radius_type(self, value):
if value not in ["corner", "straight"]:
msg = f'radius_type must be either "corner" or "straight". Not {value}'
raise ValueError(msg)
self._radius_type = value
@property
def azimuth_placement_angle(self):
self.find_azimuth_placement_angle()
return self._azimuth_placement_angle
@azimuth_placement_angle.setter
def azimuth_placement_angle(self, value):
self._azimuth_placement_angle = value
@property
def height(self):
return self._height
@height.setter
def height(self, height):
self._height = height
@property
def distance(self):
return self.height
@distance.setter
def distance(self, value):
self._distance = value
@property
def inner_radius(self):
return self._inner_radius
@inner_radius.setter
def inner_radius(self, inner_radius):
self._inner_radius = inner_radius
@property
def outer_radius(self):
return self._outer_radius
@outer_radius.setter
def outer_radius(self, outer_radius):
self._outer_radius = outer_radius
@property
def number_of_coils(self):
return self._number_of_coils
@number_of_coils.setter
def number_of_coils(self, number_of_coils):
self._number_of_coils = number_of_coils
@property
def gap_size(self):
return self._gap_size
@gap_size.setter
def gap_size(self, gap_size):
self._gap_size = gap_size
def find_points(self):
"""Finds the points that describe the 2D profile of the tf coil shape"""
# / p4
# / /¦
# / / ¦
# / / ¦
# / / ¦
# p1/ ¦
# ¦ ¦
# x ¦ ¦
# ¦ ¦
# p2\ ¦
# \ \ ¦
# \ \ ¦
# \ \ ¦
# \ p3
if self.radius_type == "corner":
distance_to_inner_corner = self.inner_radius
distance_to_rear_corner = self.outer_radius
# this section calculates a new distance to the corners now that we
# know the user provided the distance to the straight
if self.radius_type == "straight":
angle = 360 / (self.number_of_coils * 2)
distance_to_inner_corner = self.inner_radius / math.cos(math.radians(angle))
distance_to_rear_corner = self.outer_radius / math.cos(math.radians(angle))
if (
self.gap_size * self.number_of_coils
> 2 * math.pi * distance_to_inner_corner
):
msg = (
"Gap_size is too large. The gap_size * number of coils must "
"be less than the circumference of the circle made by "
"the inner_radius"
)
raise ValueError(msg)
if distance_to_inner_corner != 0.0:
theta_inner = (
(2 * math.pi * distance_to_inner_corner)
- (self.gap_size * self.number_of_coils)
) / (distance_to_inner_corner * self.number_of_coils)
omega_inner = math.asin(self.gap_size / (2 * distance_to_inner_corner))
# inner points
point_1 = (
(distance_to_inner_corner * math.cos(-omega_inner)),
(-distance_to_inner_corner * math.sin(-omega_inner)),
)
point_2 = (
(
distance_to_inner_corner
* math.cos(theta_inner)
* math.cos(-omega_inner)
+ distance_to_inner_corner
* math.sin(theta_inner)
* math.sin(-omega_inner)
),
(
-distance_to_inner_corner
* math.cos(theta_inner)
* math.sin(-omega_inner)
+ distance_to_inner_corner
* math.sin(theta_inner)
* math.cos(-omega_inner)
),
)
points = [(point_1[0], point_1[1]), (point_2[0], point_2[1])]
else:
points = [(0, 0)]
# print(point_1)
# print(point_2)
theta_outer = (
(2 * math.pi * distance_to_rear_corner)
- (self.gap_size * self.number_of_coils)
) / (distance_to_rear_corner * self.number_of_coils)
omega_outer = math.asin(self.gap_size / (2 * distance_to_rear_corner))
# outer points
point_4 = (
(distance_to_rear_corner * math.cos(-omega_outer)),
(-distance_to_rear_corner * math.sin(-omega_outer)),
)
point_6 = (
(
distance_to_rear_corner * math.cos(theta_outer) * math.cos(-omega_outer)
+ distance_to_rear_corner
* math.sin(theta_outer)
* math.sin(-omega_outer)
),
(
-distance_to_rear_corner
* math.cos(theta_outer)
* math.sin(-omega_outer)
+ distance_to_rear_corner
* math.sin(theta_outer)
* math.cos(-omega_outer)
),
)
points.append((point_6[0], point_6[1]))
points.append((point_4[0], point_4[1]))
self.points = points
def find_azimuth_placement_angle(self):
"""Calculates the azimuth placement angles based on the number of tf
coils"""
angles = list(
np.linspace(
0 + self.azimuth_start_angle,
360 + self.azimuth_start_angle,
self.number_of_coils,
endpoint=False,
)
)
self.azimuth_placement_angle = angles
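# Illustrative usage sketch (parameter values are made up), kept as a comment:
#   coils = InnerTfCoilsFlat(height=500.0, inner_radius=50.0, outer_radius=150.0,
#                            number_of_coils=6, gap_size=10.0)
# With the default radius_type="corner" the radii are measured to the coil corners;
# with radius_type="straight" they are measured to the flat faces and converted to
# corner distances internally by dividing by cos(180 / number_of_coils degrees).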
|
11478245
|
def init():
import os
if 'DJANGO_SETTINGS_MODULE' in os.environ:
try:
import django
django.setup()
except AttributeError:
pass
|
11478315
|
from django.shortcuts import render
def test(request):
return render(request, "context_processors/test.html")
|
11478324
|
import functools
import importlib
import inspect
import os.path
import time
from datetime import datetime
from datetime import timezone
from urllib.parse import urlparse
import pytest
def make_url_base(feed_url):
# FIXME: this is very brittle (broken query string and fragment support),
# and also very far away from test_parse where it's used.
if any(feed_url.startswith(p) for p in ['http:', 'https:', 'file:']):
sep = '/'
# ... but not really, we also support file:path\to\thing, I think
else:
sep = os.sep
url_base = sep.join(feed_url.split(sep)[:-1])
if url_base:
url_base = url_base.rstrip(sep) + sep
rel_base = (
url_base if any(feed_url.startswith(p) for p in ['http:', 'https:']) else ''
)
return url_base, rel_base
def rename_argument(original, alias):
def decorator(fn):
@functools.wraps(fn)
def wrapper(**kwargs):
kwargs[original] = kwargs.pop(alias)
return fn(**kwargs)
signature = inspect.signature(fn)
parameters = signature.parameters.copy()
parameters[alias] = parameters.pop(original).replace(name=alias)
signature = signature.replace(parameters=parameters.values())
wrapper.__signature__ = signature
return wrapper
return decorator
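# A hypothetical usage sketch: expose the keyword argument `original_name`
# under the alias `alias_name` while leaving the wrapped function unchanged.
#
#     @rename_argument('original_name', 'alias_name')
#     def fn(original_name=1):
#         return original_name
#
#     fn(alias_name=2)  # -> 2; the wrapper maps alias_name back to original_name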
class Reloader:
def __init__(self, monkeypatch):
self.modules = []
self.monkeypatch = monkeypatch
def __call__(self, module):
self.modules.append(module)
return importlib.reload(module)
def undo(self):
# undo monkeypatches before reloading again,
# to ensure modules are reloaded from a "clean" environment
self.monkeypatch.undo()
while self.modules:
importlib.reload(self.modules.pop())
@pytest.fixture
def reload_module(monkeypatch):
reloader = Reloader(monkeypatch)
try:
yield reloader
finally:
reloader.undo()
class TZSetter:
def __init__(self, monkeypatch):
self.monkeypatch = monkeypatch
def __call__(self, tz):
self.monkeypatch.setenv('TZ', tz)
time.tzset()
def undo(self):
self.monkeypatch.undo()
time.tzset()
@pytest.fixture
def monkeypatch_tz(monkeypatch):
tzsetter = TZSetter(monkeypatch)
try:
yield tzsetter
finally:
try:
tzsetter.undo()
except AttributeError as e:
# on windows, we get "module 'time' has no attribute 'tzset'";
# it's ok to do nothing, since __call__() didn't call it either
if 'tzset' not in str(e):
raise
# FIXME: explain what this is
# https://github.com/lemon24/reader/issues/233
def utc_datetime(*args, **kwargs):
return datetime(*args, tzinfo=timezone.utc, **kwargs)
def naive_datetime(*args, **kwargs):
return datetime(*args, tzinfo=None, **kwargs)
|
11478348
|
import math
from dependent_injection import parameter_dependent
def test_good_dependency():
assert parameter_dependent(25, math.sqrt) == 5
def test_negative():
def bad_dependency(number):
raise Exception('Function called')
assert parameter_dependent(-1, bad_dependency) == 0
def test_zero():
def good_dependency(number):
return 0
assert parameter_dependent(0, good_dependency) == 0
def test_twenty_five():
def good_dependency(number):
return 5
assert parameter_dependent(25, good_dependency) == 5
def test_hundred():
def good_dependency(number):
return 10
assert parameter_dependent(100, good_dependency) == 10
def test_hundred_and_one():
def bad_dependency(number):
raise Exception('Function called')
assert parameter_dependent(101, bad_dependency) == 10
|
11478363
|
import time
import numpy as np
import tensordata.gfile as gfile
import tensordata.utils.request as rq
from tensordata.utils.compress import un_gz, un_tar
from tensordata.utils._utils import assert_dirs
from linora.image import save_image, array_to_image
__all__ = ['stl10']
def stl10(root):
"""Stl10 dataset from http://ai.stanford.edu/~acoates/stl10
The STL-10 dataset is an image recognition dataset for developing
unsupervised feature learning, deep learning, self-taught learning algorithms.
It is inspired by the CIFAR-10 dataset but with some modifications.
In particular, each class has fewer labeled training examples than in CIFAR-10,
but a very large set of unlabeled examples is provided to learn image models
prior to supervised training. The primary challenge is to make use of the
unlabeled data (which comes from a similar but different
distribution from the labeled data) to build a useful prior.
We also expect that the higher resolution of this dataset (96x96)
will make it a challenging benchmark for developing
more scalable unsupervised learning methods.
    Attention: if the directory `root/stl10` already exists, the API will delete it and recreate it.
Data storage directory:
root = `/user/.../mydata`
stl10 data:
`root/stl10/train/1/xx.png`
`root/stl10/train/4/xx.png`
`root/stl10/train/8/xx.png`
`root/stl10/test/1/xx.png`
`root/stl10/test/4/xx.png`
`root/stl10/test/8/xx.png`
    Args:
        root: str, the absolute path of the data directory.
              For example, if you want the data stored in `/user/.../mydata/stl10`,
              root should be `/user/.../mydata`.
    Returns:
        The absolute path of the data directory, i.e. `root/stl10`.
"""
start = time.time()
task_path = assert_dirs(root, 'stl10')
url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
rq.files(url, gfile.path_join(task_path, url.split('/')[-1]))
un_tar(un_gz(gfile.path_join(task_path, url.split('/')[-1])))
with open(gfile.path_join(task_path, 'stl10_binary/stl10_binary/test_X.bin'), 'rb') as fin:
data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3,96,96).transpose((0, 3, 2, 1))
with open(gfile.path_join(task_path, 'stl10_binary/stl10_binary/test_y.bin'), 'rb') as fin:
data_label = np.frombuffer(fin.read(), dtype=np.uint8)
for i in set(data_label):
gfile.makedirs(gfile.path_join(task_path, 'test', str(i)))
for idx in range(data.shape[0]):
save_image(gfile.path_join(task_path, 'test', str(data_label[idx]), str(idx)+'.png'), array_to_image(data[idx]))
with open(gfile.path_join(task_path, 'stl10_binary/stl10_binary/train_X.bin'), 'rb') as fin:
data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3,96,96).transpose((0, 3, 2, 1))
with open(gfile.path_join(task_path, 'stl10_binary/stl10_binary/train_y.bin'), 'rb') as fin:
data_label = np.frombuffer(fin.read(), dtype=np.uint8)
for i in set(data_label):
gfile.makedirs(gfile.path_join(task_path, 'train', str(i)))
for idx in range(data.shape[0]):
save_image(gfile.path_join(task_path, 'train', str(data_label[idx]), str(idx)+'.png'), array_to_image(data[idx]))
with open(gfile.path_join(task_path, 'stl10_binary/stl10_binary/unlabeled_X.bin'), 'rb') as fin:
data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3,96,96).transpose((0, 3, 2, 1))
gfile.makedirs(gfile.path_join(task_path, 'unlabeled'))
for idx in range(data.shape[0]):
save_image(gfile.path_join(task_path, 'unlabeled', str(idx)+'.png'), array_to_image(data[idx]))
gfile.remove(gfile.path_join(task_path, 'stl10_binary.tar.gz'))
gfile.remove(gfile.path_join(task_path, 'stl10_binary.tar'))
    gfile.remove(gfile.path_join(task_path, 'stl10_binary'))
    print('stl10 dataset download completed, run time %d min %.2f sec' % divmod((time.time() - start), 60))
return task_path
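# A minimal usage sketch (the root path below is illustrative; the archive is
# several gigabytes, so the download and extraction take a while):
if __name__ == '__main__':
    stl10_dir = stl10('/user/.../mydata')  # images end up under /user/.../mydata/stl10
    print(stl10_dir)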
|
11478379
|
from .. import networks
const = lambda x: x
def get_network(name):
if name in NETWORKS:
return NETWORKS[name]
else:
return NETWORKS["elementsregtest"]
NETWORKS = {
"liquidv1": {
"name": "Liquid",
"wif": b'\x80',
"p2pkh": b'\x00',
"p2sh": b'\x27',
"bp2sh": b'\x0c\x27',
"bech32": "ex",
"blech32": "lq",
"xprv": b'\<KEY>',
"xpub": b'\<KEY>',
"yprv": b'\<KEY>',
"zprv": b'\<KEY>',
"Yprv": b'\<KEY>',
"Zprv": b'\<KEY>',
"ypub": b'\<KEY>',
"zpub": b'\<KEY>',
"Ypub": b'\<KEY>',
"Zpub": b'\<KEY>',
"bip32": const(1776) # coin type for bip32 derivation
},
"elementsregtest": {
"name": "Liquid Regtest",
"wif": b'\xEF',
"p2pkh": b'\x6F',
"p2sh": b'\x4b',
"bp2sh": b'\x04\x4b',
"bech32": "ert",
"blech32": "el",
"xprv": b'\x04\x35\x83\x94',
"xpub": b'\<KEY>',
"yprv": b'\<KEY>',
"zprv": b'\<KEY>',
"Yprv": b'\<KEY>',
"Zprv": b'\<KEY>',
"ypub": b'\<KEY>',
"zpub": b'\<KEY>',
"Ypub": b'\<KEY>',
"Zpub": b'\<KEY>',
"bip32": const(1)
},
}
NETWORKS.update(networks.NETWORKS)
|
11478465
|
import os
from kaa import config
class TestHistory:
def test_history(self):
storage = config.KaaHistoryStorage('')
try:
hist = storage.get_history('hist1')
hist.add('1', 1)
hist.add('2', 2)
hist.add('1', 1)
assert hist.get() == [('1', 1), ('2', 2)]
assert hist.find('1') == 1
storage.flush()
assert hist.get() == [('1', 1), ('2', 2)]
assert hist.find('1') == 1
hist.add('1', 1)
hist.add('3', 3)
assert hist.get() == [('3', 3), ('1', 1), ('2', 2), ]
storage.flush()
assert hist.get() == [('3', 3), ('1', 1), ('2', 2), ]
finally:
storage.close()
def test_histclose(self):
storage = config.KaaHistoryStorage('test.db')
try:
hist = storage.get_history('hist2')
for i in range(config.History.MAX_HISTORY * 3):
hist.add(str(i))
finally:
storage.flush()
storage.close()
storage = config.KaaHistoryStorage('test.db')
try:
hist = storage.get_history('hist2')
assert len(hist.get()) == config.History.MAX_HISTORY
hist.close()
finally:
storage.close()
os.unlink('test.db')
|
11478522
|
for row in range(7):
for col in range(4):
if row==0 or row in {1,2} and col<1 or row in{4,5} and col>2 or row in {3,6} and col<3:
print('*',end=' ')
else:
print(' ',end=' ')
print()
### Method-5
for i in range(6):
for j in range(5):
if i==0 or i==2 and j<4 or j==0 and i<3 or j==4 and i in(3,4) or i==5 and j<4 :
print('*',end=' ')
else:
print(' ',end=' ')
print()
|
11478527
|
from django.conf.urls.defaults import *
import views
urlpatterns = patterns('',
(r'^request_attrs/$', views.request_processor),
)
|
11478539
|
import os
import sys
import hashlib
from trackhub import helpers
def test_example_data_md5s():
data_dir = helpers.data_dir()
data = [i.strip().split() for i in '''
3735b696b3a416a59f8755eaf5664e5a sine-hg38-0.bedgraph.bw
73ad8ba3590d0895810d069599b0e443 sine-hg38-1.bedgraph.bw
85478d1ecc5906405ccb43d1ca426d29 sine-hg38-2.bedgraph.bw
55ac2603c31b232dacfdaba07d8a25eb sine-no1-1000.bedgraph.bw
b8c983862c58fee6afa99382634ab2d8 sine-no1-100.bedgraph.bw
35fa6ac3453e2bbd503c40a1d15afc65 random-hg38-0.bigBed
3c94c294f8376f625f3701dee7641997 random-hg38-1.bigBed
3bed0726e452d677f33979b2ed1c65d6 random-hg38-2.bigBed
72934b760f1f5ee8f99d596536ef8b4c random-no1-0.bigBed
19116a3295e5679cb79ffc8904fa5abe random-no1-1.bigBed
d628cd0d3b91d8426bb9d0f99b39be52 random-no1-2.bigBed
'''.splitlines(False) if not i.strip().startswith('#') and len(i.strip()) > 0]
    # for some reason, only the faToTwoBit output has a different md5 under py27 than under py3.
if sys.version_info[0] == 3:
data.append(('ba2fd8b22bcad65bb6583da937ff5222', 'newOrg1.2bit'))
success = True
for md5, fn in data:
fn = os.path.join(data_dir, fn)
obs = hashlib.md5(open(fn, 'rb').read()).hexdigest()
success = success and (obs == md5)
print(obs, md5, fn)
assert success
|
11478552
|
from chainermn.iterators.multi_node_iterator import create_multi_node_iterator # NOQA
from chainermn.iterators.synchronized_iterator import create_synchronized_iterator # NOQA
|
11478578
|
import attr
@attr.s
class DatasetVersionTagSummary(object):
"""
Dataset version tag summary class
"""
name = attr.ib(type=str, default=None)
@attr.s
class DatasetVersion(object):
"""
Dataset version class
"""
version = attr.ib(type=str, default=None)
message = attr.ib(type=str, default=None)
is_committed = attr.ib(type=bool, default=None)
tags = attr.ib(type=list, factory=list)
# only used for create
dataset_id = attr.ib(type=str, default=None)
@attr.s
class DatasetVersionPreSignedS3Call(object):
"""
Dataset version pre-signed S3 call class
"""
method = attr.ib(type=str, default=None)
params = attr.ib(type=dict, factory=dict)
@attr.s
class DatasetVersionPreSignedURL(object):
"""
Dataset version pre-signed URL class
"""
url = attr.ib(type=str, default=None)
expires_in = attr.ib(type=int, default=None)
|
11478581
|
import click
import mock
import pytest
from click.testing import CliRunner
from sigopt.cli import cli
class TestRunCli(object):
@pytest.mark.parametrize('opt_into_log_collection', [False, True])
@pytest.mark.parametrize('opt_into_cell_tracking', [False, True])
def test_config_command(self, opt_into_log_collection, opt_into_cell_tracking):
runner = CliRunner()
log_collection_arg = '--enable-log-collection' if opt_into_log_collection else '--no-enable-log-collection'
cell_tracking_arg = '--enable-cell-tracking' if opt_into_cell_tracking else '--no-enable-cell-tracking'
with mock.patch('sigopt.cli.commands.config._config.persist_configuration_options') as persist_configuration_options:
result = runner.invoke(cli, [
'config',
'--api-token=some_test_token',
log_collection_arg,
cell_tracking_arg,
])
persist_configuration_options.assert_called_once_with({
        'api_token': 'some_test_token',
'code_tracking_enabled': opt_into_cell_tracking,
'log_collection_enabled': opt_into_log_collection,
})
assert result.exit_code == 0
assert result.output == ''
|
11478642
|
import imageio
import json
import numpy as np
import os
import warnings
from torch.utils import data
from onconet.datasets.factory import RegisterDataset
MP4_LOADING_ERR = "Error loading {}.\n{}"
@RegisterDataset("kinetics")
class Kinetics(data.Dataset):
"""A pytorch Dataset for the Kinetics dataset."""
def __init__(self, args, transformers, split_group):
"""Initializes the dataset.
Constructs a standard pytorch Dataset object which
can be fed into a DataLoader for batching.
Arguments:
args(object): Config.
transformers(list): A list of transformer objects.
split_group(str): The split group ['train'|'dev'|'test'].
"""
super(Kinetics, self).__init__()
args.metadata_path = os.path.join(args.metadata_dir,
self.METADATA_FILENAME)
self.args = args
self.transformers = transformers
self.split_group = split_group
with open(args.metadata_path, 'r') as f:
metadata = json.load(f)
for row in metadata:
row['path'] = os.path.join(args.img_dir, row['path'])
self.dataset = [row for row in metadata if row['split_group'] == split_group]
labels = [row['label'] for row in self.dataset]
labels = sorted(np.unique(labels))
self.label_map = {label: index for index, label in enumerate(labels)}
@staticmethod
def set_args(args):
args.num_classes = 400
args.multi_image = True
args.num_images = 32
args.video = True
@property
def METADATA_FILENAME(self):
return 'metadata.json'
@property
def NUM_FRAMES(self):
return 32
@property
def STRIDE(self):
return 2
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
sample = self.dataset[index]
try:
# Load video
video = imageio.get_reader(sample['path'], 'ffmpeg')
# Determine start of clip randomly where possible
if len(video) <= self.NUM_FRAMES * self.STRIDE:
frame_start = 0
else:
frame_start = np.random.randint(len(video) - self.NUM_FRAMES * self.STRIDE)
# Select frames in clip and loop around to start if necessary
x = [video.get_data((frame_start + i) % len(video)) for i in range(0, self.NUM_FRAMES, self.STRIDE)]
for transformer in self.transformers:
x = transformer(x)
item = {
'x': x,
'y': self.label_map[sample['label']]
}
return item
except Exception as e:
warnings.warn(MP4_LOADING_ERR.format(sample['path'], e))
|
11478685
|
from imap_tools import MailBox
# get size of message and attachments
with MailBox('imap.my.ru').login('acc', 'pwd', 'INBOX') as mailbox:
for msg in mailbox.fetch():
print(msg.date_str, msg.subject)
print('-- RFC822.SIZE message size', msg.size_rfc822)
        print('-- bytes size', msg.size)  # returns only the header size when the message was fetched with headers_only=True
for att in msg.attachments:
print('---- ATT:', att.filename)
print('-- bytes size', att.size)
|
11478693
|
import pytest as pytest
from plenum.common.util import get_utc_epoch
from plenum.server.request_handlers.txn_author_agreement_disable_handler import TxnAuthorAgreementDisableHandler
from plenum.test.req_handler.conftest import taa_request
from storage.kv_in_memory import KeyValueStorageInMemory
from common.serializers.serialization import config_state_serializer
from plenum.common.constants import ROLE, STEWARD, NYM, TARGET_NYM, TXN_TYPE, TXN_AUTHOR_AGREEMENT, \
TXN_AUTHOR_AGREEMENT_TEXT, TXN_AUTHOR_AGREEMENT_VERSION, TRUSTEE, DOMAIN_LEDGER_ID, TXN_AUTHOR_AGREEMENT_DIGEST, \
TXN_AUTHOR_AGREEMENT_RETIREMENT_TS, TXN_AUTHOR_AGREEMENT_RATIFICATION_TS, TXN_METADATA, TXN_METADATA_TIME, \
TXN_AUTHOR_AGREEMENT_DISABLE
from plenum.common.exceptions import UnauthorizedClientRequest, InvalidClientRequest
from plenum.common.request import Request
from plenum.common.txn_util import reqToTxn, get_payload_data, append_txn_metadata
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.static_taa_helper import StaticTAAHelper
from plenum.server.request_handlers.txn_author_agreement_handler import TxnAuthorAgreementHandler
from plenum.server.request_handlers.utils import nym_to_state_key, encode_state_value
from plenum.test.req_handler.helper import update_nym, create_taa_txn, check_taa_in_state
from plenum.test.testing_utils import FakeSomething
from state.pruning_state import PruningState
from state.state import State
@pytest.fixture(scope="function")
def txn_author_agreement_disable_handler(tconf, domain_state, config_state):
data_manager = DatabaseManager()
handler = TxnAuthorAgreementDisableHandler(data_manager)
data_manager.register_new_database(handler.ledger_id,
FakeSomething(),
config_state)
data_manager.register_new_database(DOMAIN_LEDGER_ID,
FakeSomething(),
domain_state)
return handler
@pytest.fixture(scope="function")
def taa_disable_request(tconf, domain_state):
identifier = "identifier"
update_nym(domain_state, identifier, TRUSTEE)
operation = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_DISABLE}
return Request(identifier=identifier,
signature="sign",
operation=operation)
def test_static_validation(txn_author_agreement_disable_handler, taa_disable_request):
txn_author_agreement_disable_handler.static_validation(taa_disable_request)
def test_dynamic_validation(txn_author_agreement_disable_handler, taa_disable_request):
txn_author_agreement_disable_handler.state.set(StaticTAAHelper.state_path_taa_latest(), "{}")
txn_author_agreement_disable_handler.dynamic_validation(taa_disable_request, 0)
def test_dynamic_validation_for_already_disable_taa(txn_author_agreement_disable_handler, taa_disable_request):
with pytest.raises(InvalidClientRequest,
match="Transaction author agreement is already disabled."):
txn_author_agreement_disable_handler.dynamic_validation(taa_disable_request, 0)
def test_update_state(txn_author_agreement_disable_handler,
taa_disable_request, txn_author_agreement_handler, tconf, domain_state, taa_pp_time):
# create TAAs
taa_txns = []
taa_digests = []
taa_state_datas = []
for _ in list(range(5)):
txn, digest, state_data = create_taa_txn(taa_request(tconf, domain_state, taa_pp_time), taa_pp_time)
taa_txns.append(txn)
taa_digests.append(digest)
taa_state_datas.append(state_data)
assert taa_txns
# create a disable txn
disable_seq_no = 1
disable_txn_time = get_utc_epoch()
taa_disable_txn = reqToTxn(taa_disable_request)
append_txn_metadata(taa_disable_txn, disable_seq_no, disable_txn_time)
    # set the TAAs
for index, taa_txn in enumerate(taa_txns):
txn_author_agreement_handler.update_state(taa_txn, None, None)
check_taa_in_state(handler=txn_author_agreement_handler,
digest=taa_digests[index],
version=taa_state_datas[index][0][TXN_AUTHOR_AGREEMENT_VERSION],
state_data=taa_state_datas[index])
assert txn_author_agreement_disable_handler.state.get(
StaticTAAHelper.state_path_taa_latest(), isCommitted=False) == taa_digests[index].encode()
# disable TAAs
txn_author_agreement_disable_handler.update_state(taa_disable_txn, None, None)
assert txn_author_agreement_disable_handler.state.get(
StaticTAAHelper.state_path_taa_latest(), isCommitted=False) is None
    # check that every TAA now carries the retirement timestamp
for index, state_data in enumerate(taa_state_datas):
state_value = state_data[0]
state_value[TXN_AUTHOR_AGREEMENT_RETIREMENT_TS] = disable_txn_time
check_taa_in_state(handler=txn_author_agreement_handler,
digest=taa_digests[index],
version=state_value[TXN_AUTHOR_AGREEMENT_VERSION],
state_data=(state_data[0], disable_seq_no, disable_txn_time))
|
11478805
|
import xmlrpclib
class NoteAPI:
def __init__(self, srv, db, user, pwd):
common = xmlrpclib.ServerProxy('%s/xmlrpc/2/common' % (srv))
self.api = xmlrpclib.ServerProxy('%s/xmlrpc/2/object' % (srv))
self.uid = common.authenticate(db, user, pwd, {})
        self.pwd = pwd
self.db = db
self.model = 'note.note'
def execute(self, method, args, kwargs=None):
return self.api.execute_kw(
self.db, self.uid, self.pwd, self.model,
method, args, kwargs or {})
def get(self, ids=None):
return self.execute('search_read', [ids or [], ['id', 'name']], )
def set(self, text, id=None):
if id:
self.execute('write', [[id], {'name': text}])
else:
id = self.execute('create', [{'name': text, 'user_id': self.uid}])
return id
if __name__ == '__main__':
srv, db = 'http://localhost:8069', 'todo'
user, pwd = '<PASSWORD>', '<PASSWORD>'
api = NoteAPI(srv, db, user, pwd)
from pprint import pprint
pprint(api.get())
|
11478817
|
import FWCore.ParameterSet.Config as cms
import copy
from PhysicsTools.NanoAOD.nanoDQM_cfi import nanoDQM
from PhysicsTools.NanoAOD.nanoDQM_tools_cff import *
from PhysicsTools.NanoAOD.nano_eras_cff import *
## Modify plots accordingly to era
_vplots80X = nanoDQM.vplots.clone()
# Tau plots
_tauPlots80X = cms.VPSet()
for plot in _vplots80X.Tau.plots:
if (plot.name.value().find("MVA")>-1 and plot.name.value().find("2017")>-1) or (plot.name.value().find("AntiEle")>-1 and plot.name.value().find("2018")>-1):
continue
_tauPlots80X.append(plot)
_tauPlots80X.append(Plot1D('idMVAnewDM', 'idMVAnewDM', 64, -0.5, 63.5, 'IsolationMVArun2v1DBnewDMwLT ID working point: bitmask 1 = VLoose, 2 = Loose, 4 = Medium, 8 = Tight, 16 = VTight, 32 = VVTight'))
_tauPlots80X.append(Plot1D('idMVAoldDMdR03', 'idMVAoldDMdR03', 64, -0.5, 63.5, 'IsolationMVArun2v1DBdR03oldDMwLT ID working point: bitmask 1 = VLoose, 2 = Loose, 4 = Medium, 8 = Tight, 16 = VTight, 32 = VVTight'))
_tauPlots80X.append(Plot1D('rawMVAnewDM', 'rawMVAnewDM', 20, -1, 1, 'byIsolationMVArun2v1DBnewDMwLT raw output discriminator'))
_tauPlots80X.append(Plot1D('rawMVAoldDMdR03', 'rawMVAoldDMdR03', 20, -1, 1, 'byIsolationMVArun2v1DBdR03oldDMwLT raw output discriminator'))
_vplots80X.Tau.plots = _tauPlots80X
run2_miniAOD_80XLegacy.toModify(nanoDQM,
vplots = _vplots80X
)
_tauPlotsPreV9 = cms.VPSet()
for plot in nanoDQM.vplots.Tau.plots:
if plot.name.value()!="idDecayModeOldDMs":
_tauPlotsPreV9.append(plot)
_tauPlotsPreV9.extend([
Plot1D('idDecayMode', 'idDecayMode', 2, -0.5, 1.5, "tauID('decayModeFinding')"),
Plot1D('idDecayModeNewDMs', 'idDecayModeNewDMs', 2, -0.5, 1.5, "tauID('decayModeFindingNewDMs')"),
Plot1D('idMVAnewDM2017v2', 'idMVAnewDM2017v2', 128, -0.5, 127.5, 'IsolationMVArun2v1DBnewDMwLT ID working point (2017v2): bitmask 1 = VVLoose, 2 = VLoose, 4 = Loose, 8 = Medium, 16 = Tight, 32 = VTight, 64 = VVTight'),
Plot1D('idMVAoldDM', 'idMVAoldDM', 64, -0.5, 63.5, 'IsolationMVArun2v1DBoldDMwLT ID working point: bitmask 1 = VLoose, 2 = Loose, 4 = Medium, 8 = Tight, 16 = VTight, 32 = VVTight'),
Plot1D('idMVAoldDM2017v1', 'idMVAoldDM2017v1', 128, -0.5, 127.5, 'IsolationMVArun2v1DBoldDMwLT ID working point (2017v1): bitmask 1 = VVLoose, 2 = VLoose, 4 = Loose, 8 = Medium, 16 = Tight, 32 = VTight, 64 = VVTight'),
Plot1D('idMVAoldDM2017v2', 'idMVAoldDM2017v2', 128, -0.5, 127.5, 'IsolationMVArun2v1DBoldDMwLT ID working point (2017v2): bitmask 1 = VVLoose, 2 = VLoose, 4 = Loose, 8 = Medium, 16 = Tight, 32 = VTight, 64 = VVTight'),
    Plot1D('idMVAoldDMdR032017v2', 'idMVAoldDMdR032017v2', 128, -0.5, 127.5, 'IsolationMVArun2v1DBdR03oldDMwLT ID working point (2017v2): bitmask 1 = VVLoose, 2 = VLoose, 4 = Loose, 8 = Medium, 16 = Tight, 32 = VTight, 64 = VVTight'),
Plot1D('rawAntiEle', 'rawAntiEle', 20, -100, 100, 'Anti-electron MVA discriminator V6 raw output discriminator'),
Plot1D('rawAntiEle2018', 'rawAntiEle2018', 20, -100, 100, 'Anti-electron MVA discriminator V6 raw output discriminator (2018)'),
Plot1D('rawAntiEleCat', 'rawAntiEleCat', 17, -1.5, 15.5, 'Anti-electron MVA discriminator V6 category'),
Plot1D('rawAntiEleCat2018', 'rawAntiEleCat2018', 17, -1.5, 15.5, 'Anti-electron MVA discriminator V6 category (2018)'),
Plot1D('rawMVAnewDM2017v2', 'rawMVAnewDM2017v2', 20, -1, 1, 'byIsolationMVArun2v1DBnewDMwLT raw output discriminator (2017v2)'),
Plot1D('rawMVAoldDM', 'rawMVAoldDM', 20, -1, 1, 'byIsolationMVArun2v1DBoldDMwLT raw output discriminator'),
Plot1D('rawMVAoldDM2017v1', 'rawMVAoldDM2017v1', 20, -1, 1, 'byIsolationMVArun2v1DBoldDMwLT raw output discriminator (2017v1)'),
Plot1D('rawMVAoldDM2017v2', 'rawMVAoldDM2017v2', 20, -1, 1, 'byIsolationMVArun2v1DBoldDMwLT raw output discriminator (2017v2)'),
Plot1D('rawMVAoldDMdR032017v2', 'rawMVAoldDMdR032017v2', 20, -1, 1, 'byIsolationMVArun2v1DBdR03oldDMwLT raw output discriminator (2017v2)')
])
(run2_nanoAOD_92X | run2_nanoAOD_94XMiniAODv1 | run2_nanoAOD_94XMiniAODv2 | run2_nanoAOD_94X2016 | run2_nanoAOD_102Xv1 | run2_nanoAOD_106Xv1).toModify(nanoDQM.vplots.Tau, plots = _tauPlotsPreV9)
_METFixEE2017_DQMentry = nanoDQM.vplots.MET.clone()
_METFixEE2017_plots = cms.VPSet()
for plot in _METFixEE2017_DQMentry.plots:
if plot.name.value().find("fiducial")>-1: continue
_METFixEE2017_plots.append(plot)
_METFixEE2017_DQMentry.plots = _METFixEE2017_plots
for modifier in run2_nanoAOD_94XMiniAODv1, run2_nanoAOD_94XMiniAODv2:
modifier.toModify(nanoDQM.vplots, METFixEE2017 = _METFixEE2017_DQMentry)
_Electron_plots_2016 = copy.deepcopy(nanoDQM.vplots.Electron.plots)
_Electron_plots_2016.append(Plot1D('cutBased_HLTPreSel', 'cutBased_HLTPreSel', 2, -0.5, 1.5, 'cut-based HLT pre-selection ID'))
_Electron_plots_2016.append(Plot1D('cutBased_Spring15', 'cutBased_Spring15', 5, -0.5, 4.5, 'cut-based Spring15 ID (0:fail, 1:veto, 2:loose, 3:medium, 4:tight)'))
_Electron_plots_2016.append(Plot1D('mvaSpring16GP', 'mvaSpring16GP', 20, -1, 1, 'MVA Spring16 general-purpose ID score'))
_Electron_plots_2016.append(Plot1D('mvaSpring16GP_WP80', 'mvaSpring16GP_WP80', 2, -0.5, 1.5, 'MVA Spring16 general-purpose ID WP80'))
_Electron_plots_2016.append(Plot1D('mvaSpring16GP_WP90', 'mvaSpring16GP_WP90', 2, -0.5, 1.5, 'MVA Spring16 general-purpose ID WP90'))
_Electron_plots_2016.append(Plot1D('mvaSpring16HZZ', 'mvaSpring16HZZ', 20, -1, 1, 'MVA Spring16 HZZ ID score'))
_Electron_plots_2016.append(Plot1D('mvaSpring16HZZ_WPL', 'mvaSpring16HZZ_WPL', 2, -0.5, 1.5, 'MVA Spring16 HZZ ID loose WP'))
_Electron_plots_2016.append(NoPlot('vidNestedWPBitmapSpring15'))
#putting back the fall17V1 plots for non v9 case
_Electron_plots_withFall17V1 = copy.deepcopy(nanoDQM.vplots.Electron.plots)
_Electron_plots_withFall17V1.append(Plot1D('cutBased_Fall17_V1', 'cutBased_Fall17_V1', 5, -0.5, 4.5, 'cut-based ID Fall17 V1 (0:fail, 1:veto, 2:loose, 3:medium, 4:tight)'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1Iso', 'mvaFall17V1Iso', 20, -1, 1, 'MVA Iso ID V1 score'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1Iso_WP80', 'mvaFall17V1Iso_WP80', 2, -0.5, 1.5, 'MVA Iso ID V1 WP80'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1Iso_WP90', 'mvaFall17V1Iso_WP90', 2, -0.5, 1.5, 'MVA Iso ID V1 WP90'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1Iso_WPL', 'mvaFall17V1Iso_WPL', 2, -0.5, 1.5, 'MVA Iso ID V1 loose WP'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1noIso', 'mvaFall17V1noIso', 20, -1, 1, 'MVA noIso ID V1 score'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1noIso_WP80', 'mvaFall17V1noIso_WP80', 2, -0.5, 1.5, 'MVA noIso ID V1 WP80'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1noIso_WP90', 'mvaFall17V1noIso_WP90', 2, -0.5, 1.5, 'MVA noIso ID V1 WP90'))
_Electron_plots_withFall17V1.append(Plot1D('mvaFall17V1noIso_WPL', 'mvaFall17V1noIso_WPL', 2, -0.5, 1.5, 'MVA noIso ID V1 loose WP'))
_Photon_plots_2016 = copy.deepcopy(nanoDQM.vplots.Photon.plots)
_Photon_plots_2016.append(Plot1D('cutBased', 'cutBased', 4, -0.5, 3.5, 'cut-based Spring16-V2p2 ID (0:fail, 1:loose, 2:medium, 3:tight)'))
_Photon_plots_2016.append(Plot1D('cutBased17Bitmap', 'cutBased17Bitmap', 8, -0.5, 7.5, 'cut-based Fall17-94X-V1 ID bitmap, 2^(0:loose, 1:medium, 2:tight)'))
_Photon_plots_2016.append(Plot1D('mvaID17', 'mvaID17', 20, -1, 1, 'MVA Fall17v1p1 ID score'))
_Photon_plots_2016.append(Plot1D('mvaID17_WP80', 'mvaID17_WP80', 2, -0.5, 1.5, 'MVA Fall17v1p1 ID WP80'))
_Photon_plots_2016.append(Plot1D('mvaID17_WP90', 'mvaID17_WP90', 2, -0.5, 1.5, 'MVA Fall17v1p1 ID WP90'))
_FatJet_plots_80x = copy.deepcopy(nanoDQM.vplots.FatJet.plots)
_FatJet_plots_80x.append(Plot1D('msoftdrop_chs', 'msoftdrop_chs', 20, -300, 300, 'Legacy uncorrected soft drop mass with CHS'))
_Flag_plots_80x = copy.deepcopy(nanoDQM.vplots.Flag.plots)
_Flag_plots_80x.append(Plot1D('BadGlobalMuon', 'BadGlobalMuon', 2, -0.5, 1.5, 'Bad muon flag'))
_Flag_plots_80x.append(Plot1D('CloneGlobalMuon', 'CloneGlobalMuon', 2, -0.5, 1.5, 'Clone muon flag'))
for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016:
modifier.toModify(nanoDQM.vplots.Electron, plots = _Electron_plots_2016)
modifier.toModify(nanoDQM.vplots.Photon, plots = _Photon_plots_2016)
run2_miniAOD_80XLegacy.toModify(nanoDQM.vplots.FatJet, plots = _FatJet_plots_80x)
run2_miniAOD_80XLegacy.toModify(nanoDQM.vplots.Flag, plots = _Flag_plots_80x)
(run2_nanoAOD_92X | run2_nanoAOD_94XMiniAODv1 | run2_nanoAOD_94XMiniAODv2 | run2_nanoAOD_94X2016 | run2_nanoAOD_102Xv1).toModify(nanoDQM.vplots.Electron, plots=_Electron_plots_withFall17V1)
run2_miniAOD_80XLegacy.toModify(nanoDQM.vplots, IsoTrack = None)
## MC
nanoDQMMC = nanoDQM.clone()
nanoDQMMC.vplots.Electron.sels.Prompt = cms.string("genPartFlav == 1")
nanoDQMMC.vplots.LowPtElectron.sels.Prompt = cms.string("genPartFlav == 1")
nanoDQMMC.vplots.Muon.sels.Prompt = cms.string("genPartFlav == 1")
nanoDQMMC.vplots.Photon.sels.Prompt = cms.string("genPartFlav == 1")
nanoDQMMC.vplots.Tau.sels.Prompt = cms.string("genPartFlav == 5")
nanoDQMMC.vplots.Jet.sels.Prompt = cms.string("genJetIdx != 1")
nanoDQMMC.vplots.Jet.sels.PromptB = cms.string("genJetIdx != 1 && hadronFlavour == 5")
from DQMServices.Core.DQMQualityTester import DQMQualityTester
nanoDQMQTester = DQMQualityTester(
qtList = cms.untracked.FileInPath('PhysicsTools/NanoAOD/test/dqmQualityTests.xml'),
prescaleFactor = cms.untracked.int32(1),
testInEventloop = cms.untracked.bool(False),
qtestOnEndLumi = cms.untracked.bool(False),
verboseQT = cms.untracked.bool(True)
)
nanoHarvest = cms.Sequence( nanoDQMQTester )
|
11478895
|
def remove(text, what):
result = []
for a in text:
try:
if what[a] >= 1:
what[a] -= 1
continue
except KeyError:
pass
result.append(a)
return ''.join(result)
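# For example, remove("banana", {"a": 2}) skips the first two occurrences of "a"
# and returns "bnna"; note that the counts in `what` are consumed in place.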
|
11478901
|
from kts.modelling.mixins import RegressorMixin, NormalizeFillNAMixin
from kts.models.common import XGBMixin, LGBMMixin, CatBoostMixin, all_estimators, BLACKLISTED_PARAMS
__all__ = []
class XGBRegressor(RegressorMixin, XGBMixin): pass
class LGBMRegressor(RegressorMixin, LGBMMixin): pass
class CatBoostRegressor(RegressorMixin, CatBoostMixin): pass
__all__.extend(['XGBRegressor', 'LGBMRegressor', 'CatBoostRegressor'])
for name, estimator in all_estimators(type_filter='regressor'):
globals()[name] = type(name,
(RegressorMixin, estimator, NormalizeFillNAMixin),
{'ignored_params': BLACKLISTED_PARAMS})
__all__.append(name)
del name
del estimator
del all_estimators
|
11478929
|
import heapq
class MedianFinder(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.leftHeap = []
self.rightHeap = []
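        # leftHeap is a max-heap (values stored negated) holding the lower half,
        # rightHeap is a min-heap holding the upper half of the numbers seen so far.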
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
if len(self.leftHeap) <= len(self.rightHeap):
heapq.heappush(self.leftHeap, -num)
else:
heapq.heappush(self.rightHeap, num)
if len(self.rightHeap) > 0:
left = -self.leftHeap[0]
right = self.rightHeap[0]
if left > right:
heapq.heappop(self.leftHeap)
heapq.heappop(self.rightHeap)
heapq.heappush(self.leftHeap, -right)
heapq.heappush(self.rightHeap, left)
def findMedian(self):
"""
:rtype: float
"""
if len(self.leftHeap) > len(self.rightHeap):
return -self.leftHeap[0]
else:
return (-self.leftHeap[0] + self.rightHeap[0]) / 2.0
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
|
11478973
|
import argparse
import collections
import csv
import itertools
import json
import pathlib
from typing import Dict, List, NamedTuple
import matplotlib.pyplot as plt # type: ignore
def main() -> None:
args = parse_args()
files = [
"kite-go/navigation/recommend/recommend.go",
"kite-go/client/internal/kitelocal/internal/navigation/manager.go",
"kite-go/lang/language.go",
"kite-go/client/internal/kitelocal/internal/completions/lexical.go",
"kite-go/lang/lexical/lexicalcomplete/lexicalproviders/Data_inputs.go",
"kite-go/lang/python/pythoncomplete/driver/mixing.go",
"kite-go/lang/python/pythondocs/index.go",
]
dirs = [
"",
"kite-go/client/internal",
"kite-go",
"kite-go/lang/python",
"kite-golib",
"kite-golib/lexicalv0",
"kite-python",
]
commits = Analyzer(
args.commits_retrieved_path,
args.relevant_path,
args.max_tests,
)
commits_histogram = commits.histogram()
text = Analyzer(
args.text_retrieved_path,
args.relevant_path,
args.max_tests,
)
text_histogram = text.histogram()
assert len(commits.tests) == len(text.tests)
plot_histograms(
args.histogram_path,
commits_histogram,
text_histogram,
args.max_tests,
len(commits.tests),
)
markdown = to_markdown(
args.histogram_path,
[commits.query_file(f) for f in files],
[text.query_file(f) for f in files],
[commits.query_directory(d) for d in dirs],
)
write(args.results_path, markdown)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--max_tests", type=int)
parser.add_argument("--commits_retrieved_path", type=str)
parser.add_argument("--text_retrieved_path", type=str)
parser.add_argument("--relevant_path", type=str)
parser.add_argument("--histogram_path", type=str)
parser.add_argument("--results_path", type=str)
return parser.parse_args()
class RetrievedResult(NamedTuple):
path: str
weight: float
idx: int
coverage: float
class MissingResult(NamedTuple):
path: str
weight: float
class FileResult(NamedTuple):
path: str
retrieved: List[RetrievedResult]
missing: List[MissingResult]
class DirResult(NamedTuple):
path: str
coverage: float
num_files: int
class Test(NamedTuple):
path: str
rank: int
class Sample(NamedTuple):
path: str
test: Test
class Histogram:
def __init__(self, size: int) -> None:
self.pdf = [0. for _ in range(size)]
self.unranked = 0.
def cdf(self) -> List[float]:
acc = list(itertools.accumulate([0.] + self.pdf))
total = acc[-1] + self.unranked
return [100. * a / total for a in acc]
def add_ranked(self, rank: int, value: float) -> None:
self.pdf[rank] += value
def add_unranked(self, value: float) -> None:
self.unranked += value
def plot_histograms(
path: str,
commits: Histogram,
text: Histogram,
xmax: int,
total_tests: int,
) -> None:
plt.style.use("ggplot")
fig, ax = plt.subplots(figsize=(7, 7))
cdfs = [commits.cdf(), text.cdf()]
labels = ["With commits", "Text only"]
for cdf, label in zip(cdfs, labels):
ax.plot(
list(range(len(cdf) + 1)),
[0.0] + cdf,
label=label,
)
ax.set_title("Cumulative distribution")
ax.set_xlim(0, xmax)
ax.set_ylim(0, 100)
ax.set_xlabel(f"test rank (out of {total_tests} test files)")
ax.set_ylabel("percent")
ax.legend(loc="lower right")
fig.savefig(path)
class Analyzer:
def __init__(
self,
retrieved_path: str,
relevant_path: str,
max_tests: int,
) -> None:
self.max_tests = max_tests
self.data: List[Sample] = []
self.tests = set()
with open(retrieved_path, "r") as fp:
reader = csv.reader(fp)
batches = itertools.groupby(reader, key=lambda x: x[0])
for base, batch in batches:
if is_test(base):
continue
recs = [rec for _, rec, _ in batch]
for test, idx in find_tests(recs):
self.data.append(Sample(base, Test(test, idx)))
self.tests.add(test)
with open(relevant_path, "r") as fp:
self.relevant: Dict[str, Dict[str, float]] = json.load(fp)
def histogram(self) -> Histogram:
histogram = Histogram(len(self.tests))
for path, weighted_tests in self.relevant.items():
result = self.query_file(path)
ranks = {r.path: i for i, r in enumerate(result.retrieved)}
for test, weight in weighted_tests.items():
if test in ranks:
histogram.add_ranked(ranks[test], weight)
else:
histogram.add_unranked(weight)
return histogram
def query_file(self, path: str) -> FileResult:
retrieved = [
RetrievedResult(
path=test.path,
weight=self.get_relevant(base, test.path),
idx=test.rank,
coverage=appraise(test.rank),
)
for base, test in self.data
if base == path
][:self.max_tests]
retrieved_tests = {r.path for r in retrieved}
missing = [
MissingResult(path=test, weight=weight)
for test, weight in self.relevant.get(path, {}).items()
if test not in retrieved_tests
]
return FileResult(path, retrieved, missing)
def query_directory(self, path: str) -> List[DirResult]:
if path != "" and not path.endswith("/"):
path += "/"
coverages: Dict[str, float] = collections.defaultdict(float)
files = collections.defaultdict(set)
depth = len(path.split("/"))
for base, test in self.data:
if not base.startswith(path):
continue
parts = base.split("/")
if len(parts) == depth:
continue
group = "/".join(parts[:depth])
coverages[group] += appraise(test.rank)
files[group].add(base)
dirs = [
DirResult(
path=g,
coverage=coverages[g] / len(files[g]),
num_files=len(files[g]),
)
for g in coverages
]
return sorted(dirs, key=lambda d: d.coverage, reverse=True)
def get_relevant(self, base: str, test: str) -> float:
if base not in self.relevant or test not in self.relevant[base]:
return 0
return self.relevant[base][test]
def find_tests(paths: List[str]) -> List[Test]:
recs = []
for idx, rec in enumerate(paths):
if is_test(rec):
recs.append(Test(rec, idx))
return recs or [Test("", -1)]
def is_test(path: str) -> bool:
return "test" in path
def appraise(idx: int) -> float:
if idx == -1:
return 0
return 2. ** -idx
def to_markdown(
histogram_path: str,
commits_files: List[FileResult],
text_files: List[FileResult],
directories: List[List[DirResult]],
) -> str:
return "\n\n".join([
markdown_histograms(histogram_path),
markdown_files("Files using commits", commits_files),
markdown_files("Files using text only", text_files),
markdown_directories(directories),
])
def markdown_histograms(histogram_path: str) -> str:
return "\n".join([
"# Histograms",
"",
"Note validation data leaks into training data when using commits.",
"",
f"",
])
def markdown_files(
group_title: str,
files: List[FileResult],
) -> str:
lines = [f"# {group_title}"]
for f in files:
total_coverage = sum(r.coverage for r in f.retrieved)
lines += [
"",
f"## {fmt_path(f.path)}",
"",
f"Coverage: {fmt_float(total_coverage)}",
]
lines += [
"",
"Retrieved:",
"",
"|Test rank|Total rank|Coverage|Test|Weighted Hits|",
"|-|-|-|-|-|",
] + [
fmt_retrieved(i, r)
for i, r in enumerate(f.retrieved)
]
if not f.missing:
continue
lines += [
"",
"Relevant but not retrieved:",
"",
"|Test|Weighted Hits|",
"|-|-|",
] + [
f"|{r.path}|{r.weight}|"
for r in f.missing
]
return "\n".join(lines)
def markdown_directories(dirs: List[List[DirResult]]) -> str:
lines = [
"",
"# Directories",
]
for d in dirs:
lines += [
"",
"|Directory|Coverage|Number of files|",
"|-|-|-|",
]
lines += [
f"|{fmt_path(r.path)}|{fmt_float(r.coverage)}|{r.num_files}|"
for r in d
if r.num_files > 1
]
return "\n".join(lines)
def fmt_path(path: str) -> str:
github = "https://github.com/kiteco/kiteco/blob/master"
return f"[`{path}`]({github}/{path})"
def fmt_float(num: float) -> str:
return "{:.6f}".format(num)
def fmt_retrieved(i: int, r: RetrievedResult) -> str:
coverage = fmt_float(r.coverage)
path = fmt_path(r.path)
return f"|{i}|{r.idx}|{coverage}|{path}|{r.weight}|"
def write(path: str, text: str) -> None:
with open(path, "w") as fp:
fp.write(text)
if __name__ == "__main__":
main()
|
11478984
|
import matplotlib.pyplot as plt
import numpy as np
x = np.array([1, 2, 3, 4], dtype=np.uint8)
y = x**2 + 1
plt.plot(x, y)
y = x + 1
plt.plot(x, y)
plt.title('Graph')
plt.xlabel('X-Axis')
plt.ylabel('Y-Axis')
plt.grid('on')
plt.savefig('test1.png', dpi=300, bbox_inches='tight')
plt.show()
|
11478990
|
from qm.QuantumMachinesManager import QuantumMachinesManager
from qm.qua import *
from configuration import config
from qm import LoopbackInterface
from qm import SimulationConfig
from random import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
from scipy import signal
nSamples = 100
samplingRate = 10e6
pulseDuration = nSamples / samplingRate
pulseDuration_ns = pulseDuration / 1e-9
t = np.linspace(0, pulseDuration, nSamples)
freqs = np.linspace(1, 4, 15).tolist()
phases = np.zeros_like(freqs).tolist()
amps = np.ones_like(phases).tolist()
m = np.sum(
list(
map(
lambda a: a[2] * np.sin(2 * pi * a[0] * 1e6 * t + a[1]),
zip(freqs, phases, amps),
)
),
0,
)
m = m / max(m) / 2
m = m.tolist()
mc = signal.hilbert(m)
wf1 = np.real(mc)
wf2 = np.imag(mc)
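# The real and imaginary parts of the analytic (Hilbert) signal are used as the
# I and Q waveforms, which is what suppresses one sideband after IQ mixing.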
config["pulses"]["ssbPulse"]["length"] = len(wf1) * (1e9 / samplingRate)
config["waveforms"]["wf1"]["samples"] = wf1
config["waveforms"]["wf1"]["sampling_rate"] = samplingRate
config["waveforms"]["wf2"]["samples"] = wf2
config["waveforms"]["wf2"]["sampling_rate"] = samplingRate
# Open communication with the server.
QMm = QuantumMachinesManager()
# Create a quantum machine based on the configuration.
QM = QMm.open_qm(config)
with program() as prog:
play("ssb", "ssbElement")
job = QM.simulate(prog, SimulationConfig(int(1e5)))
res = job.result_handles
samples = job.get_simulated_samples()
out_vector = samples.con1.analog["1"]
f, Pxx_den = signal.periodogram(out_vector, 1e9)
plt.figure()
[plt.axvline(x=f + 50, color="k", linestyle="--") for f in freqs]
plt.semilogy(f / 1e6, Pxx_den)
plt.xlabel("Freq [MHz]")
plt.ylim([1e-15, 1e-8])
plt.xlim([40, 60])
plt.title("Single-sideband modulated signal")
plt.grid(True, which="both")
plt.show()
|
11479000
|
from paymentwall.base import Paymentwall
from paymentwall.product import Product
from paymentwall.widget import Widget
from paymentwall.pingback import Pingback
|
11479012
|
from typing import List, Optional
from spacy.language import Language
from spacy.tokens import Doc, Span
from edsnlp.matchers.phrase import EDSPhraseMatcher
from edsnlp.matchers.regex import RegexMatcher
from edsnlp.matchers.utils import Patterns
from edsnlp.pipelines.base import BaseComponent
from edsnlp.utils.filter import filter_spans
class GenericMatcher(BaseComponent):
"""
Provides a generic matcher component.
Parameters
----------
nlp : Language
The spaCy object.
terms : Optional[Patterns]
A dictionary of terms.
regex : Optional[Patterns]
A dictionary of regular expressions.
attr : str
The default attribute to use for matching.
        Can be overridden using the `terms` and `regex` configurations.
filter_matches : bool
Whether to filter out matches.
on_ents_only : bool
        Whether to look for matches around pre-extracted entities only.
ignore_excluded : bool
Whether to skip excluded tokens (requires an upstream
pipeline to mark excluded tokens).
"""
def __init__(
self,
nlp: Language,
terms: Optional[Patterns],
regex: Optional[Patterns],
attr: str,
ignore_excluded: bool,
):
self.nlp = nlp
self.attr = attr
self.phrase_matcher = EDSPhraseMatcher(
self.nlp.vocab,
attr=attr,
ignore_excluded=ignore_excluded,
)
self.regex_matcher = RegexMatcher(
attr=attr,
ignore_excluded=ignore_excluded,
)
self.phrase_matcher.build_patterns(nlp=nlp, terms=terms)
self.regex_matcher.build_patterns(regex=regex)
self.set_extensions()
def process(self, doc: Doc) -> List[Span]:
"""
Find matching spans in doc.
Parameters
----------
doc:
spaCy Doc object.
Returns
-------
spans:
List of Spans returned by the matchers.
"""
matches = self.phrase_matcher(doc, as_spans=True)
regex_matches = self.regex_matcher(doc, as_spans=True)
spans = list(matches) + list(regex_matches)
return spans
def __call__(self, doc: Doc) -> Doc:
"""
Adds spans to document.
Parameters
----------
doc:
spaCy Doc object
Returns
-------
doc:
spaCy Doc object, annotated for extracted terms.
"""
matches = self.process(doc)
for span in matches:
if span.label_ not in doc.spans:
doc.spans[span.label_] = []
doc.spans[span.label_].append(span)
ents, discarded = filter_spans(list(doc.ents) + matches, return_discarded=True)
doc.ents = ents
if "discarded" not in doc.spans:
doc.spans["discarded"] = []
doc.spans["discarded"].extend(discarded)
return doc
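# A minimal usage sketch (assuming a blank spaCy pipeline; the patterns and the
# "LOWER" attribute below are purely illustrative):
#
#     import spacy
#     nlp = spacy.blank("fr")
#     matcher = GenericMatcher(
#         nlp,
#         terms={"covid": ["covid", "coronavirus"]},
#         regex={"covid": [r"covid[-\s]?19"]},
#         attr="LOWER",
#         ignore_excluded=False,
#     )
#     doc = matcher(nlp("Le patient a le covid 19."))
#     doc.ents, doc.spans["covid"]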
|
11479029
|
class IsBest(object):
def __init__(self):
"""
        This class checks whether a given value is the best seen so far
"""
self.val = None
def is_best(self, val) -> bool:
"""
        This function reports whether the given value is the best so far and updates the stored best value.
:param val: The current value
        :return: True if the current value is the best so far, else False
"""
if self.val is None or (val > self.val):
self.val = val
print("Updating Best")
return True
else:
return False
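# For example:
#     best = IsBest()
#     best.is_best(1)  # True  (first value is always the best)
#     best.is_best(3)  # True  (3 > 1)
#     best.is_best(2)  # False (2 <= 3)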
|
11479032
|
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from .pytorch_modules import SharedMLP
from .pu_utils import square_distance, index_points, farthest_point_sample, \
QueryAndGroup, GroupAll
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
self.pool_method = 'max_pool'
    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None,
                npoint=None, new_xyz=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the features
:param new_xyz:
:return:
new_xyz: (B, npoint, 3) tensor of the new features' xyz
new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
if npoint is not None:
self.npoint = npoint
new_features_list = []
# xyz_flipped = xyz.transpose(1, 2).contiguous()
if new_xyz is None:
new_xyz = index_points(
# xyz_flipped,
xyz,
farthest_point_sample(xyz, self.npoint)
) if self.npoint is not None else None # [B, N, C]
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features) # (B, C, npoint, nsample)
# (B, mlp[-1], npoint, nsample)
new_features = self.mlps[i](new_features)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
else:
raise NotImplementedError
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
"""Pointnet set abstraction layer with multiscale grouping"""
def __init__(self, *, npoint: int, radii: List[float],
nsamples: List[int], mlps: List[List[int]],
bn: bool = True, use_xyz: bool = True, use_res=False,
pool_method='max_pool', instance_norm=False):
"""
:param npoint: int
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super(PointnetSAModuleMSG, self).__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None else GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
if use_res:
raise NotImplementedError
else:
self.mlps.append(
SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm)
)
self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
"""Pointnet set abstraction layer"""
def __init__(self, *, mlp: List[int], npoint: int = None,
radius: float = None, nsample: int = None, bn: bool = True,
use_xyz: bool = True, use_res=False,
pool_method='max_pool', instance_norm=False):
"""
:param mlp: list of int, spec of the pointnet before the global max_pool
:param npoint: int, number of features
:param radius: float, radius of ball
:param nsample: int, number of samples in the ball query
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super(PointnetSAModule, self).__init__(
mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[
nsample], bn=bn, use_xyz=use_xyz, use_res=use_res,
pool_method=pool_method, instance_norm=instance_norm
)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another"""
def __init__(self, *, mlp: List[int], bn: bool = True):
"""
:param mlp: list of int
:param bn: whether to use batchnorm
"""
super(PointnetFPModule, self).__init__()
self.mlp = SharedMLP(mlp, bn=bn)
def forward(self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor) -> torch.Tensor:
"""
:param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
:param known: (B, m, 3) tensor of the xyz positions of the known features
:param unknow_feats: (B, C1, n) tensor of the features to be propagated to
        :param known_feats: (B, C2, m) tensor of features to be propagated
:return:
new_features: (B, mlp[-1], n) tensor of the features of the unknown features
"""
known_feats = known_feats.permute(0, 2, 1) # [B, m, C2]
B, N, C = unknown.shape
_, S, _ = known.shape
if S == 1:
interpolated_feats = known_feats.repeat(1, N, 1)
else:
dists = square_distance(unknown, known)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
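            # inverse-distance weighting over the 3 nearest known points,
            # normalised so the weights sum to one for every unknown point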
weight = 1.0 / (dists + 1e-8) # [B, N, 3]
weight = weight / \
torch.sum(weight, dim=-1).view(B, N, 1) # [B, N, 3]
interpolated_feats = torch.sum(index_points(
known_feats, idx) * weight.view(B, N, 3, 1), dim=2) # [B, N, C2]
if unknow_feats is not None:
unknow_feats = unknow_feats.permute(0, 2, 1) # [B, n, C1]
new_feats = torch.cat([
unknow_feats, interpolated_feats
], dim=-1) # [B, n, C1 + C2]
else:
new_feats = interpolated_feats
new_feats = new_feats.permute(0, 2, 1) # [B, C1 + C2, n]
new_feats = new_feats.unsqueeze(-1)
new_features = self.mlp(new_feats)
return new_features.squeeze(-1)
|
11479034
|
from init_helpers import *
from image_helpers import *
from pointcloud_helpers import *
from msg_helpers import *
from threading_helpers import *
from geometry_helpers import *
from rviz_helpers import *
from cv_debug import *
from bag_crawler import *
from plotter import *
|
11479040
|
import urllib2
import json
import os
import os.path
import datetime
import time
from os.path import expanduser
def getName(urlname):
i = urlname.find("_EN")
i -= 1
s1 = ""
while (urlname[i] != '/') :
s1 += urlname[i]
i -= 1
s1 = s1[::-1] + '.jpg'
return s1
market = "en-US"
resolution = "1920x1080"
wallpaperDirectory = expanduser("~")+'/Pictures/Wallpapers/'
i = 1
while (i == 1) :
try :
urllib2.urlopen("http://google.com")
except urllib2.URLError:
time.sleep(5)
print("Hello")
else :
i = 0
response = urllib2.urlopen("http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=" + market)
obj = json.load(response)
url = obj['images'][0]['urlbase']
wallpaperName = getName(url)
print("The name of wallpaper is %s" %(wallpaperName))
url = 'http://www.bing.com' + url + '_' + resolution + '.jpg'
print(url)
if not os.path.exists(wallpaperDirectory) :
os.makedirs(wallpaperDirectory)
path = wallpaperDirectory + wallpaperName
print("Downloading Bing Wallpaper to %s" %(path))
f = open(path,"w")
bingpic = urllib2.urlopen(url)
f.write(bingpic.read())
s1 = "/usr/bin/gsettings set org.gnome.desktop.background picture-uri file:///home/fbd/Pictures/Wallpapers/" + wallpaperName
os.system(s1)
|