| id | content |
|---|---|
101659
|
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--dataset_path",
    default=None,
    type=str,
    required=True,
    help="Path to the [dev, test] dataset",
)
parser.add_argument(
    "--index_path",
    default=None,
    type=str,
    required=True,
    help="Path to the indexes of contexts",
)
parser.add_argument(
    "--model_name_or_path",
    default=None,
    type=str,
    required=True,
    help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--output",
    default=None,
    type=str,
    required=True,
    help="The output file where the run results will be written",
)
parser.add_argument(
    "--language",
    default="en",
    type=str,
    help="The language of the task",
)
parser.add_argument(
    "--topk",
    default=10,
    type=int,
    help="The number of contexts retrieved for a question",
)
args = parser.parse_args()
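# Illustrative invocation (script name and paths are hypothetical, not from the source):
#   python retrieve.py --dataset_path data/dev.json --index_path indexes/ \
#       --model_name_or_path some-org/some-dense-retriever --output runs/dev.run --topk 10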
|
101665
|
import glob
import os
import re

import albumentations as A
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import tqdm
from PIL import Image
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms

from src.dataset import ClassificationDataset
from src.display import display_inference_result

# resnet18
# model = torchvision.models.resnet18(pretrained=True)
# num_ftrs = model.fc.in_features
# model.fc = nn.Sequential(
#     nn.Linear(num_ftrs, 500),
#     # when using CrossEntropyLoss(), the output from the NN should be logit values for each class
#     # nn.Linear(500, 1)
#     # when using BCEWithLogitsLoss(), the output from the NN should be the logit value for the True label
#     nn.Linear(500, 7)
# )

# densenet121
model = torchvision.models.densenet121(pretrained=True)
num_ftrs = model.classifier.in_features
model.classifier = nn.Sequential(
    nn.Linear(num_ftrs, 500),
    # when using CrossEntropyLoss(), the output from the NN should be logit values for each class
    # nn.Linear(500, 1)
    # when using BCEWithLogitsLoss(), the output from the NN should be the logit value for the True label
    nn.Linear(500, 7),
)
model.load_state_dict(torch.load('models/hand-cricket-model2.pth', map_location=torch.device('cpu')))
model.eval()
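# A minimal sketch (not from the source) of how the two loss conventions noted in
# the comments above would be wired up; shapes are assumed: images (N, 3, 128, 128), labels (N,).
# criterion = nn.CrossEntropyLoss()            # expects (N, 7) class logits
# loss = criterion(model(images), labels)
# probs = torch.softmax(model(images), dim=1)  # logits -> per-class probabilities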
files = ['input/0_9.jpg', 'input/1_50.jpg', 'input/2_83.jpg', 'input/3_100.jpg', 'input/4_140.jpg']
files = glob.glob('/home/abhinavnayak11/Pictures/Webcam/*')  # 1, 2 -- overrides the hard-coded list above

if __name__ == '__main__':
    valid_transform = A.Compose([
        A.Resize(128, 128),
        A.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225),
            max_pixel_value=255.0,
            always_apply=True,
        ),
    ])
    # valid_images = ['test-images/depositphotos_2209844-stock-photo-hand-is-counting-number-2.jpg']
    valid_images = files
    batch = len(valid_images)
    valid_targets = [-1] * batch
    valid_data = ClassificationDataset(valid_images, valid_targets, augmentations=valid_transform)
    validloader = DataLoader(valid_data, batch_size=batch, shuffle=True, num_workers=2)
    with torch.no_grad():
        for samples, targets in validloader:
            outputs = model(samples)
            predictions = torch.argmax(outputs, dim=1)
            display_inference_result(samples, predictions, outputs, denorm=True)
|
101750
|
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest import TestCase
from zkviz import zkviz
class TestListZettels(TestCase):
    def test_list_zettels_with_md_extension(self):
        # Create a temporary folder and write files in it
        with TemporaryDirectory() as tmpdirname:
            ext = ".md"
            basename = "201906242157"
            filepaths = []
            for i in range(3):
                path = Path(tmpdirname, basename + str(i) + ext)
                path.touch()
                filepaths.append(str(path))
            files_found = zkviz.list_zettels(tmpdirname)
            self.assertEqual(filepaths, files_found)

    def test_list_zettels_with_txt_extension(self):
        # Create a temporary folder and write files in it
        with TemporaryDirectory() as tmpdirname:
            ext = ".txt"
            basename = "201906242157"
            filepaths = []
            for i in range(3):
                path = Path(tmpdirname, basename + str(i) + ext)
                path.touch()
                filepaths.append(str(path))
            files_found = zkviz.list_zettels(tmpdirname, "*.txt")
            self.assertEqual(filepaths, files_found)

    def test_list_zettels_with_mixed_extensions(self):
        # Create a temporary folder and write files in it
        with TemporaryDirectory() as tmpdirname:
            filepaths = []
            basename = "201906242157"
            ext = ".txt"
            for i in range(5):
                path = Path(tmpdirname, basename + str(i) + ext)
                path.touch()
                filepaths.append(str(path))
            ext = ".md"
            for i in range(5, 10):
                path = Path(tmpdirname, basename + str(i) + ext)
                path.touch()
                filepaths.append(str(path))
            files_found = zkviz.list_zettels(tmpdirname, "*.txt|*.md")
            self.assertEqual(filepaths, files_found)


class TestParseArgs(TestCase):
    def test_default_extension(self):
        args = zkviz.parse_args("")
        self.assertEqual(["*.md"], args.pattern)

    def test_overwrite_extension(self):
        args = zkviz.parse_args(["--pattern", "*.txt"])
        self.assertEqual(["*.txt"], args.pattern)

    def test_multiple_extensions(self):
        args = zkviz.parse_args(["--pattern", "*.txt", "--pattern", "*.md"])
        self.assertEqual(["*.txt", "*.md"], args.pattern)
|
101775
|
import dataclasses
from types import MethodType
from typing import (  # type: ignore
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Type,
    _TypedDictMeta,
)

from dictdaora import DictDaora

from .decorator import jsondaora
from .exceptions import DeserializationError


class StringField(DictDaora):
    max_length: int
    min_length: int
    value: str
    validate: Callable[[Type['StringField'], str], None]

    def __init__(self, value: Any):
        self.value = value.decode() if isinstance(value, bytes) else str(value)
        type(self).validate(self.value)  # type: ignore

    def __init_subclass__(
        cls, min_length: Optional[int] = None, max_length: Optional[int] = None
    ) -> None:
        if max_length is None and min_length is not None:
            cls.validate = MethodType(validate_min_length, cls)
        elif max_length is not None and min_length is None:
            cls.validate = MethodType(validate_max_length, cls)
        elif max_length is not None and min_length is not None:
            cls.validate = MethodType(validate_min_length_max_length, cls)
        elif min_length is None and max_length is None:
            cls.validate = MethodType(lambda c, v: v, cls)

        if min_length is not None:
            cls.min_length = min_length
        if max_length is not None:
            cls.max_length = max_length


class IntegerField(DictDaora):
    maximum: int
    minimum: int
    value: int
    validate: Callable[[Type['IntegerField'], int], None]

    def __init__(self, value: Any):
        self.value = int(value)
        type(self).validate(self.value)  # type: ignore

    def __init_subclass__(
        cls, minimum: Optional[int] = None, maximum: Optional[int] = None
    ) -> None:
        if maximum is None and minimum is not None:
            cls.validate = MethodType(validate_minimum, cls)
        elif maximum is not None and minimum is None:
            cls.validate = MethodType(validate_maximum, cls)
        elif maximum is not None and minimum is not None:
            cls.validate = MethodType(validate_minimum_maximum, cls)
        elif maximum is None and minimum is None:
            cls.validate = MethodType(lambda c, v: v, cls)

        if minimum is not None:
            cls.minimum = minimum
        if maximum is not None:
            cls.maximum = maximum


def validate_minimum(cls: Type[IntegerField], value: int) -> None:
    if not cls.minimum <= value:
        raise DeserializationError(
            f'Invalid minimum integer value: {value} < {cls.minimum}'
        )


def validate_maximum(cls: Type[IntegerField], value: int) -> None:
    if not cls.maximum >= value:
        raise DeserializationError(
            f'Invalid maximum integer value: {cls.maximum} < {value}'
        )


def validate_minimum_maximum(cls: Type[IntegerField], value: int) -> None:
    if not cls.minimum <= value <= cls.maximum:
        raise DeserializationError(
            f'Invalid minimum and maximum integer value: '
            f'expected {cls.minimum} <= {value} <= {cls.maximum}'
        )


def validate_min_length(cls: Type[StringField], value: str) -> None:
    if not cls.min_length <= len(value):
        raise DeserializationError(
            f'Invalid min_length string value: {len(value)} < {cls.min_length}'
        )


def validate_max_length(cls: Type[StringField], value: str) -> None:
    if not cls.max_length >= len(value):
        raise DeserializationError(
            f'Invalid max_length string value: {cls.max_length} < {len(value)}'
        )


def validate_min_length_max_length(cls: Type[StringField], value: str) -> None:
    if not cls.min_length <= len(value) <= cls.max_length:
        raise DeserializationError(
            f'Invalid min_length and max_length string value: '
            f'expected {cls.min_length} <= {len(value)} <= {cls.max_length}'
        )


def string(
    min_length: Optional[int] = None, max_length: Optional[int] = None
) -> Type[StringField]:
    min_length_str = '' if min_length is None else str(min_length)
    max_length_str = '' if max_length is None else str(max_length)
    cls_name = f'String{min_length_str}{max_length_str}'
    return type(
        cls_name,
        (StringField,),
        {'min_length': min_length, 'max_length': max_length},
    )


def integer(
    minimum: Optional[int] = None, maximum: Optional[int] = None
) -> Type[IntegerField]:
    minimum_str = '' if minimum is None else str(minimum)
    maximum_str = '' if maximum is None else str(maximum)
    cls_name = f'Integer{minimum_str}{maximum_str}'
    return type(
        cls_name, (IntegerField,), {'minimum': minimum, 'maximum': maximum},
    )
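# Illustrative usage (class names invented): declaring a subclass passes the
# bounds to __init_subclass__, which installs the matching validator above.
#
#     class Username(StringField, min_length=1, max_length=32):
#         pass
#
#     Username('carl')   # stores 'carl' in .value
#     Username('')       # raises DeserializationError (too short)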
def jsonschema_asdataclass(
    id_: str, schema: Dict[str, Any], bases: Tuple[type, ...] = ()
) -> Type[Any]:
    is_dict = any(issubclass(base, dict) for base in bases)
    subschema_bases = (dict,) if is_dict else tuple()
    extracted_annotations = [
        extract_annotations(
            id_,
            is_dict,
            prop_name,
            prop,
            subschema_bases,
            schema.get('required', []),
            subschema_bases,
        )
        for prop_name, prop in schema.get('properties', {}).items()
    ]
    type_ = make_type_from_extracted_annotations(
        id_, is_dict, extracted_annotations, bases
    )

    if is_dict:
        type_.__additional_properties__ = schema.get(
            'additionalProperties', True
        )
        if isinstance(type_.__additional_properties__, dict):
            extracted_annotations_ = extract_annotations(
                id_,
                is_dict,
                'additional_properties',
                schema['additionalProperties'],
                subschema_bases,
                schema['additionalProperties'].get('required', []),
            )
            type_.__additional_properties__ = extracted_annotations_[1]
    else:
        type_.__additional_properties__ = schema.get(
            'additionalProperties', False
        )

    return type_  # type: ignore


def extract_annotations(
    id_: str,
    is_dict: bool,
    prop_name: str,
    prop: Dict[str, Any],
    bases: Tuple[type, ...],
    required: List[str],
    subschema_bases: Tuple[type, ...] = (),
) -> Tuple[str, Type[Any], Any]:
    return (
        prop_name,
        Optional[
            jsonschema_asdataclass(
                f'{id_}_{prop_name}', prop, subschema_bases  # noqa
            )
        ]
        if prop['type'] == 'object'
        else (
            (
                jsonschema_array(id_, prop_name, prop, subschema_bases)
                if prop['type'] == 'array'
                else SCALARS[prop['type']]
            )
            if prop_name in required
            else (
                Optional[
                    jsonschema_array(id_, prop_name, prop, subschema_bases)
                ]
                if prop['type'] == 'array'
                else Optional[SCALARS[prop['type']]]
            )
        ),
        prop.get('default')
        if is_dict
        else dataclasses.field(default=prop.get('default')),
    )


def make_type_from_extracted_annotations(
    id_: str,
    is_dict: bool,
    extracted_annotations: List[Tuple[str, Type[Any], Any]],
    bases: Tuple[type, ...],
) -> Any:
    if is_dict:
        type_annotations = {}
        type_attributes = {}

        for prop_name, prop_type, prop_default in extracted_annotations:
            type_annotations[prop_name] = prop_type
            if prop_default is not None:
                type_attributes[prop_name] = prop_default

        type_attributes['__annotations__'] = type_annotations
        schema_type = type(id_, (dict,), type_attributes)
        return jsondaora(_TypedDictMeta(id_, bases + (schema_type,), {}))
    else:
        return dataclasses.make_dataclass(
            id_, extracted_annotations, bases=bases,
        )


def jsonschema_array(
    id_: str, prop_name: str, prop: Any, bases: Tuple[type, ...],
) -> Any:
    DynamicType: Type[Any] = (
        jsonschema_asdataclass(
            f'{id_}_{prop_name}', prop['items'], bases
        )  # noqa
        if (array_type := prop['items']['type']) == 'object'  # noqa
        else jsonschema_array(id_, prop_name, prop['items'], bases)  # noqa
        if array_type == 'array'  # noqa
        else SCALARS[array_type]
    )
    list_type = List[DynamicType]  # type: ignore

    if 'additionalItems' in prop:
        list_type.__additional_items__ = prop['additionalItems']  # type: ignore

    return list_type


SCALARS = {
    'boolean': bool,
    'string': str,
    'integer': int,
    'number': float,
}
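# Illustrative call (schema invented for the example): builds a dataclass with a
# required str field and an optional int field via the SCALARS mapping above.
#
#     Person = jsonschema_asdataclass('Person', {
#         'type': 'object',
#         'required': ['name'],
#         'properties': {
#             'name': {'type': 'string'},
#             'age': {'type': 'integer'},
#         },
#     })
#     p = Person(name='Ada', age=36)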
|
101827
|
import numpy as np
a = np.arange(10) * 10
print(a)
# [ 0 10 20 30 40 50 60 70 80 90]
print(a[5])
# 50
print(a[8])
# 80
print(a[[5, 8]])
# [50 80]
print(a[[5, 4, 8, 0]])
# [50 40 80 0]
print(a[[5, 5, 5, 5]])
# [50 50 50 50]
idx = np.array([[5, 4], [8, 0]])
print(idx)
# [[5 4]
# [8 0]]
print(a[idx])
# [[50 40]
# [80 0]]
# print(a[[[5, 4], [8, 0]]])
# IndexError: too many indices for array
print(a[[[[5, 4], [8, 0]]]])
# [[50 40]
# [80 0]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[0])
# [0 1 2 3]
print(a_2d[2])
# [ 8 9 10 11]
print(a_2d[[2, 0]])
# [[ 8 9 10 11]
# [ 0 1 2 3]]
print(a_2d[[2, 2, 2]])
# [[ 8 9 10 11]
# [ 8 9 10 11]
# [ 8 9 10 11]]
print(a_2d[:, 1])
# [1 5 9]
print(a_2d[:, 3])
# [ 3 7 11]
print(a_2d[:, 1:2])
# [[1]
# [5]
# [9]]
print(a_2d[:, [3, 1]])
# [[ 3 1]
# [ 7 5]
# [11 9]]
print(a_2d[:, [3, 3, 3]])
# [[ 3 3 3]
# [ 7 7 7]
# [11 11 11]]
print(a_2d[0, 1])
# 1
print(a_2d[2, 3])
# 11
print(a_2d[[0, 2], [1, 3]])
# [ 1 11]
# index
# [[0, 1] [2, 3]]
# print(a_2d[[0, 2, 1], [1, 3]])
# IndexError: shape mismatch: indexing arrays could not be broadcast together with shapes (3,) (2,)
print(a_2d[[[0, 0], [2, 2]], [[1, 3], [1, 3]]])
# [[ 1 3]
# [ 9 11]]
# index
# [[0, 1] [0, 3]
# [2, 1] [2, 3]]
print(a_2d[[[0], [2]], [1, 3]])
# [[ 1 3]
# [ 9 11]]
idxs = np.ix_([0, 2], [1, 3])
print(idxs)
# (array([[0],
# [2]]), array([[1, 3]]))
print(type(idxs))
# <class 'tuple'>
print(type(idxs[0]))
# <class 'numpy.ndarray'>
print(idxs[0])
# [[0]
# [2]]
print(idxs[1])
# [[1 3]]
print(a_2d[np.ix_([0, 2], [1, 3])])
# [[ 1 3]
# [ 9 11]]
print(a_2d[np.ix_([2, 0], [3, 3, 3])])
# [[11 11 11]
# [ 3 3 3]]
print(a_2d[[0, 2]][:, [1, 3]])
# [[ 1 3]
# [ 9 11]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[np.ix_([0, 2], [1, 3])] = 100
print(a_2d)
# [[ 0 100 2 100]
# [ 4 5 6 7]
# [ 8 100 10 100]]
a_2d[np.ix_([0, 2], [1, 3])] = [100, 200]
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 100 10 200]]
a_2d[np.ix_([0, 2], [1, 3])] = [[100, 200], [300, 400]]
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 300 10 400]]
print(a_2d[[0, 2]][:, [1, 3]])
# [[100 200]
# [300 400]]
a_2d[[0, 2]][:, [1, 3]] = 0
print(a_2d)
# [[ 0 100 2 200]
# [ 4 5 6 7]
# [ 8 300 10 400]]
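# Note: chained indexing like a_2d[[0, 2]][:, [1, 3]] first materializes a copy
# (fancy indexing always copies), so the assignment above writes into that
# temporary and a_2d is left unchanged; assigning through a single np.ix_()
# index, as earlier, modifies a_2d in place.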
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[[2, 0]] = [[100, 200, 300, 400], [500, 600, 700, 800]]
print(a_2d)
# [[500 600 700 800]
# [ 4 5 6 7]
# [100 200 300 400]]
a_2d[[2, 2]] = [[-1, -2, -3, -4], [-5, -6, -7, -8]]
print(a_2d)
# [[500 600 700 800]
# [ 4 5 6 7]
# [ -5 -6 -7 -8]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_fancy = a_2d[np.ix_([0, 2], [1, 3])]
print(a_fancy)
# [[ 1 3]
# [ 9 11]]
a_fancy[0, 0] = 100
print(a_fancy)
# [[100 3]
# [ 9 11]]
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[[2, 0], ::-1])
# [[11 10 9 8]
# [ 3 2 1 0]]
print(a_2d[::2, [3, 0, 1]])
# [[ 3 0 1]
# [11 8 9]]
|
101831
|
import csv
import json
from collections import OrderedDict
csvfile = open('./pokedex.csv', 'r')
jsonfile = open('./pokedex.json', 'w')
jsonNames = ("orderID", "nDex", "name", "type1", "type2", "ability1", "ability2", "hiddenability", "hp", "atk", "def", "spatk", "spdef", "spe", "note", "tier", "image")
reader = csv.DictReader(csvfile, jsonNames)
# Python 3: dict.iteritems() is gone and lambdas can no longer unpack tuple
# arguments, so sort with .items() and index into the (key, value) pair
jsonordered = [OrderedDict(sorted(item.items(), key=lambda kv: jsonNames.index(kv[0]))) for item in reader]
for row in jsonordered:
    # Change the value from str to int
    row['orderID'] = int(row['orderID'])
    row['nDex'] = int(row['nDex'])
    row['hp'] = int(row['hp'])
    row['atk'] = int(row['atk'])
    row['def'] = int(row['def'])
    row['spatk'] = int(row['spatk'])
    row['spdef'] = int(row['spdef'])
    row['spe'] = int(row['spe'])
    json.dump(row, jsonfile)
    jsonfile.write(',\n')
csvfile.close()
jsonfile.close()
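# Note: the loop above emits one JSON object per line with a trailing comma, so
# pokedex.json is a stream of objects rather than a single valid JSON array; to
# get a well-formed array, one could instead call json.dump(jsonordered, jsonfile) once.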
|
101834
|
from __future__ import print_function
class Rule(object):
    def __init__(self):
        pass

    def __repr__(self):
        return self.name()

    @classmethod
    def name(cls):
        return cls.__name__.split('.')[-1]

    @classmethod
    def explain(cls):
        return cls.__doc__

    def __cmp__(self, other):
        # Python 2 only: cmp() and unicode() no longer exist in Python 3
        return cmp(unicode(self), unicode(other))

class PredicateRootRule(Rule):
    rule_type = 'predicate_root'

class ArgumentRootRule(Rule):
    rule_type = 'argument_root'

class PredConjRule(Rule):
    type = 'predicate_conj'

class ArgumentResolution(Rule):
    type = 'argument_resolution'

class ConjunctionResolution(Rule):
    type = 'conjunction_resolution'

class SimplifyRule(Rule):
    type = 'simple'

#______________________________________________
# Predicate root identification

class a1(PredicateRootRule):
    "Extract a predicate token from the dependent of a clausal relation {ccomp, csubj, csubjpass}."
    rule_type = 'predicate_root'

class a2(PredicateRootRule):
    "Extract a predicate token from the dependent of clausal complement 'xcomp'."
    rule_type = 'predicate_root'

class b(PredicateRootRule):
    "Extract a predicate token from the dependent of a clausal modifier."
    rule_type = 'predicate_root'

class c(PredicateRootRule):
    "Extract a predicate token from the governor of the relations {nsubj, nsubjpass, dobj, iobj, ccomp, xcomp, advcl}."
    rule_type = 'predicate_root'

    def __init__(self, e):
        super(c, self).__init__()
        self.e = e

    def __repr__(self):
        return "add_root(%s)_for_%s_from_(%s)" % (self.e.gov, self.e.rel, self.e.dep)

class d(PredicateRootRule):
    "Extract a predicate token from the dependent of apposition."
    rule_type = 'predicate_root'

class e(PredicateRootRule):
    "Extract a predicate token from the dependent of an adjectival modifier."
    rule_type = 'predicate_root'

class v(PredicateRootRule):
    "Extract a predicate token from the dependent of the possessive relation 'nmod:poss' (English specific)."
    rule_type = 'predicate_root'

class f(PredicateRootRule):
    "Extract a conjunct token of a predicate token."
    rule_type = 'predicate_root'

#_________________________________________
# Argument root identification

class g1(ArgumentRootRule):
    "Extract an argument token from the dependent of the following relations {nsubj, nsubjpass, dobj, iobj}."

    def __init__(self, edge):
        self.edge = edge
        super(g1, self).__init__()

    def __repr__(self):
        return 'g1(%s)' % (self.edge.rel)

class h1(ArgumentRootRule):
    "Extract an argument token, which directly depends on the predicate token, from the dependent of the relations {nmod, nmod:npmod, nmod:tmod}."

class h2(ArgumentRootRule):
    "Extract an argument token, which indirectly depends on the predicate token, from the dependent of the relations {nmod, nmod:npmod, nmod:tmod}."

class i(ArgumentRootRule):
    "Extract an argument token from the governor of an adjectival modifier."

class j(ArgumentRootRule):
    "Extract an argument token from the governor of apposition."

class w1(ArgumentRootRule):
    "Extract an argument token from the governor of 'nmod:poss' (English specific)."

class w2(ArgumentRootRule):
    "Extract an argument token from the dependent of 'nmod:poss' (English specific)."

class k(ArgumentRootRule):
    "Extract an argument token from the dependent of the dependent of clausal complement 'ccomp'."

#__________________________________
# Predicate conjunction resolution

class pred_conj_borrow_aux_neg(PredConjRule):
    "Borrow aux and neg tokens from a conjoined predicate's name."

    def __init__(self, friend, borrowed_token):
        super(pred_conj_borrow_aux_neg, self).__init__()
        self.friend = friend
        self.borrowed_token = borrowed_token

class pred_conj_borrow_tokens_xcomp(PredConjRule):
    "Borrow tokens from xcomp in a conjunction of predicates."

    def __init__(self, friend, borrowed_token):
        super(pred_conj_borrow_tokens_xcomp, self).__init__()
        self.friend = friend
        self.borrowed_token = borrowed_token

class cut_borrow_other(ArgumentResolution):
    def __init__(self, borrowed, friend):
        super(cut_borrow_other, self).__init__()
        self.friend = friend
        self.borrowed = borrowed

class cut_borrow_subj(ArgumentResolution):
    def __init__(self, subj, friend):
        super(cut_borrow_subj, self).__init__()
        self.friend = friend
        self.subj = subj

    def __repr__(self):
        return 'cut_borrow_subj(%s)_from(%s)' % (self.subj.root, self.friend.root)

class cut_borrow_obj(ArgumentResolution):
    def __init__(self, obj, friend):
        super(cut_borrow_obj, self).__init__()
        self.friend = friend
        self.obj = obj

    def __repr__(self):
        return 'cut_borrow_obj(%s)_from(%s)' % (self.obj.root, self.friend.root)

class borrow_subj(ArgumentResolution):
    "Borrow a subject from the governor in (conj, xcomp of conj root, and advcl)."
    # if gov_rel == 'conj' and the event is missing a subject, try to borrow the
    # subject from the other event; if there is still no subject, try looking at
    # the xcomp of the conjunction root.
    #
    # if gov_rel == 'advcl' and not event.has_subj(), then borrow from the governor.

    def __init__(self, subj, friend):
        super(borrow_subj, self).__init__()
        self.subj = subj
        self.friend = friend

    def __repr__(self):
        return 'borrow_subj(%s)_from(%s)' % (self.subj.root, self.friend.root)
        # return 'borrow_subj(%s,%s,%s,%s)' % (self.i, self.event.root, self.friend.root, self.event.root.gov_rel)
        # return 'borrow_subj(%s,%s)' % (self.friend, self.friend.subj())

class borrow_obj(ArgumentResolution):
    "Borrow an object from the governor in (conj, xcomp of conj root, and advcl)."
    # same borrowing scheme as borrow_subj, applied to objects

    def __init__(self, obj, friend):
        super(borrow_obj, self).__init__()
        self.obj = obj
        self.friend = friend

    def __repr__(self):
        return 'borrow_obj(%s)_from(%s)' % (self.obj.root, self.friend.root)

class share_argument(ArgumentResolution):
    "Create an argument sharing tokens with another argument."

#___________________________
# Relative clause

class arg_resolve_relcl(ArgumentResolution):
    """Resolve the argument of a predicate inside a relative clause. The missing
    argument that we take is rooted at the governor of the `acl` dependency
    relation (type acl:*) pointing at the embedded predicate.
    """

class pred_resolve_relcl(ArgumentResolution):
    "Predicate has an argument from relative-clause resolution (`arg_resolve_relcl`)."

#__________________________________________
# Rules for post-added argument root tokens.

class l(ArgumentResolution):
    "Merge the argument token set of xcomp's dependent into the argument token set of the real predicate token."

class m(ConjunctionResolution):
    "Extract a conjunct token of the argument root token."

#_________________________________________
# Predicate phrase

class PredPhraseRule(Rule):
    type = 'pred_phrase'

    def __init__(self, x):
        self.x = x
        super(PredPhraseRule, self).__init__()

    # def __repr__(self):
    #     return '%s(%s)' % (super(PredPhraseRule, self).__repr__(), self.x)

class n1(PredPhraseRule):
    "Extract a token from the subtree of the predicate root token, and add it to the predicate phrase."

class n6(PredPhraseRule):
    "Add a case phrase to the predicate phrase."

class n2(PredPhraseRule):
    "Drop a token, which is an argument root token, from the subtree of the predicate root token."

class n3(PredPhraseRule):
    "Drop a token, which is another predicate root token, from the subtree of the predicate root token."

class n4(PredPhraseRule):
    "Drop a token, which is the dependent of the relation set {ccomp, csubj, advcl, acl, acl:relcl, nmod:tmod, parataxis, appos, dep}, from the subtree of the predicate root token."

class n5(PredPhraseRule):
    "Drop a token, which is a conjunct of the predicate root token or a conjunct of an xcomp's dependent token, from the subtree of the predicate root token."

#______________________________________
# Rules for extracting argument phrases.

class ArgPhraseRule(Rule):
    type = 'arg_phrase'

class clean_arg_token(ArgPhraseRule):
    "Extract a token from the subtree of the argument root token, and add it to the argument phrase."

    def __init__(self, x):
        super(clean_arg_token, self).__init__()
        self.x = x

    def __repr__(self):
        return "clean_arg_token(%s)" % (self.x)

class move_case_token_to_pred(ArgPhraseRule):
    "Extract a case token from the subtree of the argument root token."

    def __init__(self, x):
        super(move_case_token_to_pred, self).__init__()
        self.x = x

    def __repr__(self):
        return "move_case_token(%s)_to_pred" % (self.x)

class predicate_has(ArgPhraseRule):
    "Drop a token, which is a predicate root token, from the subtree of the argument root token."

    def __init__(self, x):
        super(predicate_has, self).__init__()
        self.x = x

    def __repr__(self):
        return "predicate_has(%s)" % (self.x)

class drop_appos(ArgPhraseRule):
    def __init__(self, x):
        super(drop_appos, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_appos(%s)" % (self.x)

class drop_unknown(ArgPhraseRule):
    def __init__(self, x):
        super(drop_unknown, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_unknown(%s)" % (self.x)

class drop_cc(ArgPhraseRule):
    "Drop the argument's cc (coordinating conjunction) from the subtree of the argument root token."

    def __init__(self, x):
        super(drop_cc, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_cc(%s)" % (self.x)

class drop_conj(ArgPhraseRule):
    "Drop the argument's conjunct from the subtree of the argument root token."

    def __init__(self, x):
        super(drop_conj, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_conj(%s)" % (self.x)

class special_arg_drop_direct_dep(ArgPhraseRule):
    def __init__(self, x):
        super(special_arg_drop_direct_dep, self).__init__()
        self.x = x

    def __repr__(self):
        return "special_arg_drop_direct_dep(%s)" % (self.x)

class embedded_advcl(ArgPhraseRule):
    def __init__(self, x):
        super(embedded_advcl, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_embedded_advcl(%s)" % (self.x)

class embedded_ccomp(ArgPhraseRule):
    def __init__(self, x):
        super(embedded_ccomp, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_embedded_ccomp(%s)" % (self.x)

class embedded_unknown(ArgPhraseRule):
    def __init__(self, x):
        super(embedded_unknown, self).__init__()
        self.x = x

    def __repr__(self):
        return "drop_embedded_unknown(%s)" % (self.x)

#________________________________
# Rules for simple predicates.

class p1(SimplifyRule):
    "Remove a non-core argument, a nominal modifier, from the predpatt."

class p2(SimplifyRule):
    "Remove an argument of another type from the predpatt."

class q(SimplifyRule):
    "Remove an adverbial modifier from the predicate phrase."

class r(SimplifyRule):
    "Remove an auxiliary from the predicate phrase."

#____________________________________________________
# Rules for manually added tokens (English specific)

class u(Rule):
    "Strip the punctuation in the phrase."

#____________________________________________________
# English-specific rules

class LanguageSpecific(Rule):
    lang = None

class EnglishSpecific(Rule):
    lang = 'English'

class en_relcl_dummy_arg_filter(EnglishSpecific):
    def __init__(self):
        super(en_relcl_dummy_arg_filter, self).__init__()

if __name__ == '__main__':
    print(a1(), a1().explain())
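    # expected to print the rule name followed by its docstring, roughly:
    #   a1 Extract a predicate token from the dependent of a clausal relation {ccomp, csubj, csubjpass}.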
|
101846
|
import operator
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Board, Column, Project, Tag, Todo, Type
REPORTER_ATTR = getattr(settings, 'BUDGET_REPORTER_ATTR', 'is_staff')
EDITOR_ATTR = getattr(settings, 'BUDGET_EDITOR_ATTR', 'is_staff')
DEVELOPER_ATTR = getattr(settings, 'BUDGET_DEVELOPER_ATTR', 'is_superuser')
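# Illustrative override in a project's settings.py (values invented); since
# operator.attrgetter accepts dotted paths, the flag can live on a related object:
#   BUDGET_REPORTER_ATTR = 'profile.is_reporter'
#   BUDGET_DEVELOPER_ATTR = 'is_superuser'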
class UserSerializer(serializers.HyperlinkedModelSerializer):
    reporter = serializers.SerializerMethodField()
    editor = serializers.SerializerMethodField()
    developer = serializers.SerializerMethodField()

    def get_reporter(self, obj):
        f = operator.attrgetter(REPORTER_ATTR)
        return f(obj) is True  # force to boolean

    def get_editor(self, obj):
        f = operator.attrgetter(EDITOR_ATTR)
        return f(obj) is True  # force to boolean

    def get_developer(self, obj):
        f = operator.attrgetter(DEVELOPER_ATTR)
        return f(obj) is True  # force to boolean

    class Meta:
        model = User
        fields = (
            'id',
            'username',
            'last_name',
            'first_name',
            'email',
            'reporter',
            'editor',
            'developer',
        )


class SlimUserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = (
            'id',
        )


class TodoSerializer(serializers.ModelSerializer):
    project = serializers.SlugField(source='project.slug')

    class Meta:
        model = Todo
        fields = ('id', 'title', 'github_url', 'created', 'project')

    def create(self, validated_data):
        project = validated_data.pop('project')
        validated_data['project'] = Project.objects.get(slug=project['slug'])
        return Todo.objects.create(**validated_data)

    def update(self, instance, validated_data):
        instance.title = validated_data.get('title', instance.title)
        instance.save()
        return instance
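# With source='project.slug', an incoming payload like
# {"title": "Fix header", "project": "my-project"} (values invented) arrives in
# validated_data as {'title': ..., 'project': {'slug': 'my-project'}}, which is
# why create() pops the nested dict and resolves the Project by slug.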
class TagSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Tag
        fields = ('slug', 'name')


class TypeSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Type
        fields = ('slug', 'name', 'color')


class BoardSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Board
        fields = (
            'slug',
            'name',
            'position',
        )


class ColumnSerializer(serializers.HyperlinkedModelSerializer):
    board = serializers.SlugRelatedField(
        many=False,
        slug_field='slug',
        queryset=Board.objects.all()
    )

    class Meta:
        model = Column
        fields = (
            'slug',
            'name',
            'board',
            'position',
        )


class ProjectSerializer(serializers.HyperlinkedModelSerializer):
    status = serializers.SlugRelatedField(
        slug_field='slug',
        queryset=Column.objects.all()
    )
    editors = serializers.PrimaryKeyRelatedField(
        many=True, required=False, queryset=User.objects.all())
    reporters = serializers.PrimaryKeyRelatedField(
        many=True, required=False, queryset=User.objects.all())
    developers = serializers.PrimaryKeyRelatedField(
        many=True, required=False, queryset=User.objects.all())
    tags = serializers.SlugRelatedField(
        many=True,
        required=False,
        slug_field='slug',
        queryset=Tag.objects.all()
    )
    type = serializers.SlugRelatedField(
        slug_field='slug',
        required=False,
        queryset=Type.objects.all(),
        allow_null=True
    )
    todos = TodoSerializer(many=True, read_only=True)

    class Meta:
        model = Project
        fields = (
            'slug',
            'name',
            'description',
            'status',
            'run_date',
            'preview_url',
            'publish_url',
            'github',
            'gdoc',
            'reporters',
            'editors',
            'developers',
            'notes',
            'type',
            'tags',
            'position',
            'todos',
            'archive',
        )
|
101861
|
from pathlib import Path
import pytest
import sys
import ssh2net
from ssh2net import SSH2Net
from ssh2net.exceptions import ValidationError, SetupTimeout
NET2_DIR = ssh2net.__file__
UNIT_TEST_DIR = f"{Path(NET2_DIR).parents[1]}/tests/unit/"
def test_init__shell():
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn._shell is False


def test_init_host_strip():
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.host == "my_device"


def test_init_validate_host():
    test_host = {
        "setup_host": "8.8.8.8",
        "setup_validate_host": True,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.host == "8.8.8.8"


def test_init_valid_port():
    test_host = {
        "setup_host": "my_device ",
        "setup_port": 123,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.port == 123


def test_init_invalid_port():
    test_host = {
        "setup_host": "my_device ",
        "setup_port": "notanint",
        "auth_user": "username",
        "auth_password": "password",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_setup_timeout():
    test_host = {
        "setup_host": "my_device ",
        "setup_timeout": 10,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    assert conn.setup_timeout == 10


def test_init_invalid_setup_timeout():
    test_host = {
        "setup_host": "my_device ",
        "setup_timeout": "notanint",
        "auth_user": "username",
        "auth_password": "password",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_session_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_timeout": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_timeout == 10


def test_init_invalid_session_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_timeout": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_session_keepalive():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive": True,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive is True


def test_init_invalid_session_keepalive():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive": "notabool",
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)


def test_init_valid_session_keepalive_interval():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_interval": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_interval == 10


def test_init_invalid_session_keepalive_interval():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_interval": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_session_keepalive_type():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_type": "standard",
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_type == "standard"


def test_init_invalid_session_keepalive_type():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_type": "notvalid",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_session_keepalive_pattern():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "session_keepalive_pattern": "\007",
    }
    conn = SSH2Net(**test_host)
    assert conn.session_keepalive_pattern == "\x07"


def test_init_username_strip():
    test_host = {"setup_host": "my_device", "auth_user": "username ", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.auth_user == "username"


def test_init_password_strip():
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password "}
    conn = SSH2Net(**test_host)
    assert conn.auth_password == "password"


def test_init_ssh_key_strip():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_public_key": "/some/public/key ",
    }
    conn = SSH2Net(**test_host)
    assert conn.auth_public_key == b"/some/public/key"


def test_init_valid_comms_strip_ansi():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_strip_ansi": True,
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_strip_ansi is True


def test_init_invalid_comms_strip_ansi():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_strip_ansi": 123,
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)


def test_init_valid_comms_prompt_regex():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_prompt_regex": "somestr",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_prompt_regex == "somestr"


def test_init_invalid_comms_prompt_regex():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_prompt_regex": 123,
    }
    with pytest.raises(TypeError):
        SSH2Net(**test_host)


def test_init_valid_comms_prompt_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "comms_operation_timeout": 10,
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_operation_timeout == 10


def test_init_invalid_comms_prompt_timeout():
    test_host = {
        "setup_host": "my_device ",
        "auth_user": "username",
        "auth_password": "password",
        "comms_operation_timeout": "notanint",
    }
    with pytest.raises(ValueError):
        SSH2Net(**test_host)


def test_init_valid_comms_return_char():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_return_char": "\rn",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_return_char == "\rn"


def test_init_invalid_comms_return_char():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_return_char": False,
    }
    with pytest.raises(TypeError) as e:
        SSH2Net(**test_host)
    assert str(e.value) == "'comms_return_char' must be <class 'str'>, got: <class 'bool'>'"


def test_init_valid_comms_pre_login_handler_func():
    def pre_login_handler_func():
        pass

    login_handler = pre_login_handler_func
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": login_handler,
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_pre_login_handler)


def test_init_valid_comms_pre_login_handler_ext_func():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": "tests.unit.ext_test_funcs.some_pre_login_handler_func",
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_pre_login_handler)


def test_init_invalid_comms_pre_login_handler():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_pre_login_handler": "not.a.valid.ext.function",
    }
    with pytest.raises(ValueError) as e:
        SSH2Net(**test_host)
    assert (
        str(e.value)
        == f"{test_host['comms_pre_login_handler']} is an invalid comms_pre_login_handler function or path to a function."
    )
def test_init_valid_comms_disable_paging_default():
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "term length 0"


def test_init_valid_comms_disable_paging_func():
    def disable_paging_func():
        pass

    disable_paging = disable_paging_func
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": disable_paging,
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_disable_paging)


def test_init_valid_comms_disable_paging_ext_func():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "tests.unit.ext_test_funcs.some_disable_paging_func",
    }
    conn = SSH2Net(**test_host)
    assert callable(conn.comms_disable_paging)


def test_init_valid_comms_disable_paging_str():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "do some paging stuff",
    }
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "do some paging stuff"


def test_init_invalid_comms_disable_paging_ext_func():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": "tests.unit.ext_test_funcs.some_disable_paging_func_BAD",
    }
    with pytest.raises(AttributeError):
        SSH2Net(**test_host)


# NOTE: this redefines test_init_valid_comms_disable_paging_default above with a
# different expected default; because the names collide, pytest only collects this one.
def test_init_valid_comms_disable_paging_default():
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "terminal length 0"
def test_init_invalid_comms_disable_paging_str():
    test_host = {
        "setup_host": "my_device",
        "auth_user": "username",
        "auth_password": "password",
        "comms_disable_paging": 1234,
    }
    with pytest.raises(ValueError) as e:
        SSH2Net(**test_host)
    assert (
        str(e.value)
        == f"{test_host['comms_disable_paging']} is an invalid comms_disable_paging function, path to a function, or is not a string."
    )


def test_init_ssh_config_file():
    test_host = {
        "setup_host": "someswitch1",
        "setup_ssh_config_file": f"{UNIT_TEST_DIR}_ssh_config",
    }
    conn = SSH2Net(**test_host)
    assert conn.auth_user == "carl"


# will fail without mocking or a real host
# def test_enter_exit():
#     test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
#     with SSH2Net(**test_host) as conn:
#         assert bool(conn) is True
#     assert bool(conn) is False


def test_str():
    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert str(conn) == f"SSH2Net Connection Object for host {test_host['setup_host']}"


def test_repr():
    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert repr(conn) == (
        "SSH2Net {'_shell': False, 'host': '1.2.3.4', 'port': 22, 'setup_timeout': 5, "
        "'setup_use_paramiko': False, 'session_timeout': 5000, 'session_keepalive': False, "
        "'session_keepalive_interval': 10, 'session_keepalive_type': 'network', "
        "'session_keepalive_pattern': '\\x05', 'auth_user': 'username', 'auth_public_key': None, "
        "'auth_password': '********', 'comms_strip_ansi': False, 'comms_prompt_regex': "
        "'^[a-z0-9.\\\\-@()/:]{1,32}[#>$]$', 'comms_operation_timeout': 10, 'comms_return_char': "
        "'\\n', 'comms_pre_login_handler': '', 'comms_disable_paging': 'terminal length 0'}"
    )


def test_bool():
    test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert bool(conn) is False


def test__validate_host_valid_ip():
    test_host = {"setup_host": "8.8.8.8", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    r = conn._validate_host()
    assert r is None


def test__validate_host_valid_dns():
    test_host = {"setup_host": "google.com", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    r = conn._validate_host()
    assert r is None


def test__validate_host_invalid_ip():
    test_host = {
        "setup_host": "255.255.255.256",
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(ValidationError) as e:
        conn._validate_host()
    assert str(e.value) == f"Host {test_host['setup_host']} is not an IP or resolvable DNS name."


def test__validate_host_invalid_dns():
    test_host = {
        "setup_host": "notresolvablename",
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(ValidationError) as e:
        conn._validate_host()
    assert str(e.value) == f"Host {test_host['setup_host']} is not an IP or resolvable DNS name."


def test__socket_alive_false():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn._socket_alive() is False


@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_alive_true():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    conn._socket_open()
    assert conn._socket_alive() is True


@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_close():
    test_host = {"setup_host": "127.0.0.1", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    conn._socket_open()
    assert conn._socket_alive() is True
    conn._socket_close()
    assert conn._socket_alive() is False


@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_open_timeout():
    test_host = {
        "setup_host": "240.0.0.1",
        "setup_timeout": 1,
        "auth_user": "username",
        "auth_password": "password",
    }
    conn = SSH2Net(**test_host)
    with pytest.raises(SetupTimeout):
        conn._socket_open()
|
101870
|
from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import os
import sys
import time
from subprocess import call
from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class
from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features, flatten, fit_binarizers, binarize
from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
def label_test(flat_labels, new_test_name, text_file, method_name):
    tag_map = {0: 'BAD', 1: 'OK'}
    new_test_plain = open(new_test_name + '.' + method_name + '.plain', 'w')
    new_test_ext = open(new_test_name + '.' + method_name + '.ext', 'w')
    start_idx = 0
    for s_idx, txt in enumerate(open(text_file)):
        words = txt[:-1].decode('utf-8').strip().split()
        # advance through flat_labels so each sentence reads its own slice
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
        start_idx += len(words)
        for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
            new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))


# write both hypothesis and reference
def label_test_hyp_ref(flat_labels, flat_true_labels, new_test_name, text_file):
    tag_map = {0: 'BAD', 1: 'OK'}
    new_test = open(new_test_name, 'w')
    new_test_plain = open(new_test_name + '.plain', 'w')
    start = 0
    for s_idx, txt in enumerate(open(text_file)):
        words = txt[:-1].decode('utf-8').strip().split()
        tag_seq = [tag_map[flat_labels[i]] for i in range(start, start + len(words))]
        true_tag_seq = [tag_map[flat_true_labels[i]] for i in range(start, start + len(words))]
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
        start += len(words)
        for t_idx, (tag, true_tag, word) in enumerate(zip(tag_seq, true_tag_seq, words)):
            new_test.write('%d\t%d\t%s\t%s\t%s\n' % (s_idx, t_idx, word.encode('utf-8'), true_tag, tag))
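# Output line format written above (tab-separated):
#   <sentence_idx>  <token_idx>  <word>  <true_tag>  <predicted_tag>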
# check that everything in a data_obj matches:
# - all source and target sentences exist
# - alignments don't hit out of bounds
# - target tokens really exist and are in their places
def main(config, stamp):
    # the data_type is the format corresponding to the model of the data that the user wishes to learn
    data_type = config['data_type'] if 'data_type' in config else (config['contexts'] if 'contexts' in config else 'plain')
    bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic'
    logger.info("data_type -- {}, bad_tagging -- {}".format(data_type, bad_tagging))
    # time_stamp = str(time.time())
    time_stamp = stamp
    workers = config['workers']
    tmp_dir = config['tmp_dir']

    # one generator
    train_data_generator = build_object(config['datasets']['training'][0])
    train_data = train_data_generator.generate()
    # test
    test_data_generator = build_object(config['datasets']['test'][0])
    test_data = test_data_generator.generate()

    logger.info("Train data keys: {}".format(train_data.keys()))
    logger.info("Train data sequences: {}".format(len(train_data['target'])))
    logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))

    # additional representations
    if 'representations' in config:
        representation_generators = build_objects(config['representations'])
    else:
        representation_generators = []
    for r in representation_generators:
        train_data = r.generate(train_data)
        test_data = r.generate(test_data)

    borders = config['borders'] if 'borders' in config else False
    logger.info('here are the keys in your representations: {}'.format(train_data.keys()))

    bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic'
    # test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging)
    test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging, tags_format=config['tags_format'])
    print("Objects in the train data: {}".format(len(train_data['target'])))
    print("UNAMBIGUOUS: ", config['unambiguous'])
    # train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'])
    train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=config['tags_format'])
    # print("Train contexts: {}".format(len(train_contexts)))
    # print("1st context:", train_contexts[0])

    # the list of context objects' 'target' field lengths,
    # to restore the word-level tags from the phrase-level ones
    # test_context_correspondence = get_contexts_words_number(test_contexts)
    if data_type == 'sequential':
        test_context_correspondence = flatten([get_contexts_words_number(cont) for cont in test_contexts])
        # print(test_context_correspondence)
        for idx, cont in enumerate(test_contexts):
            get_cont = get_contexts_words_number(cont)
            count_cont = [len(c['token']) for c in cont]
            assert all([get_cont[i] == count_cont[i] for i in range(len(cont))]), "Sum doesn't match at line {}:\n{}\n{}".format(idx, ' '.join([str(c) for c in get_cont]), ' '.join([str(c) for c in count_cont]))
        assert sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont]), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for cont in test_contexts for c in cont]))
    else:
        test_context_correspondence = get_contexts_words_number(test_contexts)
        assert sum(test_context_correspondence) == sum([len(c['token']) for c in test_contexts]), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for c in test_contexts]))
    # print("Token lengths:", sum([len(c['token']) for c in test_contexts]))
    # assert(sum(test_context_correspondence) == 9613), "GOLAKTEKO OPASNOSTE!!!, {}".format(sum(test_context_correspondence))
    # sys.exit()
    # if data_type == 'sequential':
    #     test_context_correspondence = flatten(test_context_correspondence)

    logger.info('Vocabulary comparison -- coverage for each dataset: ')
    logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
    # END REPRESENTATION GENERATION

    # FEATURE EXTRACTION
    train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
    test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
    test_tags_true = test_data['tags']
    tag_idx = 0
    seg_idx = 0
    # test_context_correspondence_seq = [get_contexts_words_number(cont) for cont in test_contexts]
    # for idx, (tag_seq, phr_seq) in enumerate(zip(test_data['tags'], test_context_correspondence_seq)):
    #     assert(len(tag_seq) == sum(phr_seq)), "Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
    #     tag_idx = 0
    #     for d in phr_seq:
    #         first_tag = tag_seq[tag_idx]
    #         assert(all([t == first_tag for t in tag_seq[tag_idx:tag_idx+d]])), "Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
    #         try:
    #             indicator = [t == first_tag for t in test_data['tags'][seg_idx][tag_idx:tag_idx+d]]
    #             assert(all(indicator))
    #             tags_cnt += d
    #             if tags_cnt == len(test_data['tags'][seg_idx]):
    #                 tags_cnt = 0
    #                 seg_idx += 1
    #             elif tags_cnt > len(test_data['tags'][seg_idx]):
    #                 raise
    #         except:
    #             print("No correspondence in line {}, tag {}: \n{}\n{}".format(seg_idx, tag_idx, ' '.join(test_data['tags'][seg_idx]), d))
    #             sys.exit()
    # assert(sum(test_context_correspondence) == len(flatten(test_data['tags']))), "Sums don't match for phrase contexts and test data object: {} and {}".format(sum(test_context_correspondence), len(flatten(test_data['tags'])))
    # flat_cont = flatten(test_contexts)
    # flat_tags = flatten(test_data['tags'])
    # for ii in range(len(flat_cont)):
    if data_type == 'plain':
        assert len(test_context_correspondence) == len(test_tags), "Lengths don't match for phrase contexts and test tags: {} and {}".format(len(test_context_correspondence), len(test_tags))
    # test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')

    logger.info('creating feature extractors...')
    feature_extractors = build_objects(config['feature_extractors'])
    logger.info('mapping the feature extractors over the contexts for test...')
    test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
    logger.info('mapping the feature extractors over the contexts for train...')
    train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
    logger.info('number of training instances: {}'.format(len(train_features)))
    logger.info('number of testing instances: {}'.format(len(test_features)))
    logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
    # END FEATURE EXTRACTION

    from sklearn.metrics import f1_score, precision_score, recall_score
    from sklearn.cross_validation import permutation_test_score
    import numpy as np

    tag_map = {u'OK': 1, u'BAD': 0}
    if data_type == 'sequential':
        # TODO: save features for CRFSuite, call it
        logger.info('training sequential model...')
        experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
        feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
        print("FEATURE NAMES: ", feature_names)
        persist_dir = tmp_dir
        logger.info('persisting your features to: {}'.format(persist_dir))
        # for each dataset, write a file and persist the features
        if 'persist_format' not in config:
            config['persist_format'] = 'crf_suite'
        for dataset_obj in experiment_datasets:
            persist_features(dataset_obj['name'] + time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format'])
        feature_num = len(train_features[0][0])
        train_file = os.path.join(tmp_dir, 'train' + time_stamp + '.crf')
        test_file = os.path.join(tmp_dir, 'test' + time_stamp + '.crf')
        if config['persist_format'] == 'crf++':
            # generate a template for the CRF++ feature extractor
            generate_crf_template(feature_num, 'template', tmp_dir)
            # train a CRF++ model
            call(['crf_learn', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file' + time_stamp)])
            # tag a test set
            call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file' + time_stamp), '-o', test_file + '.tagged', test_file])
        elif config['persist_format'] == 'crf_suite':
            crfsuite_algorithm = config['crfsuite_algorithm'] if 'crfsuite_algorithm' in config else 'arow'
            call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file' + time_stamp), train_file])
            test_out = open(test_file + '.tagged', 'w')
            call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file' + time_stamp), test_file], stdout=test_out)
            test_out.close()
        else:
            print("Unknown persist format: {}".format(config['persist_format']))
            sys.exit()

        sequential_true = [[]]
        sequential_predictions = [[]]
        flat_true = []
        flat_predictions = []
        for line in open(test_file + '.tagged'):
            # end of tagging, statistics reported
            if line.startswith('Performance'):
                break
            if line == '\n':
                sequential_predictions.append([])
                continue
            chunks = line[:-1].decode('utf-8').split()
            flat_true.append(chunks[-2])
            sequential_true[-1].append(chunks[-2])
            flat_predictions.append(chunks[-1])
            sequential_predictions[-1].append(chunks[-1])

        # restoring the word-level tags
        test_predictions_word, test_tags_word = [], []
        for idx, n in enumerate(test_context_correspondence):
            for i in range(n):
                test_predictions_word.append(flat_predictions[idx])
                test_tags_word.append(flat_true[idx])
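        # e.g. correspondence [2, 1] with phrase tags ['OK', 'BAD'] expands to
        # word tags ['OK', 'OK', 'BAD'] -- each phrase tag is repeated once per token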
print(f1_score(test_predictions_word, test_tags_word, average=None))
print(f1_score(test_predictions_word, test_tags_word, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_predictions_word, test_tags_word, average=None), recall_score(test_predictions_word, test_tags_word, average=None)))
else:
train_tags = [tag_map[tag] for tag in train_tags]
#print(test_tags)
test_tags = [tag_map[tag] for tag in test_tags]
#print(test_tags)
#sys.exit()
# data_type is 'token' or 'plain'
logger.info('start training...')
classifier_type = import_class(config['learning']['classifier']['module'])
# train the classifier(s)
classifier_map = map_classifiers(train_features, train_tags, classifier_type, data_type=data_type)
logger.info('classifying the test instances')
test_predictions = predict_all(test_features, classifier_map, data_type=data_type)
# assert(len(test_predictions) == len(flatten(test_tags_seq))), "long predictions: {}, sequential: {}".format(len(test_predictions), len(flatten(test_tags_seq)))
cnt = 0
test_predictions_seq = []
test_tags_seq_num = []
tag_map = {'OK': 1, 'BAD': 0, 1: 1, 0: 0}
long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False
# restoring the word-level tags
test_predictions_word, test_tags_word = [], []
logger.info("Test predictions lenght: {}".format(len(test_predictions)))
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
test_predictions_word.append(test_predictions[idx])
test_tags_word.append(test_tags[idx])
test_tags_true_flat = flatten(test_tags_true)
test_tags_true_flat = [tag_map[t] for t in test_tags_true_flat]
# print(f1_score(test_tags_word, test_predictions_word, average=None))
# print(f1_score(test_tags_word, test_predictions_word, average='weighted', pos_label=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_tags_true_flat, test_predictions_word, average=None), recall_score(test_tags_true_flat, test_predictions_word, average=None)))
# TODO: remove the hard coding of the tags here
bad_count = sum(1 for t in test_tags if t == u'BAD' or t == 0)
good_count = sum(1 for t in test_tags if t == u'OK' or t == 1)
total = len(test_tags)
assert (total == bad_count+good_count), 'tag counts should be correct'
percent_good = good_count / total
logger.info('percent good in test set: {}'.format(percent_good))
logger.info('percent bad in test set: {}'.format(1 - percent_good))
random_class_results = []
random_weighted_results = []
for i in range(20):
random_tags_phrase = list(np.random.choice([1, 0], total, p=[percent_good, 1-percent_good]))
random_tags = []
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
random_tags.append(random_tags_phrase[idx])
# random_tags = [u'GOOD' for i in range(total)]
random_class_f1 = f1_score(test_tags_true_flat, random_tags, average=None)
random_class_results.append(random_class_f1)
logger.info('two class f1 random score ({}): {}'.format(i, random_class_f1))
# random_average_f1 = f1_score(random_tags, test_tags, average='weighted')
random_average_f1 = f1_score(test_tags_true_flat, random_tags, average='weighted', pos_label=None)
random_weighted_results.append(random_average_f1)
# logger.info('average f1 random score ({}): {}'.format(i, random_average_f1))
avg_random_class = np.average(random_class_results, axis=0)
avg_weighted = np.average(random_weighted_results)
logger.info('two class f1 random average score: {}'.format(avg_random_class))
logger.info('weighted f1 random average score: {}'.format(avg_weighted))
# print("Cross-validation:")
# print(permutation_test_score())
# logger.info("Sequence correlation: ")
# print(sequence_correlation_weighted(test_tags_seq_num, test_predictions_seq, verbose=True)[1])
label_test_hyp_ref(test_predictions_word, test_tags_true_flat, os.path.join(tmp_dir, config['output_name']), config["output_test"])
# label_test(test_predictions, '/export/data/varvara/marmot/marmot/experiment/final_submissions/baseline', '/export/data/varvara/corpora/wmt15_corrected/test.target', 'BASELINE')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("--data_type", help="data type - sequential or plain")
parser.add_argument("--bad_tagging", help="tagging -- optimistic, pessimistic or super-pessimistic")
parser.add_argument("--unambiguous", default=0, help="make the tagging unambiguous -- no segmentation for spans of BAD tag (values - 0 or 1, default 0)")
parser.add_argument("--output_name", default="output", help="file to store the test set tagging")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
if args.data_type is not None:
experiment_config['data_type'] = args.data_type
if args.bad_tagging is not None:
experiment_config['bad_tagging'] = args.bad_tagging
experiment_config['unambiguous'] = True if int(args.unambiguous) == 1 else False
experiment_config['output_name'] = args.output_name
stamp = os.path.basename(cfg_path).replace('config', '').replace('.yaml', '') + '_' + experiment_config['bad_tagging'] + '_' + experiment_config['data_type']
if experiment_config['unambiguous']:
stamp += '_un'
main(experiment_config, stamp)
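# Illustrative (partial) config sketch covering only the keys referenced in this script;
# real experiment configs contain additional fields, and the classifier module below is hypothetical.
# data_type: plain            # or 'sequential'
# bad_tagging: pessimistic
# persist_format: crf_suite   # or 'crf++'
# crfsuite_algorithm: arow
# output_test: test.tags
# learning:
#     classifier:
#         module: sklearn.linear_model.LogisticRegression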
|
101902
|
from urllib.parse import urlparse
from logging import getLogger
from django.conf import settings
from django.db import transaction
from requests.auth import HTTPBasicAuth
from zipa import lattice # pylint: disable=no-name-in-module
from zinc import models
from zinc.utils.validation import is_ipv6
logger = getLogger('zinc.' + __name__)
def lattice_factory(url, user, password):
parts = urlparse(url)
if url.startswith('http://'):
lattice.config.secure = False
lattice.config.verify = False
lattice.config.host = parts.netloc
lattice.config.prefix = parts.path
lattice.config.auth = HTTPBasicAuth(user, password)
return lattice
def handle_ip(ip_addr, server, locations):
# ignore ipv6 addresses for now
if is_ipv6(ip_addr):
return
enabled = server['state'] == 'configured'
datacenter_id = int(
server['datacenter_url'].split('?')[0].split('/')[-1])
location = locations.get(datacenter_id, 'fake_location')
friendly_name = '{} {}'.format(server['hostname'].split('.')[0],
location)
ip = models.IP.objects.filter(
ip=ip_addr,
).first()
changed = False
if ip is None: # new record
ip = models.IP(ip=ip_addr, enabled=enabled)
ip.reconcile_healthcheck()
changed = True
elif ip.enabled != enabled:
ip.enabled = enabled
ip.mark_policy_records_dirty()
changed = True
if ip.hostname != server['hostname']:
ip.hostname = server['hostname']
changed = True
if ip.friendly_name != friendly_name:
ip.friendly_name = friendly_name
changed = True
if changed:
ip.save()
return ip.pk
def sync(lattice_client):
roles = set(settings.LATTICE_ROLES)
env = settings.LATTICE_ENV.lower()
servers = [
server for server in lattice_client.servers
if (set(server['roles']).intersection(roles) and
server['environment'].lower() == env and
server['state'].lower() not in ('unconfigured', 'decommissioned'))
]
locations = {d['id']: d['location'] for d in lattice_client.datacenters}
lattice_ip_pks = set()
with transaction.atomic():
for server in servers:
for ip in server.ips:
ip_pk = handle_ip(ip['ip'], server, locations)
if ip_pk is not None:
lattice_ip_pks.add(ip_pk)
if not lattice_ip_pks:
raise AssertionError("Refusing to delete all IPs!")
ips_to_remove = set(
models.IP.objects.values_list('pk', flat=True)) - lattice_ip_pks
for ip in models.IP.objects.filter(pk__in=ips_to_remove):
ip.soft_delete()
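# Example usage (illustrative, not part of the original module): configure the zipa client
# from Django settings and run a sync, e.g. from a management command or a periodic task.
# The LATTICE_URL / LATTICE_USER / LATTICE_PASSWORD setting names are assumptions.
# client = lattice_factory(settings.LATTICE_URL, settings.LATTICE_USER, settings.LATTICE_PASSWORD)
# sync(client)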
|
101908
|
from using_extend import *
f = FooBar()
if f.blah(3) != 3:
raise RuntimeError, "blah(int)"
if f.blah(3.5) != 3.5:
raise RuntimeError, "blah(double)"
if f.blah("hello") != "hello":
raise RuntimeError, "blah(char *)"
if f.blah(3, 4) != 7:
raise RuntimeError, "blah(int,int)"
if f.blah(3.5, 7.5) != (3.5 + 7.5):
raise RuntimeError, "blah(double,double)"
if f.duh(3) != 3:
raise RuntimeError, "duh(int)"
|
101930
|
from savu.plugins.plugin_tools import PluginTools
class DezingerDeprecatedTools(PluginTools):
"""A plugin for cleaning x-ray strikes based on statistical evaluation of
the near neighbourhood
"""
def define_parameters(self):
"""
outlier_mu:
visibility: basic
dtype: float
description: Threshold for detecting outliers, greater is less
sensitive.
default: 10.0
kernel_size:
visibility: basic
dtype: int
description: Number of frames included in average.
default: 5
mode:
visibility: intermediate
dtype: int
description: 'output mode, 0=normal 5=zinger strength 6=zinger
yes/no.'
default: 0
"""
|
101939
|
import asyncio
import logging.config
from pathlib import Path
from symphony.bdk.core.config.loader import BdkConfigLoader
from symphony.bdk.core.symphony_bdk import SymphonyBdk
async def run():
config = BdkConfigLoader.load_from_symphony_dir("config.yaml")
async with SymphonyBdk(config) as bdk:
ext_app_authenticator = bdk.app_authenticator()
app_auth = await ext_app_authenticator.authenticate_extension_app("appToken")
ta = app_auth.app_token
ts = app_auth.symphony_token
logging.debug("App token: %s, Symphony token: %s", ta, ts)
logging.debug("Is token pair valid: %s", await ext_app_authenticator.is_token_pair_valid(ta, ts))
logging.config.fileConfig(Path(__file__).parent.parent / "logging.conf", disable_existing_loggers=False)
asyncio.run(run())
|
101941
|
import os
import sys
import itertools
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
mpl.rcParams['legend.fontsize'] = 12
DPI = 5000
input_dir = "/home/pablo/ws/log/trajectories"
print("Reading from", input_dir)
##########################################################
################### w/ PREDICTION ########################
##########################################################
## linear speed
if len(sys.argv) == 2:
linear_speed = sys.argv[1]
else:
print("Exiting...")
exit()
trajectories_pred = pd.read_csv(input_dir + "/trajectories_pred_lifelong_{}.csv".format(linear_speed))
# get only some lines from the data
zoom_in = True
if zoom_in:
initial_row = 2300 # corresponds to roughly a timestamp of 50 s
row_200_sec = 10000 # corresponds to roughly a timestamp of 100 s
trajectories_pred = trajectories_pred.iloc[initial_row:row_200_sec]
## -- save dir
save_dir = "/home/pablo/ws/log/trajectories/lifelong_{}".format(linear_speed)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# extract data from DataFrame
status = trajectories_pred['status'].tolist()
ardrone_x = trajectories_pred['aX'].tolist()
ardrone_y = trajectories_pred['aY'].tolist()
ardrone_z = trajectories_pred['aZ'].tolist()
summit_x = trajectories_pred['sX'].tolist()
summit_y = trajectories_pred['sY'].tolist()
summit_z = trajectories_pred['sZ'].tolist()
# 3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(ardrone_x, ardrone_y, ardrone_z, 'r', label='UAV')
ax.plot(summit_x, summit_y, summit_z, 'g', label='UGV')
ax.set(xlabel='x (m)', ylabel='y (m)', zlabel='z (m)')
bottom, top = plt.ylim() # return the current ylim
ax.legend()
ax.view_init(elev=11, azim=125)
plt.show()
fig.savefig(os.path.join(save_dir, "traj3D_pred.pdf"), format='pdf', dpi=DPI)
# 2D
fig, ax = plt.subplots()
ax.plot(ardrone_x, ardrone_y, 'r', label='UAV')
ax.plot(summit_x, summit_y, 'g', label='UGV')
ax.set(xlabel='x (m)', ylabel='y (m)')
ax.legend()
ax.grid()
fig.savefig(os.path.join(save_dir, "traj2D_pred.pdf"), format='pdf', dpi=DPI)
|
101972
|
from test_helper import run_common_tests, failed, passed, get_answer_placeholders, do_not_run_on_check
if __name__ == '__main__':
do_not_run_on_check()
run_common_tests()
|
101992
|
from agents import *
from models import *
import numpy as np
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import sys
import pickle
# end class world
def speed_profile(file_names):
"""
Plot speed profiles for several evaluation results.
Args:
file_names (array of string): file names for which to draw the speed profiles.
"""
# instantiate the class
robots = []
records = []
dT = 0.05
for i in range(1,len(file_names)):
f = open(file_names[i], 'rb')
record = pickle.load(f)
records.append(record)
exec('robots.append(' + record.model + '(' + record.algorithm + '(), dT))');
print(len(records))
fig = plt.figure()
ax1=plt.subplot(2, 1, 1)
ax2=plt.subplot(2, 1, 2)
for i in range(len(records)):
d = []
dot_d = []
human = HumanBall3D(MobileAgent(), dT);
for t in range(records[0].tot):
records[i].robot_moves[:, t]
human.update(robots[0])
human.move(records[0].human_moves[:, t])
robots[i].update(human)
robots[i].x = records[i].robot_moves[:, t]
Mr = robots[i].m
Mh = human.m
dim = np.shape(Mr)[0] // 2
p_idx = np.arange(dim)
v_idx = p_idx + dim
d.append(np.linalg.norm(Mr[p_idx] - Mh[p_idx]))
sgn = (Mr[p_idx+dim] - Mh[p_idx+dim]).T * (Mr[p_idx] - Mh[p_idx])
sgn = -1 if sgn < 0 else 1
dot_d.append(sgn * np.linalg.norm(Mr[p_idx+dim] - Mh[p_idx+dim]))
print(d[:10])
print(dot_d[:10])
ax1.plot(d, c='C'+str(i), label=records[i].algorithm, linestyle='-')
ax2.plot(dot_d, c='C'+str(i), label=records[i].algorithm, linestyle='--')
ax2.plot(range(-100,800,100), np.linspace(0,0,9),c='black', linestyle='-')
ax1.legend()
ax1.set_xlim(0,200)
ax1.set_ylabel('m', fontsize = 20)
# plt.show()
# fig.legend()
ax2.set_xlim(0,200)
ax2.set_xlabel('Frame (0.05s)', fontsize = 20)
ax2.set_ylabel('m/s', fontsize = 20)
# tikz_save(model+'.tex')
fig.savefig('speed_profile.pdf', bbox_inches='tight')
if __name__ == '__main__':
speed_profile(sys.argv)
|
101996
|
import FWCore.ParameterSet.Config as cms
siStripGainESProducer = cms.ESProducer("SiStripGainESProducer",
appendToDataLabel = cms.string(''),
printDebug = cms.untracked.bool(False),
AutomaticNormalization = cms.bool(False),
APVGain = cms.VPSet(
cms.PSet(
Record = cms.string('SiStripApvGainRcd'),
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.)
),
cms.PSet(
Record = cms.string('SiStripApvGain2Rcd'),
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.)
)
)
)
|
101999
|
from typing import Dict
import numpy as np
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from src.utils.technical_utils import load_obj
class LitNER(pl.LightningModule):
def __init__(self, cfg: DictConfig, tag_to_idx: Dict):
super(LitNER, self).__init__()
self.cfg = cfg
self.tag_to_idx = tag_to_idx
self.model = load_obj(cfg.model.class_name)(
embeddings_dim=cfg.datamodule.embeddings_dim, tag_to_idx=tag_to_idx, **cfg.model.params
)
self.metrics = torch.nn.ModuleDict(
{
self.cfg.metric.metric.metric_name: load_obj(self.cfg.metric.metric.class_name)(
**cfg.metric.metric.params
)
}
)
if 'other_metrics' in self.cfg.metric.keys():
for metric in self.cfg.metric.other_metrics:
self.metrics.update({metric.metric_name: load_obj(metric.class_name)(**metric.params)})
def forward(self, x, lens, *args, **kwargs):
return self.model(x, lens)
def configure_optimizers(self):
optimizer = load_obj(self.cfg.optimizer.class_name)(self.model.parameters(), **self.cfg.optimizer.params)
scheduler = load_obj(self.cfg.scheduler.class_name)(optimizer, **self.cfg.scheduler.params)
return (
[optimizer],
[{'scheduler': scheduler, 'interval': self.cfg.scheduler.step, 'monitor': self.cfg.scheduler.monitor}],
)
def training_step(self, batch, batch_idx):
embeds, lens, labels = batch
# transform tokens to embeddings
embeds = self._vectorizer(embeds)
score, tag_seq, loss = self.model(embeds, lens, labels)
labels = labels.flatten()
labels = labels[labels != self.tag_to_idx['PAD']]
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
for metric in self.metrics:
score = self.metrics[metric](tag_seq, labels)
self.log(f'train_{metric}', score, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
embeds, lens, labels = batch
embeds = self._vectorizer(embeds)
score, tag_seq, loss = self.model(embeds, lens, labels)
labels = labels.flatten()
labels = labels[labels != self.tag_to_idx['PAD']]
self.log('valid_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
for metric in self.metrics:
score = self.metrics[metric](tag_seq, labels)
self.log(f'valid_{metric}', score, on_step=True, on_epoch=True, prog_bar=True, logger=True)
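# Illustrative sketch of the config shape this module reads (keys inferred from the attribute
# accesses above; class names and values are hypothetical):
# model:      {class_name: src.models.bilstm_crf.BiLSTMCRF, params: {hidden_dim: 256}}
# datamodule: {embeddings_dim: 300}
# optimizer:  {class_name: torch.optim.Adam, params: {lr: 0.001}}
# scheduler:  {class_name: torch.optim.lr_scheduler.StepLR, params: {step_size: 10}, step: epoch, monitor: valid_loss}
# metric:     {metric: {metric_name: f1, class_name: torchmetrics.F1Score, params: {num_classes: 9}}}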
|
102015
|
from .state import EOF
from .tokens import TokenEof
from .tokens_base import TOKEN_COMMAND_UNABBREVIATE
from .tokens_base import TokenOfCommand
from Vintageous import ex
@ex.command('unabbreviate', 'una')
class TokenUnabbreviate(TokenOfCommand):
def __init__(self, params, *args, **kwargs):
super().__init__(params,
TOKEN_COMMAND_UNABBREVIATE,
'unabbreviate', *args, **kwargs)
self.target_command = 'ex_unabbreviate'
@property
def short(self):
return self.params['lhs']
def scan_command_unabbreviate(state):
params = {
'lhs': None
}
m = state.expect_match(r'\s+(?P<lhs>.+?)\s*$')
params.update(m.groupdict())
return None, [TokenUnabbreviate(params), TokenEof()]
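# Example (illustrative): scanning the argument of ':unabbreviate foo' leaves
# params == {'lhs': 'foo'}, so the resulting TokenUnabbreviate has short == 'foo'.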
|
102051
|
from typedpy import *
class Example1(Structure):
D = Map(items=[String(), Integer()], default=lambda: {'abc': 0})
_required = []
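# Example (illustrative sketch, assuming typedpy fills callable defaults for omitted fields):
# e = Example1()
# e.D   # -> {'abc': 0}; keys must be strings and values integers per the Map spec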
|
102055
|
from django.shortcuts import render, redirect
# Create your views here.
from django.http import Http404
from homework_app.models import Homework, Comment
from django.views.generic.list import ListView
from homework_app.forms import CommentForm
def homework(request, homework_id):
p = Homework.objects.get(pk=homework_id)
context = {"dataset": Comment.objects.filter(homework=p), "homework": p}
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
form = form.save(commit=False)
form.user = request.user
form.homework = p
form.save()
return redirect("homework_single", homework_id)
else:
form = CommentForm()
context["form"] = form
return render(request, 'homework.html', context)
class HomeworkList(ListView):
model = Homework
def empty_path(request):
return redirect('/accounts/login/')
|
102057
|
import httpx
from statuscheck.services.bases._base import BaseServiceAPI
from statuscheck.services.models.generic import (
COMPONENT_TYPE_DEGRADED,
COMPONENT_TYPE_GOOD,
COMPONENT_TYPE_MAINTENANCE,
COMPONENT_TYPE_MAJOR_OUTAGE,
COMPONENT_TYPE_PARTIAL_OUTAGE,
COMPONENT_TYPE_SECURITY,
COMPONENT_TYPE_UNKNOWN,
TYPE_GOOD,
TYPE_INCIDENT,
TYPE_MAINTENANCE,
TYPE_OUTAGE,
TYPE_SECURITY,
Component,
Incident,
Status,
Summary,
)
STATUS_OK = 100
STATUS_PLANNED_MAINTENANCE = 200
STATUS_DEGRADED_PERFORMANCE = 300
STATUS_PARTIAL_SERVICE_DISRUPTION = 400
STATUS_MAJOR = 500
STATUS_SECURITY = 600
STATUS_TYPE_MAPPING = {
STATUS_OK: TYPE_GOOD,
STATUS_PLANNED_MAINTENANCE: TYPE_MAINTENANCE,
STATUS_DEGRADED_PERFORMANCE: TYPE_INCIDENT,
STATUS_PARTIAL_SERVICE_DISRUPTION: TYPE_INCIDENT,
STATUS_MAJOR: TYPE_OUTAGE,
STATUS_SECURITY: TYPE_SECURITY,
}
COMPONENT_STATUS_GOOD = 100
COMPONENT_STATUS_MAINTENANCE = 200
COMPONENT_STATUS_DEGRADED = 300
COMPONENT_STATUS_PARTIAL_OUTAGE = 400
COMPONENT_STATUS_MAJOR_OUTAGE = 500
COMPONENT_STATUS_SECURITY = 600
COMPONENT_STATUS_UNKNOWN = ""
COMPONENT_STATUS_MAPPING = {
COMPONENT_STATUS_GOOD: COMPONENT_TYPE_GOOD,
COMPONENT_STATUS_MAINTENANCE: COMPONENT_TYPE_MAINTENANCE,
COMPONENT_STATUS_DEGRADED: COMPONENT_TYPE_DEGRADED,
COMPONENT_STATUS_PARTIAL_OUTAGE: COMPONENT_TYPE_PARTIAL_OUTAGE,
COMPONENT_STATUS_MAJOR_OUTAGE: COMPONENT_TYPE_MAJOR_OUTAGE,
COMPONENT_STATUS_SECURITY: COMPONENT_TYPE_SECURITY,
COMPONENT_STATUS_UNKNOWN: COMPONENT_TYPE_UNKNOWN,
}
class BaseStatusIOAPI(BaseServiceAPI):
"""
Status.io pages API handler.
Documentation: https://statusio.docs.apiary.io
Public status API: https://kb.status.io/developers/public-status-api/
"""
domain_id: str = ""
def _get_base_url(self) -> str:
if not self.domain_id:
raise NotImplementedError("Please, add domain id")
return f"https://api.status.io/1.0/status/{self.domain_id}"
def get_summary(self) -> Summary:
url = self._get_base_url()
response_json = httpx.get(url).json()
status_dict = response_json["result"]["status_overall"]
status = Status(
code=str(status_dict["status_code"]),
name=STATUS_TYPE_MAPPING[status_dict["status_code"]],
description=status_dict["status"],
is_ok=status_dict["status_code"] == 100,
)
components = [
Component(
id=component["id"],
name=component["name"],
status=COMPONENT_STATUS_MAPPING[component["status_code"]],
extra_data=component,
)
for component in response_json["result"]["status"]
]
# incident payload contains trimmed component data, so we need to enrich it
components_dict = {component.id: component for component in components}
incidents = [
Incident(
id=incident["_id"],
name=incident["name"],
components=[
Component(
id=component["_id"],
name=component["name"],
status=components_dict[component["_id"]].status,
)
for component in incident.get("components_affected", [])
],
extra_data=incident,
)
for incident in response_json["result"]["incidents"]
]
self.summary = Summary(status, components, incidents)
return self.summary
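# Example (illustrative sketch, not part of the library): a concrete handler only has to
# supply the public status page's domain id. The subclass name and id below are hypothetical,
# and BaseServiceAPI is assumed to need no extra constructor arguments.
# class ExampleStatusAPI(BaseStatusIOAPI):
#     domain_id = "abc123"
# summary = ExampleStatusAPI().get_summary()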
|
102059
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
class MultiMaskedConv2D(Layer):
"""
Masked multitask 2-dimensional convolutional layer. This layer implements
multiple stacks of the convolutional architecture and applies masking consistent
with the MANN API to support developing sparse multitask models.
"""
def __init__(
self,
filters,
kernel_size = 3,
padding = 'same',
strides = 1,
use_bias = True,
activation = None,
kernel_initializer = 'random_normal',
bias_initializer = 'zeros',
mask_initializer = 'ones',
**kwargs
):
"""
Parameters
----------
filters : int
The number of convolutional filters to apply
kernel_size : int or tuple of ints (default 3)
The kernel size in height and width
padding : str (default 'same')
Either 'same' or 'valid', the padding to use during convolution
strides : int or tuple of ints
Stride lengths to use during convolution
use_bias : bool (default True)
Whether to use a bias calculation on the outputs
activation : None, str, or function (default None)
Activation function to use on the outputs
kernel_initializer : str or keras initialization function (default 'random_normal')
The initialization function to use for the weights
bias_initializer : str or keras initialization function (default 'zeros')
The initialization function to use for the bias
mask_initializer : str or keras initialization function (default 'ones')
The mask initialization function to use
"""
super(MultiMaskedConv2D, self).__init__(**kwargs)
self.filters = int(filters) if not isinstance(filters, int) else filters
self.kernel_size = kernel_size
self.padding = padding
self.strides = tuple(strides) if isinstance(strides, list) else strides
self.activation = tf.keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.mask_initializer = tf.keras.initializers.get(mask_initializer)
@property
def kernel_size(self):
return self._kernel_size
@kernel_size.setter
def kernel_size(self, value):
if isinstance(value, int):
self._kernel_size = (value, value)
else:
self._kernel_size = value
def build(self, input_shape):
input_shape = [
tuple(shape.as_list()) for shape in input_shape
]
if len(set(input_shape)) != 1:
raise ValueError(f'All input shapes must be equal, got {input_shape}')
simplified_shape = input_shape[0]
self.w = self.add_weight(
shape = (len(input_shape), self.kernel_size[0], self.kernel_size[1], simplified_shape[-1], self.filters),
initializer = self.kernel_initializer,
trainable = True,
name = 'weights'
)
self.w_mask = self.add_weight(
shape = self.w.shape,
initializer = self.mask_initializer,
trainable = False,
name = 'weights_mask'
)
if self.use_bias:
self.b = self.add_weight(
shape = (len(input_shape), self.filters),
initializer = self.bias_initializer,
trainable = True,
name = 'bias'
)
self.b_mask = self.add_weight(
shape = self.b.shape,
initializer = self.mask_initializer,
trainable = False,
name = 'bias_mask'
)
def call(self, inputs):
conv_outputs = [
tf.nn.convolution(
inputs[i],
self.w[i] * self.w_mask[i],
padding = self.padding.upper(),
strides = self.strides,
data_format = 'NHWC'
) for i in range(len(inputs))
]
if self.use_bias:
conv_outputs = [
conv_outputs[i] + (self.b[i] * self.b_mask[i]) for i in range(len(conv_outputs))
]
return [self.activation(output) for output in conv_outputs]
def get_config(self):
config = super().get_config().copy()
config.update(
{
'filters' : self.filters,
'kernel_size' : list(self.kernel_size),
'padding' : self.padding,
'strides' : self.strides,
'activation' : tf.keras.activations.serialize(self.activation),
'use_bias' : self.use_bias,
'kernel_initializer' : tf.keras.initializers.serialize(self.kernel_initializer),
'bias_initializer' : tf.keras.initializers.serialize(self.bias_initializer),
'mask_initializer' : tf.keras.initializers.serialize(self.mask_initializer)
}
)
return config
def set_masks(self, new_masks):
if not self.use_bias:
self.set_weights(
[self.w.numpy() * new_masks[0].astype(float), new_masks[0].astype(float)]
)
else:
self.set_weights(
[self.w.numpy() * new_masks[0].astype(float), self.b.numpy() * new_masks[1].astype(float), new_masks[0].astype(float), new_masks[1].astype(float)]
)
@classmethod
def from_config(cls, config):
return cls(
filters = config['filters'],
kernel_size = config['kernel_size'],
padding = config['padding'],
strides = config['strides'],
activation = config['activation'],
use_bias = config['use_bias'],
kernel_initializer = config['kernel_initializer'],
bias_initializer = config['bias_initializer']
)
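# Example (illustrative sketch, not part of the library): one masked convolution stack per task,
# applied to a list of equally-shaped inputs; shapes below are arbitrary.
# layer = MultiMaskedConv2D(filters=8, kernel_size=3, activation='relu')
# x_task_a = tf.random.normal((4, 32, 32, 3))
# x_task_b = tf.random.normal((4, 32, 32, 3))
# y_task_a, y_task_b = layer([x_task_a, x_task_b])   # each output has shape (4, 32, 32, 8)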
|
102094
|
import logging
import time, datetime
from thespian.actors import *
from thespian.test import *
import pytest
import signal
import os
class KillMeActor(Actor):
def receiveMessage(self, msg, sender):
logging.info('EchoActor got %s (%s) from %s', msg, type(msg), sender)
self.send(sender, os.getpid())
class ParentActor(Actor):
def receiveMessage(self, msg, sender):
if msg == 'child':
self.send(self.createActor(ChildActor), sender)
if msg == 'hello':
if not hasattr(self, 'rspmsg'):
self.rspmsg = 'world'
self.send(sender, self.rspmsg)
if isinstance(msg, ChildActorExited):
self.rspmsg = 'goodbye'
class ChildActor(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, ActorAddress):
self.send(msg, os.getpid())
smallwait = datetime.timedelta(milliseconds=350)
@pytest.fixture(params=[
# Non-deadly
'signal.SIGCONT-world',
# 'signal.SIGINT-world',
# 'signal.SIGUSR1-world',
# 'signal.SIGUSR2-world',
# 'signal.SIGHUP-world',
# # Deadly
# 'signal.SIGTERM-goodbye',
# 'signal.SIGQUIT-goodbye',
# 'signal.SIGABRT-goodbye',
# 'signal.SIGKILL-goodbye',
])
def testSignal(request):
return request.param
class TestFuncSafeActorSignals(object):
def testCreateActorSystem(self, asys):
pass
def testSimpleActor(self, asys):
killme = asys.createActor(KillMeActor)
def testSigCont(self, asys):
killme = asys.createActor(KillMeActor)
killme_pid = asys.ask(killme, 'pid?', smallwait)
assert killme_pid # not 0 or None
os.kill(killme_pid, signal.SIGCONT)
assert killme_pid == asys.ask(killme, 'pid again?', smallwait)
def testChildSigCont(self, asys):
parent = asys.createActor(ParentActor)
assert 'world' == asys.ask(parent, 'hello', smallwait)
child_pid = asys.ask(parent, 'child', smallwait*3)
assert child_pid # not 0 or None
os.kill(child_pid, signal.SIGCONT)
assert 'world' == asys.ask(parent, 'hello', smallwait)
# n.b. Cannot test unsafe signals with the simple actor system because
# the signals affect the testing process; often causing it to exit.
class TestFuncMultiProcActorSignals(object):
def test_signal(self, asys, testSignal):
if 'proc' not in asys.base_name:
pytest.skip('Cannot send signals to primary testing process')
signame, response = testSignal.split('-')
killme = asys.createActor(KillMeActor)
killme_pid = asys.ask(killme, 'pid?', smallwait)
assert killme_pid # not 0 or None
os.kill(killme_pid, eval(signame))
time.sleep(0.2) # allow signal to be delivered
r = asys.ask(killme, 'pid again?', smallwait)
assert (killme_pid if response == 'world' else None) == r
def testChildSig(self, testSignal, asys):
if 'proc' not in asys.base_name:
pytest.skip('Cannot send signals to primary testing process')
signame, response = testSignal.split('-')
parent = asys.createActor(ParentActor)
assert 'world' == asys.ask(parent, 'hello', smallwait)
child_pid = asys.ask(parent, 'child', smallwait*3)
assert child_pid # not 0 or None
os.kill(child_pid, eval(signame))
# Child is not killed and continues running
time.sleep(0.02) # allow signal to be delivered
assert response == asys.ask(parent, 'hello', smallwait)
|
102208
|
import numpy as np
from .layer_base import LayerBase
class ReluLayer(LayerBase):
def __init__(self):
super().__init__()
self.cache = {}
def id(self):
return "Relu"
def forward(self, x):
y = np.maximum(x, 0)
self.cache["is_negative"] = (x < 0)
return y
def backward(self, dy):
is_negative = self.cache["is_negative"]
dx = dy
dx[is_negative] = 0
return dx
class SigmoidLayer(LayerBase):
def __init__(self):
super().__init__()
self.cache = {}
def id(self):
return "Sigmoid"
def forward(self, x):
y = 1 / (1 + np.exp(-x))
self.cache["y"] = y
return y
def backward(self, dy):
y = self.cache["y"]
dx = y * (1 - y) * dy
return dx
class TanhLayer(LayerBase):
def __init__(self):
super().__init__()
self.cache = {}
def id(self):
return "Tanh"
def forward(self, x):
y = np.tanh(x)
self.cache["y"] = y
return y
def backward(self, dy):
y = self.cache["y"]
dx = (1 - np.power(y, 2)) * dy
return dx
class SoftmaxWithLossLayer(LayerBase):
def __init__(self):
super().__init__()
self.cache = {}
def id(self):
return "SoftmaxWithLoss"
def forward(self, x, target):
batch_size = target.shape[0]
c = np.max(x)
y = np.exp(x - c) / np.sum(np.exp(x - c), axis=1, keepdims=True)
loss = -np.sum(np.sum(target * np.log(y), axis=1)) / batch_size
self.cache["target"] = target.copy()
self.cache["y"] = y.copy()
return loss
def backward(self, dy=1):
y = self.cache["y"].copy()
target = self.cache["target"].copy()
batch_size = target.shape[0]
dx = dy * (y - target) / batch_size
return dx
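# Example (illustrative): a forward/backward pass through ReluLayer on a tiny batch.
# relu = ReluLayer()
# relu.forward(np.array([[-1.0, 2.0]]))    # -> [[0., 2.]]
# relu.backward(np.array([[1.0, 1.0]]))    # -> [[0., 1.]], gradient blocked where the input was negative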
|
102260
|
from atcodertools.fmtprediction.models.calculator import CalcNode
class Index:
"""
The model to store index information of a variable, which holds the likely minimal / maximal value for each dimension.
Up to 2 indices are now supported.
In most cases, the minimal value is 1 and the maximal value is some variable like N.
"""
def __init__(self):
self.min_index = None
self.max_index = None
def update(self, new_value: str):
self._update_min(new_value)
self._update_max(new_value)
def get_length(self):
assert self.max_index is not None
assert self.min_index is not None
return CalcNode.parse(
"{max_index}-({min_index})+1".format(
max_index=self.max_index,
min_index=self.min_index)
).simplify()
def _update_min(self, new_value: str):
if not new_value.isdecimal():
# a non-numeric value (a variable) cannot be assumed to be the minimal index, so skip it
return
if (self.min_index is None) or (self.min_index.evaluate() > CalcNode.parse(new_value).evaluate()):
self.min_index = CalcNode.parse(new_value)
def _update_max(self, new_value: str):
if not new_value.isdecimal():
self.max_index = CalcNode.parse(new_value)
if (self.max_index is None) or (
len(self.max_index.get_all_variables()) == 0 and self.max_index.evaluate() < CalcNode.parse(
new_value).evaluate()
):
self.max_index = CalcNode.parse(new_value)
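# Example (illustrative): after seeing the literal index "1" and the variable "N",
# get_length() builds and simplifies "N-(1)+1", i.e. a length of N.
# index = Index()
# index.update("1")
# index.update("N")
# length_node = index.get_length()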
|
102290
|
import types
import datetime
from nose.tools import eq_ as orig_eq_
from unittest import skip
from allmychanges.utils import first, html_document_fromstring
from allmychanges.parsing.pipeline import (
get_markup,
extract_metadata,
group_by_path,
strip_outer_tag,
prerender_items,
highlight_keywords,
parse_plain_file,
parse_markdown_file,
filter_versions,
parse_file)
from allmychanges.env import Environment
def eq_(left, right):
if isinstance(left, types.GeneratorType):
left = list(left)
if isinstance(right, types.GeneratorType):
right = list(right)
orig_eq_(left, right, '\n{0}\n!=\n{1}'.format(repr(left), repr(right)))
def eq_dict(left, right):
from dictdiffer import diff
result = list(diff(left, right))
if result:
formatted = '\n' + '\n'.join(
map(repr, result))
raise AssertionError(formatted)
env = Environment(type='root', title='')
create_file = lambda filename, content: env.push(type='file_content',
filename=filename,
content=content)
def test_parsing_files():
files = [
create_file('release-notes/0.1.0.md',
"""
Initial release
===============
I wrote this library as a proof of the concept.
"""),
create_file('release-notes/0.1.1.md',
"""
Minor changes
===============
This release has small importance.
* Test case was introduced
Final word.
""")]
versions = list(parse_file(files[0])) + list(parse_file(files[1]))
eq_(4, len(versions))
v1, v2, v3, v4 = versions
eq_('release-notes/0.1.0.md', v1.title)
eq_('Initial release', v2.title)
eq_('release-notes/0.1.1.md', v3.title)
eq_('Minor changes', v4.title)
sc = v3.content
eq_("""<h1>Minor changes</h1>\n<p>This release has small importance.</p>
<ul>
<li>Test case was introduced</li>
</ul>
<p>Final word.</p>""",
sc)
def test_markup_guesser_from_extension():
eq_('markdown', get_markup('release-notes/0.1.1.md',
"Minor changes"))
eq_('rst', get_markup('release-notes/0.1.1.rst',
"Minor changes"))
eq_('plain', get_markup('release-notes/Changes',
"Minor changes"))
def test_markup_guesser_from_content():
eq_('rst', get_markup('CHANGES',
"Some text with :func:`foo` mentioned."))
# from https://github.com/celery/celery/blob/3.1/Changelog
eq_('rst', get_markup('CHANGES',
"* [Security: `CELERYSA-0002`_] Insecure default umask."))
eq_('rst', get_markup('CHANGES',
"- **Results**: ``result.get()`` was misbehaving."))
eq_('markdown', get_markup('CHANGES',
"""Some header
=========="""))
eq_('markdown', get_markup('CHANGES',
"""Some header
--------"""))
eq_('markdown', get_markup('CHANGES',
"## Some 2 level header"))
eq_('markdown', get_markup('CHANGES',
"Some [link](blah)"))
eq_('markdown', get_markup('CHANGES',
"Some [link][blah]"))
# but
eq_('plain', get_markup('CHANGES',
"Some [thing] in brackets"))
eq_('plain', get_markup('CHANGES',
"""
0.1:
* Initial release
0.1.1
* Added benchmarking script
* Added support for more serializer modules"""))
# part of the sbcl's html changelog
eq_('html', get_markup('Changelog', """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html
><head
>"""))
def test_extract_metadata():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
input_data = v(title='1.0 (2014-06-24)',
content='Fixed issue')
eq_([v(type='prerender_items',
title='1.0 (2014-06-24)',
content='Fixed issue',
date=datetime.date(2014, 6, 24))],
extract_metadata(input_data))
def test_prerender_inserts_labels_into_content_items():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
input_data = v(type='prerender_items',
title='1.0 (2014-06-24)',
content='<p>Some bug was <em>fixed</em> issue</p>',
date=datetime.date(2014, 6, 24))
expected = '<p>Some <span class="changelog-highlight-fix">bug</span> was <em><span class="changelog-highlight-fix">fixed</span></em> issue</p>'
eq_(expected, first(prerender_items(input_data)).processed_content)
input_data = v(type='prerender_items',
title='1.0 (2014-06-24)',
content='Fixed issue',
date=datetime.date(2014, 6, 24))
expected = '<span class="changelog-highlight-fix">Fixed</span> issue'
eq_(expected, first(prerender_items(input_data)).processed_content)
def test_keywords_highlighting():
eq_('Some <span class="changelog-highlight-fix">breaking changes</span>',
highlight_keywords('Some breaking changes'))
eq_('Various <span class="changelog-highlight-fix">bugfixes</span>',
highlight_keywords('Various bugfixes'))
eq_('<span class="changelog-highlight-fix">Fixed a bug</span> where blah minor',
highlight_keywords('Fixed a bug where blah minor'))
eq_('<span class="changelog-highlight-fix">Bug Fixes</span>',
highlight_keywords('Bug Fixes'))
eq_('Some <span class="changelog-highlight-fix">bug</span> was <span class="changelog-highlight-fix">fixed</span>.',
highlight_keywords('Some bug was fixed.'))
eq_('<span class="changelog-highlight-fix">Fix</span> an issue.',
highlight_keywords('Fix an issue.'))
eq_('<span class="changelog-highlight-fix">Fixes</span> an issue.',
highlight_keywords('Fixes an issue.'))
eq_('This function is <span class="changelog-highlight-dep">deprecated</span>.',
highlight_keywords('This function is deprecated.'))
eq_('This is a <span class="changelog-highlight-fix">bugfix</span> release.',
highlight_keywords('This is a bugfix release.'))
# Backward
eq_('This feature was <span class="changelog-highlight-inc">removed</span>.',
highlight_keywords('This feature was removed.'))
eq_('This change is <span class="changelog-highlight-inc">backward incompatible</span>.',
highlight_keywords('This change is backward incompatible.'))
# security
eq_('Improved <span class="changelog-highlight-sec">XSS</span> filtering',
highlight_keywords('Improved XSS filtering'))
eq_('Improved <span class="changelog-highlight-sec">security</span> in SQL',
highlight_keywords('Improved security in SQL'))
# multiple
eq_('attention to <span class="changelog-highlight-fix">bugfixes</span> and <span class="changelog-highlight-sec">security</span> issues',
highlight_keywords('attention to bugfixes and security issues'))
def test_extract_metadata_is_able_to_detect_unreleased_version():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
eq_([v(type='prerender_items',
title='1.0 (unreleased)',
unreleased=True,
content='')],
extract_metadata(
v(title='1.0 (unreleased)',
content='')))
eq_([v(type='prerender_items',
title='1.45 (not yet released)',
unreleased=True,
content='')],
extract_metadata(
v(title='1.45 (not yet released)',
content='')))
eq_([v(type='prerender_items',
title='1.45 (Under Development)',
unreleased=True,
content='')],
extract_metadata(
v(title='1.45 (Under Development)',
content='')))
eq_([v(type='prerender_items',
title='1.45',
unreleased=True,
content='Under Development')],
extract_metadata(
v(title='1.45',
content='Under Development')))
eq_([v(type='prerender_items',
title='1.45',
unreleased=True,
content='Development is in progress')],
extract_metadata(
v(title='1.45',
content='Development is in progress')))
def test_extract_metadata_ignores_unreleased_keywords_if_date_was_found_ealier():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
expected = [v(type='prerender_items',
title='1.0 (2015-02-06)',
date=datetime.date(2015, 2, 6),
content='unreleased')]
version = v(title='1.0 (2015-02-06)', content='unreleased')
result = list(extract_metadata(version))
eq_(expected, result)
def test_extract_date_only_from_first_three_lines():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
eq_(datetime.date(2015, 12, 14),
first(extract_metadata(
v(title='1.0',
content='one\ntwo\n2015-12-14'))).date)
eq_(None,
getattr(first(extract_metadata(
v(title='1.0',
content='one\ntwo\nthree\n2015-12-14'))), 'date', None))
def test_extract_unreleased_keywords_only_from_first_three_lines():
env = Environment()
env.type = 'almost_version'
v = lambda **kwargs: env.push(**kwargs)
eq_(True,
first(extract_metadata(
v(title='1.0',
content='one\ntwo\nunreleased'))).unreleased)
eq_(None,
getattr(first(extract_metadata(
v(title='1.0',
content='one\ntwo\nthree\nunreleased'))), 'unreleased', None))
def test_grouping_by_path():
env = Environment()
env.type = 'version'
v = lambda filename: env.push(filename=filename)
versions = [(10, v('docs/notes/0.1.0.rst')),
(10, v('docs/notes/0.2.0.rst')),
(10, v('docs/README')),
(1000, v('CHANGES'))]
eq_({'CHANGES': {'score': 1000, 'versions': [v('CHANGES')]},
'docs/': {'score': 30 - 5, 'versions': [v('docs/notes/0.1.0.rst'),
v('docs/notes/0.2.0.rst'),
v('docs/README')]},
'docs/README': {'score': 10, 'versions': [v('docs/README')]},
'docs/notes/': {'score': 20 - 2, 'versions': [v('docs/notes/0.1.0.rst'),
v('docs/notes/0.2.0.rst')]},
'docs/notes/0.1.0.rst': {'score': 10, 'versions': [v('docs/notes/0.1.0.rst')]},
'docs/notes/0.2.0.rst': {'score': 10, 'versions': [v('docs/notes/0.2.0.rst')]}},
group_by_path(versions))
def test_strip_outer_tag():
# simple case
eq_('Added new feature.',
strip_outer_tag('<li>Added new feature.</li>'))
# a case with embedded html
eq_('Added <b>new</b> feature.',
strip_outer_tag('<li>Added <b>new</b> feature.</li>'))
# a case with newline
eq_('Added new\n feature.',
strip_outer_tag('<li>Added new\n feature.</li>'))
# and now multiline with embedded HTML
eq_('Added new output <code>twiggy_goodies.logstash.LogstashOutput</code> which\nsends json encoded data via UDP to a logstash server.',
strip_outer_tag('<li>Added new output <code>twiggy_goodies.logstash.LogstashOutput</code> which\nsends json encoded data via UDP to a logstash server.</li>'))
# also, it should remove several nested tags too
eq_('Some text',
strip_outer_tag('<li><p>Some text</p></li>'))
# and it shouldn't stuck at such strange things
eq_('Blah',
strip_outer_tag('<p>Blah'))
# but should leave as is if there isn't any common root node
eq_('<p>Blah</p><p>minor</p>',
strip_outer_tag('<p>Blah</p><p>minor</p>'))
# and shouldn't broke on comment lines
eq_('Blah',
strip_outer_tag('<!--Comment-->Blah'))
def test_parse_plain_text():
source = u"""
0.1:
* Initial release
0.1.1
* Added benchmarking script
* Added support for more
serializer modules"""
_test_plain_parser(
source,
[
u'<ul><li>Initial release</li></ul>',
(u'<ul><li>Added benchmarking script</li>'
u'<li>Added support for more<br/>serializer modules</li></ul>'),
'<pre>' + source + '</pre>',
]
)
def test_parse_redispy_style_plain_text():
source = u"""
* 2.10.2
* Added support for Hiredis's new bytearray support. Thanks
https://github.com/tzickel
* Fixed a bug when attempting to send large values to Redis in a Pipeline.
* 2.10.1
* Fixed a bug where Sentinel connections to a server that's no longer a
master and receives a READONLY error will disconnect and reconnect to
the master."""
_test_plain_parser(
source,
[
(u'<ul><li>Added support for Hiredis\'s new bytearray support. Thanks<br/>https://github.com/tzickel</li>'
'<li>Fixed a bug when attempting to send large values to Redis in a Pipeline.</li></ul>'),
(u'<ul><li>Fixed a bug where Sentinel connections to a server that\'s no longer a<br/>master and receives a READONLY error will disconnect and reconnect to<br/>the master.</li></ul>'),
'<pre>' + source + '</pre>'
])
def test_plaintext_parser_ignores_nested_versions():
# this is a snippet from Node's changelog
# https://raw.githubusercontent.com/joyent/node/master/ChangeLog
file = create_file('Changes',
"""
2015.02.06, Version 0.12.0 (Stable)
* npm: Upgrade to 2.5.1
* mdb_v8: update for v0.12 (Dave Pacheco)
""")
versions = list(parse_plain_file(file))
eq_(2, len(versions))
eq_('Changes', versions[1].title)
v = versions[0]
eq_('2015.02.06, Version 0.12.0 (Stable)', v.title)
eq_('<ul><li>npm: Upgrade to 2.5.1</li></ul>\n<ul><li>mdb_v8: update for v0.12 (Dave Pacheco)</li></ul>',
v.content)
def _test_parser(parser, given, expected):
file = create_file('Changes', given)
versions = list(parser(file))
expected = [expected] if isinstance(expected, basestring) else expected
assert len(versions) >= len(expected)
for v, ex_v in zip(versions, expected):
eq_(ex_v.strip(), v.content.strip())
eq_(len(versions), len(expected))
_test_plain_parser = lambda *args: _test_parser(parse_plain_file, *args)
_test_md_parser = lambda *args: _test_parser(parse_markdown_file, *args)
def test_nodejs_parsing():
source = u"""
2009.08.13, Version 0.1.4, 0f888ed6de153f68c17005211d7e0f960a5e34f3
* Major refactor to evcom.
* Upgrade v8 to 1.3.4
Upgrade libev to 3.8
Upgrade http_parser to v0.2
"""
_test_plain_parser(
source,
[
u"""
<ul><li>Major refactor to evcom.</li></ul>
<ul><li>Upgrade v8 to 1.3.4<br/>Upgrade libev to 3.8<br/>Upgrade http_parser to v0.2</li></ul>
""",
'<pre>' + source + '</pre>'
])
@skip('waiting for implementation')
def test_plaintext_parsing_of_nested_lists():
source = u"""
2015.03.09 version 0.8.15
* First
* Second
* Third
* Fourth
* Fifths
* Six
* Seven
* Eight
"""
_test_plain_parser(
source,
[
u"""
<ul><li>First
<ul><li>Second
<ul><li>Third
<ul><li>Fourth</li>
<li>Fifths</li></ul></li>
<li>Six
<ul><li>Seven</li>
<li>Eight</li></ul></li></ul></li></ul></li></ul>
""",
'<pre>' + source + '</pre>'
]
)
def test_versions_filter():
fs = env.push(type='file_section')
v1_0 = fs.push(version='1.0')
fs1 = fs.push(type='file_section')
v1_0_1 = fs1.push(version='1.0.1')
fs2 = fs.push(type='file_section')
v1_0_2 = fs2.push(version='1.0.2')
versions = filter_versions([v1_0, v1_0_1, v1_0_2])
eq_(2, len(versions))
eq_(v1_0_1, versions[0])
eq_(v1_0_2, versions[1])
def test_versions_filter2():
fs = env.push(type='file_section')
v1_0_5 = fs.push(type='version', version='1.0.5')
fs_child = fs.push(type='file_section')
v2_6 = fs_child.push(type='version', version='2.6')
versions = filter_versions([v1_0_5, v2_6])
eq_(1, len(versions))
eq_(v1_0_5, versions[0])
def test_html_document_fromstring():
doc = html_document_fromstring(u"""<?xml version=\'1.0\' encoding=\'UTF-8\'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html>Blah</html>""")
assert doc is not None
|
102299
|
import random
from django.core import serializers
from django.shortcuts import HttpResponse
from .models import DemoData
TEMP = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_+=-"
# Create your views here.
def demo_views(request):
result = DemoData.objects.filter(
name="".join(random.choices(TEMP, k=random.randrange(1, 254)))
)
# x = json.dumps(request.body)
return HttpResponse(
serializers.serialize("json", result.values() if result else []),
content_type="application/json",
)
|
102302
|
from werkzeug.utils import find_modules, import_string
def import_all(import_name):
for module in find_modules(import_name, include_packages=True, recursive=True):
import_string(module)
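# Example (illustrative): recursively import every module under a package so that
# import-time registrations (e.g. decorators) run; the package name is hypothetical.
# import_all('myapp.plugins')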
|
102400
|
import pytest
def test_ec2user_user_group(host):
"""Check if the ec2-user user created in a ec2-user group and its UID and GUID values is 1000"""
assert host.user("ec2-user").exists
assert host.group("ec2-user").exists
assert host.user("ec2-user").uid == 1000
assert host.user("ec2-user").gid == 1000
def test_ec2user_sudoers_file(host):
"""Check if ec2-user user's sudoers file is present and correct"""
with host.sudo():
sudoers_file = host.file("/etc/sudoers.d/90-cloud-init-users")
assert sudoers_file.contains("ec2-user ALL=(ALL) NOPASSWD:ALL")
# @pytest.mark.parametrize('package', [])
def test_aws_agents_installed(host):
"""Check if Amazon SSM Agent is installed, its services running and enabled"""
assert host.package("amazon-ssm-agent").is_installed
assert host.service("amazon-ssm-agent.service").is_running
assert host.service("amazon-ssm-agent.service").is_enabled
def test_instance_ssh_pub_key(host):
"""Only key pair's public key should be present"""
authorized_keys = host.file("/home/ec2-user/.ssh/authorized_keys").content_string
assert len(authorized_keys.splitlines()) == 1
def test_installer_leftovers(host):
"""Check if installer logs and kickstart files removed after the installation"""
assert host.file("/root/anaconda-ks.cfg").exists == False
assert host.file("/root/original-ks.cfg").exists == False
assert host.file("/var/log/anaconda").exists == False
assert host.file("/root/install.log").exists == False
assert host.file("/root/install.log.syslog").exists == False
def test_network_is_working(host):
"""Check if networking works properly"""
almalinux = host.addr("almalinux.org")
assert almalinux.is_resolvable
assert almalinux.port(443).is_reachable
@pytest.mark.dependency()
def test_get_machine_ids(host):
"""Get machine-id of the each machine and write to a file"""
machine_id = host.file('/etc/machine-id').content_string
open('hostnames.txt', 'a').write(f'{machine_id}')
@pytest.mark.dependency(depends=["test_get_machine_ids"])
def test_uniqueness_of_machineid():
"""Check if machine-id is unique for each machine"""
machine_id_a, machine_id_b = open('hostnames.txt', 'r').read().splitlines()[:2]
assert machine_id_a != machine_id_b
@pytest.mark.dependency()
def test_get_ssh_hostkeys(host):
"""Get checksum of SSH host keys from each machine and write to file"""
with host.sudo():
host_key = host.check_output("sha256sum /etc/ssh/ssh_host_*")
open('sshhostkeys.txt', 'a').write(f'{host_key}\n')
@pytest.mark.dependency(depends=["test_get_ssh_hostkeys"])
def test_uniqueness_of_sshhostkeys():
"""Check if SSH host keys are unique for each machine"""
content = open('sshhostkeys.txt', 'r').read()
ssh_host_keys_a, ssh_host_keys_b = '\n'.join(content.splitlines()[:6]), '\n'.join(content.splitlines()[6:])
assert ssh_host_keys_a != ssh_host_keys_b
|
102413
|
import pandas as pd
import numpy as np
np.random.seed(163)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
# Load the precomputed X mutag feature matrix.
print "Loading Feature Matrix..."
df=pd.read_csv('X_mutag.csv',header=None)
X=np.array(df)
# Load the precomputed Y mutag feature matrix.
print "Loading Class labels Matrix..."
df=pd.read_csv('Y_mutag.csv',header=None)
Y=np.array(df)
# Create the 10-fold cross validation indices.
idx = np.arange(len(Y))
np.random.shuffle(idx)
kfold = StratifiedKFold(y=Y[:,0], n_folds=10)
cvscores = []
error_labels_index=[]
X=X[idx];
Y=Y[idx];
#Perform 10-fold cross validation
print "Performing 10-fold cross validation..."
for i, (train, test) in enumerate(kfold):
# Fit the SVM model
clf2 = SVC(C=20, gamma=1.5e-04)
clf2.fit(X[train], np.ravel(Y[train]))
accuracy=clf2.score(X[test],np.ravel(Y[test]))
predicted_labels=clf2.predict(X[test])
test_labels=np.reshape(Y[test],(len(Y[test])))
error=predicted_labels-test_labels
print "Test Fold "+str(i)
print("Accuracy= %.2f%%" % (accuracy*100))
cvscores.append(accuracy * 100)
#Report average classification accuracy.
print "Average Accuracy= %.2f%%" % (np.mean(cvscores))
|
102435
|
import datetime, calendar
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from doajtest.helpers import DoajTestCase
from portality.scripts.prune_marvel import generate_delete_pattern
class TestPruneMarvel(DoajTestCase):
@classmethod
def setUpClass(cls):
cls.runs = []
years = [2016, 2017]
for year in years:
for month in range(1, 13):
num_days = calendar.monthrange(year, month)[1]
days = [datetime.date(year, month, day) for day in range(1, num_days + 1)]
for day in days:
cls.runs.append(day)  # date(2016, 1, 1) .. date(2017, 12, 31)
@classmethod
def tearDownClass(cls):
cls.runs = []
def setUp(self):
pass
def tearDown(self):
pass
def test_generate_delete_pattern(self):
for run in self.runs:
with freeze_time(run):
expected_delete_date = run - relativedelta(months=3)
expected_delete_pattern = '.marvel-{}*'.format(expected_delete_date.strftime('%Y.%m'))
actual_delete_pattern = generate_delete_pattern()
assert actual_delete_pattern == expected_delete_pattern, 'Current time: {}. {} did not match expected {}'.format(run, generate_delete_pattern(), expected_delete_pattern)
assert len(actual_delete_pattern) == 16, "actual_delete_pattern is the wrong length: {} characters for '{}'".format(len(actual_delete_pattern), actual_delete_pattern)
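# Worked example (illustrative): with the clock frozen at 2016-04-15 the cutoff month is
# 2016-01, so generate_delete_pattern() should return '.marvel-2016.01*' (16 characters,
# matching the length assertion above).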
|
102460
|
import time
import copy
import gobject
from phony.base.log import ClassLogger
from RPi import GPIO
from types import MethodType
class Inputs(ClassLogger):
_layout = {}
_inputs_by_channel = {}
_rising_callback_by_channel_name = {}
_falling_callback_by_channel_name = {}
_pulse_callback_by_channel_name = {}
def __init__(self, layout):
ClassLogger.__init__(self)
# IO event callbacks occur in another thread, dbus/gdk need
# to be made aware of this.
gobject.threads_init()
GPIO.setmode(GPIO.BCM)
self._layout = layout
for name, config in layout.iteritems():
for point in ['pin', 'pull_up_down']:
Inputs._raise_if_not_in(point, config)
config = copy.deepcopy(config)
config['name'] = name
self._inputs_by_channel[config['pin']] = config
self._configure_input(name, config)
def on_rising_edge(self, channel_name, callback):
self._rising_callback_by_channel_name[channel_name] = callback
def on_falling_edge(self, channel_name, callback):
self._falling_callback_by_channel_name[channel_name] = callback
def on_pulse(self, channel_name, callback):
self._pulse_callback_by_channel_name[channel_name] = callback
#@ClassLogger.TraceAs.call()
def _channel_changed(self, channel):
name = self._inputs_by_channel[channel]['name']
do_rise = name in self._rising_callback_by_channel_name
do_fall = name in self._falling_callback_by_channel_name
if do_rise or do_fall:
time.sleep(0.01)
if GPIO.input(channel):
high = 1
else:
high = 0
if high and do_rise:
self._rising_callback_by_channel_name[name]()
if not high and do_fall:
self._falling_callback_by_channel_name[name]()
if name in self._pulse_callback_by_channel_name:
self._pulse_callback_by_channel_name[name]()
def _configure_input(self, name, configuration):
pin = configuration['pin']
self.log().debug('Pin %d -> %s' % (pin, name))
if configuration['pull_up_down'] == 'up':
pull_up_or_down = GPIO.PUD_UP
else:
pull_up_or_down = GPIO.PUD_DOWN
if 'debounce' in configuration:
debounce = configuration['debounce']
else:
debounce = 0
GPIO.setup(pin, GPIO.IN, pull_up_down = pull_up_or_down)
GPIO.add_event_detect(pin, GPIO.BOTH, callback = self._channel_changed, bouncetime = debounce)
@staticmethod
def _raise_if_not_in(point, config):
if point not in config:
raise Exception('Missing required configuration point %s' % point)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for channel in self._inputs_by_channel:
GPIO.remove_event_detect(channel)
class Outputs(ClassLogger):
_layout = None
def __init__(self, layout):
ClassLogger.__init__(self)
# IO event callbacks occur in another thread, dbus/gdk need
# to be made aware of this.
gobject.threads_init()
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
self._layout = layout
for name, config in layout.iteritems():
for point in ['pin', 'default']:
Outputs._raise_if_not_in(point, config)
self._configure_output(name, config)
def _configure_output(self, name, configuration):
pin = configuration['pin']
self.log().debug('Pin %d -> %s' % (pin, name))
GPIO.setup(pin, GPIO.OUT)
if 'invert_logic' in configuration and configuration['invert_logic']:
set_pin = lambda self,value: GPIO.output(pin, not value)
else:
set_pin = lambda self,value: GPIO.output(pin, value)
set_pin(None, configuration['default'])
setattr(self, name, MethodType(set_pin, self, type(self)))
@staticmethod
def _raise_if_not_in(point, config):
if point not in config:
raise Exception('Missing required configuration point %s' % point)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
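# Example (illustrative, assuming BCM pin numbering; pin numbers and names are hypothetical):
# inputs = Inputs({'hook_switch': {'pin': 17, 'pull_up_down': 'up', 'debounce': 200}})
# inputs.on_rising_edge('hook_switch', lambda: None)   # replace with a real handler
# outputs = Outputs({'ringer': {'pin': 27, 'default': 0}})
# outputs.ringer(1)   # drive the ringer pin high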
|
102466
|
from .transformer import *
from .common import *
#tf.compat.v1.disable_eager_execution()
#
#batch_size = 40
#seq_length = 200
#hidden_size = 768
#num_attention_heads =12
#size_per_head = int(hidden_size / num_attention_heads)
#
#layer_input = tf.compat.v1.placeholder(tf.float32, shape=(batch_size*seq_length, hidden_size))
## Tensor of shape [batch_size, from_seq_length, to_seq_length].
#attention_mask = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, seq_length, seq_length))
#
#output_rnn = transformer_cell(input_tensor=layer_input,#tf.reshape(layer_input, [-1, hidden_size]),
# attention_mask=attention_mask,
# hidden_size=hidden_size,
# num_attention_heads=num_attention_heads,
# attention_head_size=size_per_head,
# batch_size = batch_size,
# seq_length = seq_length,
# intermediate_size=1280)
|
102475
|
from copy import deepcopy
from pymaclab.dsge.translators import pml_to_dynarepp
from pymaclab.dsge.translators import dynarepp_to_pml
from pymaclab.dsge.translators import pml_to_pml
from pymaclab.dsge.parsers._dsgeparser import ff_chron_str, bb_chron_str
class Translators(object):
def __init__(self,other=None):
self.template_paramdic = deepcopy(other.template_paramdic)
self._other = other
def pml_to_dynarepp(self,template_paramdic=None,fpath=None,focli=None):
# Need to do some work to make focs_li dynare-conformable!
focli = self.template_paramdic['focs_dynare']
other = self._other
vreg = other.vreg
patup = ('{-10,10}|None','endo|con|exo|iid|other','{-10,10}')
compset = set(['endo','con'])
for i1,lino in enumerate(focli):
varli = set([x[1][0] for x in vreg(patup,focli[i1],True,'max')])
if varli.intersection(compset) != set([]) and 'exo' in varli:
focli[i1] = ff_chron_str(other,str1=focli[i1],ff_int=1,vtype='exo')
else:
focli[i1] = bb_chron_str(other,str1=focli[i1],bb_int=1,vtype='iid')
template_paramdic = deepcopy(self.template_paramdic)
template_paramdic['focs_dynare'] = focli
if fpath == None:
return pml_to_dynarepp.translate(template_paramdic=template_paramdic,focli=focli)
else:
pml_to_dynarepp.translate(template_paramdic=template_paramdic,fpath=fpath,focli=focli)
def dynarepp_to_pml(self,template_paramdic=None,fpath=None,focli=None):
if fpath == None:
if template_paramdic == None:
return dynarepp_to_pml.translate(template_paramdic=self.template_paramdic,focli=focli)
else:
return dynarepp_to_pml.translate(template_paramdic=template_paramdic,focli=focli)
else:
if template_paramdic == None:
dynarepp_to_pml.translate(template_paramdic=self.template_paramdic,fpath=fpath,focli=focli)
else:
dynarepp_to_pml.translate(template_paramdic=template_paramdic,fpath=fpath,focli=focli)
def pml_to_pml(self,template_paramdic=None,fpath=None):
if fpath == None:
if template_paramdic == None:
return pml_to_pml.translate(template_paramdic=self.template_paramdic)
else:
return pml_to_pml.translate(template_paramdic=template_paramdic)
else:
if template_paramdic == None:
pml_to_pml.translate(template_paramdic=self.template_paramdic,fpath=fpath)
else:
pml_to_pml.translate(template_paramdic=template_paramdic,fpath=fpath)
|
102492
|
import unittest
from unittest import mock
from apiserver.search import parse_query
from apiserver.search import join
from apiserver.search.union import name_similarity
from .utils import DataTestCase
class TestSearch(unittest.TestCase):
def test_simple(self):
"""Test the query generation for a simple search"""
main, sup_funcs, sup_filters, vars = parse_query({
'keywords': ['green', 'taxi'],
'source': 'gov',
})
self.assertEqual(
main,
[
{
'multi_match': {
'query': 'green taxi',
'operator': 'and',
'type': 'cross_fields',
'fields': ['id^10', 'description', 'name^3', 'attribute_keywords'],
},
},
{
'bool': {
'filter': [
{
'terms': {
'source': ['gov'],
},
},
],
},
},
],
)
self.assertEqual(
sup_funcs,
[
{
'filter': {
'multi_match': {
'query': 'green taxi',
'operator': 'and',
'type': 'cross_fields',
'fields': [
'dataset_id^10',
'dataset_description',
'dataset_name^3',
'dataset_attribute_keywords',
],
},
},
'weight': 10,
},
],
)
self.assertEqual(
sup_filters,
[
{
'terms': {
'dataset_source': ['gov'],
},
},
],
)
self.assertEqual(vars, [])
def test_types(self):
"""Test the query generation for a search with dataset types"""
main, sup_funcs, sup_filters, vars = parse_query({
'keywords': ['food'],
'types': ['spatial', 'temporal'],
})
self.assertEqual(
main,
[
{
'multi_match': {
'query': 'food',
'operator': 'and',
'type': 'cross_fields',
'fields': [
'id^10',
'description',
'name^3',
'attribute_keywords',
],
},
},
{
'bool': {
'filter': [
{
'terms': {
'types': ['spatial', 'temporal'],
},
},
],
},
},
],
)
self.assertEqual(
sup_funcs,
[
{
'filter': {
'multi_match': {
'query': 'food',
'type': 'cross_fields',
'operator': 'and',
'fields': [
'dataset_id^10',
'dataset_description',
'dataset_name^3',
'dataset_attribute_keywords',
],
},
},
'weight': 10,
},
],
)
self.assertEqual(
sup_filters,
[
{
'terms': {
'dataset_types': ['spatial', 'temporal'],
},
},
],
)
self.assertEqual(vars, [])
def test_ranges(self):
"""Test the query generation for spatial/temporal ranges"""
main, sup_funcs, sup_filters, vars = parse_query({
'keywords': ['green', 'taxi'],
'source': ['gov'],
'variables': [
{
'type': 'temporal_variable',
'start': '2019-01-01',
'end': '2019-12-31',
},
{
'type': 'geospatial_variable',
'latitude1': 45.4,
'latitude2': 50.6,
'longitude1': -73.2,
'longitude2': -75.8,
},
],
})
self.assertEqual(
main,
[
{
'multi_match': {
'query': 'green taxi',
'operator': 'and',
'type': 'cross_fields',
'fields': ['id^10', 'description', 'name^3', 'attribute_keywords'],
},
},
{
'bool': {
'filter': [
{
'terms': {
'source': ['gov'],
},
},
],
},
},
{
'nested': {
'path': 'temporal_coverage',
'query': {
'bool': {
'must': [
{
'nested': {
'path': 'temporal_coverage.ranges',
'query': {
'range': {
'temporal_coverage.ranges.range': {
'gte': 1546300800.0,
'lte': 1577750400.0,
'relation': 'intersects',
},
},
},
},
},
],
},
},
},
},
{
'nested': {
'path': 'spatial_coverage.ranges',
'query': {
'bool': {
'filter': {
'geo_shape': {
'spatial_coverage.ranges.range': {
'shape': {
'type': 'envelope',
'coordinates': [
[-75.8, 50.6],
[-73.2, 45.4],
],
},
'relation': 'intersects',
},
},
},
},
},
},
},
],
)
class TestAugmentation(DataTestCase):
def test_temporal(self):
"""Test searching for augmentation with temporal data"""
main, sup_funcs, sup_filters, vars = parse_query({
'keywords': 'green taxi',
})
es = mock.Mock()
result = object()
es.search.return_value = {
'hits': {
'hits': [
result,
],
},
}
results = join.get_temporal_join_search_results(
es,
[[1.0, 2.0], [11.0, 12.0]],
None,
None,
sup_funcs,
sup_filters,
)
self.assertEqual(results, [result])
self.assertEqual(len(es.search.call_args_list), 1)
args, kwargs = es.search.call_args_list[0]
self.assertEqual(args, ())
temporal_query = lambda a, b: {
'nested': {
'path': 'ranges',
'query': {
'function_score': {
'query': {
'range': {
'ranges.range': {
'gte': a,
'lte': b,
'relation': 'intersects',
},
},
},
'script_score': {
'script': {
'lang': 'painless',
'params': {
'gte': a,
'lte': b,
'coverage': 4.0,
},
'source': lambda s: (
isinstance(s, str) and len(s) > 20
),
},
},
'boost_mode': 'replace',
},
},
'inner_hits': {
'_source': False,
'size': 100,
'name': lambda s: s.startswith('range-'),
},
'score_mode': 'sum',
},
}
kwargs.pop('request_timeout', None)
self.assertJson(
kwargs,
dict(
index='temporal_coverage',
body={
'_source': lambda d: isinstance(d, dict),
'query': {
'function_score': {
'query': {
'bool': {
'filter': [],
'should': [
temporal_query(1.0, 2.0),
temporal_query(11.0, 12.0),
],
'must_not': [],
'minimum_should_match': 1
},
},
'functions': [
{
'filter': {
'multi_match': {
'query': 'green taxi',
'operator': 'and',
'type': 'cross_fields',
'fields': [
'dataset_id^10',
'dataset_description',
'dataset_name^3',
'dataset_attribute_keywords',
],
},
},
'weight': 10,
},
],
'score_mode': 'sum',
'boost_mode': 'multiply',
},
},
},
size=50,
),
)
def test_name_similarity(self):
self.assertAlmostEqual(
name_similarity("temperature", "temperature"),
1.00,
places=2,
)
self.assertAlmostEqual(
name_similarity("fridge temperature", "temperature"),
0.56,
places=2,
)
self.assertAlmostEqual(
name_similarity("avg temperature", "temperature avg"),
0.625,
places=2,
)
self.assertAlmostEqual(
name_similarity("temperature", "temperament"),
0.38,
places=2,
)
|
102547
|
import py
class TestJitTraceInteraction(object):
def test_trace_while_blackholing(self):
import sys
l = []
printed = []
def trace(frame, event, arg):
l.append((frame.f_code.co_name, event))
return trace
def g(i, x):
if i > x - 10:
printed.append(i)
if i == x - 5:
sys.settrace(trace)
def f(x):
res = 0
for i in range(x):
res += i
g(i, x)
f(10)
sys.settrace(None)
        print(printed)
assert l == [('g', 'call'), ('g', 'line'), ('g', 'line'), ('g', 'line'), ('g', 'return')] * 4
l1 = l
l = []
printed = []
f(10000)
sys.settrace(None)
        print(printed)
assert l == l1
|
102551
|
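# Builds fixed-width character contexts for each FASTA record. Worked example:
# the sequence "ACD" with one character of context on each side yields the
# output line "-AC ACD CD-".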
r = open('all.protein.faa', 'r')
w = open('context.processed.all.protein.faa', 'w')
def write_context(seq, out, length_before=1, length_after=1):
    list_char = list(seq.replace('\n', ''))
    list_context = []
    for i in range(len(list_char)):
        tmp = ""
        for j in range(i - length_before, i + length_after + 1):
            if j < 0 or j >= len(list_char):
                tmp = tmp + '-'
            else:
                tmp = tmp + list_char[j]
        list_context.append(tmp)
    out.write(" ".join(list_context) + "\n")
start = True
mem = ""
for line in r:
    if '>' in line and not start:
        write_context(mem, w)
        mem = ""
    else:
        if not start:
            mem = mem + line
    start = False
# flush the final sequence once the loop ends
if mem:
    write_context(mem, w)
r.close()
w.close()
|
102575
|
import os
from pathlib import Path
from appdirs import user_data_dir
class EnvManager:
"""Stashes environment variables in a file and
retrieves them in (a different process) with get_environ
with failover to os.environ
"""
app_env_dir = Path(user_data_dir("NEBULO"))
app_env = app_env_dir / ".env"
def __init__(self, **env_vars):
# Delete if exists
try:
os.remove(self.app_env)
except OSError:
pass
self.app_env_dir.mkdir(parents=True, exist_ok=True)
self.app_env.touch()
self.vars = env_vars
def __enter__(self):
with self.app_env.open("w") as env_file:
for key, val in self.vars.items():
if val is not None:
env_file.write(f"{key}={val}\n")
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
try:
os.remove(self.app_env)
except OSError:
pass
@classmethod
def get_environ(cls):
try:
with cls.app_env.open("r") as f:
for row in f:
key, value = row.split("=", 1)
os.environ[key.strip()] = value.strip()
except FileNotFoundError:
pass
return os.environ
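# Minimal usage sketch (hypothetical names):
#
#   with EnvManager(DATABASE_URL="postgresql://localhost/db"):
#       start_worker()  # the worker process calls EnvManager.get_environ()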
|
102585
|
import re
import click
from matrix_connection import matrix_client
from tabulate import tabulate
@click.command()
@click.argument('pattern', required=False, type=str)
def list_rooms(pattern):
"""List room ids and keys."""
rooms = matrix_client().get_rooms()
data = [(rid, room.display_name)
for rid, room in rooms.items()]
if pattern:
data = [(rid, name) for rid, name in data
if re.search(pattern.strip('/'), name)]
print(tabulate(data, headers=['Room ID', 'Display Name']))
if __name__ == '__main__':
list_rooms()
|
102607
|
import os
import unittest
from scrapy.http import TextResponse, Request
from pdl_scraper.spiders.pdfurl_spider import PdfUrlSpider
class TestPdfUrlSpider(unittest.TestCase):
def setUp(self):
self.spider = PdfUrlSpider()
def test_find_pdfurl(self):
codigos = (
'00001',
'03847',
'00864',
'00963',
'01367',
'00052',
'00253',
'00313',
'00666',
)
expected = (
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/e8ad7d6747e75b8e052578df005a92ab/$FILE/00001.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc02_2011_2.nsf/d99575da99ebfbe305256f2e006d1cf0/4465dd7d442d1a6d05257d65007d0796/$FILE/PL03847021014.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/e58512d0bfb9118d052579bb0054c0e2/$FILE/PL00864080312.-.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/72dfe9f6ee7af28a052579d000043256/$FILE/PL00963280312....pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/0/8e0331a84969f79305257a4b007a7b2b/$FILE/01367300712.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/d18f1338ca3a643b052578f1007a96ba/$FILE/00052PL1882011.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/db59e670c91ac96b05257913006ebf4b/$FILE/PL00253220911---.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/ad4b72e8abf4e2f20525792100085776/$FILE/PL00313051011,.pdf',
'http://www2.congreso.gob.pe/Sicr/TraDocEstProc/Contdoc01_2011.nsf/d99575da99ebfbe305256f2e006d1cf0/be68adfea28d33bd05257976004f91d3/$FILE/PL0066629122011-..pdf',
)
for i in range(len(codigos)):
codigo = codigos[i]
filename = codigo + '.html'
response = fake_response_from_file(filename)
result = self.spider.find_pdfurl(codigo, response)
self.assertEqual(expected[i], result)
def fake_response_from_file(filename, url=None):
"""
    Create a Scrapy fake HTTP response from an HTML file
    @param filename: The relative filename from the responses directory,
    but absolute paths are also accepted.
@param url: The URL of the response.
returns: A scrapy HTTP response which can be used for unittesting.
taken from http://stackoverflow.com/a/12741030/3605870
"""
if not url:
url = 'http://www.example.com'
request = Request(url=url)
if not filename[0] == '/':
responses_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(responses_dir, 'tests/test_spiders_data', filename)
else:
file_path = filename
    with open(file_path, 'r') as f:
        file_content = f.read()
response = TextResponse(url=url, request=request, body=file_content)
response._encoding = 'latin-1'
return response
|
102629
|
from enum import Enum
from deprecation import deprecated
@deprecated(details="""Enum-value statuses are deprecated since SLIMS 6.4.
Unless your SLIMS system still uses them (see Lab Settings),
you should use the Status table and cntn_fk_status for status queries.""")
class Status(Enum):
"""List of content statusses in SLims
Can be used to fetch or update content
Examples:
>>> slims.fetch("Content",
equals("cntn_status", Status.PENDING.value))
"""
PENDING = 10
AVAILABLE = 20
LABELED = 30
APPROVED = 40
REMOVED = 50
CANCELLED = 60
|
102645
|
from mongoengine import *
class VersionModel(Document):
"""
    Collection for managing the version of each client
"""
meta = {
'collection': 'versions'
}
platform = IntField(
required=True,
primary_key=True
)
# 1: Web
# 2: Android
# 3: IOS
version = StringField(
required=True
)
|
102718
|
import requests
import json
from bs4 import BeautifulSoup
def scrape_creatures():
    print('scraping creatures')
url = 'http://ark.gamepedia.com/Entity_IDs'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
tables = soup.find_all('table')
creature_table = tables[2]
container = {}
creatures = []
for row in creature_table.find_all('tr')[1:]:
cols = row.find_all('td')
creature = {
'name': cols[2].a.string,
'category': cols[3].a.string,
}
        try:
            creature['icon'] = cols[0].div.div.a.img['src']
        except Exception:
            creature['icon'] = None
        try:
            creature['id'] = cols[4].string
        except Exception:
            creature['id'] = None
        try:
            creature['path'] = cols[5].font.string
        except Exception:
            creature['path'] = None
creatures.append(creature)
container['creatures'] = creatures
with open('data/entity_ids_creatures.json', 'w') as ofile:
ofile.write(json.dumps(container, indent=4,
separators=(',', ': ')))
def scrape_items():
    print('scraping items')
url = 'http://ark.gamepedia.com/Entity_IDs'
r = requests.get(url)
    print(r)
soup = BeautifulSoup(r.text, 'html.parser')
tables = soup.find_all('table')
container = {}
items = []
item_table = tables[0]
for row in item_table.find_all('tr')[1:]:
cols = row.find_all('td')
item = {
'icon': cols[0].div.div.a.img['src'],
# 'id': int(cols[1].string),
'name': cols[2].a.string,
'category': cols[3].a.string,
# 'path': cols[5].font.string
}
        try:
            item['id'] = int(cols[1].string)
        except Exception:
            item['id'] = None
        try:
            item['path'] = cols[5].font.string
        except Exception:
            item['path'] = ''
items.append(item)
# print items
container['items'] = items
with open('data/entity_ids_items.json', 'w') as ofile:
ofile.write(json.dumps(container, indent=4,
separators=(',', ': ')))
def main():
# scrape_items()
scrape_creatures()
if __name__ == '__main__':
main()
|
102721
|
import os
import datetime
import pytest
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import trackintel as ti
@pytest.fixture
def testdata_sp_tpls_geolife_long():
"""Generate sp and tpls sequences of the original pfs for subsequent testing."""
pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
pfs, sp = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
pfs, tpls = pfs.as_positionfixes.generate_triplegs(sp, method="between_staypoints")
tpls["type"] = "tripleg"
sp["type"] = "staypoint"
    sp_tpls = pd.concat([sp, tpls], ignore_index=True).sort_values(by="started_at")
return sp_tpls
@pytest.fixture
def testdata_all_geolife_long():
"""Generate sp, tpls and trips of the original pfs for subsequent testing."""
pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
pfs, sp = pfs.as_positionfixes.generate_staypoints(method="sliding", dist_threshold=25, time_threshold=5)
sp = sp.as_staypoints.create_activity_flag(time_threshold=15)
pfs, tpls = pfs.as_positionfixes.generate_triplegs(sp, method="between_staypoints")
sp, tpls, trips = ti.preprocessing.triplegs.generate_trips(sp, tpls, gap_threshold=15)
return sp, tpls, trips
def get_test_sp(start_time, duration):
"""Generate test staypoints for tracking duration given start_time and duration."""
p1 = Point(8.5067847, 47.4)
# we generate three records, duration = 0, negative duration and positive duration
list_dict = [
{"user_id": 0, "started_at": start_time, "finished_at": start_time, "geom": p1},
{"user_id": 0, "started_at": start_time, "finished_at": start_time - duration, "geom": p1},
{"user_id": 0, "started_at": start_time, "finished_at": start_time + duration, "geom": p1},
]
sp = gpd.GeoDataFrame(data=list_dict, geometry="geom", crs="EPSG:4326")
sp.index.name = "id"
return sp
class TestTemporal_tracking_quality:
"""Tests for the temporal_tracking_quality() function."""
def test_tracking_quality_all(self, testdata_sp_tpls_geolife_long):
"""Test if the calculated total tracking quality is correct."""
sp_tpls = testdata_sp_tpls_geolife_long
# calculate tracking quality for a sample user
user_0 = sp_tpls.loc[sp_tpls["user_id"] == 0]
extent = (user_0["finished_at"].max() - user_0["started_at"].min()).total_seconds()
tracked = (user_0["finished_at"] - user_0["started_at"]).dt.total_seconds().sum()
quality_manual = tracked / extent
# test if the result of the user agrees
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="all")
assert quality_manual == quality.loc[quality["user_id"] == 0, "quality"].values[0]
assert (quality["quality"] <= 1).all()
def test_tracking_quality_day(self, testdata_sp_tpls_geolife_long):
"""Test if the calculated tracking quality per day is correct."""
sp_tpls = testdata_sp_tpls_geolife_long
splitted_records = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# get the day relative to the start day
start_date = splitted_records["started_at"].min().date()
splitted_records["day"] = splitted_records["started_at"].apply(lambda x: (x.date() - start_date).days)
# calculate tracking quality of the first day for the first user
user_0 = splitted_records.loc[(splitted_records["user_id"] == 0) & (splitted_records["day"] == 0)]
extent = 60 * 60 * 24
tracked = (user_0["finished_at"] - user_0["started_at"]).dt.total_seconds().sum()
quality_manual = tracked / extent
# test if the result of the user agrees
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="day")
assert quality_manual == quality.iloc[0]["quality"]
assert (quality["quality"] <= 1).all()
def test_tracking_quality_week(self, testdata_sp_tpls_geolife_long):
"""Test if the calculated tracking quality per week is correct."""
sp_tpls = testdata_sp_tpls_geolife_long
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# get the day relative to the start day
start_date = splitted["started_at"].min().date()
splitted["week"] = splitted["started_at"].apply(lambda x: (x.date() - start_date).days // 7)
# calculate tracking quality of the first week for the first user
user_0 = splitted.loc[splitted["user_id"] == 0]
extent = 60 * 60 * 24 * 7
tracked = (user_0["finished_at"] - user_0["started_at"]).dt.total_seconds().sum()
quality_manual = tracked / extent
# test if the result of the user agrees
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="week")
assert quality_manual == quality.loc[(quality["user_id"] == 0), "quality"].values[0]
assert (quality["quality"] <= 1).all()
def test_tracking_quality_weekday(self, testdata_sp_tpls_geolife_long):
"""Test if the calculated tracking quality per weekday is correct."""
sp_tpls = testdata_sp_tpls_geolife_long
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# get the day relative to the start day
start_date = splitted["started_at"].min().date()
splitted["week"] = splitted["started_at"].apply(lambda x: (x.date() - start_date).days // 7)
splitted["weekday"] = splitted["started_at"].dt.weekday
# calculate tracking quality of the first week for the first user
user_0 = splitted.loc[(splitted["user_id"] == 0) & (splitted["weekday"] == 3)]
extent = (60 * 60 * 24) * (user_0["week"].max() - user_0["week"].min() + 1)
tracked = (user_0["finished_at"] - user_0["started_at"]).dt.total_seconds().sum()
quality_manual = tracked / extent
# test if the result of the user agrees
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="weekday")
assert quality_manual == quality.loc[(quality["user_id"] == 0) & (quality["weekday"] == 3), "quality"].values[0]
assert (quality["quality"] <= 1).all()
def test_tracking_quality_hour(self, testdata_sp_tpls_geolife_long):
"""Test if the calculated tracking quality per hour is correct."""
sp_tpls = testdata_sp_tpls_geolife_long
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="hour")
# get the day relative to the start day
start_date = splitted["started_at"].min().date()
splitted["day"] = splitted["started_at"].apply(lambda x: (x.date() - start_date).days)
# get the hour of the record
splitted["hour"] = splitted["started_at"].dt.hour
# calculate tracking quality of an hour for the first user
user_0 = splitted.loc[(splitted["user_id"] == 0) & (splitted["hour"] == 2)]
extent = (60 * 60) * (user_0["day"].max() - user_0["day"].min() + 1)
tracked = (user_0["finished_at"] - user_0["started_at"]).dt.total_seconds().sum()
quality_manual = tracked / extent
# test if the result of the user agrees
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="hour")
assert quality_manual == quality.loc[(quality["user_id"] == 0) & (quality["hour"] == 2), "quality"].values[0]
assert (quality["quality"] <= 1).all()
def test_tracking_quality_error(self, testdata_sp_tpls_geolife_long):
"""Test if the an error is raised when passing unknown 'granularity' to temporal_tracking_quality()."""
sp_tpls = testdata_sp_tpls_geolife_long
with pytest.raises(AttributeError):
ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity=12345)
with pytest.raises(AttributeError):
ti.analysis.tracking_quality.temporal_tracking_quality(sp_tpls, granularity="random")
def test_tracking_quality_wrong_datamodel(self):
"""Test if the a keyerror is raised when passing incorrect datamodels."""
# read positionfixes and feed to temporal_tracking_quality()
pfs, _ = ti.io.dataset_reader.read_geolife(os.path.join("tests", "data", "geolife_long"))
with pytest.raises(KeyError):
ti.analysis.tracking_quality.temporal_tracking_quality(pfs)
# generate locations and feed to temporal_tracking_quality()
sp_file = os.path.join("tests", "data", "geolife", "geolife_staypoints.csv")
sp = ti.read_staypoints_csv(sp_file, tz="utc", index_col="id")
_, locs = sp.as_staypoints.generate_locations(
method="dbscan", epsilon=10, num_samples=0, distance_metric="haversine", agg_level="dataset"
)
with pytest.raises(KeyError):
ti.analysis.tracking_quality.temporal_tracking_quality(locs)
def test_tracking_quality_user_error(self, testdata_sp_tpls_geolife_long):
"""Test if the an error is raised when passing unknown 'granularity' to _get_tracking_quality_user()."""
sp_tpls = testdata_sp_tpls_geolife_long
user_0 = sp_tpls.loc[sp_tpls["user_id"] == 0]
with pytest.raises(AttributeError):
ti.analysis.tracking_quality._get_tracking_quality_user(user_0, granularity=12345)
with pytest.raises(AttributeError):
ti.analysis.tracking_quality._get_tracking_quality_user(user_0, granularity="random")
def test_staypoints_accessors(self, testdata_all_geolife_long):
"""Test tracking_quality calculation from staypoints accessor."""
sp, _, _ = testdata_all_geolife_long
# for staypoints
sp_quality_accessor = sp.as_staypoints.temporal_tracking_quality()
sp_quality_method = ti.analysis.tracking_quality.temporal_tracking_quality(sp)
pd.testing.assert_frame_equal(sp_quality_accessor, sp_quality_method)
def test_triplegs_accessors(self, testdata_all_geolife_long):
"""Test tracking_quality calculation from triplegs accessor."""
_, tpls, _ = testdata_all_geolife_long
# for triplegs
tpls_quality_accessor = tpls.as_triplegs.temporal_tracking_quality()
tpls_quality_method = ti.analysis.tracking_quality.temporal_tracking_quality(tpls)
pd.testing.assert_frame_equal(tpls_quality_accessor, tpls_quality_method)
def test_trips_accessors(self, testdata_all_geolife_long):
"""Test tracking_quality calculation from trips accessor."""
_, _, trips = testdata_all_geolife_long
# for trips
trips_quality_accessor = trips.as_trips.temporal_tracking_quality()
trips_quality_method = ti.analysis.tracking_quality.temporal_tracking_quality(trips)
pd.testing.assert_frame_equal(trips_quality_accessor, trips_quality_method)
def test_non_positive_duration_warning(self):
"""Test the function can handle non positive duration records without running into infinite loop."""
t = pd.Timestamp("1971-01-01 00:00:00", tz="utc") # duration 0 at midnight
        one_hour = datetime.timedelta(hours=1)  # used to construct the negative-duration record
        sp = get_test_sp(t, one_hour)
warn_string = "The input dataframe does not contain any record with positive duration. Please check."
with pytest.warns(UserWarning, match=warn_string):
ti.analysis.tracking_quality.temporal_tracking_quality(sp.iloc[:-1])
def test_absolute_extent(self):
"""Test the absolute date is correctly generated for both granularity day and week."""
# we test two examples: one at midnight the other at midday
t_ls = [pd.Timestamp("1971-01-01 00:00:00", tz="utc"), pd.Timestamp("1971-01-01 12:00:00", tz="utc")]
ten_days = pd.Timedelta(days=10)
# the midnight record loses one day after the split
last_start_date_after_split_ls = [t_ls[0] + ten_days - pd.Timedelta(days=1), t_ls[0] + ten_days]
for t, last_start_date_after_split in zip(t_ls, last_start_date_after_split_ls):
sp = get_test_sp(t, ten_days)
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp, granularity="day")
# get the "date" of the last record and compare to the last "date" in data
assert quality.values[-1][-2].day == last_start_date_after_split.day
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp, granularity="week")
# get the "week" of the last record and compare to the last "week" in data
assert quality.values[-1][-2].week == last_start_date_after_split.week
def test_non_positive_duration_filtered(self):
"""Test the non positive duration records are filtered and do not affect the result."""
t = pd.Timestamp("1971-01-01 00:00:00", tz="utc") # duration 0 at midnight
one_hour = datetime.timedelta(hours=1)
sp = get_test_sp(t, one_hour)
granularity_ls = ["all", "day", "week", "weekday", "hour"]
correct_quality_ls = [1, 1 / 24, 1 / 24 / 7, 1 / 24, 1]
for granularity, correct_quality in zip(granularity_ls, correct_quality_ls):
quality = ti.analysis.tracking_quality.temporal_tracking_quality(sp, granularity=granularity)
# get the "quality" of the last record and compare to the correct_quality
assert quality.values[-1][-1] == correct_quality
class TestSplit_overlaps:
"""Tests for the _split_overlaps() function."""
def test_split_overlaps_days(self, testdata_sp_tpls_geolife_long):
"""Test if _split_overlaps() function can split records that span several days."""
sp_tpls = testdata_sp_tpls_geolife_long
        # some of the records span several days
multi_day_records = sp_tpls["finished_at"].dt.day - sp_tpls["started_at"].dt.day
assert (multi_day_records > 0).any()
# split the records according to day
sp_tpls.reset_index(inplace=True)
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# no record spans several days after the split
multi_day_records = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.day - splitted["started_at"].dt.day
assert (multi_day_records == 0).all()
def test_split_overlaps_hours(self, testdata_sp_tpls_geolife_long):
"""Test if _split_overlaps() function can split records that span several hours."""
sp_tpls = testdata_sp_tpls_geolife_long
# some of the records span several hours
hour_diff = sp_tpls["finished_at"].dt.hour - sp_tpls["started_at"].dt.hour
assert (hour_diff > 0).any()
# split the records according to hour
sp_tpls.reset_index(inplace=True)
splitted = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="hour")
# no record spans several hours after the split
hour_diff = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.hour - splitted["started_at"].dt.hour
assert (hour_diff == 0).all()
def test_split_overlaps_hours_case2(self, testdata_sp_tpls_geolife_long):
"""Test if _split_overlaps() function can split record that have the same hour but different days."""
sp_tpls = testdata_sp_tpls_geolife_long
# get the first two records
head2 = sp_tpls.head(2).copy()
# construct the finished_at exactly one day after started_at
head2["finished_at"] = head2.apply(lambda x: x["started_at"].replace(day=x["started_at"].day + 1), axis=1)
# the records have the same hour
hour_diff = (head2["finished_at"] - pd.to_timedelta("1s")).dt.hour - head2["started_at"].dt.hour
assert (hour_diff == 0).all()
# but have different days
day_diff = (head2["finished_at"] - pd.to_timedelta("1s")).dt.day - head2["started_at"].dt.day
assert (day_diff > 0).all()
# split the records according to hour
head2.reset_index(inplace=True)
splitted = ti.analysis.tracking_quality._split_overlaps(head2, granularity="hour")
# no record has different days after the split
day_diff = (splitted["finished_at"] - pd.to_timedelta("1s")).dt.day - splitted["started_at"].dt.day
assert (day_diff == 0).all()
def test_split_overlaps_duration(self, testdata_sp_tpls_geolife_long):
"""Test if the column 'duration' gets updated after using the _split_overlaps() function."""
sp_tpls = testdata_sp_tpls_geolife_long
# initiate the duration column
sp_tpls["duration"] = sp_tpls["finished_at"] - sp_tpls["started_at"]
sp_tpls.reset_index(inplace=True)
# split the records according to day
splitted_day = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="day")
# split the records according to hour
splitted_hour = ti.analysis.tracking_quality._split_overlaps(sp_tpls, granularity="hour")
# test "duration" is recalculated after the split
assert splitted_day["duration"].sum() == sp_tpls["duration"].sum()
assert splitted_hour["duration"].sum() == sp_tpls["duration"].sum()
def test_max_iter_warning(self):
"""Test if a warning is raised when maximum iteration is reached."""
p1 = Point(8.5067847, 47.4)
# construct time that is far apart - exceeding the default max_iter
t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
t2 = pd.Timestamp("1981-01-01 00:00:00", tz="utc")
list_dict = [
{"user_id": 0, "started_at": t1, "finished_at": t2, "geom": p1},
]
sp = gpd.GeoDataFrame(data=list_dict, geometry="geom", crs="EPSG:4326")
sp.index.name = "id"
with pytest.warns(UserWarning):
ti.analysis.tracking_quality._split_overlaps(sp, granularity="day")
|
102730
|
import os
print "UPDATING..."
os.system("cd")
os.system('cd /root/ && rm -fr hackers-tool-kit && git clone https://github.com/unkn0wnh4ckr/hackers-tool-kit && echo "[UPDATED]: Restart Your Terminal"')
|
102731
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class PatreonManagerConfig(AppConfig):
name = 'patreonmanager'
verbose_name = _("Patreon Manager")
|
102732
|
from yaml import load, dump, FullLoader
import sys, os
class QuietLoaders:
def resource_path(self, relative):
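        # PyInstaller one-file builds unpack bundled data under sys._MEIPASS;
        # fall back to the relative path when running from source.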
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
def __init__(self):
        self.settings_path = self.resource_path(os.path.join('data', 'config', 'settings.yaml'))
        self.default_settings_path = self.resource_path(os.path.join('data', 'config', 'settings-default.yaml'))
def load_settings_data(self, default=False):
if not default:
with open(self.settings_path, 'r') as some_config:
return load(some_config, Loader=FullLoader)
else:
with open(self.default_settings_path, 'r') as some_config:
return load(some_config, Loader=FullLoader)
def store_settings_data(self, new_settings):
with open(self.settings_path, 'w') as settings_config:
dump(new_settings, settings_config)
|
102763
|
from __future__ import absolute_import, division, print_function
import codecs
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import copy
import os
import os.path as path
import sys
import toml
import nfldb
import nflfan.provider as provider
import nflfan.score as score
_xdg_home = os.getenv('XDG_CONFIG_HOME')
"""XDG user configuration directory."""
if not _xdg_home:
home = os.getenv('HOME')
if not home:
_xdg_home = ''
else:
_xdg_home = path.join(home, '.config')
_data_paths = [
path.join(_xdg_home, 'nflfan'),
path.join(sys.prefix, 'share', 'nflfan'),
]
"""A list of paths to check for loading data files."""
builtin_providers = {
'yahoo': provider.Yahoo,
'espn': provider.ESPN,
}
"""The default set of providers defined by nflfan."""
def load_config(providers=builtin_providers, file_path=''):
"""
Reads and loads the configuration file containing fantasy football
league information.
The return value is a dictionary mapping provider name (e.g.,
`yahoo`) to a list of leagues for that provider. Each league
is guaranteed to have at least a `name`, `season`, `phase`
and `scoring` attributes filled in as values that are not
`None`. Providers also have their own specific mandatory fields:
If no configuration file can be found, then an `IOError` is raised.
"""
def prov_leagues(d):
return ((k, d[k]) for k in sorted(d.keys()) if isinstance(d[k], dict))
schema = {
'all': {
'req': provider.Provider.conf_required,
'opt': provider.Provider.conf_optional,
},
}
for prov in providers.values():
schema[prov.provider_name] = {
'req': prov.conf_required, 'opt': prov.conf_optional,
}
raw = toml.loads(get_data('config.toml', file_path=file_path))
scoring = merge(raw['scoring'])
conf = {'leagues': OrderedDict()}
for pname in sorted(raw.keys()):
prov = raw[pname]
if pname == 'scoring':
continue
if not isinstance(prov, dict):
conf[pname] = prov
continue
conf['leagues'][pname] = OrderedDict()
for lg_name, lg in prov_leagues(prov):
lg['league_name'] = lg_name
lg['provider_class'] = providers[pname]
apply_schema(schema, scoring, pname, prov, lg)
lg = provider.League(lg['season'], lg['phase'], lg['league_id'],
pname, lg_name, lg['scoring'], lg)
conf['leagues'][pname][lg_name] = lg
return conf
def merge(s):
"""
    Given a nesting of TOML dictionaries, return a flat dictionary of the
    schemes in `s`, keyed by dotted name. This applies the inheritance used
    in configuration files so that each scheme has every attribute fully
    resolved.
"""
def settings_and_subschemes(d, defaults):
settings, subs = {}, {}
for k, v in d.items():
if isinstance(v, dict):
subs[k] = v
else:
settings[k] = v
for k, v in defaults.items():
if k not in settings:
settings[k] = v
return copy.deepcopy(settings), subs
def merge(d, defaults, name):
settings, subs = settings_and_subschemes(d, defaults)
schemes[name] = settings
for subname, subscheme in subs.items():
fullname = '%s.%s' % (name, subname)
merge(subscheme, settings, fullname)
schemes = {}
for name, scheme in s.items():
merge(scheme, {}, name)
return schemes
def get_data(name, file_path=''):
"""
Reads the contents of a configuration data file with name
`name`. If `file_path` is given, then it is used if it exists.
If no file can be found, then an `IOError` is raised.
"""
if file_path:
paths = [file_path] + _data_paths
else:
paths = _data_paths
for fp in map(lambda p: path.join(p, name), paths):
try:
            with codecs.open(fp) as f:
                return f.read()
except IOError:
pass
raise IOError("Could not find configuration file %s" % name)
def cache_path():
"""
Returns a file path to the cache directory. If a cache directory
does not exist, one is created.
If there is a problem creating a cache directory, an `IOError`
exception is raised.
"""
for fp in _data_paths:
if os.access(fp, os.R_OK):
cdir = path.join(fp, 'data')
if not os.access(cdir, os.R_OK):
try:
os.mkdir(cdir)
except IOError as e:
                raise IOError(str(e) + ' (please create a cache directory)')
return cdir
raise IOError('could not find or create a cache directory')
def apply_schema(schema, scoring, prov_name, prov, lg):
"""
Applies the scheme for the provider `prov_name` to the league `lg`
while using `prov` as a dictionary of default values for `lg`.
`scoring` should be a dictionary mapping names to scoring schemes.
The `schema` should be a dictionary mapping provider name to its
set of required and optional fields. Namely, each value should be
a dictionary with two keys: `req` and `opt`, where each correspond
to a list of required and optional fields, respectively. There
must also be an `all` key in `schema` that specifies required and
optional fields for every provider.
If a required field in the provider's scheme is missing, then a
`ValueError` is raised.
"""
def get_scoring(ref):
try:
return score.ScoreSchema(ref, scoring[ref])
except KeyError:
raise KeyError("Scoring scheme %s does not exist." % ref)
def val(key, required=False):
v = lg.get(key, prov.get(key, None))
if required and v is None:
raise ValueError("Provider %s must have %s." % (prov_name, key))
elif key == 'scoring':
return get_scoring(v)
elif key == 'phase':
v = nfldb.Enums.season_phase[v.lower().title()]
return v
for r in schema['all']['req'] + schema[prov_name]['req']:
lg[r] = val(r, required=True)
for o in schema['all']['opt'] + schema[prov_name]['opt']:
lg[o] = val(o)
|
102809
|
import numpy as np
from model import generate_recommendations
user_address = '0x8c373ed467f3eabefd8633b52f4e1b2df00c9fe8'
already_rated = ['0x006bea43baa3f7a6f765f14f10a1a1b08334ef45','0x5102791ca02fc3595398400bfe0e33d7b6c82267','0x68d57c9a1c35f63e2c83ee8e49a64e9d70528d25','0xc528c28fec0a90c083328bc45f587ee215760a0f']
k = 5
model_dir = '../jobs/wals_ml_local_20190107_235006'
user_map = np.load(model_dir + "/model/user.npy")
item_map = np.load(model_dir + "/model/item.npy")
row_factor = np.load(model_dir + "/model/row.npy")
col_factor = np.load(model_dir + "/model/col.npy")
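# np.searchsorted returns valid indices only if user_map and item_map are
# sorted ascending, which these lookups rely on.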
user_idx = np.searchsorted(user_map, user_address)
user_rated = [np.searchsorted(item_map, i) for i in already_rated]
recommendations = generate_recommendations(user_idx, user_rated, row_factor, col_factor, k)
tokens = [item_map[i] for i in recommendations]
print(tokens)
|
102831
|
from setuptools import setup, find_packages
setup(
name="Segy2Segy",
version="0.2",
packages=find_packages(exclude=["tests*"]),
scripts=['core/segy2segy.py'],
install_requires=['gdal', 'obspy'],
author="<NAME>",
author_email="<EMAIL>",
description="A command line tool for projecting and transforming coordinates in SEGY files",
license="BSD",
keywords="segy sgy seismic projection",
url="http://geophysicslabs.com",
)
|
102866
|
import torch
from torch import nn
from torch.nn import functional as F
def masked_normalization(logits, mask):
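    # Note: if an entire row of the mask is zero, the re-normalising division
    # below produces NaNs; callers are assumed to pass at least one real timestep.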
scores = F.softmax(logits, dim=-1)
# apply the mask - zero out masked timesteps
masked_scores = scores * mask.float()
# re-normalize the masked scores
normed_scores = masked_scores.div(masked_scores.sum(-1, keepdim=True))
return normed_scores
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len, device=lengths.device)
.type_as(lengths)
.unsqueeze(0).expand(batch_size, max_len)
.lt(lengths.unsqueeze(1)))
class SelfAttention(nn.Module):
def __init__(self, attention_size,
baseline=False,
batch_first=True,
layers=1,
dropout=.0,
non_linearity="tanh"):
super(SelfAttention, self).__init__()
self.batch_first = batch_first
if non_linearity == "relu":
activation = nn.ReLU()
else:
activation = nn.Tanh()
if baseline:
layers = 2
modules = []
for i in range(layers - 1):
modules.append(nn.Linear(attention_size, attention_size))
modules.append(activation)
modules.append(nn.Dropout(dropout))
# last attention layer must output 1
modules.append(nn.Linear(attention_size, 1))
# modules.append(activation)
# modules.append(nn.Dropout(dropout))
self.attention = nn.Sequential(*modules)
# self.softmax = nn.Softmax(dim=-1)
def forward(self, sequence, lengths):
"""
:param sequence: shape: (batch_size, seq_length, hidden_size)
:param lengths:
:return:
"""
energies = self.attention(sequence).squeeze(-1)
# construct a mask, based on sentence lengths
mask = sequence_mask(lengths, energies.size(1))
# scores = masked_normalization_inf(energies, mask)
scores = masked_normalization(energies, mask)
# scores are of shape: (batch_size, seq_length)
contexts = (sequence * scores.unsqueeze(-1)).sum(1)
return contexts, scores
|
102867
|
from .utils.suite_writer import Suite
from contextlib import contextmanager
import pytest
# pylint: disable=redefined-outer-name
def test_expect_failure_not_met(suite, test):
test.expect_failure()
with _raises_assertion('Test did not fail as expected'):
suite.run()
def test_expect_error_not_met(suite, test):
test.expect_error()
with _raises_assertion('Test did not issue error as expected'):
suite.run()
@contextmanager
def _raises_assertion(msg):
with pytest.raises(AssertionError) as caught:
yield
assert str(caught.value) == msg
@pytest.fixture
def test(suite):
return suite[len(suite) // 2]
@pytest.fixture
def suite():
s = Suite()
for _ in range(10):
s.add_test()
return s
|
102902
|
from baserow.contrib.database.formula.exceptions import BaserowFormulaException
class InvalidNumberOfArguments(BaserowFormulaException):
def __init__(self, function_def, num_args):
if num_args == 1:
error_prefix = "1 argument was"
else:
error_prefix = f"{num_args} arguments were"
super().__init__(
f"{error_prefix} given to the {function_def}, it must instead "
f"be given {function_def.num_args}"
)
class MaximumFormulaSizeError(BaserowFormulaException):
def __init__(self):
super().__init__("it exceeded the maximum formula size")
class UnknownFieldByIdReference(BaserowFormulaException):
def __init__(self, unknown_field_id):
super().__init__(
f"there is no field with id {unknown_field_id} but the formula"
f" included a direct reference to it"
)
class UnknownOperator(BaserowFormulaException):
def __init__(self, operatorText):
super().__init__(f"it used the unknown operator {operatorText}")
class BaserowFormulaSyntaxError(BaserowFormulaException):
pass
|
102903
|
from time import time
from functools import wraps
import matplotlib.pyplot as plt
from mandelbrot.python_mandel import compute_mandel as compute_mandel_py
from mandelbrot.hybrid_mandel import compute_mandel as compute_mandel_hy
from mandelbrot.cython_mandel import compute_mandel as compute_mandel_cy
def timer(func, name):
@wraps(func)
def wrapper(*args, **kwargs):
t_start = time()
val = func(*args, **kwargs)
t_end = time()
print(f"Time taken for {name}: {t_end - t_start}")
return val
return wrapper
mandel_py = timer(compute_mandel_py, "Python")
mandel_hy = timer(compute_mandel_hy, "Hybrid")
mandel_cy = timer(compute_mandel_cy, "Cython")
Nx = 320
Ny = 240
steps = 255
mandel_py(Nx, Ny, steps)
mandel_hy(Nx, Ny, steps)
vals = mandel_cy(Nx, Ny, steps)
fig, ax = plt.subplots()
ax.imshow(vals.T, extent=(-2.5, 0.5, -1.2, 1.2))
plt.show()
|
102932
|
from invitations.adapters import BaseInvitationsAdapter
from registration.signals import user_registered
class DepartmentInvitationsAdapter(BaseInvitationsAdapter):
def get_user_signed_up_signal(self):
return user_registered
|
102985
|
import numpy
from gensim.summarization.bm25 import BM25
class WrappedBM25(BM25):
def __init__(self, docs, tokenizer='spacy'):
self.docs = docs
if tokenizer == 'spacy':
try:
import spacy
except ImportError:
raise ImportError('Please install spacy and spacy "en" model: '
'`pip install -U spacy && '
'python -m spacy download en` '
'or find alternative installation options '
'at spacy.io')
self._spacy = spacy.load('en')
self.tokenizer = self.spacy_tokenize
else:
self.tokenizer = self.split_tokenize
corpus = []
for doc in self.docs:
corpus.append(self.tokenizer(doc))
super().__init__(corpus)
        self.average_idf = sum(float(v) for v in self.idf.values()) / len(self.idf)
def find_topk_doc(self, doc, topk=10, rm_first=True):
scores = self.get_scores(self.tokenizer(doc))
arg_idx = numpy.argsort(scores)
arg_idx = arg_idx[::-1]
result = []
for i in range(topk):
result.append(self.docs[arg_idx[i]])
if rm_first:
del result[0] # remove self
return result
def find_tailk_doc(self, doc, tailk=10):
scores = self.get_scores(self.tokenizer(doc))
arg_idx = numpy.argsort(scores)
result = []
for i in range(tailk):
result.append(self.docs[arg_idx[i]])
return result
def spacy_tokenize(self, text):
tokens = self._spacy.tokenizer(text)
return [t.text for t in tokens]
def split_tokenize(self, text):
return text.strip().split(' ')
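# Minimal usage sketch (hypothetical documents):
#
#   bm25 = WrappedBM25(["green taxi trips", "food inspections"], tokenizer='split')
#   closest = bm25.find_topk_doc("taxi data", topk=2, rm_first=False)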
|
102997
|
from torch_rgcn.utils import *
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from torch import nn
import math
import torch
class DistMult(Module):
""" DistMult scoring function (from https://arxiv.org/pdf/1412.6575.pdf) """
def __init__(self,
indim,
outdim,
num_nodes,
num_rel,
w_init='standard-normal',
w_gain=False,
b_init=None):
super(DistMult, self).__init__()
self.w_init = w_init
self.w_gain = w_gain
self.b_init = b_init
# Create weights & biases
self.relations = nn.Parameter(torch.FloatTensor(indim, outdim))
if b_init:
self.sbias = Parameter(torch.FloatTensor(num_nodes))
self.obias = Parameter(torch.FloatTensor(num_nodes))
self.pbias = Parameter(torch.FloatTensor(num_rel))
else:
self.register_parameter('sbias', None)
self.register_parameter('obias', None)
self.register_parameter('pbias', None)
self.initialise_parameters()
def initialise_parameters(self):
"""
Initialise weights and biases
Options for initialising weights include:
glorot-uniform - glorot (aka xavier) initialisation using a uniform distribution
glorot-normal - glorot (aka xavier) initialisation using a normal distribution
schlichtkrull-uniform - schlichtkrull initialisation using a uniform distribution
schlichtkrull-normal - schlichtkrull initialisation using a normal distribution
normal - using a standard normal distribution
uniform - using a uniform distribution
Options for initialising biases include:
ones - setting all values to one
zeros - setting all values to zero
normal - using a standard normal distribution
uniform - using a uniform distribution
"""
# Weights
init = select_w_init(self.w_init)
if self.w_gain:
gain = nn.init.calculate_gain('relu')
init(self.relations, gain=gain)
else:
init(self.relations)
# Checkpoint 6
# print('min', torch.min(self.relations))
# print('max', torch.max(self.relations))
# print('mean', torch.mean(self.relations))
# print('std', torch.std(self.relations))
# print('size', self.relations.size())
# Biases
if self.b_init:
init = select_b_init(self.b_init)
init(self.sbias)
init(self.pbias)
init(self.obias)
def s_penalty(self, triples, nodes):
""" Compute Schlichtkrull L2 penalty for the decoder """
s_index, p_index, o_index = split_spo(triples)
s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]
return s.pow(2).mean() + p.pow(2).mean() + o.pow(2).mean()
def forward(self, triples, nodes):
""" Score candidate triples """
s_index, p_index, o_index = split_spo(triples)
s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]
scores = (s * p * o).sum(dim=-1)
if self.b_init:
scores = scores + (self.sbias[s_index] + self.pbias[p_index] + self.obias[o_index])
return scores
class RelationalGraphConvolutionNC(Module):
"""
Relational Graph Convolution (RGC) Layer for Node Classification
(as described in https://arxiv.org/abs/1703.06103)
"""
def __init__(self,
triples=None,
num_nodes=None,
num_relations=None,
in_features=None,
out_features=None,
edge_dropout=None,
edge_dropout_self_loop=None,
bias=True,
decomposition=None,
vertical_stacking=False,
diag_weight_matrix=False,
reset_mode='glorot_uniform'):
super(RelationalGraphConvolutionNC, self).__init__()
        assert (triples is not None and num_nodes is not None and num_relations is not None and out_features is not None), \
            "The following must be specified: triples, number of nodes, number of relations and output dimension!"
# If featureless, use number of nodes instead as input dimension
in_dim = in_features if in_features is not None else num_nodes
out_dim = out_features
# Unpack arguments
weight_decomp = decomposition['type'] if decomposition is not None and 'type' in decomposition else None
num_bases = decomposition['num_bases'] if decomposition is not None and 'num_bases' in decomposition else None
num_blocks = decomposition['num_blocks'] if decomposition is not None and 'num_blocks' in decomposition else None
self.triples = triples
self.num_nodes = num_nodes
self.num_relations = num_relations
self.in_features = in_features
self.out_features = out_features
self.weight_decomp = weight_decomp
self.num_bases = num_bases
self.num_blocks = num_blocks
self.vertical_stacking = vertical_stacking
self.diag_weight_matrix = diag_weight_matrix
self.edge_dropout = edge_dropout
self.edge_dropout_self_loop = edge_dropout_self_loop
# If this flag is active, the weight matrix is a diagonal matrix
if self.diag_weight_matrix:
self.weights = torch.nn.Parameter(torch.empty((self.num_relations, self.in_features)), requires_grad=True)
self.out_features = self.in_features
self.weight_decomp = None
bias = False
# Instantiate weights
elif self.weight_decomp is None:
self.weights = Parameter(torch.FloatTensor(num_relations, in_dim, out_dim))
elif self.weight_decomp == 'basis':
# Weight Regularisation through Basis Decomposition
assert num_bases > 0, \
'Number of bases should be set to higher than zero for basis decomposition!'
self.bases = Parameter(torch.FloatTensor(num_bases, in_dim, out_dim))
self.comps = Parameter(torch.FloatTensor(num_relations, num_bases))
elif self.weight_decomp == 'block':
# Weight Regularisation through Block Diagonal Decomposition
assert self.num_blocks > 0, \
'Number of blocks should be set to a value higher than zero for block diagonal decomposition!'
assert in_dim % self.num_blocks == 0 and out_dim % self.num_blocks == 0,\
f'For block diagonal decomposition, input dimensions ({in_dim}, {out_dim}) must be divisible ' \
f'by number of blocks ({self.num_blocks})'
self.blocks = nn.Parameter(
torch.FloatTensor(num_relations, self.num_blocks, in_dim // self.num_blocks, out_dim // self.num_blocks))
else:
raise NotImplementedError(f'{self.weight_decomp} decomposition has not been implemented')
# Instantiate biases
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters(reset_mode)
def reset_parameters(self, reset_mode='glorot_uniform'):
""" Initialise biases and weights (glorot_uniform or uniform) """
if reset_mode == 'glorot_uniform':
if self.weight_decomp == 'block':
nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))
elif self.weight_decomp == 'basis':
nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
else:
nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))
if self.bias is not None:
torch.nn.init.zeros_(self.bias)
elif reset_mode == 'schlichtkrull':
if self.weight_decomp == 'block':
nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))
elif self.weight_decomp == 'basis':
nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
else:
nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))
if self.bias is not None:
torch.nn.init.zeros_(self.bias)
elif reset_mode == 'uniform':
stdv = 1.0 / math.sqrt(self.weights.size(1))
if self.weight_decomp == 'block':
self.blocks.data.uniform_(-stdv, stdv)
elif self.weight_decomp == 'basis':
self.bases.data.uniform_(-stdv, stdv)
self.comps.data.uniform_(-stdv, stdv)
else:
self.weights.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
else:
raise NotImplementedError(f'{reset_mode} parameter initialisation method has not been implemented')
def forward(self, features=None):
""" Perform a single pass of message propagation """
assert (features is None) == (self.in_features is None), "in_features not provided!"
in_dim = self.in_features if self.in_features is not None else self.num_nodes
triples = self.triples
out_dim = self.out_features
edge_dropout = self.edge_dropout
weight_decomp = self.weight_decomp
num_nodes = self.num_nodes
num_relations = self.num_relations
vertical_stacking = self.vertical_stacking
general_edge_count = int((triples.size(0) - num_nodes)/2)
self_edge_count = num_nodes
# Choose weights
if weight_decomp is None:
weights = self.weights
elif weight_decomp == 'basis':
weights = torch.einsum('rb, bio -> rio', self.comps, self.bases)
elif weight_decomp == 'block':
weights = block_diag(self.blocks)
else:
raise NotImplementedError(f'{weight_decomp} decomposition has not been implemented')
# Determine whether to use cuda or not
if weights.is_cuda:
device = 'cuda'
else:
device = 'cpu'
# Stack adjacency matrices either vertically or horizontally
adj_indices, adj_size = stack_matrices(
triples,
num_nodes,
num_relations,
vertical_stacking=vertical_stacking,
device=device
)
num_triples = adj_indices.size(0)
vals = torch.ones(num_triples, dtype=torch.float, device=device)
        # Apply normalisation (vertical-stacking -> row-wise sum & horizontal-stacking -> column-wise sum)
sums = sum_sparse(adj_indices, vals, adj_size, row_normalisation=vertical_stacking, device=device)
if not vertical_stacking:
# Rearrange column-wise normalised value to reflect original order (because of transpose-trick)
n = general_edge_count
i = self_edge_count
sums = torch.cat([sums[n:2 * n], sums[:n], sums[-i:]], dim=0)
vals = vals / sums
# Construct adjacency matrix
if device == 'cuda':
adj = torch.cuda.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
else:
adj = torch.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
if self.diag_weight_matrix:
assert weights.size() == (num_relations, in_dim)
else:
assert weights.size() == (num_relations, in_dim, out_dim)
if self.in_features is None:
# Message passing if no features are given
output = torch.mm(adj, weights.view(num_relations * in_dim, out_dim))
elif self.diag_weight_matrix:
fw = torch.einsum('ij,kj->kij', features, weights)
fw = torch.reshape(fw, (self.num_relations * self.num_nodes, in_dim))
output = torch.mm(adj, fw)
elif self.vertical_stacking:
# Message passing if the adjacency matrix is vertically stacked
af = torch.spmm(adj, features)
af = af.view(self.num_relations, self.num_nodes, in_dim)
output = torch.einsum('rio, rni -> no', weights, af)
else:
# Message passing if the adjacency matrix is horizontally stacked
fw = torch.einsum('ni, rio -> rno', features, weights).contiguous()
output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))
assert output.size() == (self.num_nodes, out_dim)
if self.bias is not None:
output = torch.add(output, self.bias)
return output
class RelationalGraphConvolutionLP(Module):
"""
Relational Graph Convolution (RGC) Layer for Link Prediction
(as described in https://arxiv.org/abs/1703.06103)
"""
def __init__(self,
num_nodes=None,
num_relations=None,
in_features=None,
out_features=None,
edge_dropout=None,
edge_dropout_self_loop=None,
decomposition=None,
vertical_stacking=False,
w_init='glorot-normal',
w_gain=False,
b_init=None):
super(RelationalGraphConvolutionLP, self).__init__()
        assert (num_nodes is not None and num_relations is not None and out_features is not None), \
            "The following must be specified: number of nodes, number of relations and output dimension!"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# If featureless, use number of nodes instead as feature input dimension
in_dim = in_features if in_features is not None else num_nodes
out_dim = out_features
# Unpack arguments
weight_decomp = decomposition['type'] if decomposition is not None and 'type' in decomposition else None
num_bases = decomposition['num_bases'] if decomposition is not None and 'num_bases' in decomposition else None
num_blocks = decomposition['num_blocks'] if decomposition is not None and 'num_blocks' in decomposition else None
self.num_nodes = num_nodes
self.num_relations = num_relations
self.in_features = in_dim
self.out_features = out_dim
self.weight_decomp = weight_decomp
self.num_bases = num_bases
self.num_blocks = num_blocks
self.vertical_stacking = vertical_stacking
self.edge_dropout = edge_dropout
self.edge_dropout_self_loop = edge_dropout_self_loop
self.w_init = w_init
self.w_gain = w_gain
self.b_init = b_init
# Create weight parameters
if self.weight_decomp is None:
self.weights = Parameter(torch.FloatTensor(num_relations, in_dim, out_dim).to(device))
elif self.weight_decomp == 'basis':
# Weight Regularisation through Basis Decomposition
assert num_bases > 0, \
'Number of bases should be set to higher than zero for basis decomposition!'
self.bases = Parameter(torch.FloatTensor(num_bases, in_dim, out_dim).to(device))
self.comps = Parameter(torch.FloatTensor(num_relations, num_bases).to(device))
elif self.weight_decomp == 'block':
# Weight Regularisation through Block Diagonal Decomposition
assert self.num_blocks > 0, \
'Number of blocks should be set to a value higher than zero for block diagonal decomposition!'
assert in_dim % self.num_blocks == 0 and out_dim % self.num_blocks == 0, \
f'For block diagonal decomposition, input dimensions ({in_dim}, {out_dim}) must be divisible ' \
f'by number of blocks ({self.num_blocks})'
self.blocks = nn.Parameter(
torch.FloatTensor(num_relations - 1, self.num_blocks, in_dim // self.num_blocks,
out_dim // self.num_blocks).to(device))
self.blocks_self = nn.Parameter(torch.FloatTensor(in_dim, out_dim).to(device))
else:
raise NotImplementedError(f'{self.weight_decomp} decomposition has not been implemented')
# Create bias parameters
if b_init:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.initialise_weights()
if self.bias is not None:
self.initialise_biases()
def initialise_biases(self):
"""
Initialise bias parameters using one of the following methods:
ones - setting all values to one
zeros - setting all values to zero
normal - using a standard normal distribution
uniform - using a uniform distribution
"""
b_init = self.b_init
init = select_b_init(b_init)
init(self.bias)
def initialise_weights(self):
"""
Initialise weights parameters using one of the following methods:
glorot-uniform - glorot (aka xavier) initialisation using a uniform distribution
glorot-normal - glorot (aka xavier) initialisation using a normal distribution
schlichtkrull-uniform - schlichtkrull initialisation using a uniform distribution
schlichtkrull-normal - schlichtkrull initialisation using a normal distribution
normal - using a standard normal distribution
uniform - using a uniform distribution
"""
w_init = self.w_init
w_gain = self.w_gain
# Add scaling factor according to non-linearity function used
if w_gain:
gain = nn.init.calculate_gain('relu')
else:
gain = 1.0
# Select appropriate initialisation method
init = select_w_init(w_init)
if self.weight_decomp == 'block':
schlichtkrull_normal_(self.blocks, shape=[(self.num_relations-1)//2, self.in_features//self.num_blocks], gain=gain)
# Checkpoint 3
# print('min', torch.min(self.blocks))
# print('max', torch.max(self.blocks))
# print('mean', torch.mean(self.blocks))
# print('std', torch.std(self.blocks))
# print('size', self.blocks.size())
schlichtkrull_normal_(self.blocks_self, shape=[(self.num_relations-1)//2, self.in_features//self.num_blocks], gain=gain)
# Checkpoint 4
# print('min', torch.min(self.blocks_self))
# print('max', torch.max(self.blocks_self))
# print('mean', torch.mean(self.blocks_self))
# print('std', torch.std(self.blocks_self))
# print('size', self.blocks_self.size())
elif self.weight_decomp == 'basis':
init(self.bases, gain=gain)
init(self.comps, gain=gain)
else:
init(self.weights, gain=gain)
def forward(self, triples, features=None):
""" Perform a single pass of message propagation """
assert (features is None) == (self.in_features is None), "in_features not given"
in_dim = self.in_features if self.in_features is not None else self.num_nodes
out_dim = self.out_features
num_nodes = self.num_nodes
num_relations = self.num_relations
vertical_stacking = self.vertical_stacking
original_num_relations = int((self.num_relations-1)/2) # Count without inverse and self-relations
device = 'cuda' if torch.cuda.is_available() else 'cpu'
triples = triples.to(device)
features = features.to(device)
# Apply weight decomposition
if self.weight_decomp is None:
weights = self.weights
elif self.weight_decomp == 'basis':
weights = torch.einsum('rb, bio -> rio', self.comps, self.bases)
elif self.weight_decomp == 'block':
pass
else:
raise NotImplementedError(f'{self.weight_decomp} decomposition has not been implemented')
# Define edge dropout rate for self-loops
if self.training and self.edge_dropout["self_loop_type"] != 'schlichtkrull-dropout':
self_loop_keep_prob = 1 - self.edge_dropout["self_loop"]
else:
self_loop_keep_prob = 1
with torch.no_grad():
# Add inverse relations
inverse_triples = generate_inverses(triples, original_num_relations)
# Add self-loops to triples
self_loop_triples = generate_self_loops(
triples, num_nodes, original_num_relations, self_loop_keep_prob, device=device)
triples_plus = torch.cat([triples, inverse_triples, self_loop_triples], dim=0)
# Stack adjacency matrices either vertically or horizontally
adj_indices, adj_size = stack_matrices(
triples_plus,
num_nodes,
num_relations,
vertical_stacking=vertical_stacking,
device=device
)
num_triples = adj_indices.size(0)
vals = torch.ones(num_triples, dtype=torch.float, device=device)
assert vals.size(0) == (triples.size(0) + inverse_triples.size(0) + self_loop_triples.size(0))
        # Apply normalisation (vertical-stacking -> row-wise sum & horizontal-stacking -> column-wise sum)
sums = sum_sparse(adj_indices, vals, adj_size, row_normalisation=vertical_stacking, device=device)
if not vertical_stacking:
# Rearrange column-wise normalised value to reflect original order (because of transpose-trick)
n = triples.size(0)
i = self_loop_triples.size(0)
sums = torch.cat([sums[n : 2*n], sums[:n], sums[-i:]], dim=0)
vals = vals / sums
# Construct adjacency matrix
if device == 'cuda':
adj = torch.cuda.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
else:
adj = torch.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
if self.in_features is None:
# Message passing if no features are given
if self.weight_decomp == 'block':
weights = block_diag(self.blocks)
weights = torch.cat([weights, self.blocks_self], dim=0)
output = torch.mm(adj, weights.view(num_relations * in_dim, out_dim))
elif self.vertical_stacking:
# Message passing if the adjacency matrix is vertically stacked
if self.weight_decomp == 'block':
weights = block_diag(self.blocks)
weights = torch.cat([weights, self.blocks_self], dim=0)
af = torch.spmm(adj, features)
af = af.view(self.num_relations, self.num_nodes, in_dim)
output = torch.einsum('rio, rni -> no', weights, af)
else:
# Message passing if the adjacency matrix is horizontally stacked
if self.weight_decomp == 'block':
n = features.size(0)
r = num_relations - 1
input_block_size = in_dim // self.num_blocks
output_block_size = out_dim // self.num_blocks
num_blocks = self.num_blocks
block_features = features.view(n, num_blocks, input_block_size)
fw = torch.einsum('nbi, rbio -> rnbo', block_features, self.blocks).contiguous()
assert fw.shape == (r, n, num_blocks, output_block_size), f"{fw.shape}, {(r, n, num_blocks, output_block_size)}"
fw = fw.view(r, n, out_dim)
self_fw = torch.einsum('ni, io -> no', features, self.blocks_self)[None, :, :]
if self.training and self.edge_dropout["self_loop_type"] == 'schlichtkrull-dropout':
self_fw = nn.functional.dropout(self_fw, p=self.edge_dropout["self_loop"], training=True,inplace=False)
fw = torch.cat([fw, self_fw], dim=0)
output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))
else:
fw = torch.einsum('ni, rio -> rno', features, weights).contiguous()
output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))
assert output.size() == (self.num_nodes, out_dim)
if self.bias is not None:
output = torch.add(output, self.bias)
# Checkpoint 5
# print('min', torch.min(output))
# print('max', torch.max(output))
# print('mean', torch.mean(output))
# print('std', torch.std(output))
# print('size', output.size())
return output
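# Illustrative usage sketch (hyperparameter values below are assumptions inferred from
# this module, not values from the original configuration; the edge_dropout keys are the
# ones read in forward()):
# layer = RelationalGraphConvolutionLP(
#     num_nodes=100,
#     num_relations=2 * 5 + 1,  # 5 relations + 5 inverses + 1 self-loop relation
#     in_features=16,
#     out_features=32,
#     edge_dropout={'self_loop': 0.2, 'self_loop_type': 'standard'},
#     decomposition={'type': 'basis', 'num_bases': 4})
# out = layer(triples, features)  # triples: (num_triples, 3) LongTensor, out: (100, 32)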
|
103010
|
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='PoseDetDetector',
pretrained='pretrained/dla34-ba72cf86.pth',
# pretrained='open-mmlab://msra/hrnetv2_w32',
backbone=dict(
type='DLA',
return_levels=True,
levels=[1, 1, 1, 2, 2, 1],
channels=[16, 32, 64, 128, 256, 512],
ouput_indice=[3,4,5,6],
),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=128,
start_level=1,
add_extra_convs='on_input',
num_outs=4,
# num_outs=3,
norm_cfg=norm_cfg,),
bbox_head=dict(
# type='PoseDetHead',
type='PoseDetHeadHeatMapMl',
norm_cfg=norm_cfg,
num_classes=1,
in_channels=128,
feat_channels=128,
embedding_feat_channels=128,
init_convs=3,
refine_convs=2,
cls_convs=2,
gradient_mul=0.1,
dcn_kernel=(1,17),
refine_num=1,
point_strides=[8, 16, 32, 64],
point_base_scale=4,
num_keypoints=17,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_keypoints_init=dict(type='KeypointsLoss',
d_type='L2',
weight=.1,
stage='init',
normalize_factor=1,
),
loss_keypoints_refine=dict(type='KeypointsLoss',
d_type='L2',
weight=.2,
stage='refine',
normalize_factor=1,
),
loss_heatmap=dict(type='HeatmapLoss', weight=.1, with_sigmas=False),
)
)
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='KeypointsAssigner',
scale=4,
pos_num=1,
number_keypoints_thr=3,
num_keypoints=17,
center_type='keypoints',
# center_type='box'
),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='OksAssigner',
pos_PD_thr=0.7,
neg_PD_thr=0.7,
min_pos_iou=0.52,
ignore_iof_thr=-1,
match_low_quality=True,
num_keypoints=17,
number_keypoints_thr=3, #
),
allowed_border=-1,
pos_weight=-1,
debug=False
),
cls=dict(
assigner=dict(
type='OksAssigner',
pos_PD_thr=0.6,
neg_PD_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1,
match_low_quality=False,
num_keypoints=17,
number_keypoints_thr=3,
),
allowed_border=-1,
pos_weight=-1,
debug=False
),
)
test_cfg = dict(
nms_pre=500,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='keypoints_nms', iou_thr=0.2),
max_per_img=100)
|
103066
|
from bson import ObjectId
from odmantic import Model
class Player(Model):
name: str
level: int = 1
document = {"name": "Leeroy", "_id": ObjectId("5f8352a87a733b8b18b0cb27")}
user = Player.parse_doc(document)
print(repr(user))
#> Player(
#> id=ObjectId("5f8352a87a733b8b18b0cb27"),
#> name="Leeroy",
#> level=1,
#> )
|
103078
|
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import pytest
def evaluated_fi_df_template(features, importance=None, n=None):
rng = np.random.default_rng(0)
importance_df = pd.DataFrame({'name': features})
importance_df['importance'] = rng.standard_normal(len(features)) if importance is None else importance
importance_df['stddev'] = rng.standard_normal(len(features))
importance_df['p_value'] = None
importance_df['n'] = 5 if n is None else n
importance_df.set_index('name', inplace=True)
importance_df.index.name = None
return importance_df
@pytest.fixture
def sample_features():
return ['a', 'b', 'c', 'd', 'e']
@pytest.fixture
def sample_importance_df_1(sample_features):
return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1., None], n=[10, 5, 0, 5, 0])
@pytest.fixture
def sample_importance_df_2(sample_features):
return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])
def test_add_noise_column_df():
# test noise columns are appended to input dataframe and feature_metadata
X = pd.DataFrame({'a': [1, 2]})
args = {'rng': np.random.default_rng(0), 'count': 2}
X_noised, noise_columns = add_noise_column(X, **args)
expected_features = X.columns.tolist() + noise_columns
assert expected_features == X_noised.columns.tolist()
def test_merge_importance_dfs_base(sample_features):
# test the scenario when previous feature importance df is none
prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)
assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df
def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set())
assert [score if score == score else None for score in result_df['importance'].tolist()] == [0., 0.1, 0.1, 1., None]
assert result_df['n'].tolist() == [15, 15, 10, 5, 0]
def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set(sample_features)
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert len(using_prev_fit_fi) == 2
assert [score if score == score else None for score in result_df['importance'].tolist()] == [-0.1, -0.1, 0.1, 1., None]
assert result_df['n'].tolist() == [5, 10, 10, 5, 0]
def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set([sample_features[0]])
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert [score if score == score else None for score in result_df['importance'].tolist()] == [-0.1, 0., 0.1, 1., None]
assert result_df['n'].tolist() == [5, 15, 10, 5, 0]
assert using_prev_fit_fi == set()
def test_sort_features_by_priority_base(sample_features):
# test the ordering of feature importance computation when no prior feature importance computation was done
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())
assert sorted_features == sample_features
def test_sort_features_by_priority_same_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set())
assert sorted_features == prev_importance_df.sort_values('importance').index.tolist()
def test_sort_features_by_priority_different_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
using_prev_fit_fi = sample_features[-2:]
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values('importance').index.tolist()
sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values('importance').index.tolist()
expected_features = sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features
def test_sort_features_by_priority_all(sample_features):
    # test the ordering of feature importance computation when feature importance estimates come from a mix of current and previous fit models,
    # and some features are unevaluated
length = len(sample_features)
using_prev_fit_fi = set(sample_features[:length//3])
evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[:length//2]), unevaluated_fi_df_template(sample_features[length//2:])
prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
unevaluated_features = unevaluated_rows.index.tolist()
sorted_prev_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
& (evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
sorted_curr_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
& (~evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features
|
103083
|
from unittest import mock
from unittest.mock import call
from django.test import override_settings
from lego.apps.external_sync.external import ldap
from lego.apps.users.constants import GROUP_COMMITTEE
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseTestCase
class LDAPTestCase(BaseTestCase):
fixtures = ["test_abakus_groups.yaml", "test_users.yaml"]
@mock.patch("lego.apps.external_sync.external.ldap.LDAPLib")
def setUp(self, ldap_mock):
self.ldap = ldap.LDAPSystem()
def test_migrate(self):
"""Make sure the org units is created"""
self.ldap.migrate()
self.ldap.ldap.update_organization_unit.assert_has_calls(
(call("users"), call("groups"))
)
def test_filter_users(self):
"""Test1 is the only user with a ldap password hash"""
filtered = self.ldap.filter_users(User.objects.all()).values_list(
"username", flat=True
)
self.assertListEqual(list(filtered), ["test1"])
@override_settings(LDAP_GROUPS=["UserAdminTest"])
def test_filter_groups(self):
"""Return groups with a name in the LDAP_GROUPS setting plus committees"""
filtered = self.ldap.filter_groups(AbakusGroup.objects.all()).values_list(
"name", flat=True
)
self.assertSetEqual(
set(filtered),
set(
AbakusGroup.objects.filter(type=GROUP_COMMITTEE).values_list(
"name", flat=True
)
).union(["UserAdminTest"]),
)
def test_search_user(self):
"""Search for a user"""
self.ldap.ldap.search_user.return_value = True
user = User.objects.get(username="test1")
self.assertTrue(self.ldap.user_exists(user))
self.ldap.ldap.search_user.assert_called_once_with("test1")
def test_add_user(self):
"""Make user the LDAPLib.ass_user is called with the correct arguments"""
user = User.objects.get(username="test1")
self.ldap.add_user(user)
self.ldap.ldap.add_user.assert_called_once_with(
user.username,
user.first_name,
user.last_name,
user.email,
user.crypt_password_hash,
)
def test_update_user_correct_password(self):
"""
        The update user function is simple, but we make sure the password change function is
called if necessary
"""
user = User.objects.get(username="test1")
self.ldap.ldap.check_password.return_value = True
self.ldap.update_user(user)
self.ldap.ldap.change_password.assert_not_called()
self.ldap.ldap.check_password.return_value = False
self.ldap.update_user(user)
self.ldap.ldap.change_password.assert_called_once_with(
user.username, user.crypt_password_hash
)
def test_search_group(self):
"""Search for a group"""
self.ldap.ldap.search_group.return_value = True
group = AbakusGroup.objects.get(name="UserAdminTest")
self.assertTrue(self.ldap.group_exists(group))
self.ldap.ldap.search_group.assert_called_once_with(group.id)
def test_add_group(self):
"""Make sure LDAPLib.add_group is called with the correct arguments"""
group = AbakusGroup.objects.get(name="UserAdminTest")
self.ldap.add_group(group)
self.ldap.ldap.add_group.assert_called_once_with(group.id, group.name.lower())
members = list(group.memberships.values_list("user__username", flat=True))
self.ldap.ldap.update_group_members.assert_called_once_with(
group.name.lower(), members
)
def test_update_group(self):
"""Make sure memberships gets updated at group update"""
group = AbakusGroup.objects.get(name="UserAdminTest")
members = list(group.memberships.values_list("user__username", flat=True))
self.ldap.update_group(group)
self.ldap.ldap.update_group_members.assert_called_once_with(
group.name.lower(), members
)
|
103094
|
DOMAIN = "audiconnect"
CONF_VIN = "vin"
CONF_CARNAME = "carname"
CONF_ACTION = "action"
MIN_UPDATE_INTERVAL = 5
DEFAULT_UPDATE_INTERVAL = 10
CONF_SPIN = "spin"
CONF_REGION = "region"
CONF_SERVICE_URL = "service_url"
CONF_MUTABLE = "mutable"
SIGNAL_STATE_UPDATED = "{}.updated".format(DOMAIN)
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
RESOURCES = [
"position",
"last_update_time",
"mileage",
"range",
"service_inspection_time",
"service_inspection_distance",
"oil_change_time",
"oil_change_distance",
"oil_level",
"charging_state",
"max_charge_current",
"engine_type1",
"engine_type2",
"parking_light",
"any_window_open",
"any_door_unlocked",
"any_door_open",
"trunk_unlocked",
"trunk_open",
"hood_open",
"tank_level",
"state_of_charge",
"remaining_charging_time",
"plug_state",
"sun_roof",
"doors_trunk_status",
]
COMPONENTS = {
"sensor": "sensor",
"binary_sensor": "binary_sensor",
"lock": "lock",
"device_tracker": "device_tracker",
"switch": "switch",
}
|
103105
|
from soccer_geometry.transformation import Transformation
from soccer_geometry.camera import Camera
|
103110
|
import os
import json
import time
import torch
import itertools
import detectron2.utils.comm as comm
from fvcore.common.file_io import PathManager
from detectron2.config import global_cfg
from detectron2.engine.train_loop import HookBase
from detectron2.evaluation.testing import flatten_results_dict
__all__ = ["EvalHookDeFRCN"]
class EvalHookDeFRCN(HookBase):
"""
Run an evaluation function periodically, and at the end of training.
It is executed every ``eval_period`` iterations and after the last iteration.
"""
def __init__(self, eval_period, eval_function, cfg):
"""
Args:
eval_period (int): the period to run `eval_function`. Set to 0 to
not evaluate periodically (but still after the last iteration).
eval_function (callable): a function which takes no arguments, and
returns a nested dict of evaluation metrics.
cfg: config
Note:
This hook must be enabled in all or none workers.
If you would like only certain workers to perform evaluation,
give other workers a no-op function (`eval_function=lambda: None`).
"""
self._period = eval_period
self._func = eval_function
self.cfg = cfg
def _do_eval(self):
results = self._func()
if results:
assert isinstance(
results, dict
), "Eval function must return a dict. Got {} instead.".format(results)
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
try:
v = float(v)
except Exception as e:
raise ValueError(
"[EvalHook] eval_function should return a nested dict of float. "
"Got '{}: {}' instead.".format(k, v)
) from e
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
if comm.is_main_process() and results:
# save evaluation results in json
is_final = self.trainer.iter + 1 >= self.trainer.max_iter
os.makedirs(
os.path.join(self.cfg.OUTPUT_DIR, 'inference'), exist_ok=True)
output_file = 'res_final.json' if is_final else \
'iter_{:07d}.json'.format(self.trainer.iter)
with PathManager.open(os.path.join(self.cfg.OUTPUT_DIR, 'inference',
output_file), 'w') as fp:
json.dump(results, fp)
# Evaluation may take different time among workers.
# A barrier make them start the next iteration together.
comm.synchronize()
def after_step(self):
next_iter = self.trainer.iter + 1
if self._period > 0 and next_iter % self._period == 0:
self._do_eval()
def after_train(self):
# This condition is to prevent the eval from running after a failed training
if self.trainer.iter + 1 >= self.trainer.max_iter:
self._do_eval()
# func is likely a closure that holds reference to the trainer
# therefore we clean it to avoid circular reference in the end
del self._func
|
103146
|
from imghdr import what
from os import getenv
from json import loads, dumps
import flask
from rockset import Client, Q
from flask_cors import CORS
from sys import argv
app = flask.Flask(__name__, static_folder='compendium/images')
CORS(app)
rs = Client(api_key=getenv('RS2_TOKEN') or argv[1], api_server='api.rs2.usw2.rockset.com')
class ServerException(Exception):
def __init__(self, message, status_code, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
selects = {
'treasure': 'drops',
'monsters': 'drops',
'materials': 'cooking_effect, hearts_recovered',
'equipment': 'attack, defense'
}
creatures_selects = {
'food': 'hearts_recovered, cooking_effect',
'others': 'drops'
}
def creatures_category(version):
others = list(
rs.sql(
Q(
'select id, name, description, common_locations, image, category, {} from "botw-api".creatures where cooking_effect is null'.format(creatures_selects['others'])
)
)
)
foods = list(
rs.sql(
Q(
'select id, name, description, common_locations, image, category, {} from "botw-api".creatures where cooking_effect is not null'.format(creatures_selects['food'])
)
)
)
if version == 'v1':
food_key = 'non-food'
elif version == 'v2':
food_key = 'non_food'
return {food_key: others, 'food': foods}
def single_category(category):
query = 'select id, name, description, common_locations, image, category, {} from "botw-api".{}'.format(selects[category], category)
return list(
rs.sql(Q(query))
)
def id_name_query(target, where):
for category in list(selects.keys()):
res = list(rs.sql(Q('select id, name, description, common_locations, image, category, {} from "botw-api".{} where {}=\'{}\''.format(selects[category], category, where, target))))
if res != []:
return category, res[0]
res = list(rs.sql(Q('select id, name, description, hearts_recovered, category, cooking_effect, common_locations, image from "botw-api".creatures where {}=\'{}\''.format(where, target))))
if res != []:
        if res[0]['cooking_effect'] is None:
res = list(rs.sql(Q('select id, name, description, drops, common_locations, image, category from "botw-api".creatures where {}=\'{}\''.format(where, target))))
return 'creatures', res[0]
return None
def all(version):
category_metadata = {}
for category in selects.keys():
category_metadata[category] = single_category(category)
category_metadata['creatures'] = creatures_category(version)
return {'data': category_metadata}
def entry(version, inp):
try:
try:
int(inp)
cat, query_res = id_name_query(inp, '_id')
return {'data': query_res}
except ValueError:
cat, query_res = id_name_query(inp.lower().replace('_', ' '), 'name')
return {'data': query_res}
except TypeError:
return {'data': {}, 'message': 'no results'}
def img_entry(version, inp):
try:
try: # inp is ID
_, query_res = id_name_query(int(inp), '_id')
target_entry = query_res['name'].replace(' ', '_').replace('+', '')
except ValueError: # inp is name
            target_entry = inp.replace(' ', '_').replace('+', '')
print(target_entry)
return flask.send_from_directory('compendium/images', target_entry, mimetype=f'image/{what(f"compendium/images/{target_entry}")}')
except TypeError:
return {'data': {}, 'message': 'no results'}
def treasure(version):
return {'data': single_category('treasure')}
def monsters(version):
return {'data': single_category('monsters')}
def materials(version):
return {'data': single_category('materials')}
def equipment(version):
return {'data': single_category('equipment')}
def creatures(version):
return {'data': creatures_category(version)}
@app.route('/api/<version>')
def prod_all(version):
res = all(version)
return(res)
@app.route('/api/<version>/entry/<inp>/image')
def entry_img(version, inp):
return(img_entry(version, inp))
@app.route('/api/<version>/entry/<inp>')
def prod_entry(version, inp):
res = entry(version, inp)
return(res)
@app.route('/api/<version>/category/treasure')
def prod_treasure(version):
res = treasure(version)
return(res)
@app.route('/api/<version>/category/monsters')
def prod_monsters(version):
res = monsters(version)
return(res)
@app.route('/api/<version>/category/materials')
def prod_materials(version):
res = materials(version)
return(res)
@app.route('/api/<version>/category/equipment')
def prod_equipment(version):
res = equipment(version)
return(res)
@app.route('/api/<version>/category/creatures')
def prod_creatures(version):
res = creatures(version)
return(res)
@app.route('/')
@app.route('/api')
def home():
return flask.redirect('https://gadhagod.github.io/Hyrule-Compendium-API')
if __name__ == '__main__':
app.run()
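# Illustrative requests against a local run (paths follow the routes above; entry
# names and IDs are placeholders):
#   GET /api/v1                           -> every category
#   GET /api/v1/category/monsters         -> a single category
#   GET /api/v1/entry/<name_or_id>        -> a single entry
#   GET /api/v1/entry/<name_or_id>/image  -> the entry's image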
|
103169
|
from abc import ABC, abstractmethod
class Verb(ABC):
"""
This docstring is used in the help message when doing
`htcondor noun verb --help`
"""
# The options class dict is a nested dict containing kwargs
# per option for the add_argument method of ArgumentParser,
# see COMMON_OPTIONS in __init__.py for an example.
options = {}
# The __init__ method should take the Verb's options
# and execute whatever it is the user expects to happen.
# The first arg should always be logger.
@abstractmethod
def __init__(self, logger, *args, **kwargs):
raise NotImplementedError
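# Illustrative subclass sketch (the verb name, option name and help text are
# assumptions; per the class docstring, each inner dict holds kwargs for
# ArgumentParser.add_argument):
# class Status(Verb):
#     """Show the status of a job."""
#     options = {
#         "job_id": {"help": "ID of the job to inspect"},
#     }
#     def __init__(self, logger, job_id, **options):
#         logger.info(f"Looking up job {job_id}")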
|
103181
|
class YamboSpectra():
"""
Class to show optical absorption spectra
"""
def __init__(self,energies,data):
self.energies = energies
self.data = data
|
103292
|
import sys
import os
import logging
import datetime
import json
import traceback
import copy
import random
import string
import gzip
import asyncio
from pathlib import Path
from collections.abc import Iterable
import discord
from tqdm import tqdm
__version__ = "0.3.3"
PBAR_UPDATE_INTERVAL = 100
PBAR_MINIMUM_MESSAGES = 1000 # Minimum number of messages to begin showing a progress bar for
# There's a few websocket events that we need to log (like GUILD_CREATE),
# but also a few we didn't ask for we want to do without (pings, typing notifications, new messages),
# at least until a realtime mode is introduced. For this purpose we use a blacklist.
WS_EVENT_BLACKLIST = [None, 'TYPING_START', 'MESSAGE_CREATE', 'MESSAGE_UPDATE', 'MESSAGE_REACTION_ADD']
class NotFoundError(Exception):
pass
class DiscardClient(discord.Client):
def __init__(self, *args, discard=None, **kwargs):
super().__init__(*args, **kwargs)
self.discard = discard
self.is_user_account = self.discard.is_user_account
self.exception = None
# monkeypatch discord.py request function to log
request_func = self.http.request
async def request_func_wrapped(route, *, files=None, **kwargs):
datetime_start = datetime.datetime.now(datetime.timezone.utc)
response = await request_func(route, files=files, **kwargs) # XXX await?
datetime_end = datetime.datetime.now(datetime.timezone.utc)
discard.log_http_request(route, kwargs, response, datetime_start, datetime_end)
return response
self.http.request = request_func_wrapped
# Override the default run method in order to preserve KeyboardInterrupt
def run(self, *args, **kwargs):
loop = self.loop
try:
loop.run_until_complete(self.start(*args, **kwargs))
except KeyboardInterrupt:
self.exception = sys.exc_info()
finally:
loop.close()
async def on_ready(self):
if self.discard.mode == 'profile':
print(f'We have logged in as {self.user.name} (id {self.user.id})')
if not self.is_user_account:
# Fetch self using the HTTP API (not supported for user accounts)
user = await self.fetch_user(self.user.id)
print(f"Fetched user: {user}")
else:
# Fetch own profile using the HTTP API (not supported for bot accounts)
profile = await self.fetch_user_profile(self.user.id)
print(f"Fetched profile: {profile}")
elif self.discard.mode == 'channel':
for channel_id in self.discard.channel_ids:
channel = self.get_channel(channel_id)
if channel is None:
raise NotFoundError(f"Channel not found: {channel_id}")
await self.archive_channel(channel)
elif self.discard.mode == 'guild':
for guild_id in self.discard.guild_ids:
guild = self.get_guild(guild_id)
if guild is None:
raise NotFoundError(f"Guild not found: {guild_id}")
await self.archive_guild(guild)
else:
raise ValueError(f"Unknown mode: {self.discard.mode}")
# Quit
await self.close()
async def archive_channel(self, channel: discord.abc.GuildChannel):
print(f"Processing channel: {channel}")
self.discard.start_channel(channel)
# XXX is it a good idea for userbots to do this?
#await self.fetch_channel(channel.id)
num_messages = 0
oldest_message = None
newest_message = None
message = None
pbar = None
# before and after datetimes must be timezone-naive in UTC (why not timezone-aware UTC?)
async for message in channel.history(after=self.discard.after, before=self.discard.before, limit=None,
oldest_first=True):
if oldest_message is None:
oldest_message = message
expected_timedelta = (self.discard.before or datetime.datetime.now()) - oldest_message.created_at
for reaction in message.reactions:
# Fetch the users who reacted
async for user in reaction.users():
pass
if num_messages % PBAR_UPDATE_INTERVAL == 0 and num_messages > PBAR_MINIMUM_MESSAGES:
timedelta = message.created_at - oldest_message.created_at
if pbar is None:
pbar = tqdm(total=expected_timedelta.days, initial=timedelta.days, unit="day", miniters=1)
else:
diff = timedelta.days - pbar.n
if diff:
pbar.update(diff)
num_messages += 1
if pbar:
pbar.update(expected_timedelta.days - pbar.n)
pbar.close()
newest_message = message
self.discard.end_channel(channel, num_messages, oldest_message, newest_message)
async def archive_guild(self, guild: discord.Guild):
print(f"Processing guild: {guild}")
self.discard.start_guild(guild)
# XXX is it a good idea for userbots to do this?
await self.fetch_guild(guild.id)
await guild.fetch_channels()
await guild.fetch_roles()
await guild.fetch_emojis()
channels = []
for channel in guild.text_channels:
if channel.permissions_for(guild.me).read_messages:
channels.append(channel)
print(f"{len(channels)} accessible channels...")
for channel in channels:
await self.archive_channel(channel)
self.discard.end_guild(guild, len(channels))
async def on_socket_raw_send(self, payload):
self.discard.log_ws_send(payload)
async def on_socket_response(self, msg):
self.discard.log_ws_recv(msg)
async def on_error(self, event_method, *args, **kwargs):
# Reraising the exception doesn't close the connection,
# so we save it and raise it outside.
# TODO some errors would be best logged but kept non-fatal to still
# fetch the most data possible.
# Have an option for that.
self.exception = sys.exc_info()
await self.close()
class Discard():
def __init__(self, token, mode, output_dir, command=None, channel_id=None, guild_id=None,
is_user_account=False, no_scrub=False, before=None, after=None,
gzip=False):
self.token = token
self.mode = mode
self.command = command
self.channel_ids = channel_id
if not isinstance(self.channel_ids, Iterable):
self.channel_ids = [self.channel_ids]
self.guild_ids = guild_id
if not isinstance(self.guild_ids, Iterable):
self.guild_ids = [self.guild_ids]
self.is_user_account = is_user_account
self.no_scrub = no_scrub
self.output_dir_root = output_dir
self.client = None
if (before and not isinstance(before, datetime.datetime)) or (after and not isinstance(after, datetime.datetime)):
raise TypeError("before and after must be datetime objects")
self.before = before
self.after = after
self.gzip = gzip
self.client = DiscardClient(discard=self)
def start(self):
self.datetime_start = datetime.datetime.now(datetime.timezone.utc)
self.ident = ''.join([random.choice(string.ascii_lowercase + string.digits) for i in range(24)])
self.datetime_end = None
self.finished = False
self.completed = False
self.errors = False
self.exception = None
self.traceback = None
self.num_http_requests = 0
self.num_ws_packets = 0
self.num_messages = 0
self.num_guild_messages = 0
self.profile = None
self.run_directory = self.datetime_start.strftime('%Y%m%dT%H%M%S_'+self.mode)
if not self.before and not self.after:
self.run_directory += '_full'
self.output_directory = self.output_dir_root / Path(self.run_directory)
if os.path.exists(self.output_directory):
self.run_directory += "_" + self.ident[0:5]
self.output_directory = self.output_dir_root / Path(self.run_directory)
if os.path.exists(self.output_directory):
raise RuntimeError("Fatal: Run directory already exists")
os.makedirs(self.output_directory)
self.write_meta_file()
self.open_request_file('run.jsonl')
def open_request_file(self, filepath):
filepath = Path(filepath)
if len(filepath.parts) > 1:
os.makedirs(self.output_directory / filepath.parts[0], exist_ok=True)
if self.gzip:
filepath = filepath.with_name(filepath.name + '.gz')
if os.path.exists(self.output_directory / filepath):
raise RuntimeError("Request file already exists")
open_func = gzip.open if self.gzip else open
self.request_file = open_func(self.output_directory / filepath, 'wt')
def end(self):
self.request_file.close()
self.finished = True
self.datetime_end = datetime.datetime.now(datetime.timezone.utc)
self.write_meta_file()
def run(self):
self.start()
try:
self.client.run(self.token, bot=not self.is_user_account)
if self.client.exception:
t, v, tb = self.client.exception
raise v.with_traceback(tb)
except BaseException as ex:
self.errors = True
self.exception = type(ex).__name__ + f": {ex}"
self.traceback = traceback.format_exc()
self.end()
raise
self.completed = True
print("Completed")
self.end()
def write_meta_file(self):
obj = {
'client': {
'name': 'discard',
'version': __version__,
'discord.py_version': discord.__version__
},
'command': self.command,
'settings': {
'mode': self.mode,
'token': self.token if self.no_scrub else None,
'is_user_account': self.is_user_account,
'output_dir': str(self.output_dir_root),
'after': self.after.isoformat() if self.after else None,
'before': self.before.isoformat() if self.before else None,
'no_scrub': self.no_scrub,
'gzip': self.gzip
},
'run': {
'datetime_start': self.datetime_start.isoformat(),
'datetime_end': self.datetime_end.isoformat() if self.datetime_end else None,
'run_directory': self.run_directory,
'ident': self.ident,
'completed': self.completed,
'finished': self.finished,
'errors': self.errors,
'exception': self.exception,
'traceback': self.traceback,
},
'summary': {
'num_http_requests': self.num_http_requests,
'num_ws_packets': self.num_ws_packets,
'num_messages': self.num_messages
},
'user': None
}
if self.client and self.client.user:
obj['user'] = {
'id': self.client.user.id,
'name': self.client.user.name,
'discriminator': self.client.user.discriminator,
'bot': self.client.user.bot
}
with open(self.output_directory / Path('run.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
def start_channel(self, channel):
self.request_file.close()
self.num_guild_messages = 0
guild_id = channel.guild.id
self.open_request_file(f'{guild_id}/{channel.id}.jsonl')
def end_channel(self, channel, num_messages, oldest_message, newest_message):
# This information is intentionally minimalistic. It's supposed to be
# a human-readable summary, not a resource. Logged requests contain all data.
obj = {
'channel': {
'id': channel.id,
'name': channel.name,
'type': str(channel.type)
},
'summary': {
'num_messages': num_messages,
'oldest_message': None,
'newest_message': None
}
}
if oldest_message is not None:
obj['summary']['oldest_message'] = {
'id': oldest_message.id,
'timestamp': oldest_message.created_at.isoformat() # TODO these need to be converted to UTC!
}
if newest_message is not None:
obj['summary']['newest_message'] = {
'id': newest_message.id,
'timestamp': newest_message.created_at.isoformat()
}
with open(self.output_directory / Path(f'{channel.guild.id}/{channel.id}.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
self.num_messages += num_messages
self.num_guild_messages += num_messages
def start_guild(self, guild):
self.request_file.close()
self.open_request_file(f'{guild.id}/guild.jsonl')
def end_guild(self, guild, num_channels):
obj = {
'guild': {
'id': guild.id,
'name': guild.name,
},
'summary': {
'num_channels': num_channels,
'num_messages': self.num_guild_messages
}
}
with open(self.output_directory / Path(f'{guild.id}/guild.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
def log_http_request(self, route, kwargs, response, datetime_start, datetime_end):
obj = {
'type': 'http',
'datetime_start': datetime_start.isoformat(),
'datetime_end': datetime_end.isoformat(),
'request': {
'method': route.method,
'url': route.url,
},
'response': {
'data': response
}
}
if 'params' in kwargs:
obj['request']['params'] = kwargs['params']
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_http_requests += 1
def log_ws_send(self, data):
now = datetime.datetime.now()
obj = {
'type': 'ws',
'datetime': now.isoformat(),
'direction': 'send',
'data': data,
}
if not self.no_scrub and self.token in data:
obj['data'] = data.replace(self.token, '[SCRUBBED]')
obj['scrubbed'] = True
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_ws_packets += 1
def log_ws_recv(self, data):
if 't' in data:
if data['t'] in WS_EVENT_BLACKLIST:
return
now = datetime.datetime.now()
obj = {
'type': 'ws',
'datetime': now.isoformat(),
'direction': 'recv',
'data': data
}
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_ws_packets += 1
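# Illustrative usage sketch (token and guild ID are placeholders; output_dir is a
# pathlib.Path because it is joined with Path objects above):
# from pathlib import Path
# archiver = Discard(token="<bot token>", mode="guild", output_dir=Path("out"),
#                    guild_id=[123456789012345678], gzip=True)
# archiver.run()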
|
103357
|
from django.apps import AppConfig
class OfficialDocumentsCollectionConfig(AppConfig):
name = 'official_documents_collection'
|
103385
|
import unittest
import numpy as np
import scipy.sparse
from injector import Injector
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
from decai.simulation.logging_module import LoggingModule
class TestFeatureIndexMapper(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
])
cls.f = inj.get(FeatureIndexMapper)
def test_map_dense(self):
x_train = np.random.random_sample((10, 3))
x_test = np.random.random_sample((4, x_train.shape[1]))
train, test, feature_index_mapping = self.f.map(x_train, x_test)
self.assertIs(train, x_train)
self.assertIs(test, x_test)
self.assertIsNone(feature_index_mapping)
def test_map_sparse(self):
x_train = np.array([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])
x_test = np.array([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])
x_train_sparse = scipy.sparse.csr_matrix((17348, 4288315073), dtype=np.uint8)
x_train_sparse[x_train.nonzero()] = x_train[x_train.nonzero()]
x_test_sparse = scipy.sparse.csr_matrix((3333, 21312344), dtype=np.uint8)
x_test_sparse[x_test.nonzero()] = x_test[x_test.nonzero()]
mapped_train, mapped_test, feature_index_mapping = self.f.map(x_train_sparse, x_test_sparse)
self.assertEqual(int, type(feature_index_mapping[0]))
self.assertEqual([1, 2, 3], feature_index_mapping)
self.assertTrue(mapped_train.sum(axis=0).all(),
"Every column should have at least one non-zero value.")
x_train_expected = np.zeros((x_train_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_train_expected[0, 1] = 1
x_train_expected[0, 2] = 1
x_train_expected[1, 0] = 2
self.assertTrue(np.array_equal(x_train_expected, mapped_train), mapped_train)
x_test_expected = np.zeros((x_test_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_test_expected[0, 1] = 1
x_test_expected[1, 1] = 3
self.assertTrue(np.array_equal(x_test_expected, mapped_test), mapped_test)
|
103387
|
from pyNastran.dev.bdf_vectorized.test.test_coords import *
from pyNastran.dev.bdf_vectorized.test.test_mass import *
from pyNastran.dev.bdf_vectorized.cards.elements.solid.test_solids import *
from pyNastran.dev.bdf_vectorized.cards.elements.shell.test_shell import *
from pyNastran.dev.bdf_vectorized.cards.elements.rod.test_rods import *
#from pyNastran.dev.bdf_vectorized.cards.elements.bar.test_bars import *
#from pyNastran.dev.bdf_vectorized.cards.elements.beam.test_beams import *
#from pyNastran.dev.bdf_vectorized.cards.elements.spring.test_spring import *
#from pyNastran.dev.bdf_vectorized.cards.elements.shear.test_shear import *
#from pyNastran.dev.bdf_vectorized.cards.elements.damper.test_dampers import *
if __name__ == "__main__": # pragma: no cover
import unittest
unittest.main()
|
103394
|
import abc
class Metrics(abc.ABC):
    def __init__(self):
        # A no-op base initialiser: raising here would also break subclasses such as
        # Accuracy, which call super().__init__().
        pass
@abc.abstractmethod
def calculate(self) -> float:
raise NotImplementedError
class Accuracy(Metrics):
def __init__(self):
super().__init__()
self._num_correct = 0
self._num_samples = 0
@property
def num_correct(self) -> int:
return self._num_correct
@property
def num_samples(self) -> int:
return self._num_samples
@num_correct.setter
def num_correct(self, num_correct: int):
if not isinstance(num_correct, int):
raise TypeError(f'{type(num_correct).__name__} not allowed')
self._num_correct = num_correct
@num_samples.setter
def num_samples(self, num_samples: int):
if not isinstance(num_samples, int):
raise TypeError(f'{type(num_samples).__name__} not allowed')
self._num_samples = num_samples
def add_num_correct(self, add_num_correct: int):
if not isinstance(add_num_correct, int):
raise TypeError(f'{type(add_num_correct).__name__} not allowed')
self._num_correct += add_num_correct
def add_num_samples(self, add_num_samples: int):
if not isinstance(add_num_samples, int):
raise TypeError(f'{type(add_num_samples).__name__} not allowed')
self._num_samples += add_num_samples
def calculate(self) -> float:
return float(self._num_correct / self._num_samples)
def update_for(self, add_num_correct: int, add_num_samples: int) -> float:
self.add_num_correct(add_num_correct)
self.add_num_samples(add_num_samples)
return self.calculate()
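# Illustrative usage sketch (numbers are arbitrary):
# acc = Accuracy()
# acc.update_for(8, 10)   # running accuracy: 8/10 = 0.8
# acc.update_for(5, 10)   # running accuracy: 13/20 = 0.65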
|
103404
|
def f(*, b):
return b
def f(a, *, b):
return a + b
def f(a, *, b, c):
return a + b + c
def f(a, *, b=c):
return a + b
def f(a, *, b=c, c):
return a + b + c
def f(a, *, b=c, c=d):
return a + b + c
def f(a, *, b=c, c, d=e):
return a + b + c + d
def f(a=None, *, b=None):
return a + b
|
103416
|
from .Dataset import Dataset
from .constants import *
from .DataLoader import DataLoader, create_datasets
from .Dict import Dict
|
103438
|
import json
from collections.abc import Iterator
from os.path import join
from elasticsearch import Elasticsearch
from examples.imdb.conf import ES_HOST, ES_USE_AUTH, ES_PASSWORD, ES_USER, DATA_DIR
from pandagg.index import DeclarativeIndex, Action
from pandagg.mappings import Keyword, Text, Float, Nested, Integer
class Movies(DeclarativeIndex):
name = "movies"
mappings = {
"dynamic": False,
"properties": {
"movie_id": Keyword(),
"name": Text(fields={"raw": Keyword()}),
"year": Integer(),
"rank": Float(),
"genres": Keyword(),
"roles": Nested(
properties={
"role": Keyword(),
"actor_id": Keyword(),
"gender": Keyword(),
"first_name": Text(fields={"raw": Keyword()}),
"last_name": Text(fields={"raw": Keyword()}),
"full_name": Text(fields={"raw": Keyword()}),
}
),
"directors": Nested(
properties={
"director_id": Keyword(),
"first_name": Text(fields={"raw": Keyword()}),
"last_name": Text(fields={"raw": Keyword()}),
"full_name": Text(fields={"raw": Keyword()}),
"genres": Keyword(),
}
),
"nb_directors": Integer(),
"nb_roles": Integer(),
},
}
def operations_iterator() -> Iterator[Action]:
with open(join(DATA_DIR, "serialized.json"), "r") as f:
for line in f.readlines():
d = json.loads(line)
yield {"_source": d, "_id": d["id"]}
if __name__ == "__main__":
client_kwargs = {"hosts": [ES_HOST]}
if ES_USE_AUTH:
client_kwargs["http_auth"] = (ES_USER, ES_PASSWORD)
client = Elasticsearch(**client_kwargs)
movies = Movies(client)
print("Index creation")
movies.save()
print("Write documents")
movies.docs.bulk(
actions=operations_iterator(), _op_type_overwrite="index"
).perform()
movies.refresh()
|
103464
|
import random
import time
from agora.retry.backoff import Backoff
class Strategy:
"""Determines whether or not an action should be retried. Strategies are allowed to delay or cause other side
effects.
"""
def should_retry(self, attempts: int, e: Exception) -> bool:
"""Returns whether or not to retry, based on this strategy.
        :param attempts: The number of attempts that have occurred. Starts at 1, since the action is evaluated first.
:param e: The :class:`Exception <Exception>` that was raised.
:return: A bool indicating whether the action should be retried, based on this strategy.
"""
raise NotImplementedError('Strategy is an abstract class. Strategy must implement should_retry().')
class LimitStrategy(Strategy):
"""A strategy that limits the total umber of retries.
:param max_attempts: The max number of attempts. Should be greater than 1, since the action is evaluated first.
"""
def __init__(self, max_attempts):
self.max_attempts = max_attempts
def should_retry(self, attempts: int, e: Exception) -> bool:
return attempts < self.max_attempts
class RetriableErrorsStrategy(Strategy):
"""A strategy that specifies which errors can be retried.
    :param retriable_errors: A list of :class:`Exception <Exception>` classes that can be retried.
"""
def __init__(self, retriable_errors):
self.retriable_errors = retriable_errors
def should_retry(self, attempts: int, e: Exception) -> bool:
for error in self.retriable_errors:
if isinstance(e, error):
return True
return False
class NonRetriableErrorsStrategy(Strategy):
"""A strategy that specifies which errors should not be retried.
    :param non_retriable_errors: A list of :class:`Exception <Exception>` classes that shouldn't be retried.
"""
def __init__(self, non_retriable_errors):
self.non_retriable_errors = non_retriable_errors
def should_retry(self, attempts: int, e: Exception) -> bool:
for error in self.non_retriable_errors:
if isinstance(e, error):
return False
return True
class BackoffStrategy(Strategy):
"""A strategy that will delay the next retry, provided the action raised an error.
    :param backoff: The :class:`Backoff <agora.retry.backoff.Backoff>` to use to determine the amount of time to delay.
:param max_backoff: The maximum backoff, in seconds.
"""
def __init__(self, backoff: Backoff, max_backoff: float):
self.backoff = backoff
self.max_backoff = max_backoff
def should_retry(self, attempts: int, e: Exception) -> bool:
delay = min(self.max_backoff, self.backoff.get_backoff(attempts))
time.sleep(delay)
return True
class BackoffWithJitterStrategy(Strategy):
"""A strategy that will delay the next retry, with jitter induced on the delay provided by `backoff`.
The jitter parameter is a percentage of the total delay (after capping) that the timing can be off by. For example,
a capped delay of 0.1s with a jitter of 0.1 will result in a delay of 0.1s +/- 0.01s.
    :param backoff: The :class:`Backoff <agora.retry.backoff.Backoff>` to use to determine the amount of time to delay.
:param max_backoff: The maximum backoff, in seconds.
:param jitter: A percentage of the total delay that timing can be off by.
"""
def __init__(self, backoff: Backoff, max_backoff: float, jitter: float):
self.backoff = backoff
self.max_backoff = max_backoff
self.jitter = jitter
def should_retry(self, attempts: int, e: Exception) -> bool:
delay = min(self.max_backoff, self.backoff.get_backoff(attempts))
time.sleep(delay * (1 + random.random() * self.jitter * 2 - self.jitter))
return True
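# Illustrative composition sketch (the retry driver loop and do_work() are assumed;
# only the strategy classes above come from this module):
# strategies = [
#     LimitStrategy(max_attempts=5),
#     NonRetriableErrorsStrategy([ValueError]),
# ]
# attempts = 1
# while True:
#     try:
#         do_work()
#         break
#     except Exception as e:
#         if not all(s.should_retry(attempts, e) for s in strategies):
#             raise
#         attempts += 1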
|
103467
|
import torch
import torch.nn as nn
import os
from .models import Darknet
from .utils.utils import non_max_suppression, rescale_boxes
class YoLov3HumanDetector(nn.Module):
def __init__(self, weights_path="weights/yolov3.weights",
conf_thres=0.8, nms_thres=0.4, img_size=416, device=torch.device("cpu")):
super().__init__()
self.conf_thres = conf_thres
self.nms_thres = nms_thres
self.img_size = img_size
# Set up model
model_def = os.path.abspath(os.path.dirname(__file__))
# model_def = os.path.join(model_def, "config", "yolov3.cfg")
model_def = os.path.join(model_def, "config", "yolov3-spp.cfg")
model = Darknet(model_def, img_size=img_size).to(device)
if weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(weights_path))
model.eval()
self.device = device
self.model = model.to(device)
def forward(self, input_imgs, input_shapes, factor=1.05):
"""
Run YOLOv3 on input_imgs and return the largest bounding boxes of the person in input_imgs.
Args:
input_imgs (torch.tensor): (bs, 3, height, width) is in the range of [0, 1],
input_shapes (list[tuple]): [(height, width), (height, width), ...],
factor (float): the factor to enlarge the original boxes, e.g [x0, y0, x1, y1] -> [xx0, yy0, xx1, yy1],
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
Returns:
boxes_list (list[tuple or None]): (x1, y1, x2, y2) or None
"""
# Get detections
with torch.no_grad():
# img, _ = pad_to_square(input_imgs, 0)
# Resize
img_detections = self.model(input_imgs)
img_detections = non_max_suppression(img_detections, self.conf_thres, self.nms_thres)
bs = len(img_detections)
boxes_list = [None for _ in range(bs)]
# Draw bounding boxes and labels of detections
for i, (detections, img_shape) in enumerate(zip(img_detections, input_shapes)):
if detections is not None:
# Rescale boxes to original image
detections = rescale_boxes(detections, self.img_size, img_shape)
max_area = 0
boxes = None
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
# if is `person`
if int(cls_pred) != 0:
continue
box_w = x2 - x1
box_h = y2 - y1
area = box_h * box_w
if area > max_area:
max_area = area
boxes = (x1, y1, x2, y2)
if boxes is not None:
boxes_list[i] = self.enlarge_boxes(boxes, img_shape, factor=factor)
return boxes_list
@staticmethod
def enlarge_boxes(boxes, orig_shape, factor=1.0):
"""
Args:
boxes (list or tuple): (x0, y0, x1, y1),
orig_shape (tuple or list): (height, width),
factor (float): the factor to enlarge the original boxes, e.g [x0, y0, x1, y1] -> [xx0, yy0, xx1, yy1],
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
Returns:
            new_boxes (tuple): (xx0, yy0, xx1, yy1),
here (xx1 - xx0) / (x1 - x0) = factor and (yy1 - yy0) / (y1 - y0) = factor.
"""
height, width = orig_shape
x0, y0, x1, y1 = boxes
w = x1 - x0
h = y1 - y0
cx = (x1 + x0) / 2
cy = (y1 + y0) / 2
half_new_w = w * factor / 2
half_new_h = h * factor / 2
xx0 = int(max(0, cx - half_new_w))
yy0 = int(max(0, cy - half_new_h))
xx1 = int(min(width, cx + half_new_w))
yy1 = int(min(height, cy + half_new_h))
new_boxes = (xx0, yy0, xx1, yy1)
return new_boxes
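# Worked example for enlarge_boxes (illustrative numbers): with boxes=(10, 10, 30, 50),
# orig_shape=(100, 100) and factor=1.5, the 20x40 box keeps its centre (20, 30), grows
# to 30x60, and is clamped to the image bounds, giving (5, 0, 35, 60).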
|
103484
|
import EchelleJSON as ej
import numpy as np
# Read all of the file names and convert the strings to UT and JD
f = open("files.txt")
files = ["{}.json".format(ff[:-6]) for ff in f.readlines()]
# Read the HJDN field
f = open("HJD.txt", "w")
for ff in files:
edict = ej.read("jsons_BCV/{}".format(ff))
HJD = edict["HJD"]
f.write("{}\n".format(HJD))
f.close()
|
103515
|
import torch
import torch.nn.functional as F
def aggregate_sbg(prob, keep_bg=False, hard=False):
device = prob.device
k, _, h, w = prob.shape
ex_prob = torch.zeros((k+1, 1, h, w), device=device)
ex_prob[0] = 0.5
ex_prob[1:] = prob
ex_prob = torch.clamp(ex_prob, 1e-7, 1-1e-7)
logits = torch.log((ex_prob /(1-ex_prob)))
if hard:
# Very low temperature o((⊙﹏⊙))o 🥶
logits *= 1000
if keep_bg:
return F.softmax(logits, dim=0)
else:
return F.softmax(logits, dim=0)[1:]
def aggregate_wbg(prob, keep_bg=False, hard=False):
k, _, h, w = prob.shape
new_prob = torch.cat([
torch.prod(1-prob, dim=0, keepdim=True),
prob
], 0).clamp(1e-7, 1-1e-7)
logits = torch.log((new_prob /(1-new_prob)))
if hard:
# Very low temperature o((⊙﹏⊙))o 🥶
logits *= 1000
if keep_bg:
return F.softmax(logits, dim=0)
else:
return F.softmax(logits, dim=0)[1:]
def aggregate_wbg_channel(prob, keep_bg=False, hard=False):
new_prob = torch.cat([
torch.prod(1-prob, dim=1, keepdim=True),
prob
], 1).clamp(1e-7, 1-1e-7)
logits = torch.log((new_prob /(1-new_prob)))
if hard:
# Very low temperature o((⊙﹏⊙))o 🥶
logits *= 1000
if keep_bg:
return logits, F.softmax(logits, dim=1)
else:
return logits, F.softmax(logits, dim=1)[:, 1:]
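# Illustrative usage sketch (shapes are assumptions consistent with the functions above):
# prob = torch.rand(3, 1, 64, 64)             # per-object foreground probabilities
# merged = aggregate_wbg(prob, keep_bg=True)  # (4, 1, 64, 64): background + 3 objects
# merged.sum(dim=0)                           # ~1 everywhere (softmax over dim 0)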
|
103524
|
from fastapi import APIRouter
from .api.v1.job import router as job_router
from .api.v1.record import router as record_router
router = APIRouter()
router.include_router(job_router)
router.include_router(record_router)
|
103544
|
import dash_html_components as html
import dash_vtk
from dash_docs import tools
from dash_docs import styles
from dash_docs import reusable_components as rc
examples = tools.load_examples(__file__)
layout = html.Div([
rc.Markdown('''
# Click and Hover Callbacks
It's possible to create callbacks based on user clicks and hovering. First, you need to specify the `pickingModes` prop in
`dash_vtk.View` to be a list of modes you want to capture. The following values are accepted:
* `"click"`
* `"hover"`
Afterwards, you need to create callbacks where the inputs and states include one of the following read-only properties of `dash_vtk.View`.
* `clickInfo`: Called when the user clicks on an object.
* `hoverInfo`: Called when the user hovers over an object.
> The full documentation for `dash_vtk.View` can be found in the [API reference](/vtk/reference).
## Callback structure
You can notice that the `clickInfo` or `hoverInfo` data will be a dictionary with various keys describing the picked object. The keys include:
* `displayPosition`: The x,y,z coordinate with on the user's screen.
* `ray`: A line between two points in 3D space (xyz1, xyz2) that represent the mouse position. It covers the full space under the 2D mouse position.
* `representationId`: The ID assigned to the `dash_vtk.GeometryRepresentation` containing your object.
* `worldPosition`: The x, y, z coordinates in the 3D environment that you are rendering where the ray hit the object. It corresponds to the 3D coordinate on the surface of the object under your mouse.
'''),
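    rc.Markdown('''
    For instance, a minimal callback (an illustrative sketch; `"vtk-view"` and `"coords"` are assumed component IDs in your layout) can read the picked `worldPosition`:

    ```py
    @app.callback(Output("coords", "children"), Input("vtk-view", "clickInfo"))
    def display_coords(click_info):
        if not click_info:
            return "Click on the object to probe a point."
        x, y, z = click_info["worldPosition"]
        return f"Picked point: ({x:.2f}, {y:.2f}, {z:.2f})"
    ```
    '''),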
rc.Markdown('''
## Output `clickInfo` to `html.Pre`
The following example shows you how to concisely display the output of `clickInfo` inside an `html.Pre`:
'''),
html.Details(open=False, children=[
html.Summary('View full code'),
rc.Markdown(
examples['t07_click_info.py'][0],
style=styles.code_container
),
]),
html.Div(
examples['t07_click_info.py'][1],
className='example-container'
),
rc.Markdown('''
## Update representation state with `hoverInfo`
You can also construct more complex hover callbacks, which would affect the `actor` and `state` of your geometry representations.
In the [terrain mesh demo](https://dash-gallery.plotly.host/dash-vtk-explorer/pyvista-terrain-following-mesh), whenever you hover
over the surface, a callback is fired and the output is displayed on your screen:

The full code can be found [here](https://github.com/plotly/dash-vtk/tree/master/demos/pyvista-terrain-following-mesh), but the
following snippet summarizes what is needed to capture hover events in the image above:
```py
# ...
vtk_view = dash_vtk.View(
id="vtk-view",
pickingModes=["hover"],
children=[
dash_vtk.GeometryRepresentation(id="vtk-representation", ...),
dash_vtk.GeometryRepresentation(
id="pick-rep",
children=[
dash_vtk.Algorithm(id="pick-sphere", ...)
],
# ...
),
],
)
app.layout = html.Div([
# ...,
vtk_view,
# ...
])
@app.callback(
[
Output("tooltip", "children"),
Output("pick-sphere", "state"),
Output("pick-rep", "actor"),
],
[Input("vtk-view", "clickInfo"), Input("vtk-view", "hoverInfo")],
)
def onInfo(clickData, hoverData):
info = hoverData if hoverData else clickData
if info:
if (
"representationId" in info
and info["representationId"] == "vtk-representation"
):
return (
[json.dumps(info, indent=2)],
{"center": info["worldPosition"]},
{"visibility": True},
)
return dash.no_update, dash.no_update, dash.no_update
return [""], {}, {"visibility": False}
```
You can also use `hoverInfo` to update the state of another geometry representation. The image below shows how to update a cone position, orientation and size in order to probe the race car object:

Learn more by reading the [source code](https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-vehicle-geometry) or trying out the [Vehicle Geometry app](https://dash-gallery.plotly.host/dash-vehicle-geometry/).
'''),
])
|
103604
|
import argparse, time, sys, os, subprocess
class snmpRecon(object):
def __init__(self):
self.parseArgs()
self.paramStrings=['1.3.6.1.2.1.25.1.6.0', '1.3.6.1.2.1.25.4.2.1.2', '1.3.6.1.2.1.25.4.2.1.4', '1.3.6.1.2.1.25.2.3.1.4', '1.3.6.1.2.1.25.6.3.1.2', '1.3.6.1.4.1.77.1.2.25', '1.3.6.1.2.1.6.13.1.3']
self.communityList="wordlists/community.txt"
self.nmapScripts(self.host, self.port)
self.onesixtyoneScan(self.host, self.port)
self.snmpEnum(self.host, self.port)
def parseArgs(self):
parser = argparse.ArgumentParser(prog='snmpEnumerator', add_help=True)
parser.add_argument('host', help='host to scan')
parser.add_argument('port', help='port to scan')
args = parser.parse_args()
self.host=args.host
self.port=args.port
def nmapScripts(self, ip_address, port):
print "INFO: Performing nmap snmp script scan for " + ip_address + ":" + port
nmapSCAN = "nmap -sV -Pn -vv -p %s --script=snmp* -oN pillageResults/%s_snmp.nmap %s" % (port, ip_address, ip_address)
subprocess.check_output(nmapSCAN, shell=True)
def onesixtyoneScan(self,ip_address,port):
print "INFO: Performing OneSixtyOne snmp scan for " + ip_address + ":" + port
oneSixtyOneSCAN="onesixtyone -c %s %s >> pillageResults/%s_161snmp.txt" % (self.communityList, ip_address, ip_address)
subprocess.check_output(oneSixtyOneSCAN, shell=True)
def snmpEnum (self, ip_address, port):
print "INFO: Performing snmpwalk scan for " + ip_address + ":" + port
for param in self.paramStrings:
try:
snmpWalkSCAN="snmpwalk -c public -v1 %s %s >> pillageResults/%s_snmpwalk.txt;" % (ip_address, param, ip_address)
subprocess.check_output(snmpWalkSCAN, shell=True)
except:
pass
print "INFO: Performing snmpcheck scan for " + ip_address + ":" + port
try:
snmpCheckSCAN="snmpcheck -t %s >> pillageResults/%s_snmpcheck.txt;" % (ip_address, ip_address)
subprocess.check_output(snmpCheckSCAN, shell=True)
except:
pass
if __name__ == "__main__":
snmp = snmpRecon()
|
103615
|
import asyncio
import elasticsearch
import json
import logging
import requests
import time
from urllib.parse import urlencode
from datamart_core import Discoverer
from datamart_core.common import setup_logging
logger = logging.getLogger(__name__)
class ZenodoDiscoverer(Discoverer):
EXTENSIONS = ('.xls', '.xlsx', '.csv', '.sav')
FILE_TYPES = ['csv', 'xlsx', 'sav']
def __init__(self, *args, **kwargs):
super(ZenodoDiscoverer, self).__init__(*args, **kwargs)
with open('zenodo.json') as fp:
obj = json.load(fp)
self.keyword_query = obj.pop('keyword_query', '')
if obj:
logger.warning("Unknown keys in configuration: %s",
', '.join(obj))
logger.info("Loaded keyword from zenodo.json: %s",
self.keyword_query)
def discover_datasets(self):
seen = set()
url = (
'https://zenodo.org/api/records/'
'?' + urlencode(
dict(
page=1,
size=200,
q=self.keyword_query,
file_type=self.FILE_TYPES,
type='dataset',
),
doseq=True
)
)
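        # Page through the records API; the parsed 'next' relation in requests' Link headers drives pagination below.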
while url:
logger.info("Getting %s", url)
headers = {'Accept': 'application/json'}
response = requests.get(url, headers=headers)
response.raise_for_status()
obj = response.json()
            # the Zenodo records API wraps results in an Elasticsearch-style envelope
            for record in obj['hits']['hits']:
self.process_record(record)
seen.add(record['id'])
if 'next' in response.links:
url = response.links['next']['url']
time.sleep(2)
else:
url = None
# Clean up the datasets we didn't see
deleted = 0
size = 10000
query = {
'query': {
'term': {
'materialize.identifier': self.identifier,
},
},
}
hits = self.elasticsearch.scan(
index='datasets,pending',
query=query,
size=size,
_source=['materialize.zenodo_record_id'],
)
for h in hits:
if h['_source']['materialize']['zenodo_record_id'] not in seen:
self.delete_dataset(full_id=h['_id'])
deleted += 1
if deleted:
logger.info("Deleted %d missing datasets", deleted)
def process_record(self, record):
# Get metadata common for the whole deposit
record_metadata = dict(
name=record['title'],
source='zenodo.org',
source_url='https://zenodo.org/record/%d' % record['id'],
)
if 'license' in record['metadata']:
record_metadata['license'] = record['metadata']['license']
description = ''
if record['metadata'].get('description'):
description += record['metadata']['description']
if record['metadata'].get('keywords'):
description += '\n\n' + ', '.join(record['metadata']['keywords'])
if description:
record_metadata['description'] = description
logger.info("Processing record %s %r", record['id'], record['title'])
# Process each file
for file in record['files']:
if not file['filename'].lower().endswith(self.EXTENSIONS):
continue
dataset_id = '%s.%s' % (record['id'], file['id'])
# See if we've ingested this file
try:
self.elasticsearch.get(
'datasets',
'%s.%s' % (self.identifier, dataset_id),
_source=False,
)
except elasticsearch.NotFoundError:
pass
else:
logger.info("Dataset already in index")
return
try:
hit = self.elasticsearch.get(
'pending',
'%s.%s' % (self.identifier, dataset_id),
_source=['status'],
)['_source']
except elasticsearch.NotFoundError:
pass
else:
logger.info(
"Dataset already in pending index, status=%s",
hit.get('status'),
)
return
logger.info("File %s", file['filename'])
file_metadata = dict(
record_metadata,
name='%s - %s' % (
record_metadata['name'], file['filename'],
),
size=file['filesize'],
)
direct_url = file['links']['download']
# Discover this dataset
self.record_dataset(
dict(
zenodo_record_id=record['id'],
zenodo_file_id=file['id'],
zenodo_record_updated=record['modified'],
direct_url=direct_url,
),
file_metadata,
dataset_id=dataset_id,
)
if __name__ == '__main__':
setup_logging()
asyncio.get_event_loop().run_until_complete(
ZenodoDiscoverer('datamart.zenodo').run()
)
|
103626
|
import torch
from torch import Tensor
from torch.nn import Module
class ExponentialMovingAverage(Module):
def __init__(self, *size: int, momentum: float = 0.995):
super(ExponentialMovingAverage, self).__init__()
self.register_buffer("average", torch.ones(*size))
self.register_buffer("initialised", torch.tensor(False))
self.momentum = momentum
@torch.no_grad()
def forward(self, x: Tensor):
if self.training:
self.update(x=x)
return self.average
def update(self, x: Tensor):
if self.initialised.all():
self.average.copy_(x.lerp(self.average, self.momentum))
else:
self.average.copy_(x)
self.initialised.copy_(~self.initialised)
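# Illustrative usage (not part of the original module; the size and momentum are arbitrary):
if __name__ == "__main__":
    ema = ExponentialMovingAverage(3, momentum=0.9)
    ema.train()
    for _ in range(5):
        ema(torch.randn(3))        # training mode: each call moves the running average
    ema.eval()
    print(ema(torch.zeros(3)))     # eval mode: returns the average without updating it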
|
103645
|
from django.conf.urls import url
from dojo.engagement import views
urlpatterns = [
# engagements and calendar
url(r'^calendar$', views.engagement_calendar, name='calendar'),
url(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'),
url(r'^engagement$', views.engagement, name='engagement'),
url(r'^engagements_all$', views.engagements_all, name='engagements_all'),
url(r'^engagement/(?P<eid>\d+)$', views.view_engagement,
name='view_engagement'),
url(r'^engagement/(?P<eid>\d+)/ics$', views.engagement_ics,
name='engagement_ics'),
url(r'^engagement/(?P<eid>\d+)/edit$', views.edit_engagement,
name='edit_engagement'),
url(r'^engagement/(?P<eid>\d+)/delete$', views.delete_engagement,
name='delete_engagement'),
url(r'^engagement/(?P<eid>\d+)/add_tests$', views.add_tests,
name='add_tests'),
url(r'^engagement/(?P<eid>\d+)/import_scan_results$',
views.import_scan_results, name='import_scan_results'),
url(r'^engagement/(?P<eid>\d+)/close$', views.close_eng,
name='close_engagement'),
url(r'^engagement/(?P<eid>\d+)/reopen$', views.reopen_eng,
name='reopen_engagement'),
url(r'^engagement/(?P<eid>\d+)/complete_checklist$',
views.complete_checklist, name='complete_checklist'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/add$',
views.add_risk_acceptance, name='add_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/add/(?P<fid>\d+)$',
views.add_risk_acceptance, name='add_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)$',
views.view_risk_acceptance, name='view_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/edit$',
views.edit_risk_acceptance, name='edit_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/expire$',
views.expire_risk_acceptance, name='expire_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/reinstate$',
views.reinstate_risk_acceptance, name='reinstate_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/delete$',
views.delete_risk_acceptance, name='delete_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/risk_acceptance/(?P<raid>\d+)/download$',
views.download_risk_acceptance, name='download_risk_acceptance'),
url(r'^engagement/(?P<eid>\d+)/threatmodel$', views.view_threatmodel,
name='view_threatmodel'),
url(r'^engagement/(?P<eid>\d+)/threatmodel/upload$',
views.upload_threatmodel, name='upload_threatmodel'),
]
|
103646
|
import numpy as np
import tensorflow as tf
from deep_da.model.util import util_tf
"""
Models used in DANN paper
"""
class Model:
__base_n_hidden = [3072, 2048]
def __init__(self,
output_size: int=10,
n_hidden: list=None):
__n_hidden = n_hidden or self.__base_n_hidden
self.__n_hidden = __n_hidden + [output_size]
def __call__(self,
feature,
scope=None,
reuse=None):
n_hidden = [feature.get_shape()[1]] + self.__n_hidden
with tf.variable_scope(scope or "domain_classifier", reuse=reuse):
def fc(_feature, _n):
_feature = util_tf.full_connected(_feature, [n_hidden[_n], n_hidden[_n + 1]], scope='fc_%i' % _n)
return _feature
for i in range(3):
feature = fc(feature, i)
if i != 2:
feature = tf.nn.relu(feature)
feature = tf.nn.softmax(feature)
# tf.assert_equal(feature.get_shape()[1], 10)
return feature
class FeatureExtractor:
__base_cnn_channel = [64, 64, 128]
__base_cnn_filter = [5, 5, 5]
__base_cnn_stride = [2, 2, 2]
def __init__(self,
image_shape: list,
cnn_channel: list=None,
cnn_filter: list=None,
cnn_stride: list = None):
assert len(image_shape) == 3
self.__image_shape = image_shape
__cnn_channel = cnn_channel or self.__base_cnn_channel
self.__cnn_channel = [self.__image_shape[-1]] + __cnn_channel
self.__cnn_filter = cnn_filter or self.__base_cnn_filter
self.__cnn_stride = cnn_stride or self.__base_cnn_stride
def __call__(self,
image,
keep_prob=None,
scope=None,
reuse=None):
with tf.variable_scope(scope or "feature_extractor", reuse=reuse):
def conv_pool(_feature, _n, _keep_prob=None):
_shape = [self.__cnn_filter[_n], self.__cnn_filter[_n],
self.__cnn_channel[_n], self.__cnn_channel[_n+1]]
_feature = util_tf.convolution(_feature,
weight_shape=_shape,
stride=[self.__cnn_stride[_n]] * 2,
padding='SAME',
scope='conv_%i' % _n)
_feature = tf.nn.relu(_feature)
_feature = tf.nn.max_pool(_feature,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
if _keep_prob is not None:
_feature = tf.nn.dropout(_feature, keep_prob=_keep_prob)
return _feature
feature = conv_pool(image, 0)
feature = conv_pool(feature, 1, keep_prob)
feature = conv_pool(feature, 2)
def flatten(layer):
_size = np.prod(layer.get_shape().as_list()[1:])
return tf.reshape(layer, [-1, _size])
feature = flatten(feature)
return feature
class DomainClassifier:
__base_n_hidden = [1024, 1024]
def __init__(self,
output_size=1,
n_hidden: list=None):
__n_hidden = n_hidden or self.__base_n_hidden
self.__n_hidden = __n_hidden + [output_size]
def __call__(self,
feature,
scope=None,
reuse=None):
n_hidden = [feature.get_shape()[1]] + self.__n_hidden
with tf.variable_scope(scope or "domain_classifier", reuse=reuse):
def fc(_feature, _n):
_feature = util_tf.full_connected(_feature, [n_hidden[_n], n_hidden[_n + 1]], scope='fc_%i' % _n)
return _feature
for i in range(3):
feature = fc(feature, i)
if i != 2:
feature = tf.nn.relu(feature)
# tf.assert_equal(feature.get_shape()[1], 1)
return tf.nn.sigmoid(feature)
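# Illustrative composition of the three networks, as in the DANN setup (TF1.x graph mode;
# the input shape and scope names below are assumptions, not taken from this module):
# images = tf.placeholder(tf.float32, [None, 28, 28, 3])
# features = FeatureExtractor(image_shape=[28, 28, 3])(images)
# class_prob = Model(output_size=10)(features, scope='label_predictor')
# domain_prob = DomainClassifier()(features, scope='domain_classifier')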
|
103651
|
import os
from setuptools import setup
PROJECT_NAME = 'actionslog'
ROOT = os.path.abspath(os.path.dirname(__file__))
VENV = os.path.join(ROOT, '.venv')
VENV_LINK = os.path.join(VENV, 'local')
install_requires = [
'Django>=1.11.20',
'django-jsonfield>=0.9.15',
'pytz>=2015.7',
]
project = __import__(PROJECT_NAME)
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
data_files = []
for dirpath, dirnames, filenames in os.walk(PROJECT_NAME):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
if '__init__.py' in filenames:
continue
elif filenames:
for f in filenames:
data_files.append(os.path.join(
dirpath[len(PROJECT_NAME) + 1:], f))
with open(os.path.join(root_dir, 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-actions-logger',
version=project.get_version(),
packages=['actionslog', 'actionslog.migrations'],
include_package_data=True,
url='https://github.com/shtalinberg/django-actions-logger',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='A Django app that keeps a log of user actions or changes in objects',
long_description=README,
keywords='django actions log action logger',
install_requires=install_requires,
package_data={PROJECT_NAME: data_files},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
zip_safe=False,
)
|
103672
|
from core.run.event_dispatcher.register import EventRegister
def build_runner(model, runner_config, data_source_context, config, event_register: EventRegister):
if runner_config['type'] == 'default':
from .training.default.builder import build_default_training_runner
return build_default_training_runner(model, runner_config, data_source_context, config, event_register)
elif runner_config['type'] == 'default_evaluation' or runner_config['type'] == 'coarse_to_fine_evaluation':
from .evaluation.default import DefaultSiamFCEvaluator
return DefaultSiamFCEvaluator()
else:
raise NotImplementedError(runner_config['type'])
|
103692
|
import socket
import asyncio
import time
import random
import json
import requests
from walkoff_app_sdk.app_base import AppBase
class BreachSense(AppBase):
__version__ = "1.0.0"
app_name = "Breachsense" # this needs to match "name" in api.yaml
def __init__(self, redis, logger, console_logger=None):
"""
Each app should have this __init__ to set up Redis and logging.
:param redis:
:param logger:
:param console_logger:
"""
super().__init__(redis, logger, console_logger)
async def Basic_search(self, api_key, search_term, date):
if date:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&date={date}&json"
else:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
async def Display_Description(self, api_key, search_term, date):
if date:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&date={date}&attr&json"
else:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&attr&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
async def Strict_search(self, api_key, search_term, date):
if date:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&date={date}&strict&json"
else:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&strict&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
async def Check_credits(self, api_key):
url = f"https://breachsense.io/api?lic={api_key}&r&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
async def Domain_Monitor(self, api_key, action, domain):
url = f"https://breachsense.io/api?lic={api_key}&action={action}&dom={domain}&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
async def Custom_search(self, api_key, search_term, date, extra_Params):
if date:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&date={date}&{extra_Params}&json"
else:
url = f"https://breachsense.io/api?lic={api_key}&s={search_term}&{extra_Params}&json"
try:
response = requests.get(url)
return response.text
except Exception as e:
return "Exception occured: %s" % e
if __name__ == "__main__":
asyncio.run(BreachSense.run(), debug=True)
|
103698
|
import setuptools
setup_args = dict(
name="grr-grafanalib-dashboards",
description="GRR grafanalib Monitoring Dashboards",
license="Apache License, Version 2.0",
url="https://github.com/google/grr/tree/master/monitoring/grafana",
maintainer="GRR Development Team",
maintainer_email="<EMAIL>",
packages=setuptools.find_packages(),
install_requires=[
# It is mentioned in grafanalib docs that "We'll probably
# make changes that break backwards compatibility, although
# we'll try hard not to", so we stick with version 0.5.7.
"grafanalib==0.5.7",
],
)
setuptools.setup(**setup_args)
|
103702
|
from .box import Box
from .cylinder import Cylinder
from .sphere import Sphere
from .random_primitive import RandomPrimitive
from .plane import Plane
|
103710
|
from __future__ import print_function
import click
from click.testing import CliRunner
from kcleaner import cli
runner = CliRunner()
|
103712
|
from crypt import mksalt
from datetime import datetime, timedelta
from typing import List, Optional
from arrow.arrow import Arrow
from fastapi.encoders import jsonable_encoder
import sqlalchemy
from sqlalchemy import or_
from sqlalchemy.orm import Session
from sqlalchemy.sql.functions import func
from app import crud
from app.crud.base import CRUDBase
from app.logger import logger
from app.models.domain import Domain
from app.models.event import Event
from app.core.config import settings
from app.models.user import User
from app.schemas.domain import DomainCreate, DomainUpdate
class CRUDDomain(CRUDBase[Domain, DomainCreate, DomainUpdate]):
def create_with_owner(
self, db: Session, *, obj_in: DomainCreate, owner_id: int
) -> Optional[Domain]:
obj_in_data = jsonable_encoder(obj_in)
db_obj = self.model(**obj_in_data, owner_id=owner_id)
db.add(db_obj)
try:
db.commit()
db.refresh(db_obj)
except sqlalchemy.exc.IntegrityError:
return None
return db_obj
def get_multi_by_owner(
self, db: Session, *, owner_id: int, skip: int = 0, limit: int = 100
) -> List[Domain]:
return (
db.query(self.model)
.filter(Domain.owner_id == owner_id)
.filter(Domain.delete_at.is_(None))
.order_by(Domain.domain_name)
.offset(skip)
.limit(limit)
.all()
)
def get_by_name(self, db: Session, name: str) -> Domain:
return (
db.query(self.model)
.filter(Domain.domain_name == name, Domain.delete_at.is_(None))
.scalar()
)
def get_by_name_check_permission(
self, db: Session, name: str, current_user: Optional[User]
) -> Optional[Domain]:
obj = (
db.query(self.model)
.filter(Domain.domain_name == name, Domain.delete_at.is_(None))
.scalar()
)
if not obj:
return None
if obj.public is True:
return obj
if current_user:
if crud.user.is_superuser(current_user) or obj.owner_id == current_user.id:
return obj
return None
def mark_for_removal(self, db: Session, domain: Domain) -> None:
domain.delete_at = datetime.now() + timedelta(
days=settings.SOFT_DELETE_DURATION_DAYS
)
db.commit()
def refresh_domain_salts(self, db: Session) -> None:
filter_before = Arrow.now().shift(days=-1).datetime
domains = db.query(Domain).filter(
or_(Domain.salt_last_changed <= filter_before, Domain.salt.is_(None))
)
i: int = 0
for i, domain in enumerate(domains):
domain.salt = mksalt()
domain.salt_last_changed = func.now()
db.add(domain)
db.commit()
logger.info("Updated salt for %d domains", i)
def delete_pending_domains(self, db: Session) -> None:
domain_ids = (
db.query(Domain)
.filter(Domain.delete_at < datetime.now())
.with_entities(Domain.id)
.all()
)
domain_ids = tuple(domain_id[0] for domain_id in domain_ids)
if domain_ids:
db.execute(
"delete from event where domain_id in :domain_ids",
{"domain_ids": domain_ids},
)
db.execute(
"delete from domain where id in :domain_ids", {"domain_ids": domain_ids}
)
db.commit()
domain = CRUDDomain(Domain)
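# Illustrative usage (the session factory import, schema fields, and ids are assumptions):
# from app.db.session import SessionLocal
# db = SessionLocal()
# created = domain.create_with_owner(db, obj_in=DomainCreate(domain_name="example.org"), owner_id=1)
# fetched = domain.get_by_name(db, "example.org")
# if fetched is not None:
#     domain.mark_for_removal(db, fetched)  # soft delete; purged later by delete_pending_domains()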
|
103715
|
import sys
import os
pattern = sys.argv[1]
print(pattern)
def get_result_line(i, pattern):
filename = pattern.format(i)
if os.path.exists(filename):
lines = open(filename, 'r').readlines()[-3:]
return str(i) + "\t" + '\t'.join([line.split(':')[1].strip() for line in lines])
return None
results = []
for epoch in list(range(30)) + ['_best'] + ['_last']:
    result_line = get_result_line(epoch, pattern)
    if result_line is not None:
        results.append(result_line)
print("epoch\tPrec\tRecall\tF_0.5")
for line in results:
print(line)
|
103755
|
import numpy as np
# projection mask of NYUv2
PMASK = np.zeros([480, 640], dtype=np.float64)
PMASK[44:471, 40:601] = 1.0
# sorted names
METRIC_NAMES = [
'RMSE',
'Mean RMSE',
'Mean Log10',
'Abs Rel Diff',
'Squa Rel Diff',
'delta < 1.25',
'delta < 1.25^2',
'delta < 1.25^3',
]
def get_metrics(
depths, preds, projection_mask=True, masks=None, rmse_only=False):
'''
Args:
depths: a list of ground truth depth maps, of dtype np.float64
in range (0,1).
preds: a list of predictions, of dtype np.float64, in range (0,1).
projection_mask: if use the valid projection mask of NYUv2,
in which case, the depth map has size 480x640.
Returns:
A dictionary of different metrics.
'''
# Check shape and dtype
assert len(preds) == len(depths)
for i in range(len(preds)):
assert preds[i].dtype == np.float64
assert depths[i].dtype == np.float64
assert preds[i].shape == depths[i].shape
preds = np.stack(preds, axis=0) * 10.0
depths = np.stack(depths, axis=0) * 10.0
results = {}
# Masks
if masks:
masks = np.stack(masks, axis=0)
masks = np.float64(depths > 0) * np.float64(masks)
else:
masks = np.float64(depths > 0)
if projection_mask:
assert masks.shape[1:] == (480, 640)
masks = masks * PMASK[None]
    masks = masks.astype(bool)
npixels = np.sum(masks, axis=(1, 2))
diff = preds - depths
# MSE, RMSE
mse = np.sum(diff**2.0 * masks, axis=(1, 2)) / npixels
mse = np.mean(mse)
rmse = np.sqrt(mse)
if rmse_only:
return rmse
results['MSE'] = mse
results['RMSE'] = rmse
# Delta
delta = np.maximum(preds / depths, depths / preds)
delta1 = np.sum(np.float64(delta < 1.25) * masks, axis=(1, 2)) / npixels
delta2 = np.sum(np.float64(delta < 1.25**2) * masks, axis=(1, 2)) / npixels
delta3 = np.sum(np.float64(delta < 1.25**3) * masks, axis=(1, 2)) / npixels
results['delta < 1.25'] = np.mean(delta1)
results['delta < 1.25^2'] = np.mean(delta2)
results['delta < 1.25^3'] = np.mean(delta3)
# Absolute relative difference
abrdiff = np.abs(diff) * masks / depths
abrdiff = np.sum(abrdiff, axis=(1, 2)) / npixels
results['Abs Rel Diff'] = np.mean(abrdiff)
# Squared relative difference
sqrdiff = np.square(diff) * masks / depths
sqrdiff = np.sum(sqrdiff, axis=(1, 2)) / npixels
results['Squa Rel Diff'] = np.mean(sqrdiff)
# Mean log10
log10 = np.abs(np.log10(preds) - np.log10(depths))
log10 = np.sum(log10 * masks, axis=(1, 2)) / npixels
results['Mean Log10'] = np.mean(log10)
# Mean RMSE
mrmse = np.sum(np.square(diff) * masks, axis=(1, 2)) / npixels
mrmse = np.mean(np.sqrt(mrmse))
results['Mean RMSE'] = mrmse
return results
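# Illustrative self-check (not part of the original module): a perfect prediction on synthetic
# 480x640 maps should give RMSE 0 and every delta threshold equal to 1.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    gt = [rng.uniform(0.1, 1.0, size=(480, 640)) for _ in range(2)]
    metrics = get_metrics(gt, gt, projection_mask=True)
    for name in METRIC_NAMES:
        print(name, metrics[name])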
|
103779
|
import torch
import torch.nn as nn
from loss_functions import AngularPenaltySMLoss
class Stem_layer(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size, drop_rate, pool_size):
super().__init__()
dilation = 1
self.conv = nn.Conv1d(
in_ch,
out_ch,
kernel_size,
padding=int((kernel_size + (kernel_size - 1) * (dilation - 1)) / 2),
dilation=dilation,
stride=1,
bias=False,
)
self.bn = nn.BatchNorm1d(out_ch)
self.relu = nn.ReLU()
self.pooling = nn.MaxPool1d(kernel_size=pool_size, stride=2)
self.drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.pooling(x)
x = self.drop(x)
return x
class Stem_layer_upsample(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size, drop_rate, scale_factor):
super().__init__()
dilation = 1
self.conv = nn.Conv1d(
in_ch,
out_ch,
kernel_size,
padding=int((kernel_size + (kernel_size - 1) * (dilation - 1)) / 2)+1,
dilation=dilation,
stride=1,
bias=False,
)
self.bn = nn.BatchNorm1d(out_ch)
self.relu = nn.ReLU()
self.upsample = nn.Upsample(scale_factor=scale_factor, mode='linear', align_corners=True)
self.drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.upsample(x)
x = self.conv(x)
x = self.drop(x)
return x
class Wave_block(nn.Module):
def __init__(self, out_ch, kernel_size, dilation,drop_rate):
super().__init__()
self.kernel_size = kernel_size
self.out_ch = out_ch
self.conv1 = nn.Conv1d(
out_ch,
out_ch,
kernel_size,
padding=int((kernel_size + (kernel_size - 1) * (dilation - 1)) / 2),
dilation=dilation,
bias=False,
)
self.conv2 = nn.Conv1d(
out_ch,
out_ch,
kernel_size,
padding=int((kernel_size + (kernel_size - 1) * (dilation - 1)) / 2),
dilation=dilation,
bias=False,
)
self.conv_res = nn.Conv1d(out_ch, out_ch, 1, padding=0, dilation=dilation, bias=False,)
self.conv_skip = nn.Conv1d(out_ch, out_ch, 1, padding=0, dilation=dilation, bias=False,)
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
self.bn1 = nn.BatchNorm1d(out_ch)
self.bn2 = nn.BatchNorm1d(out_ch)
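    # WaveNet-style gated activation: the tanh branch is multiplied by a sigmoid gate, then
    # split into a residual path (added back to the block input) and a skip path.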
def forward(self, x):
res_x = x
tanh = self.tanh(self.bn1(self.conv1(x)))
sig = self.sigmoid(self.bn2(self.conv2(x)))
res = torch.mul(tanh, sig)
res_out = self.conv_res(res) + res_x
skip_out = self.conv_skip(res)
return res_out, skip_out
class ECGNet(nn.Module):
def __init__(self, n_channels, hparams, input_block=Stem_layer, basic_block=Wave_block,decoder_out_block = Stem_layer_upsample):
super().__init__()
self.basic_block = basic_block
self.hparams = hparams['model']
# stem layers
self.layer1 = input_block(
n_channels, self.hparams['n_filt_stem'], self.hparams['kern_size'], self.hparams['dropout'], 3
)
self.layer2 = input_block(
self.hparams['n_filt_stem'],
self.hparams['n_filt_res'],
self.hparams['kern_size'],
self.hparams['dropout'],
3,
)
# wavenet(residual) layers
self.layer3 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 2,self.hparams['dropout'])
self.layer4 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 4,self.hparams['dropout'])
self.layer5 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 8,self.hparams['dropout'])
self.layer6 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 16,self.hparams['dropout'])
self.layer7 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 32,self.hparams['dropout'])
self.layer8 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 64,self.hparams['dropout'])
self.layer9 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 128,self.hparams['dropout'])
self.layer10 = self.basic_block(self.hparams['n_filt_res'], self.hparams['kern_size'], 256,self.hparams['dropout'])
self.conv_out_1 = input_block(
self.hparams['n_filt_res'], self.hparams['n_filt_out_conv_1'], self.hparams['kern_size'], self.hparams['dropout'], 3
)
#self.bn1 = nn.BatchNorm1d(self.hparams['n_filt_out_conv_1'])
self.conv_out_2 = input_block(
self.hparams['n_filt_out_conv_1'], self.hparams['n_filt_out_conv_2'], self.hparams['kern_size'], self.hparams['dropout'], 2
)
#main head
self.fc = nn.Linear(self.hparams['n_filt_out_conv_2'], 27) #4733,27)#
self.out = torch.nn.Sigmoid()
#autoencoder head
self.output_decoder_1 = decoder_out_block(self.hparams['n_filt_res'],self.hparams['n_filt_stem'],self.hparams['kern_size'],self.hparams['dropout'],
2)
self.output_decoder_2 = decoder_out_block(self.hparams['n_filt_stem'], n_channels,
1, self.hparams['dropout'],
2)
def _make_layers(self, out_ch, kernel_size, n, basic_block):
# dilation_rates = [2 ** i for i in range(n)]
layers = []
for layer in range(n):
layers.append(basic_block(out_ch, out_ch, kernel_size))
return nn.Sequential(*layers)
def forward(self, x):
# x, h_0 = self.input_layer_1(x)
# x = x.cpu().detach()
# x,(h_0,c_0) = self.input_layer_1(x)
x = x.permute(0, 2, 1)
x = self.layer1(x)
x = self.layer2(x)
x, skip_1 = self.layer3(x)
#x = self.bn3(x)
x, skip_2 = self.layer4(x)
#x = self.bn4(x)
x, skip_3 = self.layer5(x)
#x = self.bn5(x)
x, skip_4 = self.layer6(x)
#x = self.bn6(x)
x, skip_5 = self.layer7(x)
#x = self.bn7(x)
x, skip_6 = self.layer8(x)
#x = self.bn8(x)
x, skip_7 = self.layer9(x)
#x = self.bn9(x)
x, skip_8 = self.layer10(x)
#x = self.bn10(x)
#decoder head
decoder_out = torch.relu(self.output_decoder_1(x))
decoder_out = self.output_decoder_2(decoder_out)
decoder_out = decoder_out[:,:,:-2]
decoder_out = decoder_out.reshape(-1, decoder_out.shape[2], decoder_out.shape[1])
#main head
x = skip_1 + skip_2 + skip_3 + skip_4 + skip_5 + skip_6 + skip_7 + skip_8
x = self.conv_out_1(x)
x = self.conv_out_2(x)
x = torch.mean(x, dim=2)
x = self.out(self.fc(x))
return x,decoder_out
|
103801
|
from typing import List
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        # occurrences are unique iff no two values share the same count
        return len(set(counts.values())) == len(counts.values())
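# Illustrative check (inputs are arbitrary): [1, 2, 2, 1, 1, 3] has counts {1: 3, 2: 2, 3: 1},
# which are all distinct, while in [1, 2] both values occur exactly once.
if __name__ == "__main__":
    assert Solution().uniqueOccurrences([1, 2, 2, 1, 1, 3]) is True
    assert Solution().uniqueOccurrences([1, 2]) is False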
|
103803
|
import pytest
from testfixtures import LogCapture
@pytest.fixture(autouse=True)
def log_capture():
with LogCapture() as capture:
yield capture
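# Illustrative usage in a test (the logger name and message are arbitrary):
# import logging
# def test_warns(log_capture):
#     logging.getLogger("kcleaner").warning("boom")
#     log_capture.check(("kcleaner", "WARNING", "boom"))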
|