id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
110972
|
from __future__ import print_function
import json
import random as rd
def read_dict(filename):
    """Parse a rule-dictionary file into a list of rules.

    Each non-empty line has three "|"-separated fields: tree|words|derivations,
    where tree is "FIRST_EDGE (label pos flag)*", words is "(pos pattern)*",
    and derivations is a space-separated list of ints.

    Returns a list of [first_edge, edges, words, dervs] entries with
    edges = [[label, int(pos), flag], ...] and words = [[int(pos), pattern], ...].
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original left the file open for the process lifetime).
    with open(filename, "r") as f:
        rs = f.read()
    # splitlines() handles \n, \r and \r\n uniformly; the original only
    # supported files that used a single newline convention throughout.
    rules = rs.splitlines()
    out_rule = []
    for rule in rules:
        if len(rule) < 1:
            continue
        details = rule.split("|")
        flat_tree = details[0].split(" ")
        first_edge = flat_tree[0]
        # Edge triples start right after the first token.
        edges = [[flat_tree[i * 3 + 1], int(flat_tree[i * 3 + 2]), flat_tree[i * 3 + 3]]
                 for i in range(len(flat_tree) // 3)]
        flat_words = details[1].split(" ")
        words = [[int(flat_words[i * 2]), flat_words[i * 2 + 1]]
                 for i in range(len(flat_words) // 2)]
        dervs = [int(x) for x in details[2].split(" ")]
        out_rule.append([first_edge, edges, words, dervs])
    return out_rule
def match_tree(sen, rule):
    """Match one rule's dependency sub-tree against a sentence.

    sen: [tok, dep] -- tok is the token list; dep is a list of
    [idx_a, idx_b, label] triples (presumably head/dependent indices plus a
    relation label -- TODO confirm ordering against the parser output).
    rule: [first_edge, edges, words, dervs] as produced by read_dict.

    Returns a list of index tuples (one per match), each projected down to
    the positions listed in the rule's derivation list.
    """
    [tok, dep] = sen
    # Index the dependency pairs by their relation label.
    note_dict = {}
    for dep_pair in dep:
        if not dep_pair[2] in note_dict:
            note_dict[dep_pair[2]] = [[dep_pair[0],dep_pair[1]]]
        else:
            note_dict[dep_pair[2]].append([dep_pair[0],dep_pair[1]])
    [fe, es, ws, ds] = rule
    # Seed the candidate set with every pair carrying the rule's first edge label.
    remain = []
    if fe in note_dict:
        remain = note_dict[fe]
    # Extend each candidate by one node per rule edge; candidates that cannot
    # be extended are dropped.
    for new_dep in es:
        newremain = []
        for remain_item in remain:
            if new_dep[0] in note_dict:
                for note_pair in note_dict[new_dep[0]]:
                    # Flag "f" matches on the pair's first slot, anything else
                    # on the second; the opposite slot joins the candidate.
                    indx = 0 if new_dep[2] == "f" else 1
                    if note_pair[indx] == remain_item[new_dep[1]]:
                        newremain.append(remain_item + [note_pair[1-indx]])
        remain = newremain
    # Apply the word constraints: each chk_ws is [position, pattern] where the
    # pattern is "^"-separated alternatives and a "!" prefix marks a forbidden
    # word. An empty allow-list (all entries negated) accepts any token.
    newremain = []
    for remain_item in remain:
        agree = True
        for chk_ws in ws:
            wcontain = chk_ws[1].split("^")
            yc = [x for x in wcontain if not "!" in x]
            nc = [x[1:] for x in wcontain if "!" in x]
            if (not (tok[remain_item[chk_ws[0]]] in yc or len(yc)<1)) or (tok[remain_item[chk_ws[0]]] in nc):
                agree = False
                break
        if agree:
            # Project the surviving candidate down to the derived positions.
            newremain.append([remain_item[x] for x in ds])
    remain = newremain
    return remain
def make_net(content, edd, cd, sd):
    """Build a relation net from dependency-parsed sentences.

    content: list of sentences, each [tokens, dependencies].
    edd: entity-distinction rules (merge multi-token entities in place).
    cd / sd: causality / structure rules (from read_dict).

    Returns {head_word: [set(causal_tails), set(structure_tails)]}.

    NOTE: sentences in `content` are modified in place -- entity-merge rules
    rewrite their token lists. This matches the original behaviour, which
    aliased (not copied) each sentence before mutating it.
    """
    net = {}
    for sen in content:
        newsen = sen  # alias, not a copy: token rewrites below mutate the input
        # Entity-distinction rules fuse the matched tokens into one "a.b.c" token.
        for edd_rule in edd:
            for combine_item in match_tree(newsen, edd_rule):
                newsen[0][combine_item[0]] = ".".join([newsen[0][x] for x in combine_item])
        _record_pairs(net, newsen, cd, 0)  # causality edges -> slot 0
        _record_pairs(net, newsen, sd, 1)  # structure edges -> slot 1
    return net

def _record_pairs(net, sen, rules, slot):
    """Apply `rules` to `sen`; record each matched (head, tail) word pair in net[head][slot]."""
    for rule in rules:
        for combine_item in match_tree(sen, rule):
            head, tail = (sen[0][combine_item[i]] for i in range(2))
            if head not in net:
                net[head] = [set(), set()]
            net[head][slot].add(tail)
def bfs_net(net, deep_remain, key_set):
    """Breadth-first expansion of `net` from `key_set`.

    net maps word -> [set_of_slot0_targets, set_of_slot1_targets]; the two
    slots are expanded independently. Returns the pair of all nodes reachable
    within `deep_remain` hops, one set per slot.
    """
    # Direct successors of the current frontier, per slot.
    layer = []
    for direction in range(2):
        found = set()
        for key in key_set:
            found.update(net[key][direction])
        layer.append(found)
    if deep_remain > 1:
        # Recurse only through nodes that themselves appear in the net.
        deeper = [
            bfs_net(net, deep_remain - 1, [node for node in layer[d] if node in net])[d]
            for d in range(2)
        ]
    else:
        deeper = [set(), set()]
    return [layer[d] | deeper[d] for d in range(2)]
def net_compare(nbig, nsmall):
    """Return "a" when every edge of nsmall is reachable in nbig, else "b".

    Each key of nsmall must exist in nbig, and both of its target sets must
    be contained in the set of nodes reachable from that key within two hops.
    """
    max_deep = 2
    for key in nsmall:
        if key not in nbig:
            return "b"
        reachable = bfs_net(nbig, max_deep, set([key]))
        expected = nsmall[key]
        # Both slots' expected targets must be subsets of what is reachable.
        if not all(reachable[d] & expected[d] == expected[d] for d in range(2)):
            return "b"
    return "a"
def _run():
    """Evaluate every question against the lesson net and report accuracy."""
    # Parsed dependency trees for lessons/questions, plus ground-truth answers.
    lessondict = json.load(open("./dependencies/lesson_tf.json","r"))
    questiondict = json.load(open("./dependencies/question_tf.json","r"))
    ground_truth = json.load(open("./answer/gdt.json","r"))
    question_keys = [x for x in questiondict.keys()]
    # Rule dictionaries: entity distinction, causality and structure patterns.
    entitydistinct_dict = read_dict("./dicts/entdis.txt")
    causalty_dict = read_dict("./dicts/causality.txt")
    structure_dict = read_dict("./dicts/structure.txt")
    print("reading completed.")
    lessons_list = [lessondict[key] for key in lessondict]
    lesson_content = []
    for l in lessons_list:
        # Lesson sentences are keyed "1".."N"; restore their numeric order.
        thislesson = [l[str(i+1)] for i in range(len(l))]
        lesson_content += thislesson
    # One combined net over all lesson sentences; one net per question.
    lesson_net = make_net(lesson_content, entitydistinct_dict, causalty_dict, structure_dict)
    raw_result_dict = {}
    qnetlen = {}
    for key in question_keys:
        question_net = make_net(questiondict[key], entitydistinct_dict, causalty_dict, structure_dict)
        qnetlen[key] = len(question_net)
        raw_result_dict[key] = net_compare(lesson_net, question_net)
    clean_result_dict = {}
    # count1_*: confusion-matrix cells for questions with a non-empty net
    # (a = predicted "a" & correct, b = predicted "a" & wrong,
    #  c = predicted "b" & correct, d = predicted "b" & wrong).
    count1_a = 0
    count1_b = 0
    count1_c = 0
    count1_d = 0
    # count2_*: breakdown of questions whose net came out empty.
    count2_a = 0
    count2_b = 0
    count2_all = 0
    # count3: overall number of correct predictions.
    count3 = 0
    for key in raw_result_dict:
        clean_result_dict[key] = raw_result_dict[key]
        if clean_result_dict[key] == ground_truth[key] and qnetlen[key] > 0:
            if clean_result_dict[key] == "a":
                count1_a += 1
            else:
                count1_c += 1
        if clean_result_dict[key] != ground_truth[key] and qnetlen[key] > 0:
            if clean_result_dict[key] == "a":
                count1_b += 1
            else:
                count1_d += 1
        if qnetlen[key] == 0:
            if ground_truth[key] == "a":
                count2_a += 1
            else:
                count2_b += 1
            count2_all += 1
        if clean_result_dict[key] == ground_truth[key]:
            count3 += 1
    print("Question has CERG:\n Method:T GT:T / Method:T GT:F / Method:F GT:F / Method:F GT:T: ",end="")
    print(count1_a,end="")
    print(" / ",end="")
    print(count1_b,end="")
    print(" / ",end="")
    print(count1_c,end="")
    print(" / ",end="")
    print(count1_d)
    print("Question has no CERG\n Method:T GT:T / Method:T GT:F / All: ",end="")
    print(count2_a,end="")
    print(" / ",end="")
    print(count2_b,end="")
    print(" / ",end="")
    print(count2_all)
    print("Precision: ",end="")
    # NOTE(review): 998 is a hard-coded divisor -- presumably the total
    # question count for this dataset; confirm it matches len(question_keys).
    print(float(count3) / 998)
    with open("./answer/candidate.json","w") as f1:
        json.dump(clean_result_dict, f1)
if __name__ == "__main__":
    # Script entry point: run the full question-evaluation pipeline.
    print("tfproc.py...")
    _run()
    print("done.")
|
110994
|
from . import good, fail
# Exercises the 'isinstance(CLASS)' validator: `good` expects the value to be
# accepted, `fail` expects it to be rejected. A base-class instance must not
# satisfy an isinstance check against its subclass.
class BaseClass():
    pass
class SubClass(BaseClass):
    pass
good('isinstance(BaseClass)', BaseClass())
good('isinstance(BaseClass)', SubClass())
good('isinstance(SubClass)', SubClass())
fail('isinstance(SubClass)', BaseClass())
# Same checks with an explicit `object` base.
class BaseClass2(object):
    pass
class SubClass2(BaseClass2):
    pass
good('isinstance(BaseClass2)', BaseClass2())
good('isinstance(BaseClass2)', SubClass2())
good('isinstance(SubClass2)', SubClass2())
fail('isinstance(SubClass2)', BaseClass2())
# Three-level hierarchy declared without an explicit base ("old-style" syntax;
# in Python 3 these are still new-style classes).
class BaseClass3():
    pass
class MidClass3(BaseClass3):
    pass
class SubClass3(MidClass3):
    pass
good('isinstance(BaseClass3)', BaseClass3())
good('isinstance(BaseClass3)', SubClass3())
good('isinstance(SubClass3)', SubClass3())
fail('isinstance(SubClass3)', BaseClass3())
# Three-level hierarchy with an explicit `object` base.
class BaseClass4(object):
    pass
class MidClass4(BaseClass4):
    pass
class SubClass4(MidClass4):
    pass
good('isinstance(BaseClass4)', BaseClass4())
good('isinstance(BaseClass4)', SubClass4())
good('isinstance(SubClass4)', SubClass4())
fail('isinstance(SubClass4)', BaseClass4())
|
111043
|
import datetime
import re
from pyasn1.codec.der import decoder
from ldap_shell.krb5 import constants, asn1
class KerberosException(Exception):
    """Raised for malformed Kerberos/ASN.1 data in this module."""
    pass
def _asn1_decode(data, asn1Spec):
    """DER-decode *data* against *asn1Spec*; pass through pre-decoded objects.

    Raises KerberosException when trailing bytes remain after decoding.
    """
    if isinstance(data, (str, bytes)):
        decoded, leftover = decoder.decode(data, asn1Spec=asn1Spec)
        if leftover != b'':
            raise KerberosException("asn1 encoding invalid")
        return decoded
    return data
class EncryptedData(object):
    """Kerberos EncryptedData: encryption type, optional key version, ciphertext."""
    def __init__(self):
        self.etype = None       # encryption type (constants.EncryptionTypes value)
        self.kvno = None        # key version number; False when absent
        self.ciphertext = None  # raw cipher content as str
    def from_asn1(self, data):
        """Populate this object from a (DER-encoded) EncryptedData; return self."""
        data = _asn1_decode(data, asn1.EncryptedData())
        self.etype = constants.EncryptionTypes(data.getComponentByName('etype')).value
        kvno = data.getComponentByName('kvno')
        if (kvno is None) or (kvno.hasValue() is False):
            # kvno is optional in the ASN.1 schema; False marks "not present".
            self.kvno = False
        else:
            self.kvno = kvno
        self.ciphertext = str(data.getComponentByName('cipher'))
        return self
    def to_asn1(self, component):
        """Fill *component* (an asn1.EncryptedData) from this object; return it."""
        component.setComponentByName('etype', int(self.etype))
        if self.kvno:
            component.setComponentByName('kvno', self.kvno)
        component.setComponentByName('cipher', self.ciphertext)
        return component
class Principal:
    """The principal's value can be supplied as:
    * a single string
    * a sequence containing a sequence of component strings and a realm string
    * a sequence whose first n-1 elemeents are component strings and whose last
    component is the realm
    If the value contains no realm, then default_realm will be used."""
    def __init__(self, value=None, default_realm=None, type=None):
        self.type = constants.PrincipalNameType.NT_UNKNOWN
        self.components = []
        self.realm = None
        if value is None:
            return
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        if isinstance(value, Principal):
            # Copy constructor.
            self.type = value.type
            self.components = value.components[:]
            self.realm = value.realm
        elif isinstance(value, str):
            # "comp1/comp2@REALM" syntax with backslash escaping.
            m = re.match(r'((?:[^\\]|\\.)+?)(@((?:[^\\@]|\\.)+))?$', value)
            if not m:
                raise KerberosException("invalid principal syntax")
            def unquote_component(comp):
                # Strip the escaping backslashes.
                return re.sub(r'\\(.)', r'\1', comp)
            if m.group(2) is not None:
                self.realm = unquote_component(m.group(3))
            else:
                self.realm = default_realm
            self.components = [
                unquote_component(qc)
                for qc in re.findall(r'(?:[^\\/]|\\.)+', m.group(1))]
        elif len(value) == 2:
            # ([components], realm) or (component, realm).
            self.components = value[0]
            self.realm = value[-1]
            if isinstance(self.components, str):
                self.components = [self.components]
        elif len(value) >= 2:
            # (comp1, ..., compN, realm) -- only reached for length > 2.
            self.components = value[0:-1]
            self.realm = value[-1]
        else:
            raise KerberosException("invalid principal value")
        if type is not None:
            self.type = type
    def __eq__(self, other):
        if isinstance(other, str):
            other = Principal(other)
        # NT_UNKNOWN on either side acts as a wildcard for the name type.
        return (self.type == constants.PrincipalNameType.NT_UNKNOWN.value
                or other.type == constants.PrincipalNameType.NT_UNKNOWN.value
                or self.type == other.type) \
            and all(map(lambda a, b: a == b, self.components, other.components)) \
            and self.realm == other.realm
    def __str__(self):
        def quote_component(comp):
            # Escape the separator, '@' and backslash with backslashes.
            return re.sub(r'([\\/@])', r'\\\1', comp)
        ret = "/".join([quote_component(c) for c in self.components])
        if self.realm is not None:
            ret += "@" + self.realm
        return ret
    def __repr__(self):
        return "Principal((" + repr(self.components) + ", " + \
            repr(self.realm) + "), t=" + str(self.type) + ")"
    def from_asn1(self, data, realm_component, name_component):
        """Populate from decoded ASN.1 *data*, reading the two named fields; return self."""
        name = data.getComponentByName(name_component)
        self.type = constants.PrincipalNameType(
            name.getComponentByName('name-type')).value
        self.components = [
            str(c) for c in name.getComponentByName('name-string')]
        self.realm = str(data.getComponentByName(realm_component))
        return self
    def components_to_asn1(self, name):
        """Fill the ASN.1 PrincipalName *name* with this principal's type/components."""
        name.setComponentByName('name-type', int(self.type))
        strings = name.setComponentByName('name-string'
                                          ).getComponentByName('name-string')
        for i, c in enumerate(self.components):
            strings.setComponentByPosition(i, c)
        return name
class Ticket:
    """Kerberos Ticket: protocol version, service principal and encrypted part."""
    def __init__(self):
        # This is the kerberos version, not the service principal key
        # version number.
        self.tkt_vno = None
        self.service_principal = None
        self.encrypted_part = None
    def from_asn1(self, data):
        """Populate from a (DER-encoded) Ticket; return self."""
        data = _asn1_decode(data, asn1.Ticket())
        self.tkt_vno = int(data.getComponentByName('tkt-vno'))
        self.service_principal = Principal()
        self.service_principal.from_asn1(data, 'realm', 'sname')
        self.encrypted_part = EncryptedData()
        self.encrypted_part.from_asn1(data.getComponentByName('enc-part'))
        return self
    def to_asn1(self, component):
        """Fill *component* (an asn1.Ticket); tkt-vno is always 5 (Kerberos v5)."""
        component.setComponentByName('tkt-vno', 5)
        component.setComponentByName('realm', self.service_principal.realm)
        asn1.seq_set(component, 'sname',
                     self.service_principal.components_to_asn1)
        asn1.seq_set(component, 'enc-part', self.encrypted_part.to_asn1)
        return component
    def __str__(self):
        return "<Ticket for %s vno %s>" % (str(self.service_principal), str(self.encrypted_part.kvno))
class KerberosTime(object):
    """Conversions between datetime and the KerberosTime wire string."""
    # Sentinel meaning "no expiry" (the epoch).
    INDEFINITE = datetime.datetime(1970, 1, 1, 0, 0, 0)

    @staticmethod
    def to_asn1(dt):
        # A KerberosTime is really just a string, so we can return a
        # string here, and the asn1 library will convert it correctly.
        fields = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
        return "{:04d}{:02d}{:02d}{:02d}{:02d}{:02d}Z".format(*fields)

    @staticmethod
    def from_asn1(data):
        """Parse "YYYYMMDDHHMMSSZ" into a naive datetime."""
        text = str(data)
        year = int(text[0:4])
        # month, day, hour, minute, second: two digits each from offset 4.
        rest = [int(text[pos:pos + 2]) for pos in range(4, 14, 2)]
        if text[14] != 'Z':
            raise KerberosException("timezone in KerberosTime is not Z")
        return datetime.datetime(year, *rest)
|
111082
|
import torch.nn as nn
import hypergan as hg
class Residual(hg.Layer):
    """
    ---
    description: 'layer residual for configurable component'
    ---
    # residual layer
    `residual` adds one or more residual blocks https://paperswithcode.com/method/residual-block
    ## optional arguments
    The number of residual blocks to add
    ## input size
    Any 4-d tensor
    ## output size
    Same as input size
    ## syntax
    ```json
    "residual COUNT"
    ```
    ## examples
    ```json
    "residual 3"
    ```
    """
    def __init__(self, component, args, options):
        super(Residual, self).__init__(component, args, options)
        self.size = component.current_size
        # COUNT is documented as optional, but `args[0]` alone raised
        # IndexError for a bare "residual". Default to 3 blocks when the
        # count is missing or falsy (preserving the original `or 3`).
        count = (args[0] if args else None) or 3
        layers = []
        for i in range(count):
            # One residual block: two 3x3 same-padding convolutions with ReLUs.
            layers += [nn.Conv2d(self.size.channels, self.size.channels, 3, 1, padding = (1, 1))]
            layers += [nn.ReLU()]
            layers += [nn.Conv2d(self.size.channels, self.size.channels, 3, 1, padding = (1, 1))]
            layers += [nn.ReLU()]
        self.residual = nn.Sequential(*layers)
    def output_size(self):
        # Residual blocks preserve spatial size and channel count.
        return self.size
    def forward(self, input, context):
        # Skip connection: y = x + F(x).
        residual = self.residual(input)
        return input + residual
|
111092
|
import socket
def getfromhostname(hostname):
    """Resolve *hostname* and print its ASN and owner via Team Cymru whois."""
    print("AS info for hostname :" + hostname)
    resolved_ip = socket.gethostbyname(hostname)
    from cymruwhois import Client
    whois_client = Client()
    record = whois_client.lookup(resolved_ip)
    print(record.asn)
    print(record.owner)
def getfromip(ip):
    """Print the ASN and owner for *ip* via Team Cymru whois."""
    print("AS info for IP : " + ip)
    from cymruwhois import Client
    whois_client = Client()
    record = whois_client.lookup(ip)
    print(record.asn)
    print(record.owner)
# Demo lookups -- note these run on import. The sample IP is a private-range
# address (presumably a redacted placeholder), so the lookup result is
# unlikely to be meaningful.
getfromhostname("google.com")
getfromip("172.16.17.32")
|
111097
|
from typing import Sequence
import pytest
from faker import Faker
from overhave.entities import (
FeatureExtractor,
FeatureTypeExtractionError,
OverhaveFileSettings,
ScenariosTestFileNotFound,
)
from tests.objects import FeatureTestContainer, get_test_feature_extractor
from tests.unit.feature.conftest import get_incorrect_test_file_settings
class TestFeatureExtractor:
    """ Unit tests for :class:`FeatureExtractor`. """
    @pytest.mark.parametrize("test_file_settings", [get_incorrect_test_file_settings()], indirect=True)
    def test_feature_type_extraction_error(self, test_file_settings: OverhaveFileSettings) -> None:
        # Incorrect file settings must surface as FeatureTypeExtractionError
        # during project-data extraction.
        with pytest.raises(FeatureTypeExtractionError):
            FeatureExtractor(file_settings=test_file_settings)._extract_project_data()
    @pytest.mark.parametrize("test_file_settings", [get_incorrect_test_file_settings()], indirect=True)
    def test_scenarios_test_file_not_found_error(self, test_file_settings: OverhaveFileSettings, faker: Faker) -> None:
        # A feature type with no matching pytest-bdd test file must raise.
        extractor = FeatureExtractor(file_settings=test_file_settings)
        extractor._feature_types = [faker.word()]
        with pytest.raises(ScenariosTestFileNotFound):  # noqa: PT012
            extractor._check_pytest_bdd_scenarios_test_files()
    @pytest.mark.parametrize("test_feature_extractor", [get_test_feature_extractor()], indirect=True)
    def test_feature_extractor_properties(
        self, test_feature_extractor: FeatureExtractor, test_feature_containers: Sequence[FeatureTestContainer]
    ) -> None:
        # The extractor's discovered types and type->directory mapping must
        # agree with the fixture containers.
        assert set(test_feature_extractor.feature_types) == {feature.type for feature in test_feature_containers}
        assert test_feature_extractor.feature_type_to_dir_mapping == {
            feature.type: feature.project_path.parent for feature in test_feature_containers
        }
|
111118
|
import ast, collections, dis, types, sys
from functools import reduce
from itertools import chain
from check_subset import check_conformity
def Instruction(opcode, arg):
    """Encode one bytecode instruction: bare opcode, or opcode plus a
    little-endian 16-bit argument when *arg* is given."""
    if arg is None:
        return bytes([opcode])
    return bytes([opcode, arg % 256, arg // 256])
def concat(assemblies): return b''.join(assemblies)
# The remaining assembler passes are simplified stubs in this version:
def SetLineNo(lineno): return b''          # no line-number bookkeeping
def make_lnotab(assembly): return 1, b''   # fixed (firstlineno, lnotab)
def plumb_depths(assembly): return 10      # fixed max stack depth estimate
def assemble(assembly): return assembly    # assembly is already raw bytes
def denotation(opcode):
    """Map an opcode to its denotation: the encoded instruction itself for
    argument-less opcodes, otherwise a function of the argument."""
    if opcode < dis.HAVE_ARGUMENT:
        return Instruction(opcode, None)
    def encode(arg):
        return Instruction(opcode, arg)
    return encode
# Namespace of opcode denotations: op.NAME is either the encoded no-arg
# instruction (bytes) or a function taking the instruction's argument.
op = type('op', (), dict([(name, denotation(opcode))
                          for name, opcode in dis.opmap.items()]))
def make_table():
    """Return an interning table: indexing an unseen key assigns it the next
    consecutive index (0, 1, 2, ...)."""
    interned = collections.defaultdict(lambda: len(interned))
    return interned
def collect(table):
    """Return the table's keys as a tuple ordered by their assigned indices."""
    ordered = sorted(table.items(), key=lambda pair: pair[1])
    return tuple(key for key, _ in ordered)
def run(filename, module_name):
    """Read, compile and execute *filename* as module *module_name*.

    Returns the resulting module object.
    """
    # Context manager closes the file even if read() raises (the original
    # would leak the handle in that case).
    with open(filename) as f:
        source = f.read()
    return module_from_ast(module_name, filename, ast.parse(source))
def module_from_ast(module_name, filename, t):
    """Compile AST *t* and execute it in a fresh module; return the module."""
    code = code_for_module(module_name, filename, t)
    module = types.ModuleType(module_name, ast.get_docstring(t))
    exec(code, module.__dict__)
    return module
def code_for_module(module_name, filename, t):
    """Generate the code object for a whole module AST."""
    return CodeGen(filename, StubScope()).compile_module(t, module_name)
# Placeholder scope: no free/cell variables (no closure support at this level).
class StubScope: freevars, cellvars, derefvars = (), (), ()
class CodeGen(ast.NodeVisitor):
    """Compile a restricted Python AST subset into a CPython code object."""
    def __init__(self, filename, scope):
        self.filename = filename
        self.scope = scope
        # Interning tables: first use of a key assigns the next index.
        self.constants = make_table()
        self.names = make_table()
        self.varnames = make_table()
    def compile_module(self, t, name):
        # A module body implicitly returns None.
        assembly = self(t.body) + self.load_const(None) + op.RETURN_VALUE
        return self.make_code(assembly, name, 0)
    def make_code(self, assembly, name, argcount):
        """Assemble the final code object from the accumulated tables."""
        kwonlyargcount = 0
        nlocals = len(self.varnames)
        stacksize = plumb_depths(assembly)
        # Code flags built from the scope -- presumably NEWLOCALS / NESTED /
        # NOFREE; TODO confirm against the CPython flag constants.
        flags = ( (0x02 if nlocals else 0)
                | (0x10 if self.scope.freevars else 0)
                | (0x40 if not self.scope.derefvars else 0))
        firstlineno, lnotab = make_lnotab(assembly)
        return types.CodeType(argcount, kwonlyargcount,
                nlocals, stacksize, flags, assemble(assembly),
                self.collect_constants(),
                collect(self.names), collect(self.varnames),
                self.filename, name, firstlineno, lnotab,
                self.scope.freevars, self.scope.cellvars)
    def __call__(self, t):
        # Dispatch: a list of nodes compiles to the concatenation of each.
        if isinstance(t, list): return concat(map(self, t))
        assembly = self.visit(t)
        return SetLineNo(t.lineno) + assembly if hasattr(t, 'lineno') else assembly
    def generic_visit(self, t):
        # Any AST node without an explicit visitor is unsupported.
        raise NotImplementedError()
    def load_const(self, constant):
        # Key by (value, type) so e.g. 1 and True remain distinct constants.
        return op.LOAD_CONST(self.constants[constant, type(constant)])
    def collect_constants(self):
        return tuple([constant for constant,_ in collect(self.constants)])
    def visit_NameConstant(self, t): return self.load_const(t.value) # for None/True/False
    def visit_Num(self, t): return self.load_const(t.n)
    def visit_Str(self, t): return self.load_const(t.s)
    visit_Bytes = visit_Str
    def visit_Name(self, t):
        if isinstance(t.ctx, ast.Load): return self.load(t.id)
        elif isinstance(t.ctx, ast.Store): return self.store(t.id)
        else: assert False
    def load(self, name): return op.LOAD_NAME(self.names[name])
    def store(self, name): return op.STORE_NAME(self.names[name])
    def visit_Call(self, t):
        # CALL_FUNCTION packs both counts into one 16-bit argument:
        # (#keyword args << 8) | #positional args -- hence the < 256 limits.
        assert len(t.args) < 256 and len(t.keywords) < 256
        return (self(t.func) + self(t.args) + self(t.keywords)
                + op.CALL_FUNCTION((len(t.keywords) << 8) | len(t.args)))
    def visit_keyword(self, t):
        # A keyword argument pushes its name (as a constant) then its value.
        return self.load_const(t.arg) + self(t.value)
    def visit_Expr(self, t):
        # Expression statement: evaluate, then discard the result.
        return self(t.value) + op.POP_TOP
    def visit_Assign(self, t):
        # a = b = value: DUP the value once per additional target.
        def compose(left, right): return op.DUP_TOP + left + right
        return self(t.value) + reduce(compose, map(self, t.targets))
if __name__ == '__main__':
    # Usage: python compiler.py SCRIPT [args...] -- compiles and runs SCRIPT
    # with sys.argv shifted so the script sees its own name first.
    sys.argv.pop(0)
    run(sys.argv[0], '__main__')
|
111130
|
from openchat.models.base_model import BaseModel
from openchat.models.dialogpt import DialoGPT
from openchat.models.imagemodel import LxmertBot

# __all__ must contain *names* (strings), not the objects themselves:
# `from openchat.models import *` raises TypeError on a non-string entry.
__all__ = ["BaseModel", "DialoGPT", "LxmertBot"]
|
111213
|
import re
import fbuild.builders.c.gcc
# ------------------------------------------------------------------------------
class Intel(fbuild.builders.c.gcc.Gcc):
    """GCC-compatible builder wrapper for the Intel C/C++ compiler (icc/icpc)."""
    def version(self, *args, **kwargs):
        """Return the version of the Intel compiler executable."""
        stdout, stderr = self.ctx.execute((self.exe, '--version'), quieter=1)
        # Banner looks like "icc (ICC) 19.0.1 ..."; None when it differs.
        m = re.match(r'ic(?:c|pc) \(ICC\) ([\d.]+)', stdout.decode())
        return m.group(1) if m else None
# ------------------------------------------------------------------------------
def make_cc(ctx, exe=None, default_exes=['icc'], **kwargs):
    """Create an Intel builder, locating the compiler executable on PATH.

    NOTE(review): `default_exes` is a mutable default argument; it is only
    read here, so the shared-default pitfall should not bite -- confirm
    find_program does not mutate its argument.
    """
    return Intel(ctx,
        fbuild.builders.find_program(ctx, [exe] if exe else default_exes),
        **kwargs)
# ------------------------------------------------------------------------------
def static(*args, make_cc=make_cc, **kwargs):
    """Static-library C builder backed by the Intel compiler."""
    return fbuild.builders.c.gcc.static(*args, make_cc=make_cc, **kwargs)
def shared(*args, make_cc=make_cc, **kwargs):
    """Shared-library C builder backed by the Intel compiler."""
    return fbuild.builders.c.gcc.shared(*args, make_cc=make_cc, **kwargs)
|
111257
|
from typing import List
import torch
from deepclustering.method import _Method
from deepclustering.model import Model
from torch import Tensor
class SubSpaceClusteringMethod(_Method):
    """Sparse subspace clustering via a self-representation matrix.

    Maintains an adjacency matrix A over the whole dataset and updates the
    minibatch sub-matrix with ISTA-style proximal gradient steps so that
    X ~= A @ X with a zero diagonal and L1 sparsity.
    """
    def __init__(
        self,
        model: Model,
        lamda: float = 0.1,
        lr: float = 0.0001,
        num_samples: int = 100,
        device: torch.device = torch.device("cuda"),
        *args,
        **kwargs,
    ):
        super().__init__(model, *args, **kwargs)
        assert isinstance(
            device, torch.device
        ), f"device should be torch.device, given {device}."
        self.lr = float(lr)          # proximal gradient step size
        self.lamda = float(lamda)    # L1 sparsity weight
        self.device = device
        # Full dataset adjacency; minibatches index sub-blocks of it.
        self.adj_matrix = torch.randn(
            (num_samples, num_samples), dtype=torch.float32
        ).to(self.device)
        self._diagnoal_remove(self.adj_matrix)
        # self.adj_matrix = torch.eye(num_samples).to(self.device) #+ 0.1*torch.randn((num_samples,num_samples)).to(device)*torch.eye(num_samples).to(self.device)
        print()
    def _diagnoal_remove(self, matrix):
        # Zero the diagonal in place: a sample must not represent itself.
        # (Name keeps the original typo -- it is part of the interface used
        # by the subclass below.)
        assert (
            matrix.shape.__len__() == 2 and matrix.shape[0] == matrix.shape[1]
        ), f"check the matrix dimension, given {matrix.shape}"
        for i in range(len(matrix)):
            matrix[i, i] = 0
        assert self.check_diagnal_zero(matrix), f"matrix diag remove failed."
    @staticmethod
    def check_diagnal_zero(matrix: Tensor) -> bool:
        # True when the diagonal is numerically all zeros.
        return torch.allclose(matrix.diag(), torch.zeros_like(matrix.diag()))
    def set_input(self, imgs: Tensor, index: List[int], *args, **kwargs):
        """Register a minibatch: flatten the images as the representation and
        slice the corresponding block out of the full adjacency matrix."""
        super().set_input(*args, **kwargs)
        assert imgs.shape[0] == len(index), (
            f"imgs and index lengths should be the same, given len(imgs)="
            f"{len(imgs)}, len(index)={len(index)}."
        )
        self.imgs = imgs
        # self.pred, self._representation = self.model(self.imgs)
        self._representation = self.imgs.view(self.imgs.shape[0], -1)
        self.index = index
        assert self._representation.shape[0] == self.index.shape[0]
        self.current_adj_matrix: Tensor = self.adj_matrix[index][:, index]
        assert self.current_adj_matrix.shape == torch.Size([len(index), len(index)])
        # choose the minibatch of adj_matrix
    def update(self, *args, **kwargs):
        super().update(*args, **kwargs)
        self._update_dictionary()
        # self._gradient_descent()
    def _gradient_descent(self):
        # Alternative (currently unused) update: backprop the reconstruction
        # error through the model instead of updating A directly.
        current_adj_matrix = self.current_adj_matrix.clone()
        self._diagnoal_remove(current_adj_matrix)
        _reconstr_loss = (
            (self._representation - torch.mm(current_adj_matrix, self._representation))
            .norm(p=2, dim=1)
            .mean()
        )
        self.model.zero_grad()
        _reconstr_loss.backward()
        self.model.step()
        # print(_reconstr_loss)
    def _update_dictionary(self):
        # ISTA iterations: gradient step towards A = I in the X X^T metric,
        # then soft-threshold by lr*lamda and re-zero the diagonal.
        assert self.check_diagnal_zero(self.current_adj_matrix)
        X2 = self._representation.mm(self._representation.t()).detach()
        I = torch.eye(len(self.current_adj_matrix)).to(self.device)
        for _ in range(1000):
            current_adj_matrix_hat = self.current_adj_matrix - self.lr * X2.mm(
                self.current_adj_matrix - I
            )
            current_adj_sign = current_adj_matrix_hat.sign()
            # Soft-thresholding: shrink magnitudes, keep signs.
            new_current_adj = (
                torch.max(
                    current_adj_matrix_hat.__abs__() - self.lr * self.lamda,
                    torch.zeros_like(current_adj_matrix_hat),
                )
                * current_adj_sign
            )
            self._diagnoal_remove(new_current_adj)
            self.current_adj_matrix = new_current_adj
        # update the whole matrix
        for i, c in enumerate(self.index):
            self.adj_matrix[c, self.index] = new_current_adj[:, i]  # new_current_adj
        # self.adj_matrix.scatter((self.index, self.index), -1000)
class SubSpaceClusteringMethod2(SubSpaceClusteringMethod):
    """Variant that updates the adjacency block by autograd gradient descent
    on reconstruction + L1 sparsity, instead of closed-form ISTA steps."""
    def __init__(
        self,
        model: Model,
        lamda: float = 0.1,
        lr: float = 0.005,
        num_samples: int = 100,
        device: torch.device = torch.device("cuda"),
        *args,
        **kwargs,
    ):
        super().__init__(model, lamda, lr, num_samples, device, *args, **kwargs)
    def _update_dictionary(self):
        # reconstruction:
        current_adj_matrix = self.current_adj_matrix.clone()
        for _ in range(1000):
            self._diagnoal_remove(current_adj_matrix)
            current_adj_matrix.requires_grad = True
            _reconstr_loss = (
                (
                    self._representation
                    - torch.mm(current_adj_matrix, self._representation)
                )
                .norm(p=2, dim=1)
                .mean()
            )
            _sparsity_loss = current_adj_matrix.norm(p=1, dim=0).mean()
            _loss = _reconstr_loss + _sparsity_loss
            _loss.backward()
            # print(f"sparsity:{_sparsity_loss}, reconstruction:{_reconstr_loss}")
            # NOTE(review): the step size here is self.lamda, not self.lr --
            # presumably intentional, but confirm.
            new_current_adj_matrix = (
                current_adj_matrix - self.lamda * current_adj_matrix.grad
            )
            new_current_adj_matrix = new_current_adj_matrix.detach()
            current_adj_matrix.grad.zero_()
            # new_current_adj_matrix[new_current_adj_matrix.__abs__()<=0.0001]=0 #* torch.eye(len(self.index)).to(self.device)
            self._diagnoal_remove(new_current_adj_matrix)
            current_adj_matrix = new_current_adj_matrix
        # Write the updated block back into the full adjacency matrix.
        for i, c in enumerate(self.index):
            self.adj_matrix[c, self.index] = new_current_adj_matrix[
                i
            ]  # new_current_adj
        print(
            f"reconstruction:{_reconstr_loss}, sparsity:{_sparsity_loss}, current_adj_max:{new_current_adj_matrix.diag().max()}, min:{new_current_adj_matrix.diag().min()}"
        )
    def update(self):
        self._update_dictionary()
|
111258
|
from .workflows import ETL
from .load import MarcottiLoad
from .transform import MarcottiTransform, MarcottiEventTransform
|
111267
|
from edi_835_parser.elements.identifier import Identifier
from edi_835_parser.elements.dollars import Dollars
from edi_835_parser.elements.adjustment_group_code import AdjustmentGroupCode
from edi_835_parser.elements.adjustment_reason_code import AdjustmentReasonCode
from edi_835_parser.segments.utilities import split_segment
class ServiceAdjustment:
    """CAS (claims-adjustment) segment of an EDI 835 remittance advice."""
    identification = 'CAS'
    # Class-level element descriptors; __init__ assigns raw strings to the
    # same names on the instance -- presumably the descriptors coerce values
    # on assignment; TODO confirm intended interplay.
    identifier = Identifier()
    group_code = AdjustmentGroupCode()
    reason_code = AdjustmentReasonCode()
    amount = Dollars()
    def __init__(self, segment: str):
        self.segment = segment
        segment = split_segment(segment)
        # Positional elements: CAS*group*reason*amount.
        self.identifier = segment[0]
        self.group_code = segment[1]
        self.reason_code = segment[2]
        self.amount = segment[3]
    def __repr__(self):
        return '\n'.join(str(item) for item in self.__dict__.items())
if __name__ == '__main__':
    # No standalone behaviour; module is import-only.
    pass
|
111323
|
import os
import random
# One-line description shown by the bot's help command.
HELP_DESC = ("!imagetest\t\t-\tSend back test image\n")
async def register_to(plugin):
    # Reply to !imagetest with a randomly chosen image file.
    async def imagetest_callback(room, event):
        # NOTE(review): os.listdir() scans the current working directory --
        # confirm the bot runs with the intended cwd; if no image matches,
        # random.choice raises IndexError.
        exts = ["gif", "jpg", "png", "jpeg"]
        images = filter(lambda x:any(x.lower().endswith(ext) for ext in exts), os.listdir())
        await plugin.send_image(random.choice(list(images)))
    # Register the handler for the "imagetest" command.
    imagetest_handler = plugin.CommandHandler("imagetest", imagetest_callback)
    plugin.add_handler(imagetest_handler)
|
111406
|
from river import optim
from .log_reg import LogisticRegression
class Perceptron(LogisticRegression):
    """Perceptron classifier.
    In this implementation, the Perceptron is viewed as a special case of the logistic regression.
    The loss function that is used is the Hinge loss with a threshold set to 0, whilst the learning
    rate of the stochastic gradient descent procedure is set to 1 for both the weights and the
    intercept.
    Parameters
    ----------
    l2
        Amount of L2 regularization used to push weights towards 0.
    clip_gradient
        Clips the absolute value of each gradient value.
    initializer
        Weights initialization scheme.
    Attributes
    ----------
    weights
        The current weights.
    Examples
    --------
    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import linear_model as lm
    >>> from river import metrics
    >>> from river import preprocessing as pp
    >>> dataset = datasets.Phishing()
    >>> model = pp.StandardScaler() | lm.Perceptron()
    >>> metric = metrics.Accuracy()
    >>> evaluate.progressive_val_score(dataset, model, metric)
    Accuracy: 85.84%
    """
    def __init__(
        self,
        l2=0.0,
        clip_gradient=1e12,
        initializer: optim.initializers.Initializer = None,
    ):
        # A Perceptron is logistic regression with Hinge(0) loss and a unit
        # learning rate for both the weights (SGD(1)) and the intercept.
        super().__init__(
            optimizer=optim.SGD(1),
            intercept_lr=1,
            loss=optim.losses.Hinge(threshold=0.0),
            l2=l2,
            clip_gradient=clip_gradient,
            initializer=initializer,
        )
|
111415
|
from django.conf import settings
from django.conf.urls import include, url
from tastypie.api import NamespacedApi
from namespaced.api.resources import NamespacedNoteResource, NamespacedUserResource
# Register the namespaced resources under API version v1.
api = NamespacedApi(api_name='v1', urlconf_namespace='special')
api.register(NamespacedNoteResource(), canonical=True)
api.register(NamespacedUserResource(), canonical=True)
# Django >= 1.9 expects include((urls, app_namespace)); older versions take
# a `namespace` keyword argument instead.
if settings.DJANGO_VERSION >= settings.DJANGO_19:
    included = include((api.urls, 'special'))
else:
    included = include(api.urls, namespace='special')
urlpatterns = [
    url(r'^api/', included),
]
|
111436
|
import pickle
from pathlib import Path
class Saveable:
    """
    Base class of saveable classes.

    Subclasses implement dump_state()/load_state(); save()/load() then
    persist that state to disk as a pickle.
    """

    def dump_state(self):
        """Return a picklable state dict that should be saved/loaded."""
        raise NotImplementedError()

    def load_state(self, state_dict):
        """Apply data from state_dict."""
        raise NotImplementedError()

    def save(self, filepath):
        """Write .dump_state() to *filepath* as a pickle (protocol 4)."""
        Path(filepath).write_bytes(pickle.dumps(self.dump_state(), 4))

    def load(self, filepath):
        """Read *filepath* and call load_state; return True on success,
        False when the file does not exist."""
        target = Path(filepath)
        if not target.exists():
            return False
        self.load_state(pickle.loads(target.read_bytes()))
        return True
|
111459
|
import numpy as np
from simdkalman.primitives import predict, update
# Demo: one predict/update/predict cycle of a 2-state Kalman filter using
# simdkalman primitives (state is presumably [position, velocity] given the
# constant-velocity transition matrix -- the library treats it generically).
# define model
state_transition = np.array([[1,1],[0,1]])   # constant-velocity dynamics
process_noise = np.eye(2)*0.01
observation_model = np.array([[1,0]])        # observe the first state only
observation_noise = np.array([[1.0]])
# initial state (mean and covariance)
m = np.array([0, 1])
P = np.eye(2)
# predict next state
m, P = predict(m, P, state_transition, process_noise)
# first observation
y = np.array([4])
m, P = update(m, P, observation_model, observation_noise, y)
# predict second state
m, P = predict(m, P, state_transition, process_noise)
print('mean')
print(m)
print('cov')
print(P)
|
111482
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """This class does not support CRUD Operations please use parent.

    :param Stapling_Timeout: {"description": "OCSP Stapling Timeout", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"}
    :param Certificate_Revoked: {"description": "Revoked Certificate Response", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"}
    :param Request: {"description": "Request", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"}
    :param Stapling_Certificate_Revoked: {"description": "OCSP Stapling Revoked Certificate Response", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "8"}
    :param Certificate_Unknown: {"description": "Unknown Certificate Response", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"}
    :param Stapling_Certificate_Unknown: {"description": "OCSP Stapling Unknown Certificate Response", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"}
    :param Stapling_Certificate_Good: {"description": "OCSP Stapling Good Certificate Response", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"}
    :param Timeout: {"description": "Timeout", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"}
    :param Certificate_Good: {"description": "Good Certificate Response", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
    :param Stapling_Request: {"description": "OCSP Stapling Request Send", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Counters default to empty strings; real values arrive via kwargs.
        self.Stapling_Timeout = ""
        self.Certificate_Revoked = ""
        self.Request = ""
        self.Stapling_Certificate_Revoked = ""
        self.Certificate_Unknown = ""
        self.Stapling_Certificate_Unknown = ""
        self.Stapling_Certificate_Good = ""
        self.Timeout = ""
        self.Certificate_Good = ""
        self.Stapling_Request = ""
        # Any keyword argument overrides the matching attribute.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
class Instance(A10BaseClass):
    """Class Description::
    Statistics for the object instance.
    Class instance supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param name: {"description": "Specify OCSP authentication server name", "format": "string-rlx", "minLength": 1, "oid": "1001", "optional": false, "maxLength": 63, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/aam/authentication/server/ocsp/instance/{name}/stats`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = [ "name"]
        self.b_key = "instance"
        # REST endpoint template; {name} is filled from the instance name.
        self.a10_url="/axapi/v3/aam/authentication/server/ocsp/instance/{name}/stats"
        self.DeviceProxy = ""
        self.stats = {}
        self.name = ""
        # Any keyword argument overrides the matching attribute.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
|
111487
|
from matplotlib import animation, pyplot as plt
from django.http import HttpResponse
from app.models import Game
def visualization_view(request, object_id):
    """Render a finished Game replay as an HTML5 video response.

    Looks the Game up by primary key, builds a 2x2 matplotlib figure
    (thresholds, per-bandit rewards, cumulative agent rewards, one blank
    panel) and returns the resulting FuncAnimation embedded as a <video>
    tag, headed by the rating summary from game_info().
    """
    game = Game.objects.filter(id=object_id).first()
    if not game:
        return HttpResponse(f"Can't find game with id {object_id}.")
    # All replay arrays must be present before a video can be rendered.
    if (
        game.initial_thresholds is None
        or game.left_actions is None
        or game.right_actions is None
        or game.left_rewards is None
        or game.right_rewards is None
    ):
        return HttpResponse("Can't create a video.")
    fig, ax = plt.subplots(2, 2, figsize=(15, 8), gridspec_kw={"width_ratios": [3, 1]})
    th_animation = ThresholdsAnimation(ax[0, 0], game)
    bandit_rewards_animation = BanditRewardsAnimation(ax[1, 0], game)
    agent_rewards_animation = AgentRewardsAnimation(ax[0, 1], game)
    # Constructed only for its side effect of hiding the unused fourth axes.
    WithoutAnimation(ax[1, 1], game)

    def animate(step):
        # Concatenate the updated artists from every panel for blitting.
        return (
            th_animation.animate(step)
            + bandit_rewards_animation.animate(step)
            + agent_rewards_animation.animate(step)
        )

    ani = animation.FuncAnimation(
        fig, animate, interval=50, blit=True, save_count=len(game.steps)
    )
    return HttpResponse(f"<h1>{game_info(game)}</h1>{ani.to_html5_video()}")
class AnimationABC:
    """Base class for one animated subplot panel of a game replay.

    Subclasses implement init_func() (draw the static scene, return the
    artists that change) and animate() (mutate those artists per frame).
    """

    def __init__(self, ax, game, left_color="red", right_color="blue"):
        self.game = game
        self.left_color = left_color
        self.right_color = right_color
        self.left_agent_name = game.left_name
        self.right_agent_name = game.right_name
        self.num_bandits = len(game.initial_thresholds)
        # Draw once; keep the returned artists for animate() to update.
        self.plot_objects = self.init_func(ax)

    def init_func(self, ax):
        """Draw the static scene; return the artists updated per frame."""
        raise NotImplementedError()

    def animate(self, step):
        """Update the artists for *step*; return those to re-blit."""
        raise NotImplementedError()
class WithoutAnimation(AnimationABC):
    """Placeholder panel: hides its axes and contributes no artists."""

    def init_func(self, ax):
        ax.set_axis_off()

    def animate(self, step):
        # Nothing ever changes in this panel.
        return ()
class ThresholdsAnimation(AnimationABC):
    """Scatter panel: every bandit's current threshold (shown in descending
    initial-threshold order) plus markers for each agent's current and
    previous pick, and running score texts."""

    def __init__(self, ax, game, *args, **kwargs):
        # Display bandits sorted by initial threshold, highest first;
        # bandit_to_order maps a bandit index to its x position on the axis.
        bandit_to_th = dict(enumerate(game.initial_thresholds.tolist()))
        self.sorted_bandits = sorted(bandit_to_th, key=lambda x: -bandit_to_th[x])
        self.bandit_to_order = {b: i for i, b in enumerate(self.sorted_bandits)}
        super().__init__(ax, game, *args, **kwargs)

    def order_bandits(self, values):
        # Reorder a per-bandit list into display order.
        return [values[i] for i in self.sorted_bandits]

    def init_func(self, ax):
        ax.get_xaxis().set_visible(False)
        ax.set_ylabel("thresholds")
        th, *_ = ax.plot(
            self.order_bandits(self.game.initial_thresholds), "o", color="gray"
        )
        # Current picks (large markers) and previous picks (smaller, fainter).
        left_bandit, *_ = ax.plot([], "o", ms=10, color=self.left_color, alpha=0.5)
        right_bandit, *_ = ax.plot([], "o", ms=10, color=self.right_color, alpha=0.5)
        previous_left_bandit, *_ = ax.plot(
            [], "o", ms=8, color=self.left_color, alpha=0.25
        )
        previous_right_bandit, *_ = ax.plot(
            [], "o", ms=8, color=self.right_color, alpha=0.25
        )
        # Texts are placed in data coordinates; assumes ~100 bandits and
        # threshold values near 100 — TODO confirm against game dimensions.
        step_info = ax.text(70, 80, "Step 0")
        left_agent_info = ax.text(
            70, 70, f"{self.left_agent_name} - {0}", color=self.left_color
        )
        right_agent_info = ax.text(
            70, 60, f"{self.right_agent_name} - {0}", color=self.right_color
        )
        return (
            th,
            left_bandit,
            right_bandit,
            previous_left_bandit,
            previous_right_bandit,
            step_info,
            left_agent_info,
            right_agent_info,
        )

    def animate(self, step):
        """Move every artist to the state recorded for *step*."""
        (
            thresholds,
            left_bandit,
            right_bandit,
            previous_left_bandit,
            previous_right_bandit,
            step_info,
            left_agent_info,
            right_agent_info,
        ) = self.plot_objects
        info = self.game.steps[step]
        th = info["thresholds"]
        thresholds.set_ydata(self.order_bandits(th))
        left_bandit.set_data(
            [self.bandit_to_order[info["left_action"]], th[info["left_action"]]]
        )
        right_bandit.set_data(
            [self.bandit_to_order[info["right_action"]], th[info["right_action"]]]
        )
        step_info.set_text(f"Step {step}")
        left_agent_info.set_text(
            f"{self.left_agent_name} - {info['total_left_reward']}"
        )
        right_agent_info.set_text(
            f"{self.right_agent_name} - {info['total_right_reward']}"
        )
        if step > 0:
            # Ghost markers showing where each agent picked on the previous step.
            info = self.game.steps[step - 1]
            th = info["thresholds"]
            previous_left_bandit.set_data(
                [self.bandit_to_order[info["left_action"]], th[info["left_action"]]]
            )
            previous_right_bandit.set_data(
                [self.bandit_to_order[info["right_action"]], th[info["right_action"]]]
            )
        return (
            thresholds,
            left_bandit,
            right_bandit,
            previous_left_bandit,
            previous_right_bandit,
            step_info,
            left_agent_info,
            right_agent_info,
        )
class BanditRewardsAnimation(AnimationABC):
    """Bar panel: cumulative per-bandit rewards (solid bars) overlaid on
    cumulative pull counts (wide translucent bars), one pair per agent."""

    def __init__(self, ax, game, *args, **kwargs):
        # Same display ordering as the thresholds panel: highest initial
        # threshold first.
        bandit_to_th = dict(enumerate(game.initial_thresholds.tolist()))
        self.sorted_bandits = sorted(bandit_to_th, key=lambda x: -bandit_to_th[x])
        self.bandit_to_order = {b: i for i, b in enumerate(self.sorted_bandits)}
        # Precompute cumulative counts for every step so animate() only has
        # to set bar heights.
        self.bandit_distribution = self.get_bandit_distribution(game)
        super().__init__(ax, game, *args, **kwargs)

    def order_bandits(self, values):
        # Reorder a per-bandit list into display order.
        return [values[i] for i in self.sorted_bandits]

    def get_bandit_distribution(self, game):
        """Return, for every step, cumulative action and reward counts per bandit."""
        num_bandits = len(game.initial_thresholds)
        left_action_count = [0] * num_bandits
        right_action_count = [0] * num_bandits
        left_reward_count = [0] * num_bandits
        right_reward_count = [0] * num_bandits
        out = []
        for d in game.steps:
            left_action_count[d["left_action"]] += 1
            right_action_count[d["right_action"]] += 1
            left_reward_count[d["left_action"]] += d["left_reward"]
            right_reward_count[d["right_action"]] += d["right_reward"]
            # order_bandits() builds fresh lists, so each snapshot is
            # independent of the running accumulators.
            out.append(
                {
                    "left_action_count": self.order_bandits(left_action_count),
                    "right_action_count": self.order_bandits(right_action_count),
                    "left_reward_count": self.order_bandits(left_reward_count),
                    "right_reward_count": self.order_bandits(right_reward_count),
                }
            )
        return out

    def init_func(self, ax):
        ax.get_xaxis().set_visible(False)
        ax.set_ylabel("rewards")
        # Scale the y-axis to the largest pull count reached by either agent
        # at the final step (the two lists are concatenated, not summed).
        ax.set_ylim(
            0,
            max(
                self.bandit_distribution[-1]["left_action_count"]
                + self.bandit_distribution[-1]["right_action_count"]
            ),
        )
        left_rewards = ax.bar(
            range(self.num_bandits),
            [0] * self.num_bandits,
            color=self.left_color,
            alpha=0.5,
        )
        right_rewards = ax.bar(
            range(self.num_bandits),
            [0] * self.num_bandits,
            color=self.right_color,
            alpha=0.5,
        )
        left_turns = ax.bar(
            range(self.num_bandits),
            [0] * self.num_bandits,
            color=self.left_color,
            linewidth=0,
            alpha=0.2,
            width=1,
        )
        right_turns = ax.bar(
            range(self.num_bandits),
            [0] * self.num_bandits,
            color=self.right_color,
            linewidth=0,
            alpha=0.2,
            width=1,
        )
        return left_rewards, right_rewards, left_turns, right_turns

    def animate(self, step):
        """Set every bar's height to the cumulative value at *step*."""
        left_rewards, right_rewards, left_turns, right_turns = self.plot_objects
        bd = self.bandit_distribution[step]
        for patches, data in [
            (left_rewards.patches, bd["left_reward_count"]),
            (right_rewards.patches, bd["right_reward_count"]),
            (left_turns.patches, bd["left_action_count"]),
            (right_turns.patches, bd["right_action_count"]),
        ]:
            for p, d in zip(patches, data):
                p.set_height(d)
        # Bar containers are not returned for blitting here.
        return ()
class AgentRewardsAnimation(AnimationABC):
    """Line panel: cumulative reward curves for both agents, with a vertical
    cursor marking the current step."""

    def init_func(self, ax):
        ax.set_ylabel("reward")
        ax.set_xlabel("time")
        # Static reward curves are drawn once and never updated.
        ax.plot(self.game.left_rewards, color=self.left_color)
        ax.plot(self.game.right_rewards, color=self.right_color)
        cursor, *_ = ax.plot(
            [0, 0], [0, max(self.game.total_rewards())], color="black"
        )
        return (cursor,)

    def animate(self, step):
        # Slide the vertical cursor to the current step.
        cursor, *_ = self.plot_objects
        cursor.set_xdata([step, step])
        return (cursor,)
def game_info(game):
    """Return an HTML headline summarising both players' rating changes.

    Empty string when any rating is missing. The gaining side is rendered
    bold/green, the losing side red.
    """
    l_old, r_old = game.left_current_rating, game.right_current_rating
    l_new, r_new = game.left_new_rating, game.right_new_rating
    if l_old is None or r_old is None or l_new is None or r_new is None:
        return ""

    def to_str(name, old, diff):
        if diff > 0:
            return (
                f"<strong>'{name}'</strong> {round(old, 1)} "
                f"(<span style='color: green'>+{round(diff, 1)}</span>)"
            )
        else:
            return (
                f"'{name}' {round(old, 1)} "
                f"(<span style='color: red'>-{round(abs(diff), 1)}</span>)"
            )

    parts = [
        to_str(name, old, new - old)
        for name, old, new in (
            (game.left_name, l_old, l_new),
            (game.right_name, r_old, r_new),
        )
    ]
    return f"{parts[0]} - {parts[1]}"
|
111581
|
from datetime import date, datetime
import pytest
from opennem.spiders.bom.utils import get_archive_page_for_station_code
@pytest.mark.parametrize(["web_code", "month", "expected_result"], [
    ("4019", datetime(2021, 10, 1).date(), "http://www.bom.gov.au/climate/dwo/202110/html/IDCJDW4019.202110.shtml"),
    ("0000", datetime(2021, 1, 1).date(), "http://www.bom.gov.au/climate/dwo/202101/html/IDCJDW0000.202101.shtml")
])
def test_get_archive_page_for_station_code(
    web_code: str,
    month: date,
    expected_result: str
) -> None:
    """Archive URLs embed the YYYYMM month (twice) and the station web code."""
    bom_archive_page = get_archive_page_for_station_code(web_code, month)
    assert bom_archive_page == expected_result, "Returned url matches expected archive page"
|
111585
|
import tensorflow as tf
IDENTIFIER_OUTPUT_LAYER = "Output"
def get_out(output_layer: str, out_feature_dim, scale_node_size, name: str = 'decoder'):
    """Build the decoder output layer matching *output_layer*.

    Parameters
    ----------
    output_layer : str
        One of ``"gaussian"``, ``"nb"``, ``"nb_shared_disp"``,
        ``"nb_const_disp"``.
    out_feature_dim
        Number of output features (becomes the layer's ``original_dim``).
    scale_node_size
        Whether the layer applies per-node size factors.
    name : str
        Suffix appended to the generated layer name.

    Returns
    -------
    The constructed output layer.

    Raises
    ------
    ValueError
        If *output_layer* is not supported.
    """
    dispatch = {
        "gaussian": (GaussianOutput, "Gaussian"),
        "nb": (NegBinOutput, "NegBin"),
        "nb_shared_disp": (NegBinSharedDispOutput, "NegBinSharedDisp"),
        "nb_const_disp": (NegBinConstDispOutput, "NegBinConstDisp"),
    }
    entry = dispatch.get(output_layer)
    if entry is None:
        raise ValueError("tried to access a non-supported output layer %s" % output_layer)
    layer_cls, prefix = entry
    return layer_cls(
        original_dim=out_feature_dim,
        use_node_scale=scale_node_size,
        name=f"{prefix}{IDENTIFIER_OUTPUT_LAYER}_{name}",
    )
class LinearOutput(tf.keras.layers.Layer):
    """Linear output layer.

    Emits the (optionally node-scaled) input mean as the location and a
    learned per-feature scale, shared across observations, as the scale.
    """

    def __init__(self, use_node_scale: bool = False, name: str = "linear_output", **kwargs):
        """Initialize LinearOutput.

        Parameters
        ----------
        use_node_scale : bool
            Multiply the mean by the per-node size factor.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.use_node_scale = use_node_scale
        self.var_bias = None  # created in build()

    def get_config(self):
        """Get config LinearOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        # BUG FIX: unlike the other output layers, this class has no
        # ``original_dim`` attribute, so the previous reference to
        # ``self.original_dim`` raised AttributeError on serialization.
        config.update({"use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build LinearOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first entry is the mean tensor's shape.
        """
        genes_dim = input_shapes[0][-1]
        # One trainable log-scale per feature, broadcast over observations.
        self.var_bias = self.add_weight("var_bias", shape=[1, genes_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Call LinearOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(mean, sf)`` of mean activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        mean, sf = inputs
        var = self.var_bias
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class LinearConstDispOutput(tf.keras.layers.Layer):
    """Linear output layer with constant dispersion.

    Emits the (optionally node-scaled) input mean as the location and a
    constant scale of one (``exp(0)``) for every feature.
    """

    def __init__(self, use_node_scale: bool = False, name: str = "linear_const_disp_output", **kwargs):
        """Initialize LinearConstDispOutput.

        Parameters
        ----------
        use_node_scale : bool
            Multiply the mean by the per-node size factor.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.use_node_scale = use_node_scale
        # Kept for interface compatibility with the other output layers;
        # this layer never creates a trainable scale (dispersion is constant).
        self.var_bias = None

    def get_config(self):
        """Get config LinearConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        # BUG FIX: this class has no ``original_dim`` attribute, so the
        # previous reference to ``self.original_dim`` raised AttributeError.
        config.update({"use_node_scale": self.use_node_scale})
        return config

    def call(self, inputs, **kwargs):
        """Call LinearConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(mean, sf)`` of mean activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        mean, sf = inputs
        var = tf.zeros_like(mean)  # constant zero log-scale -> scale of 1
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
            var = tf.zeros_like(mean) + var
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class GaussianOutput(tf.keras.layers.Layer):
    """Log normal likelihood output layer."""

    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "gaussian_output", **kwargs):
        """Initialize GaussianOutput.

        Parameters
        ----------
        original_dim
            original dimension (number of output features).
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None     # Dense layer producing per-feature means
        self.var_bias = None  # trainable per-feature log-scale

    def get_config(self):
        """Get config GaussianOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build GaussianOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first input is indexed as
            (batch, nodes, features) — rank 3 is required by build/call.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var_bias = self.add_weight("var_bias", shape=[1, self.original_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Call GaussianOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(activation, sf)`` of activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten nodes into the batch dimension for the Dense layer, then
        # restore the (batch, nodes, features) shape afterwards.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = self.var_bias
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        var = tf.zeros_like(mean) + var  # broadcast
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        # exp_mean = mean_clip + sf
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class GaussianConstDispOutput(tf.keras.layers.Layer):
    """Log normal likelihood output layer."""

    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "gaussian_output", **kwargs):
        """Initialize GaussianConstDispOutput.

        Parameters
        ----------
        original_dim
            original dimension (number of output features).
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        # NOTE(review): default name "gaussian_output" duplicates
        # GaussianOutput's default — confirm this is intended.
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # Dense layer producing per-feature means

    def get_config(self):
        """Get config GaussianConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build GaussianConstDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first input is indexed as
            (batch, nodes, features) — rank 3 is required by build/call.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Call GaussianConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(activation, sf)`` of activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten nodes into the batch dimension for the Dense layer.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        var = tf.zeros_like(mean)  # constant zero log-scale -> scale of 1
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        # exp_mean = mean_clip + sf
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class NegBinOutput(tf.keras.layers.Layer):
    """Negative binomial output layer."""

    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_output", **kwargs):
        """Initialize NegBinOutput.

        Parameters
        ----------
        original_dim
            original dimension (number of output features).
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # Dense layer producing per-feature log-means
        self.var = None    # Dense layer producing per-feature log-dispersions

    def get_config(self):
        """Get config NegBinOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build NegBinOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first input is indexed as
            (batch, nodes, features) — rank 3 is required here.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Call NegBinOutput.

        Parameters
        ----------
        inputs
            Tuple ``(activation, sf)`` of activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        exp_mean
        exp_var
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        activation, sf = inputs
        # Flatten nodes into the batch dimension for the Dense layers.
        # NOTE(review): unlike the sibling layers, the result is never
        # reshaped back to (batch, nodes, features) — confirm callers
        # expect the flattened shape.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = self.var(activation)
        if self.use_node_scale:
            # Size factors are applied additively in log space.
            mean = mean + tf.math.log(tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip"))
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        exp_mean = tf.exp(mean_clip)
        exp_var = tf.exp(var_clip)
        return [exp_mean, exp_var]
class NegBinSharedDispOutput(tf.keras.layers.Layer):
    """Negative binomial output layer with dispersion shared over features."""

    def __init__(
        self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_shared_disp_output", **kwargs
    ):
        """Initialize NegBinSharedDispOutput.

        Parameters
        ----------
        original_dim
            original dimension (number of output features).
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None     # Dense layer producing per-feature log-means
        self.var = None       # NOTE(review): never assigned/used; dispersion comes from var_bias
        self.var_bias = None  # trainable per-feature log-dispersion shared across observations

    def get_config(self):
        """Get config NegBinSharedDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build NegBinSharedDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first input is indexed as
            (batch, nodes, features) — rank 3 is required here.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var_bias = self.add_weight("var_bias", shape=[1, self.original_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Call NegBinSharedDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(activation, sf)`` of activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        exp_mean
        exp_var
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten nodes into the batch dimension for the Dense layer, then
        # restore the (batch, nodes, features) shape for the mean.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = self.var_bias
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        if self.use_node_scale:
            # Size factors are applied additively in log space.
            mean = mean + tf.math.log(tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip"))
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        exp_mean = tf.exp(mean_clip)
        exp_var = tf.exp(var_clip)
        return [exp_mean, exp_var]
class NegBinConstDispOutput(tf.keras.layers.Layer):
    """Negative binomial output layer with constant dispersion."""

    def __init__(
        self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_const_disp_output", **kwargs
    ):
        """Initialize NegBinConstDispOutput.

        Parameters
        ----------
        original_dim
            original dimension (number of output features).
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # Dense layer producing per-feature log-means
        self.var = None    # NOTE(review): never assigned/used; dispersion is fixed at exp(0)

    def get_config(self):
        """Get config NegBinConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build NegBinConstDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes; the first input is indexed as
            (batch, nodes, features) — rank 3 is required here.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Call NegBinConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple ``(activation, sf)`` of activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        exp_mean
        exp_var
        """
        bound = 60.0  # clip bound in log space so exp() stays finite
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten nodes into the batch dimension for the Dense layer, then
        # restore the (batch, nodes, features) shape for the mean.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = tf.zeros_like(mean)  # constant zero log-dispersion -> exp_var of 1
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        if self.use_node_scale:
            # Size factors are applied additively in log space.
            mean = mean + tf.math.log(tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip"))
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        exp_mean = tf.exp(mean_clip)
        exp_var = tf.exp(var_clip)
        return [exp_mean, exp_var]
|
111587
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Small uniform-init bounds for the final output layers of Actor/Critic below.
WEIGHTS_FINAL_INIT = 3e-3
BIAS_FINAL_INIT = 3e-4
def fan_in_uniform_init(tensor, fan_in=None):
    """Fill *tensor* in-place, uniformly in [-1/sqrt(fan_in), 1/sqrt(fan_in)].

    Used to initialize the hidden layers of the actor and critic. When
    *fan_in* is omitted, the tensor's last dimension is used.
    """
    if fan_in is None:
        fan_in = tensor.size(-1)
    bound = 1. / np.sqrt(fan_in)
    nn.init.uniform_(tensor, -bound, bound)
class Actor(nn.Module):
    """DDPG-style actor: maps a state vector to an action in [-1, 1]."""

    def __init__(self, hidden_size, num_inputs, action_space):
        super(Actor, self).__init__()
        self.action_space = action_space
        num_outputs = action_space.shape[0]

        # Two LayerNorm-regularized hidden layers followed by the mu head.
        self.linear1 = nn.Linear(num_inputs, hidden_size[0])
        self.ln1 = nn.LayerNorm(hidden_size[0])
        self.linear2 = nn.Linear(hidden_size[0], hidden_size[1])
        self.ln2 = nn.LayerNorm(hidden_size[1])
        self.mu = nn.Linear(hidden_size[1], num_outputs)

        # Fan-in init for the hidden layers, small uniform init for the
        # output head (same parameter order as before, so the RNG stream
        # is consumed identically).
        for layer in (self.linear1, self.linear2):
            fan_in_uniform_init(layer.weight)
            fan_in_uniform_init(layer.bias)
        nn.init.uniform_(self.mu.weight, -WEIGHTS_FINAL_INIT, WEIGHTS_FINAL_INIT)
        nn.init.uniform_(self.mu.bias, -BIAS_FINAL_INIT, BIAS_FINAL_INIT)

    def forward(self, inputs):
        """Return the tanh-squashed action for *inputs*."""
        h = F.relu(self.ln1(self.linear1(inputs)))
        h = F.relu(self.ln2(self.linear2(h)))
        return torch.tanh(self.mu(h))
class Critic(nn.Module):
    """DDPG-style critic: Q(state, action), actions injected after layer 1."""

    def __init__(self, hidden_size, num_inputs, action_space):
        super(Critic, self).__init__()
        self.action_space = action_space
        num_outputs = action_space.shape[0]

        self.linear1 = nn.Linear(num_inputs, hidden_size[0])
        self.ln1 = nn.LayerNorm(hidden_size[0])
        # The action vector is concatenated onto the first hidden features.
        self.linear2 = nn.Linear(hidden_size[0] + num_outputs, hidden_size[1])
        self.ln2 = nn.LayerNorm(hidden_size[1])
        # Scalar state-action value head.
        self.V = nn.Linear(hidden_size[1], 1)

        # Fan-in init for the hidden layers, small uniform init for the
        # value head (same parameter order as before, so the RNG stream
        # is consumed identically).
        for layer in (self.linear1, self.linear2):
            fan_in_uniform_init(layer.weight)
            fan_in_uniform_init(layer.bias)
        nn.init.uniform_(self.V.weight, -WEIGHTS_FINAL_INIT, WEIGHTS_FINAL_INIT)
        nn.init.uniform_(self.V.bias, -BIAS_FINAL_INIT, BIAS_FINAL_INIT)

    def forward(self, inputs, actions):
        """Return Q(inputs, actions) as a (batch, 1) tensor."""
        h = F.relu(self.ln1(self.linear1(inputs)))
        h = torch.cat((h, actions), 1)
        h = F.relu(self.ln2(self.linear2(h)))
        return self.V(h)
|
111598
|
from kik.messages.message import Message
class FriendPickerMessage(Message):
"""
A friend picker message, as documented at `<https://dev.kik.com/#/docs/messaging#friend-picker-response-object>`_.
"""
def __init__(self, picked=None, chat_type=None, **kwargs):
super(FriendPickerMessage, self).__init__(type='friend-picker', **kwargs)
self.picked = picked
self.chat_type = chat_type
@classmethod
def property_mapping(cls):
mapping = super(FriendPickerMessage, cls).property_mapping()
mapping.update({
'picked': 'picked',
'chat_type': 'chatType'
})
return mapping
|
111613
|
import os
import re
import sys
import romkan
# Markup tokens that may wrap or prefix phones in the raw transcriptions.
phone_cleanup_pattern = re.compile(r'(UA_|SWA_|M_|\{| WB\}|\})')

def cleanup_transcription(phone_sequence):
    """Strip corpus markup tokens from a phone string and trim whitespace."""
    return phone_cleanup_pattern.sub('', phone_sequence).strip()
def parse_dictionary_file(path):
    """Parse a GlobalPhone pronunciation dictionary file.

    Each line looks like ``{word} {phone phone ...}``; entries whose phone
    part contains SIL or +QK are skipped.

    Parameters
    ----------
    path : str
        Path to the UTF-8 dictionary file.

    Returns
    -------
    dictionary : dict
        word -> list of pronunciations (each a list of phone strings).
    nonsil : set
        All non-silence phones encountered.
    word_characters : set
        All characters appearing in the words.
    """
    nonsil = set()
    word_cleanup_pattern = re.compile(r'\(\d+\)')   # variant markers like "(2)"
    line_break_pattern = re.compile(r'\}\s+')       # separates word part from phones
    word_pattern = re.compile(r'^{([^{}]+)\s+')
    dictionary = {}
    word_characters = set()
    with open(path, 'r', encoding = 'utf8') as f:
        try:
            for line in f:
                line = line.strip()
                if line == '':
                    continue
                try:
                    word, phones = line_break_pattern.split(line, maxsplit=1)
                except ValueError:
                    raise(Exception('There was a problem with the line \'{}\'.'.format(line)))
                # Skip silence / interjection entries entirely.
                if 'SIL' in phones or '+QK' in phones:
                    continue
                word = word[1:].strip()  # drop the opening '{'
                if '{' in word:
                    # Word still contains a brace: re-extract with the
                    # anchored pattern from the full line instead.
                    word = word_pattern.match(line)
                    word = word.groups()[0]
                    phones = word_pattern.sub('',line)
                word = word_cleanup_pattern.sub('', word)
                word = word.strip()
                #word = word.lower()
                word_characters.update(word)
                phones = cleanup_transcription(phones)
                matches = phones.split()
                # Collapse an accidentally duplicated single phone.
                if len(matches) == 2 and matches[0] == matches[1]:
                    matches = matches[:1]
                nonsil.update(matches)
                if word not in dictionary:
                    dictionary[word] = []
                dictionary[word].append(matches)
        except UnicodeDecodeError:
            # Dump the offending context for debugging, then re-raise.
            s = f.readline()
            print(repr(s))
            print(f.readline())
            raise(Exception)
    return dictionary, nonsil, word_characters
def generate_japanese_dictionary(source_dir, dictionary):
    """Build a kanji/kana pronunciation dictionary from GlobalPhone transcripts.

    Reads every transcript in ``source_dir``/trl (EUC-JP encoded), converts
    each kana token to romaji via romkan, and looks the romaji up in
    *dictionary* (the romaji lexicon from parse_dictionary_file).

    Returns
    -------
    new_dictionary : dict
        kanji/kana word -> pronunciations copied from *dictionary*.
    graphemes : set
        Characters used by the collected words.
    not_found : dict
        romaji -> set of transcript filenames where it could not be resolved.
    """
    split_re = re.compile(r'[]、 ]+')      # token separators (incl. Japanese comma)
    cleanup_re = re.compile(r'[]{}]')
    kanasplit_re = re.compile(r'\[+')       # splits "kanji[kana" annotations
    nnize_re = re.compile(r'n(?=[pbtdkgrwsczj]|$)')  # moraic n -> N
    trl_dir = os.path.join(source_dir, 'trl')
    adc_dir = os.path.join(source_dir, 'adc')  # NOTE(review): unused below
    not_found = {}
    ignore = set(['nokoribuN', 'tamawatte', 'fuyukai'])  # NOTE(review): never consulted
    endings = ['na', 'datta']  # suffixes tried when a direct lookup fails
    new_dictionary = {}
    graphemes = set([])
    for filename in sorted(os.listdir(trl_dir)):
        print(filename)
        with open(os.path.join(trl_dir, filename), 'r', encoding = 'eucjp') as f:
            for line in f:
                if line.startswith(';'):  # comment/metadata line
                    continue
                words = split_re.split(line)
                for w in words:
                    w = w.strip()
                    if not w:
                        continue
                    if w in ['。','、','}']:
                        continue
                    if w.startswith('<'):  # markup token
                        continue
                    kanji = None
                    kana = None
                    if '[' in w:
                        # "kanji[kana" annotated token: keep both spellings.
                        print(w)
                        w = cleanup_re.sub('',w)
                        kanji, kana = kanasplit_re.split(w)
                    else:
                        kana = w
                    #print(kanji,kana)
                    print(w)
                    print(kana)
                    romanji = romkan.to_roma(kana)
                    #print(romanji)
                    romanji = romanji.replace("n'", 'N')
                    romanji = nnize_re.sub(r'N', romanji)
                    try:
                        d = dictionary[romanji]
                        if kanji is not None:
                            new_dictionary[kanji] = d
                            graphemes.update(kanji)
                        new_dictionary[kana] = d
                        graphemes.update(kana)
                    except KeyError:
                        # Fallback: strip a known suffix and look up the stem.
                        for e in endings:
                            if romanji.endswith(e):
                                to_lookup = romanji[:-len(e)]
                                try:
                                    # NOTE(review): only prints the combined
                                    # pronunciation; it is never added to
                                    # new_dictionary — confirm intended.
                                    print(dictionary[to_lookup] + dictionary[e])
                                except KeyError:
                                    if romanji not in not_found:
                                        not_found[romanji] = set()
                                    not_found[romanji].add(filename)
                                break
                        else:
                            if romanji not in not_found:
                                not_found[romanji] = set()
                            not_found[romanji].add(filename)
    return new_dictionary, graphemes, not_found
def save_dictionary(dictionary, path):
    """Write word/pronunciation pairs as tab-separated lines, sorted by word.

    One line per pronunciation: ``word<TAB>phone phone ...``.
    """
    with open(path, 'w', encoding = 'utf8') as out:
        for word, pronunciations in sorted(dictionary.items()):
            out.writelines(
                '{}\t{}\n'.format(word, ' '.join(pron))
                for pron in pronunciations
            )
def dict_prep():
    """Build the original and extended Japanese GlobalPhone dictionaries.

    Paths are hard-coded for a local Windows data layout. Writes
    ``original_dictionary.txt``, ``new_dictionary.txt`` and ``not_found.txt``
    into the dict output directory.
    """
    source_dir = r'D:\Data\GlobalPhone\Japanese\Japanese'
    path = r'D:\Data\GlobalPhone\Japanese\Japanese_Dict\Japanese-GPDict.txt'
    dict_dir = r'D:\Data\GlobalPhone\output\JA\dict'
    # Pass 1: romaji lexicon straight from the distributed dictionary file.
    dictionary, nonsil, word_characters = parse_dictionary_file(path)
    lexicon_path = os.path.join(dict_dir, 'original_dictionary.txt')
    save_dictionary(dictionary, lexicon_path)
    # Pass 2: kanji/kana dictionary derived from the transcripts.
    dictionary, word_characters, not_found = generate_japanese_dictionary(source_dir, dictionary)
    lexicon_path = os.path.join(dict_dir, 'new_dictionary.txt')
    save_dictionary(dictionary, lexicon_path)
    # Record the romaji that could not be resolved, with their source files.
    with open(os.path.join(dict_dir, 'not_found.txt'), 'w', encoding = 'utf8') as f:
        for w, files in sorted(not_found.items()):
            f.write('{}\t{}\n'.format(w, ', '.join(sorted(files))))
if __name__ == '__main__':
    # Script entry point: rebuild the Japanese GlobalPhone dictionaries.
    dict_prep()
|
111623
|
import pytest
from pyseeyou.locales import get_parts_of_num
from pyseeyou.cldr_rules import CARDINALS
# ========================
# GENERATED AUTOMATICALLY
# DON'T MODIFY MANUALLY
# ========================
def check(assertions, plural_fn):
for assertion in assertions:
match, samples = assertion
for sample in samples:
assert plural_fn(*get_parts_of_num(sample)) == match
# Locales af-az: cardinal plural-category checks.
def test_cardinal_af():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['af'])
def test_cardinal_ak():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['ak'])
def test_cardinal_am():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['am'])
def test_cardinal_ar():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('few', ['3','7','10','103','107','110','1003','3.0','4.0','5.0']),
        ('many', ['11','19','26','111','1011','11.0','12.0','13.0']),
        ('other', ['100','101','102','200','201','202','300','301','302','0.1','0.5','0.9','1.1','1.4','1.7','10.1'])
    ], CARDINALS['ar'])
def test_cardinal_ars():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('few', ['3','7','10','103','107','110','1003','3.0','4.0','5.0']),
        ('many', ['11','19','26','111','1011','11.0','12.0','13.0']),
        ('other', ['100','101','102','200','201','202','300','301','302','0.1','0.5','0.9','1.1','1.4','1.7','10.1'])
    ], CARDINALS['ars'])
def test_cardinal_as():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['as'])
def test_cardinal_asa():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['asa'])
def test_cardinal_ast():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ast'])
def test_cardinal_az():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['az'])
# Locales be-bs: cardinal plural-category checks.
def test_cardinal_be():
    check([
        ('one', ['1','21','31','1.0','21.0','31.0']),
        ('few', ['2','3','4','22','23','24','32','33','34','2.0','3.0','4.0']),
        ('many', ['0','5','12','19','100','0.0','5.0','6.0']),
        ('other', ['0.1','0.5','0.9','1.1','1.4','1.7','10.1'])
    ], CARDINALS['be'])
def test_cardinal_bem():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['bem'])
def test_cardinal_bez():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['bez'])
def test_cardinal_bg():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['bg'])
def test_cardinal_bh():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['bh'])
def test_cardinal_bm():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['bm'])
def test_cardinal_bn():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['bn'])
def test_cardinal_bo():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['bo'])
def test_cardinal_br():
    check([
        ('one', ['1','21','31','1.0','21.0','31.0']),
        ('two', ['2','22','32','2.0','22.0','32.0']),
        ('few', ['3','4','9','3.0','4.0','9.0']),
        ('many', ['1000000','1000000.0','1000000.00','1000000.000']),
        ('other', ['0','5','7','8','10','15','20','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['br'])
def test_cardinal_brx():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['brx'])
def test_cardinal_bs():
    check([
        ('one', ['1','21','31','0.1','1.1','2.1']),
        ('few', ['2','3','4','22','23','24','32','33','34','0.2','0.3','0.4','1.2','1.3','1.4','2.2','2.3','2.4']),
        ('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
    ], CARDINALS['bs'])
# Locales ca-dsb: cardinal plural-category checks.
def test_cardinal_ca():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ca'])
def test_cardinal_ce():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ce'])
def test_cardinal_ceb():
    check([
        ('one', ['0','2','3','5','7','0.0','0.15','0.3','0.5','0.7']),
        ('other', ['4','6','9','0.4','0.6','0.9'])
    ], CARDINALS['ceb'])
def test_cardinal_cgg():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['cgg'])
def test_cardinal_chr():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['chr'])
def test_cardinal_ckb():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ckb'])
def test_cardinal_cs():
    check([
        ('one', ['1']),
        ('few', ['2','3','4']),
        ('many', ['0.0','0.75','1.5','10.0','100.0']),
        ('other', ['0','5','12','19','100'])
    ], CARDINALS['cs'])
def test_cardinal_cy():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('few', ['3','3.0','3.00','3.000']),
        ('many', ['6','6.0','6.00','6.000']),
        ('other', ['4','5','7','14','20','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['cy'])
def test_cardinal_da():
    check([
        ('one', ['1','0.1','0.85','1.6']),
        ('other', ['0','2','9','16','100','0.0','2.0','2.7','3.4','10.0'])
    ], CARDINALS['da'])
def test_cardinal_de():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['de'])
def test_cardinal_dsb():
    check([
        ('one', ['1','101','201','0.1','1.1','2.1']),
        ('two', ['2','102','202','0.2','1.2','2.2']),
        ('few', ['3','4','103','0.3','0.4','1.3']),
        ('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
    ], CARDINALS['dsb'])
# Locales dv-fr: cardinal plural-category checks.
def test_cardinal_dv():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['dv'])
def test_cardinal_dz():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['dz'])
def test_cardinal_ee():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ee'])
def test_cardinal_el():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['el'])
def test_cardinal_en():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['en'])
def test_cardinal_eo():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['eo'])
def test_cardinal_es():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['es'])
def test_cardinal_et():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['et'])
def test_cardinal_eu():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['eu'])
def test_cardinal_fa():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['fa'])
def test_cardinal_ff():
    check([
        ('one', ['0','1','0.0','0.75','1.5']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['ff'])
def test_cardinal_fi():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['fi'])
def test_cardinal_fil():
    check([
        ('one', ['0','2','3','5','7','0.0','0.15','0.3','0.5','0.7']),
        ('other', ['4','6','9','0.4','0.6','0.9'])
    ], CARDINALS['fil'])
def test_cardinal_fo():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['fo'])
def test_cardinal_fr():
    check([
        ('one', ['0','1','0.0','0.75','1.5']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['fr'])
# Locales fur-he: cardinal plural-category checks.
def test_cardinal_fur():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['fur'])
def test_cardinal_fy():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['fy'])
def test_cardinal_ga():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('few', ['3','5','6','3.0','4.0','5.0']),
        ('many', ['7','9','10','7.0','8.0','9.0']),
        ('other', ['0','11','18','25','100','0.0','0.45','0.9','1.1','1.35','1.6','10.1'])
    ], CARDINALS['ga'])
def test_cardinal_gd():
    check([
        ('one', ['1','11','1.0','11.0','1.00']),
        ('two', ['2','12','2.0','12.0','2.00']),
        ('few', ['3','7','10','13','16','19','3.0','4.0','5.0']),
        ('other', ['0','20','27','34','100','0.0','0.45','0.9','1.1','1.35','1.6','10.1'])
    ], CARDINALS['gd'])
def test_cardinal_gl():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['gl'])
def test_cardinal_gsw():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['gsw'])
def test_cardinal_gu():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['gu'])
def test_cardinal_guw():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['guw'])
def test_cardinal_gv():
    check([
        ('one', ['1','11','21']),
        ('two', ['2','12','22']),
        ('few', ['0','20','40']),
        ('many', ['0.0','0.75','1.5','10.0','100.0']),
        ('other', ['3','7','10','13','16','19','23'])
    ], CARDINALS['gv'])
def test_cardinal_ha():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ha'])
def test_cardinal_haw():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['haw'])
def test_cardinal_he():
    check([
        ('one', ['1']),
        ('two', ['2']),
        ('many', ['20','30','40']),
        ('other', ['0','3','10','17','101','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['he'])
# Locales hi-iw: cardinal plural-category checks.
def test_cardinal_hi():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['hi'])
def test_cardinal_hr():
    check([
        ('one', ['1','21','31','0.1','1.1','2.1']),
        ('few', ['2','3','4','22','23','24','32','33','34','0.2','0.3','0.4','1.2','1.3','1.4','2.2','2.3','2.4']),
        ('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
    ], CARDINALS['hr'])
def test_cardinal_hsb():
    check([
        ('one', ['1','101','201','0.1','1.1','2.1']),
        ('two', ['2','102','202','0.2','1.2','2.2']),
        ('few', ['3','4','103','0.3','0.4','1.3']),
        ('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
    ], CARDINALS['hsb'])
def test_cardinal_hu():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['hu'])
def test_cardinal_hy():
    check([
        ('one', ['0','1','0.0','0.75','1.5']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['hy'])
def test_cardinal_ia():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ia'])
def test_cardinal_id():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['id'])
def test_cardinal_ig():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ig'])
def test_cardinal_ii():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ii'])
def test_cardinal_in():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['in'])
def test_cardinal_io():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['io'])
def test_cardinal_is():
    check([
        ('one', ['1','21','31','0.1','0.85','1.6','10.1','100.1']),
        ('other', ['0','2','9','16','100','0.0','2.0','3.0'])
    ], CARDINALS['is'])
def test_cardinal_it():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['it'])
def test_cardinal_iu():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['iu'])
def test_cardinal_iw():
    check([
        ('one', ['1']),
        ('two', ['2']),
        ('many', ['20','30','40']),
        ('other', ['0','3','10','17','101','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['iw'])
# Locales ja-km: cardinal plural-category checks.
def test_cardinal_ja():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ja'])
def test_cardinal_jbo():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['jbo'])
def test_cardinal_jgo():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['jgo'])
def test_cardinal_ji():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ji'])
def test_cardinal_jmc():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['jmc'])
def test_cardinal_jv():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['jv'])
def test_cardinal_jw():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['jw'])
def test_cardinal_ka():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ka'])
def test_cardinal_kab():
    check([
        ('one', ['0','1','0.0','0.75','1.5']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['kab'])
def test_cardinal_kaj():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['kaj'])
def test_cardinal_kcg():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['kcg'])
def test_cardinal_kde():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['kde'])
def test_cardinal_kea():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['kea'])
def test_cardinal_kk():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['kk'])
def test_cardinal_kkj():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['kkj'])
def test_cardinal_kl():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['kl'])
def test_cardinal_km():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['km'])
# Locales kn-lt: cardinal plural-category checks.
def test_cardinal_kn():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
    ], CARDINALS['kn'])
def test_cardinal_ko():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ko'])
def test_cardinal_ks():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ks'])
def test_cardinal_ksb():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ksb'])
def test_cardinal_ksh():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['ksh'])
def test_cardinal_ku():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ku'])
def test_cardinal_kw():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','22','42','2.0','22.0','42.0']),
        ('few', ['3','23','43','3.0','23.0','43.0']),
        ('many', ['21','41','61','21.0','41.0','61.0']),
        ('other', ['4','12','19','100','1000000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['kw'])
def test_cardinal_ky():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ky'])
def test_cardinal_lag():
    check([
        ('zero', ['0','0.0','0.00','0.000']),
        ('one', ['1','0.1','0.85','1.6']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['lag'])
def test_cardinal_lb():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['lb'])
def test_cardinal_lg():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['lg'])
def test_cardinal_lkt():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['lkt'])
def test_cardinal_ln():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['ln'])
def test_cardinal_lo():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['lo'])
def test_cardinal_lt():
    check([
        ('one', ['1','21','31','1.0','21.0','31.0']),
        ('few', ['2','6','9','22','26','29','102','2.0','3.0','4.0']),
        ('many', ['0.1','0.5','0.9','1.1','1.4','1.7','10.1']),
        ('other', ['0','10','15','20','30','0.0','10.0','11.0'])
    ], CARDINALS['lt'])
# Locales lv-naq: cardinal plural-category checks.
def test_cardinal_lv():
    check([
        ('zero', ['0','10','15','20','30','0.0','10.0','11.0']),
        ('one', ['1','21','31','0.1','1.0','1.1']),
        ('other', ['2','6','9','22','26','29','102','0.2','0.55','0.9','1.2','1.55','1.9','10.2'])
    ], CARDINALS['lv'])
def test_cardinal_mas():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['mas'])
def test_cardinal_mg():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['mg'])
def test_cardinal_mgo():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['mgo'])
def test_cardinal_mk():
    check([
        ('one', ['1','21','31','0.1','1.1','2.1']),
        ('other', ['0','2','9','16','100','0.0','0.2','0.6','1.0','1.2','1.45','1.7'])
    ], CARDINALS['mk'])
def test_cardinal_ml():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ml'])
def test_cardinal_mn():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['mn'])
def test_cardinal_mo():
    check([
        ('one', ['1']),
        ('few', ['0','2','9','16','102','0.0','0.75','1.5','10.0','100.0']),
        ('other', ['20','28','35','100','1000'])
    ], CARDINALS['mo'])
def test_cardinal_mr():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['mr'])
def test_cardinal_ms():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ms'])
def test_cardinal_mt():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('few', ['0','2','6','10','102','105','107','0.0','2.0','3.0']),
        ('many', ['11','15','19','111','114','117','1011','11.0','12.0','13.0']),
        ('other', ['20','28','35','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.1'])
    ], CARDINALS['mt'])
def test_cardinal_my():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['my'])
def test_cardinal_nah():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nah'])
def test_cardinal_naq():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['naq'])
# Locales nb-pl: cardinal plural-category checks.
def test_cardinal_nb():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nb'])
def test_cardinal_nd():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nd'])
def test_cardinal_ne():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ne'])
def test_cardinal_nl():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['nl'])
def test_cardinal_nn():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nn'])
def test_cardinal_nnh():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nnh'])
def test_cardinal_no():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['no'])
def test_cardinal_nqo():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['nqo'])
def test_cardinal_nr():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nr'])
def test_cardinal_nso():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['nso'])
def test_cardinal_ny():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ny'])
def test_cardinal_nyn():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['nyn'])
def test_cardinal_om():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['om'])
def test_cardinal_or():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['or'])
def test_cardinal_os():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['os'])
def test_cardinal_pa():
    check([
        ('one', ['0','1','0.0','1.0','0.00']),
        ('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
    ], CARDINALS['pa'])
def test_cardinal_pap():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['pap'])
def test_cardinal_pl():
    check([
        ('one', ['1']),
        ('few', ['2','3','4','22','23','24','32','33','34']),
        ('many', ['0','5','12','19','100']),
        ('other', ['0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['pl'])
# Locales prg-se: cardinal plural-category checks.
def test_cardinal_prg():
    check([
        ('zero', ['0','10','15','20','30','0.0','10.0','11.0']),
        ('one', ['1','21','31','0.1','1.0','1.1']),
        ('other', ['2','6','9','22','26','29','102','0.2','0.55','0.9','1.2','1.55','1.9','10.2'])
    ], CARDINALS['prg'])
def test_cardinal_ps():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['ps'])
def test_cardinal_pt():
    check([
        ('one', ['0','1','0.0','0.75','1.5']),
        ('other', ['2','10','17','100','1000','2.0','2.75','3.5','10.0','100.0'])
    ], CARDINALS['pt'])
def test_cardinal_pt_PT():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['pt-PT'])
def test_cardinal_rm():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['rm'])
def test_cardinal_ro():
    check([
        ('one', ['1']),
        ('few', ['0','2','9','16','102','0.0','0.75','1.5','10.0','100.0']),
        ('other', ['20','28','35','100','1000'])
    ], CARDINALS['ro'])
def test_cardinal_rof():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['rof'])
def test_cardinal_root():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['root'])
def test_cardinal_ru():
    check([
        ('one', ['1','21','31']),
        ('few', ['2','3','4','22','23','24','32','33','34']),
        ('many', ['0','5','12','19','100']),
        ('other', ['0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ru'])
def test_cardinal_rwk():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['rwk'])
def test_cardinal_sah():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['sah'])
def test_cardinal_saq():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['saq'])
def test_cardinal_sc():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['sc'])
def test_cardinal_scn():
    check([
        ('one', ['1']),
        ('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['scn'])
def test_cardinal_sd():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['sd'])
def test_cardinal_sdh():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['sdh'])
def test_cardinal_se():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['se'])
# Locales seh-sms: cardinal plural-category checks.
def test_cardinal_seh():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['seh'])
def test_cardinal_ses():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['ses'])
def test_cardinal_sg():
    check([
        ('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
    ], CARDINALS['sg'])
def test_cardinal_sh():
    check([
        ('one', ['1','21','31','0.1','1.1','2.1']),
        ('few', ['2','3','4','22','23','24','32','33','34','0.2','0.3','0.4','1.2','1.3','1.4','2.2','2.3','2.4']),
        ('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
    ], CARDINALS['sh'])
def test_cardinal_shi():
    check([
        ('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
        ('few', ['2','6','10','2.0','3.0','4.0']),
        ('other', ['11','19','26','100','1000','1.1','1.5','1.9','2.1','2.4','2.7','10.1'])
    ], CARDINALS['shi'])
def test_cardinal_si():
    check([
        ('one', ['0','1','0.0','0.1','1.0']),
        ('other', ['2','10','17','100','1000','0.2','0.55','0.9','1.1','1.45','1.8','10.0'])
    ], CARDINALS['si'])
def test_cardinal_sk():
    check([
        ('one', ['1']),
        ('few', ['2','3','4']),
        ('many', ['0.0','0.75','1.5','10.0','100.0']),
        ('other', ['0','5','12','19','100'])
    ], CARDINALS['sk'])
def test_cardinal_sl():
    check([
        ('one', ['1','101','201']),
        ('two', ['2','102','202']),
        ('few', ['3','4','103','0.0','0.75','1.5','10.0','100.0']),
        ('other', ['0','5','12','19','100'])
    ], CARDINALS['sl'])
def test_cardinal_sma():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['sma'])
def test_cardinal_smi():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['smi'])
def test_cardinal_smj():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['smj'])
def test_cardinal_smn():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['smn'])
def test_cardinal_sms():
    check([
        ('one', ['1','1.0','1.00','1.000']),
        ('two', ['2','2.0','2.00','2.000']),
        ('other', ['0','3','10','17','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
    ], CARDINALS['sms'])
def test_cardinal_sn():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['sn'])
def test_cardinal_so():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['so'])
def test_cardinal_sq():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['sq'])
def test_cardinal_sr():
assertions = [
('one', ['1','21','31','0.1','1.1','2.1']),
('few', ['2','3','4','22','23','24','32','33','34','0.2','0.3','0.4','1.2','1.3','1.4','2.2','2.3','2.4']),
('other', ['0','5','12','19','100','0.0','0.5','0.75','1.0','1.5','1.75','2.0'])
]
check(assertions, CARDINALS['sr'])
def test_cardinal_ss():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ss'])
def test_cardinal_ssy():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ssy'])
def test_cardinal_st():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['st'])
def test_cardinal_sv():
assertions = [
('one', ['1']),
('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['sv'])
def test_cardinal_sw():
assertions = [
('one', ['1']),
('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['sw'])
def test_cardinal_syr():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['syr'])
def test_cardinal_ta():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ta'])
def test_cardinal_te():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['te'])
def test_cardinal_teo():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['teo'])
def test_cardinal_th():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['th'])
def test_cardinal_ti():
assertions = [
('one', ['0','1','0.0','1.0','0.00']),
('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
]
check(assertions, CARDINALS['ti'])
def test_cardinal_tig():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['tig'])
def test_cardinal_tk():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['tk'])
def test_cardinal_tl():
assertions = [
('one', ['0','2','3','5','7','0.0','0.15','0.3','0.5','0.7']),
('other', ['4','6','9','0.4','0.6','0.9'])
]
check(assertions, CARDINALS['tl'])
def test_cardinal_tn():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['tn'])
def test_cardinal_to():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['to'])
def test_cardinal_tr():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['tr'])
def test_cardinal_ts():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ts'])
def test_cardinal_tzm():
assertions = [
('one', ['0','1','11','18','24','0.0','1.0','11.0']),
('other', ['2','6','10','100','103','106','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
]
check(assertions, CARDINALS['tzm'])
def test_cardinal_ug():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ug'])
def test_cardinal_uk():
assertions = [
('one', ['1','21','31']),
('few', ['2','3','4','22','23','24','32','33','34']),
('many', ['0','5','12','19','100']),
('other', ['0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['uk'])
def test_cardinal_ur():
assertions = [
('one', ['1']),
('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['ur'])
def test_cardinal_uz():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['uz'])
def test_cardinal_ve():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['ve'])
def test_cardinal_vi():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['vi'])
def test_cardinal_vo():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['vo'])
def test_cardinal_vun():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['vun'])
def test_cardinal_wa():
assertions = [
('one', ['0','1','0.0','1.0','0.00']),
('other', ['2','10','17','100','1000','0.1','0.5','0.9','1.1','1.4','1.7','10.0'])
]
check(assertions, CARDINALS['wa'])
def test_cardinal_wae():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['wae'])
def test_cardinal_wo():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['wo'])
def test_cardinal_xh():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['xh'])
def test_cardinal_xog():
assertions = [
('one', ['1','1.0','1.00','1.000']),
('other', ['0','2','9','16','100','0.0','0.45','0.9','1.1','1.35','1.6','10.0'])
]
check(assertions, CARDINALS['xog'])
def test_cardinal_yi():
assertions = [
('one', ['1']),
('other', ['0','2','9','16','100','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['yi'])
def test_cardinal_yo():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['yo'])
def test_cardinal_yue():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['yue'])
def test_cardinal_zh():
assertions = [
('other', ['0','8','15','100','1000','0.0','0.75','1.5','10.0','100.0'])
]
check(assertions, CARDINALS['zh'])
def test_cardinal_zu():
assertions = [
('one', ['0','1','0.0','0.5','1.0','0.00','0.02','0.04']),
('other', ['2','10','17','100','1000','1.1','1.85','2.6','10.0','100.0'])
]
check(assertions, CARDINALS['zu'])
# ================================
# END AUTOMATICALLY GENERATED CODE
# ================================
|
111629
|
from os import path
from urllib import request
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from datasets import AbstractDataset
class CompasDataset(AbstractDataset):
    """COMPAS two-year recidivism dataset (ProPublica).

    Downloads the raw CSV on first use, applies ProPublica's standard row
    filtering, keeps African-American and Caucasian defendants, one-hot
    encodes the categorical features and splits the tensors into
    train/val/test along with a boolean protected attribute.

    Args:
        split: which data split this instance exposes (handled by AbstractDataset).
        args: experiment arguments; only ``protected_att`` is read here.
        normalize: whether to normalize the continuous feature columns.
    """

    def __init__(self, split, args, normalize=True):
        super().__init__('compas', split)
        datafile = path.join(self.data_dir, 'compas-scores-two-years.csv')
        if not path.exists(datafile):
            request.urlretrieve(
                'https://github.com/propublica/compas-analysis/raw/master/compas-scores-two-years.csv', datafile
            )
        df = pd.read_csv(datafile)
        # ProPublica's filtering: valid screening window, known recidivism flag,
        # a real charge degree and a usable score.
        df = df[df['days_b_screening_arrest'] >= -30]
        df = df[df['days_b_screening_arrest'] <= 30]
        df = df[df['is_recid'] != -1]
        df = df[df['c_charge_degree'] != '0']
        df = df[df['score_text'] != 'N/A']
        # Derive custody / jail durations in seconds from the timestamp pairs.
        df['in_custody'] = pd.to_datetime(df['in_custody'])
        df['out_custody'] = pd.to_datetime(df['out_custody'])
        df['diff_custody'] = (df['out_custody'] - df['in_custody']).dt.total_seconds()
        df['c_jail_in'] = pd.to_datetime(df['c_jail_in'])
        df['c_jail_out'] = pd.to_datetime(df['c_jail_out'])
        df['diff_jail'] = (df['c_jail_out'] - df['c_jail_in']).dt.total_seconds()
        # Drop identifiers and raw timestamps so they cannot leak into features.
        df.drop(
            [
                'id', 'name', 'first', 'last', 'v_screening_date', 'compas_screening_date', 'dob', 'c_case_number',
                'screening_date', 'in_custody', 'out_custody', 'c_jail_in', 'c_jail_out'
            ], axis=1, inplace=True
        )
        df = df[df['race'].isin(['African-American', 'Caucasian'])]
        features = df.drop(['is_recid', 'is_violent_recid', 'violent_recid', 'two_year_recid'], axis=1)
        # Label 1 = did NOT recidivate within two years.
        labels = 1 - df['two_year_recid']
        features = features[[
            'age', 'sex', 'race', 'diff_custody', 'diff_jail', 'priors_count', 'juv_fel_count', 'c_charge_degree',
            'c_charge_desc', 'v_score_text'
        ]]
        continuous_vars = []
        self.categorical_columns = []
        for col in features.columns:
            if features[col].isnull().sum() > 0:
                features.drop(col, axis=1, inplace=True)
            else:
                # FIX: `np.object` was removed in NumPy 1.24; the builtin `object`
                # is the supported spelling for the pandas object dtype.
                if features[col].dtype == object:
                    self.categorical_columns += [col]
                else:
                    continuous_vars += [col]
        protected_att = args.protected_att if args.protected_att is not None else 'race'
        self.protected_unique = features[protected_att].nunique()
        protected = np.logical_not(pd.Categorical(features[protected_att]).codes)
        features = pd.get_dummies(features, columns=self.categorical_columns, prefix_sep='=')
        self.continuous_columns = [features.columns.get_loc(var) for var in continuous_vars]
        # Remember which one-hot columns belong to each original categorical column.
        self.one_hot_columns = {}
        for column_name in self.categorical_columns:
            ids = [i for i, col in enumerate(features.columns) if col.startswith('{}='.format(column_name))]
            if len(ids) > 0:
                # get_dummies emits the dummy columns contiguously.
                assert len(ids) == ids[-1] - ids[0] + 1
                self.one_hot_columns[column_name] = ids
        print('categorical features: ', self.one_hot_columns.keys())
        self.column_ids = {col: idx for idx, col in enumerate(features.columns)}
        features = torch.tensor(features.values.astype(np.float32), device=self.device)
        labels = torch.tensor(labels.values.astype(np.int64), device=self.device).bool().long()
        protected = torch.tensor(protected, device=self.device).bool()
        # 64/16/20 split: carve out the test set first, then validation from the rest.
        X_train, self.X_test, y_train, self.y_test, protected_train, self.protected_test = train_test_split(
            features, labels, protected, test_size=0.2, random_state=0
        )
        self.X_train, self.X_val, self.y_train, self.y_val, self.protected_train, self.protected_val = train_test_split(
            X_train, y_train, protected_train, test_size=0.2, random_state=0
        )
        if normalize:
            self._normalize(self.continuous_columns)
        self._assign_split()
|
111644
|
import behave
@behave.when(u'I get Task by "{get_method}"')
def step_impl(context, get_method):
    """Fetch the previously created task by name or by id and stash it on the context."""
    if get_method == 'name':
        fetched = context.project.tasks.get(task_name=context.task.name)
    elif get_method == 'id':
        fetched = context.project.tasks.get(task_id=context.task.id)
    else:
        return
    context.task_get = fetched
@behave.when(u'I get Task by wrong "{get_method}"')
def step_impl(context, get_method):
    """Attempt a lookup with a bogus identifier and record the raised exception (or None)."""
    try:
        if get_method == 'name':
            context.task_get = context.project.tasks.get(task_name='randomName')
        elif get_method == 'id':
            context.task_get = context.project.tasks.get(task_id='randomId')
    except Exception as err:
        context.error = err
    else:
        # Lookup unexpectedly succeeded: no error to record.
        context.error = None
@behave.then(u'Task received equals task created')
def step_impl(context):
    """The fetched task must serialize identically to the created one."""
    created = context.task.to_json()
    received = context.task_get.to_json()
    assert created == received
|
111654
|
from __future__ import annotations
import numpy as np
import pytest
from .helpers import ( # noqa: F401
assert_eq,
line_delim_records_file,
load_records_eager,
load_records_lazy,
)
def test_ufunc_add(line_delim_records_file) -> None:  # noqa: F811
    """Scalar addition must agree between the lazy and eager arrays."""
    lazy = load_records_lazy(line_delim_records_file).analysis.x1
    eager = load_records_eager(line_delim_records_file).analysis.x1
    assert_eq(lazy + 2, eager + 2)
def test_ufunc_sin(line_delim_records_file) -> None:  # noqa: F811
    """np.sin must agree between the lazy and eager arrays."""
    lazy = load_records_lazy(line_delim_records_file).analysis.x1
    eager = load_records_eager(line_delim_records_file).analysis.x1
    assert_eq(np.sin(lazy), np.sin(eager))
@pytest.mark.parametrize("f", [np.add.accumulate, np.add.reduce])
def test_ufunc_method_raise(line_delim_records_file, f) -> None:  # noqa: F811
    """Only ufunc __call__ is supported; other ufunc methods must raise."""
    lazy = load_records_lazy(line_delim_records_file).analysis.x1
    with pytest.raises(NotImplementedError, match="Array ufunc supports only method"):
        f(lazy, lazy)
|
111670
|
from scipy.spatial import distance
import operator
import json
import numpy as np
import sys
import math
import argparse
parser = argparse.ArgumentParser(description='Ranking groups')
parser.add_argument('--groups' , help = 'path to groups')
parser.add_argument('--ef' , help = 'path to reviewer embeddings')
parser.add_argument('--rankedgroups' , help = 'path to ranked groups')
args = parser.parse_args()
class Groups:
    """A reviewer group: member users, reviewed products, and fraud scores."""

    def __init__(self, users, prods, score, scoregt, id):
        self.users, self.prods = users, prods
        self.score, self.scoregt = score, scoregt
        self.id = id

    def __lt__(self, other):
        # Order groups by member count so sorting yields the smallest groups first.
        return len(self.users) < len(other.users)
# --- Module-level setup: load groups and reviewer embeddings ---
c = 0
groups = set()
grps = {}
grpmapping = {}
avggrpsize = 0
size = 0
with open(args.groups, 'r') as fp:
    finalgrps = json.load(fp)
# Embedding file: first line is a header, then "<user_id> <f1> <f2> ..." per line.
mapping = {}
emb_size = 0
with open(args.ef, 'r') as filee:
    for c, f in enumerate(filee):
        if c == 0:
            continue  # skip header line
        fsplit = f.strip().split(" ")
        uid = int(fsplit[0])
        # FIX: the original tested `fsplit[0] not in mapping` — a *string* against
        # int keys — so the dedup guard never fired. Compare the int key instead.
        if uid not in mapping:
            # FIX: store a list, not a map() object: under Python 3 a map() is a
            # one-shot iterator, but density() later indexes and re-reads it.
            mapping[uid] = [float(v) for v in fsplit[1:]]
            emb_size = len(fsplit[1:])
userset = set()
gtscore = {}
size = {}
grpusers = {}
for g1 in finalgrps:
    # Materialize as lists for the same Python 3 reason as above (len()/iteration reuse).
    finalgrps[g1]['users'] = [int(u) for u in finalgrps[g1]['users']]
    finalgrps[g1]['prods'] = [int(p) for p in finalgrps[g1]['prods']]
    group = Groups(finalgrps[g1]['users'], finalgrps[g1]['prods'], finalgrps[g1]['fakegt'], finalgrps[g1]['scoregt'], finalgrps[g1]['id'])
    gsize = len(finalgrps[g1]['users'])
    # Histogram of group sizes.
    size[gsize] = size.get(gsize, 0) + 1
    groups.add(group)
    grpmapping[finalgrps[g1]['id']] = group
    if finalgrps[g1]['id'] not in grpusers:
        grpusers[finalgrps[g1]['id']] = finalgrps[g1]['users']
    avggrpsize = avggrpsize + gsize
r_gt = []
avggrpsize = avggrpsize / (len(groups) * 1.0)
score = {}
def density():
    # Score each group by the mean Euclidean distance of its members'
    # embeddings to the group centroid (lower = denser), then return the
    # groups sorted by that score, ascending.
    # NOTE(review): relies on module globals `grpmapping`, `mapping`,
    # `emb_size` and `score`; assumes `mapping[u]` is indexable/re-iterable
    # (a list) — under Python 3 a raw map() object here would fail. Confirm
    # with the loading code above.
    for gm in grpmapping:
        g = grpmapping[gm]
        # Centroid of the members that have an embedding.
        avg = [0 for i in range(emb_size)]
        ans = 0
        for u in g.users:
            if u in mapping:
                avg = [avg[i] + mapping[u][i] for i in range(emb_size)]
        # NOTE(review): divides by len(g.users), not by the number of members
        # actually found in `mapping` — members without embeddings dilute the
        # centroid. Presumably intentional; verify.
        avg = [(a * 1.0) / len(g.users) for a in avg]
        for u in g.users:
            if u in mapping:
                ans = ans + distance.euclidean(mapping[u], avg)
        if gm not in score:
            score[gm] = ans / (1.0 * len(g.users))
    # Sort (group_id, score) pairs by score, ascending.
    sorted_score = sorted(score.items(), key=operator.itemgetter(1))
    return sorted_score
def rank():
    """Write group ids to args.rankedgroups, densest group first (one id per line)."""
    ranked = density()
    with open(args.rankedgroups, 'w') as out:
        for group_id, _group_score in ranked:
            out.write(str(group_id) + "\n")
# Entry point: rank the groups, then signal completion.
rank()
# FIX: `print 'end'` is Python-2-only syntax; the call form works on 2 and 3.
print('end')
|
111683
|
import pefile
def get_sections(filename):
    """Return a dict mapping '<name>_<raw_size>' -> raw section bytes for a PE file.

    pefile reports section names as NUL-padded 8-byte strings (bytes on
    Python 3, where concatenating them with str would raise TypeError);
    decode them and strip the padding so keys look like '.text_512'.
    """
    pe = pefile.PE(filename)
    sections = {}
    for section in pe.sections:
        name = section.Name
        if isinstance(name, bytes) and not isinstance(name, str):
            # Python 3: latin-1 maps every byte 1:1, so this never fails.
            name = name.decode('latin-1')
        name = name.rstrip('\x00')
        sections[name + '_' + str(section.SizeOfRawData)] = section.get_data()
    return sections
# WARNING: '03.exe' is a malicious sample kept for demo purposes only — do not execute it.
sections = get_sections('03.exe')
# FIX: print statements were Python-2-only; the call form works on 2 and 3.
print("These are the names of the sections in this file")
print(sections.keys())
|
111689
|
from gui.window import Form
from gui.draw import *
from PIL import Image, ImageQt
import random, io, os
import numpy as np
import torch
import cv2
import torchvision.transforms as transforms
from util import util
import os
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from pconv.model import PConvUNet
class model(QtWidgets.QWidget, Form):
    """Qt window for interactive image inpainting with a PConvUNet model.

    The user loads an image, paints a mask over the damaged region, runs the
    CPU model, and can view and save the inpainted result.
    """
    # Current mask-drawing tool ('line' or 'rectangle') and stroke width;
    # both are mirrored onto the painter widget when changed.
    shape = 'line'
    CurrentWidth = 1
    def __init__(self, opt):
        super(model, self).__init__()
        self.setupUi(self)
        self.opt = opt
        # self.show_image = None
        # Toggles between showing the inpainted result and the original image.
        self.show_result_flag = False
        self.opt.loadSize = [256, 256]
        self.img_root = './results/'
        self.graphicsView_2.setMaximumSize(self.opt.loadSize[0]+30, self.opt.loadSize[1]+30)
        self.device = torch.device("cpu")
        # Define the model
        print("Loading the Model...")
        self.model = PConvUNet(finetune=False, layer_size=7)
        self.model.load_state_dict(torch.load("model.pth", map_location=self.device)['model'])
        self.model.to(self.device)
        self.model.eval()
        # show logo
        self.show_logo()
        # original mask
        self.new_painter()
        # select model
        #self.comboBox.activated.connect(self.load_model)
        # load image
        self.pushButton.clicked.connect(self.load_image)
        # save result
        self.pushButton_4.clicked.connect(self.save_result)
        # draw/erasure the mask
        self.radioButton.toggled.connect(lambda: self.draw_mask('line'))
        self.radioButton_2.toggled.connect(lambda: self.draw_mask('rectangle'))
        self.spinBox.valueChanged.connect(self.change_thickness)
        # erase
        self.pushButton_5.clicked.connect(self.clear_mask)
        # fill image, image process
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.pushButton_3.clicked.connect(self.predict)
        # show the result
        self.pushButton_6.clicked.connect(self.show_result)
    def showImage(self, fname):
        """Show the masked images"""
        #value = self.comboBox.currentIndex()
        img = Image.open(fname).convert('RGB')
        # Keep the resized original around for the before/after toggle.
        self.img_original = img.resize(self.opt.loadSize)
        self.img = self.img_original
        self.show_image = ImageQt.ImageQt(self.img)
        self.new_painter(self.show_image)
    def show_result(self):
        """Show the results and original image"""
        # Toggles on each call: result -> original -> result ...
        if self.show_result_flag:
            self.show_result_flag = False
            out = TF.to_pil_image(self.img_out)
            new_pil_image = out
            new_qt_image = ImageQt.ImageQt(new_pil_image)
        else:
            self.show_result_flag = True
            new_qt_image = ImageQt.ImageQt(self.img_original)
        self.graphicsView_2.scene = QtWidgets.QGraphicsScene()
        item = QtWidgets.QGraphicsPixmapItem(QtGui.QPixmap.fromImage(new_qt_image))
        self.graphicsView_2.scene.addItem(item)
        self.graphicsView_2.setScene(self.graphicsView_2.scene)
    def show_logo(self):
        """Display the application logo in the top-right corner of the window."""
        img = QtWidgets.QLabel(self)
        img.setGeometry(750, 20, 140, 50)
        # read images
        pixmap = QtGui.QPixmap("./gui/icon.png")
        pixmap = pixmap.scaled(100, 100, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        img.setPixmap(pixmap)
        img.show()
    def load_model(self):
        """Load different kind models for different datasets and mask types"""
        # NOTE(review): `value` is undefined because the comboBox line below is
        # commented out — calling this method would raise NameError. It appears
        # unused (its connect() in __init__ is also commented out); confirm
        # before re-enabling.
        #value = self.comboBox.currentIndex()
        if value == 0:
            raise NotImplementedError("Please choose a model")
        else:
            # define the model type and dataset type
            index = value-1
            self.opt.name = self.model_name[index]
            self.model = create_model(self.opt)
    def load_image(self):
        """Load the image"""
        self.fname, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'select the image', os.path.expanduser("~"), 'Image files(*.jpg *.png)')
        self.showImage(self.fname)
    def save_result(self):
        self.opt.results_dir = "./results"
        """Save the results to the disk"""
        util.mkdir(self.opt.results_dir)
        img_name = self.fname.split('/')[-1]
        # save the original image
        original_name = '%s_%s' % ('original', img_name)
        original_path = os.path.join(self.opt.results_dir, original_name)
        img_original = util.tensor2im(self.img_truth)
        util.save_image(img_original, original_path)
        # save the mask
        mask_name = '%s_%d_%s' % ('mask', self.PaintPanel.iteration, img_name)
        mask_path = os.path.join(self.opt.results_dir, mask_name)
        img_mask = util.tensor2im(self.img_c)
        # Binarize so the saved mask is strictly black/white.
        ret, img_mask = cv2.threshold(img_mask, 150, 255, cv2.THRESH_BINARY)
        util.save_image(img_mask, mask_path)
        # save the results
        result_name = '%s_%d_%s' % ('result', self.PaintPanel.iteration, img_name)
        result_path = os.path.join(self.opt.results_dir, result_name)
        img_result = TF.to_pil_image(self.img_out)
        img_result = np.array(img_result)
        util.save_image(img_result, result_path)
    def new_painter(self, image=None):
        """Build a painter to load and process the image"""
        # painter
        self.PaintPanel = painter(self, image)
        self.PaintPanel.close()
        self.stackedWidget.insertWidget(0, self.PaintPanel)
        self.stackedWidget.setCurrentWidget(self.PaintPanel)
    def change_thickness(self, num):
        """Change the width of the painter"""
        self.CurrentWidth = num
        self.PaintPanel.CurrentWidth = num
    def draw_mask(self, maskStype):
        """Draw the mask"""
        self.shape = maskStype
        self.PaintPanel.shape = maskStype
    def clear_mask(self):
        """Clear the mask"""
        # Reloading the image wipes any painted strokes.
        self.showImage(self.fname)
        if self.PaintPanel.Brush:
            self.PaintPanel.Brush = False
        else:
            self.PaintPanel.Brush = True
    def set_input(self):
        """Set the input for the network"""
        # get the test mask from painter
        self.PaintPanel.saveDraw()
        # Round-trip the painted mask through an in-memory PNG to get a PIL image.
        buffer = QtCore.QBuffer()
        buffer.open(QtCore.QBuffer.ReadWrite)
        self.PaintPanel.map.save(buffer, 'PNG')
        pil_im = Image.open(io.BytesIO(buffer.data()))
        # transform the image to the tensor
        img = self.transform(self.img)
        #value = self.comboBox.currentIndex()
        mask = torch.autograd.Variable(self.transform(pil_im)).unsqueeze(0)
        # mask from the random mask
        # mask = Image.open(self.mname)
        # mask = torch.autograd.Variable(self.transform(mask)).unsqueeze(0)
        # Painted (non-white) pixels become 0 = hole; untouched pixels become 1 = keep.
        mask = (mask < 1).float()
        # get I_m and I_c for image with mask and complement regions for training
        mask = mask
        # Scale image from [0, 1] to [-1, 1].
        self.img_truth = img * 2 - 1
        self.img_m = mask * self.img_truth
        self.img_c = mask
        return self.img_m, self.img_c, self.img_truth, mask
    def predict(self):
        """Run the inpainting model on the current image + painted mask and show the result."""
        # Loading Input and Mask
        print("Loading the inputs...")
        img_m, img_c, img_truth, mask = self.set_input()
        img_original = util.tensor2im(img_truth)
        org = Image.fromarray(img_original)
        org = TF.to_tensor(org.convert('RGB'))
        img_mask = util.tensor2im(img_c)
        ret, img_mask = cv2.threshold(img_mask, 150, 255, cv2.THRESH_BINARY)
        mask = Image.fromarray(img_mask)
        mask = mask.convert('L')
        # Hard-binarize the mask, then replicate it across 3 channels.
        mask = mask.point(lambda x: 0 if x<128 else 255, '1')
        mask = TF.to_tensor(mask.convert('RGB'))
        inp = org * mask
        # Model prediction
        print("Model Prediction...")
        with torch.no_grad():
            inp_ = inp.unsqueeze(0).to(self.device)
            mask_ = mask.unsqueeze(0).to(self.device)
            raw_out, _ = self.model(inp_, mask_)
        # Post process
        raw_out = raw_out.to(torch.device('cpu')).squeeze()
        raw_out = raw_out.clamp(0.0, 1.0)
        # Composite: keep known pixels, take model output only inside the hole.
        out = mask * inp + (1 - mask) * raw_out
        self.img_out = out
        self.show_result_flag = True
        self.show_result()
|
111698
|
from logging import Logger
from time import time
from typing import Callable, Optional
import pytorch_lightning as pl
class ProgressBar(pl.callbacks.ProgressBarBase):
    """A custom ProgressBar to log the training progress."""
    def __init__(self, logger: Logger, refresh_rate: int = 50) -> None:
        """Create a ProgressBar.
        The ProgressBar provided by Lightning is based on tqdm. Its output always roll over the
        previous information, and the printed logs are too brief. This custom one serializes all the
        metrics provided by user, and the outputs are much more detailed. The logs are delivered to
        a Logging.logger (rather than printed to CLI directly), which can easily captured into a log
        file.
        Args:
            logger: A logging.Logger to record the training log.
            refresh_rate: Determines at which rate (in number of batches) the progress bars get
                updated. Set it to ``0`` to disable the display.
        """
        super().__init__()
        self._logger = logger
        self._refresh_rate = refresh_rate
        self._enabled = True
        # a time flag to indicate the beginning of an epoch
        self._time = 0
    @property
    def refresh_rate(self) -> int:
        # Log every `refresh_rate`-th batch.
        return self._refresh_rate
    @property
    def is_enabled(self) -> bool:
        return self._enabled
    @property
    def is_disabled(self) -> bool:
        return not self.is_enabled
    def disable(self) -> None:
        # No need to disable the ProgressBar on processes with LOCAL_RANK != 1, because the
        # StreamHandler of logging is disabled on these processes.
        self._enabled = True
    def enable(self) -> None:
        self._enabled = True
    @staticmethod
    def _serialize_metrics(progressbar_log_dict: dict, filter_fn: Optional[Callable[[str], bool]] = None) -> str:
        """Render the metric dict as 'name: value ' pairs; `filter_fn` selects keys to keep.

        Accuracy-like metrics (name contains 'acc') are formatted as percentages.
        """
        if filter_fn:
            progressbar_log_dict = {k: v for k, v in progressbar_log_dict.items() if filter_fn(k)}
        msg = ''
        for metric, value in progressbar_log_dict.items():
            if type(value) is str:
                msg += f'{metric}: {value} '
            elif 'acc' in metric:
                msg += f'{metric}: {value:.3%} '
            else:
                msg += f'{metric}: {value:f} '
        return msg
    def on_train_start(self, trainer, pl_module):
        super().on_train_start(trainer, pl_module)
        self._logger.info(f'Trainer fit begins ... '
                          f'Current epoch: {trainer.current_epoch}, batch: {self.train_batch_idx}')
    def on_epoch_start(self, trainer, pl_module):
        super().on_epoch_start(trainer, pl_module)
        total_train_batches = self.total_train_batches
        total_val_batches = self.total_val_batches
        total_test_batches = self.total_test_batches
        # Estimate the epoch's full batch count; skipped for infinite loaders
        # and fast_dev_run, where the totals are not meaningful.
        if total_train_batches != float('inf') and not trainer.fast_dev_run:
            # val can be checked multiple times per epoch
            val_check_batch = max(1, int(total_train_batches * trainer.val_check_interval))
            val_checks_per_epoch = total_train_batches // val_check_batch
            total_val_batches = total_val_batches * val_checks_per_epoch
        total_batches = total_train_batches + total_val_batches + total_test_batches
        self._logger.info(f'\n    '
                          f'>>> >>> >>> >>> Epoch {trainer.current_epoch}, including {total_batches} batches '
                          f'(train: {total_train_batches}, val: {total_val_batches}, test: {total_test_batches}) '
                          f'<<< <<< <<< <<<')
        # Mark the epoch start; per-batch times below are averaged from here.
        self._time = time()
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
        if self.is_enabled and self.train_batch_idx % self.refresh_rate == 0:
            # Mean seconds per batch since the epoch started.
            batch_time = (time() - self._time) / self.train_batch_idx
            msg = f'Train (Epoch {trainer.current_epoch}, ' \
                  f'Batch {self.train_batch_idx} / {self.total_train_batches}, {batch_time:.2f}s/it) => '
            msg += self._serialize_metrics(trainer.progress_bar_dict,
                                           filter_fn=lambda x: not x.startswith('val_') and not x.startswith('test_'))
            self._logger.info(msg)
    def on_train_end(self, trainer, pl_module):
        super().on_train_end(trainer, pl_module)
        self._logger.info(f'Trainer fit ends.')
    def on_validation_start(self, trainer, pl_module):
        super().on_validation_start(trainer, pl_module)
        # Stay quiet during the pre-fit sanity check; it is logged separately.
        if not trainer.sanity_checking:
            self._logger.info(f'\n    '
                              f'>>> Validate step begins ... Epoch {trainer.current_epoch}, '
                              f'including {self.total_val_batches} batches')
            self._time = time()
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        super().on_validation_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
        if self.is_enabled and self.val_batch_idx % self.refresh_rate == 0:
            batch_time = (time() - self._time) / self.val_batch_idx
            msg = f'Validate (Epoch {trainer.current_epoch}, ' \
                  f'Batch {self.val_batch_idx} / {self.total_val_batches}, {batch_time:.2f}s/it) => '
            msg += self._serialize_metrics(trainer.progress_bar_dict,
                                           filter_fn=lambda x: x.startswith('val_') and x.endswith('_step'))
            self._logger.info(msg)
    def on_validation_end(self, trainer, pl_module):
        super().on_validation_end(trainer, pl_module)
        if not trainer.sanity_checking:
            msg = '>>> Validate ends => '
            msg += self._serialize_metrics(trainer.progress_bar_dict,
                                           filter_fn=lambda x: x.startswith('val_') and x.endswith('_epoch'))
            self._logger.info(msg)
    def on_test_start(self, trainer, pl_module):
        super().on_test_start(trainer, pl_module)
        self._logger.info(f'\n    >>> >>> >>> >>> Test, '
                          f'including {self.total_test_batches} batches '
                          f'<<< <<< <<< <<<')
        self._time = time()
    def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
        if self.is_enabled and self.test_batch_idx % self.refresh_rate == 0:
            batch_time = (time() - self._time) / self.test_batch_idx
            msg = f'Test (Batch {self.test_batch_idx} / {self.total_test_batches}, {batch_time:.2f}s/it) => '
            msg += self._serialize_metrics(trainer.progress_bar_dict,
                                           filter_fn=lambda x: x.startswith('test_') and x.endswith('_step'))
            self._logger.info(msg)
    def on_test_end(self, trainer, pl_module):
        super().on_test_end(trainer, pl_module)
        msg = '>>> Test ends => '
        msg += self._serialize_metrics(trainer.progress_bar_dict,
                                       filter_fn=lambda x: x.startswith('test_') and x.endswith('_epoch'))
        self._logger.info(msg + '\n')
    def on_sanity_check_start(self, trainer, pl_module):
        super().on_sanity_check_start(trainer, pl_module)
        self._logger.info('Validate set sanity check begins.')
    def on_sanity_check_end(self, trainer, pl_module):
        super().on_sanity_check_end(trainer, pl_module)
        self._logger.info('Validate set sanity check ends.')
|
111735
|
from __future__ import print_function, absolute_import
from collections import OrderedDict
from ._result_base import H5NastranResultBase
from h5Nastran.post_process.result_readers.punch import PunchReader
import numpy as np
import tables
from six import iteritems
class H5NastranResultPunch(H5NastranResultBase):
    """Loads Nastran punch (.pch) result files into the HDF5 store."""

    def __init__(self, *args, **kwargs):
        super(H5NastranResultPunch, self).__init__(*args, **kwargs)

    def load_punch(self, filename):
        """Read the punch file *filename* and write its result tables to HDF5.

        Requires the BDF to be loaded first; cannot be combined with an
        already-loaded F06.
        """
        if self._bdf is None:
            raise Exception('BDF must be loaded first!')
        if self._f06 is not None:
            raise Exception('F06 has already been loaded.  Cannot load punch file after f06.')
        self._punch = filename
        self._punch_subcase_ids.clear()
        reader = PunchReader(filename)
        reader.register_callback(self._load_punch_table)
        reader.read()
        self.h5f.flush()
        for table in self._tables:
            table.finalize()
        self._tables.clear()
        self._write_unsupported_tables()
        self._punch_finalize()

    def _punch_finalize(self):
        """Write the SUBCASES index mapping (subcase id, load factor) -> domain id."""
        dtype = np.dtype([('SUBCASE_ID', '<i8'), ('LOAD_FACTOR', '<f8'), ('DOMAIN_ID', '<i8')])
        # descr_from_dtype returns (description, byteorder); only the description
        # is needed.  Renamed from `format` to avoid shadowing the builtin.
        table_format = tables.descr_from_dtype(dtype)[0]
        self.h5f.create_table(self.table_paths.subcase_path, self.table_paths.subcase_table, table_format,
                              'SUBCASES', expectedrows=len(self._punch_subcase_ids), createparents=True)
        table = self.h5f.get_node(self.table_paths.subcase)
        data = np.zeros(len(self._punch_subcase_ids), dtype=dtype)
        subcase_id = data['SUBCASE_ID']
        load_factor = data['LOAD_FACTOR']
        domain_id = data['DOMAIN_ID']
        # Domain ids were assigned 1-based in _load_punch_table; index rows 0-based.
        for key, domain_id_ in iteritems(self._punch_subcase_ids):
            index = domain_id_ - 1
            subcase_id_, load_factor_ = key
            subcase_id[index] = subcase_id_
            load_factor[index] = load_factor_
            domain_id[index] = domain_id_
        table.append(data)
        self.h5f.flush()

    def _load_punch_table(self, table_data):
        """PunchReader callback: route one parsed table to its result writer."""
        key = table_data.header.subcase_id_num, table_data.header.load_factor
        # Assign each new (subcase, load factor) pair the next 1-based domain id.
        if key not in self._punch_subcase_ids:
            self._punch_subcase_ids[key] = len(self._punch_subcase_ids) + 1
        results_type = table_data.header.results_type_basic
        table = self._result_tables.get(results_type, None)
        if table is None:
            return self._unsupported_table(table_data)
        table.write_punch_data(table_data)
        self._tables.add(table)
|
111743
|
import unittest
import sys
# automake build dir
sys.path.insert(0, '..')
sys.path.insert(0, '../.libs')
# cmake build dir
sys.path.insert(0, '../../../build/bindings/python')
from pywsman import *
class TestAddSelector(unittest.TestCase):
    """Smoke test: ClientOptions accepts a selector key/value pair."""

    def test_add_selector(self):
        opts = ClientOptions()
        assert opts is not None
        opts.add_selector("foo", "bar")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
111756
|
from utils import CSVScraper
class PeelPersonScraper(CSVScraper):
    """CSV-backed scraper for the Region of Peel open-data export (source below)."""
    # http://opendata.peelregion.ca/data-categories/regional-geography/ward-boundaries-(2018-2022).aspx
    csv_url = 'http://opendata.peelregion.ca/media/43505/wards1822_csv.csv'
|
111788
|
from django.apps import AppConfig
class ResetPasswordConfig(AppConfig):
    """Django application configuration for the `reset_password` app."""
    name = "reset_password"
|
111802
|
def find(n):
    """Print the n-th term of the sequence of strings over digits {1, 2}.

    Terms are generated generation by generation: each existing term spawns a
    '1'-prefixed and a '2'-prefixed successor, yielding 1, 2, 11, 12, 21, 22,
    111, ... (bijective base-2 numerals).
    """
    seq = [''] * (n + 1)
    size = 1   # index of the first slot of the current generation
    half = 1   # number of terms in the previous generation
    while size <= n:
        # '1'-prefixed children of the previous generation.
        for offset in range(half):
            if size + offset <= n:
                seq[size + offset] = '1' + seq[size - half + offset]
        # '2'-prefixed children of the previous generation.
        for offset in range(half):
            if size + half + offset <= n:
                seq[size + half + offset] = '2' + seq[size - half + offset]
        half <<= 1
        size += half
    print(seq[n])
# Read the number of test cases, then one index n per case.
for _ in range(int(input())):
    find(int(input()))
|
111804
|
import datetime
import hashlib
import logging
import os
from logging.handlers import SysLogHandler
from utils.settings_handler import settings
LOG_FORMAT = "%(asctime)s %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


class ColoredFormatter(logging.Formatter):
    """
    Apply only to the console handler.

    Wraps the whole format string in an ANSI color based on the message
    prefix: green for "id=..." records, cyan for "msg=..." records.
    """

    green = "\u001b[32m"
    cyan = "\u001b[36m"
    reset = "\u001b[0m"

    def format(self, record):
        format_style = self._fmt
        if record.getMessage().startswith("id="):
            format_style = f"{ColoredFormatter.green}{format_style}{ColoredFormatter.reset}"
        if record.getMessage().startswith("msg="):
            format_style = f"{ColoredFormatter.cyan}{format_style}{ColoredFormatter.reset}"
        # Bug fix: forward datefmt to the delegate formatter. Previously the
        # datefmt passed to ColoredFormatter(...) was silently dropped, so
        # %(asctime)s ignored DATE_FORMAT.
        formatter = logging.Formatter(format_style, datefmt=self.datefmt)
        return formatter.format(record)
def set_up_logging(save_to_file=True, print_to_console=True, logger_name="bot"):
    """Configure and return the logger used for tenhou communication and AI output.

    Optionally attaches a colored console handler, a per-run file handler,
    and a Papertrail syslog handler (when configured in settings).
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)

    if print_to_console:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(ColoredFormatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(console)

    # Distinguish bots run at the same time: use the configured prefix, or a
    # short hash of the user id when none is set.
    log_prefix = settings.LOG_PREFIX or hashlib.sha1(settings.USER_ID.encode("utf-8")).hexdigest()[:5]

    if save_to_file:
        logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "logs")
        if not os.path.exists(logs_directory):
            os.mkdir(logs_directory)
        # we need it to distinguish different bots logs (if they were run in the same time)
        file_name = "{}_{}.log".format(log_prefix, datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S"))
        file_handler = logging.FileHandler(os.path.join(logs_directory, file_name), encoding="utf-8")
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(file_handler)

    if settings.PAPERTRAIL_HOST_AND_PORT:
        game_id = f"BOT_{log_prefix}"
        papertrail = SysLogHandler(address=settings.PAPERTRAIL_HOST_AND_PORT)
        papertrail.setFormatter(ColoredFormatter(f"%(asctime)s {game_id}: %(message)s", datefmt=DATE_FORMAT))
        logger.addHandler(papertrail)

    return logger
|
111860
|
import torch
from torch import nn, Tensor
class MaxoutLinear(nn.Module):
    """
    A linear maxout layer:
    output_i = max_{j = 1,...,k} (w_1 input + b_1, w_2 input + b_2,..., w_k input + b_k)
    References:
    <NAME> et al. "Maxout Networks." https://arxiv.org/pdf/1302.4389.pdf
    """

    def __init__(self, in_features: int, out_features: int, nb_features: int, bias: bool=True):
        super(MaxoutLinear, self).__init__()
        # One affine map per maxout piece; forward takes their elementwise max.
        pieces = [
            nn.Linear(in_features=in_features, out_features=out_features, bias=bias)
            for _ in range(nb_features)
        ]
        self._features = nn.ModuleList(pieces)

    def forward(self, input: Tensor) -> Tensor:
        """
        :param input: (batch_size, in_features)
        :return: (batch_size, out_features)
        """
        stacked = torch.stack([piece(input) for piece in self._features], dim=-1)
        return stacked.max(dim=-1)[0]
|
111862
|
import io, re
from setuptools import setup

# Long description and version are read from the package itself so they have a
# single source of truth.
with io.open("README.rst", "rt", encoding="utf8") as f:
    readme = f.read()
with io.open("gridwxcomp/__init__.py", "rt", encoding="utf8") as f:
    version = re.search(r"__version__ = \'(.*?)\'", f.read()).group(1)

requires = [
    'bokeh>=1.0.4',
    'click>=7.0',
    'fiona>=1.7.13',
    'gdal',
    'netCDF4',
    'numpy>=1.15',
    'pandas>=0.24',
    'rasterstats>=0.13',
    'refet>=0.3.7',
    'scipy>=1.1.0',
    'shapely==1.6.4',
    'xlrd==1.2.0'
]

tests_require = ['pytest']

classifiers = [
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python :: 3.7',
    'Environment :: Console',
    'Development Status :: 4 - Beta',
    'Topic :: Scientific/Engineering',
    'Intended Audience :: Science/Research'
]

setup(
    name='gridwxcomp',
    version=version,
    description='Compare meterological station data to gridded data',
    long_description=readme,
    author='<NAME> and <NAME>',
    author_email='<EMAIL>',
    license='Apache',
    url='https://github.com/WSWUP/gridwxcomp',
    download_url='https://github.com/WSWUP/gridwxcomp/archive/v0.1.0.tar.gz',
    platforms=['Windows','Linux','Mac OS X'],
    classifiers=classifiers,
    packages=['gridwxcomp', 'gridwxcomp.scripts'],
    install_requires=requires,
    tests_require=tests_require,
    # Bug fix: the original dict repeated the 'gridwxcomp' key three times, so
    # only the last glob ('gridmet_cell_data.csv') survived. All data globs now
    # live under a single key.
    package_data={'gridwxcomp': ['example_data/*',
                                 'env/*.yml',
                                 'gridmet_cell_data.csv']},
    include_package_data=True,
    entry_points='''
        [console_scripts]
        gridwxcomp=gridwxcomp.scripts.gridwxcomp:gridwxcomp
    '''
)
|
111873
|
from ccgetusers.generator import GenerateToken
from ccgetusers.customerid import CustomerId
from ccgetusers.users import Users
import sys
def main():
    """CLI entry point: authenticate against CloudCheckr CMx and return its user list.

    Expects three positional arguments: auth endpoint, client id, access key.
    Exits with a usage message when they are missing.
    """
    if len(sys.argv) < 4:
        print('Must provide CloudCheckr CMx auth endpoint, client id and access key')
        print('ccgetusers <cloudcheckr endpoint> <client id> <access key>')
        sys.exit(-1)

    cc_endpoint, client_id, client_secret = sys.argv[1:4]
    # Token -> customer id -> user list, each step feeding the next request.
    token = GenerateToken(cc_endpoint=cc_endpoint, client_id=client_id, client_secret=client_secret).token
    customer_id = CustomerId(cc_endpoint=cc_endpoint, token=token).customer_id
    users = Users(cc_endpoint=cc_endpoint, token=token, customer_id=customer_id)
    return users.users
|
111883
|
from cryptacular.bcrypt import BCRYPTPasswordManager

# Demo: hash a password with bcrypt and verify the round trip.
manager = BCRYPTPasswordManager()
hashed = manager.encode('password')
assert manager.check(hashed, 'password')
|
111886
|
import unittest
import random
import math
from pyneval.model.euclidean_point import EuclideanPoint,Line
def rand(k):
    """Return a uniform random float between 0 and k."""
    return random.uniform(0, k)
class TestPointMethods(unittest.TestCase):
    """Sanity checks for EuclideanPoint distance and foot-point projection."""

    def test_point_to_line(self):
        # Prints distances only; this test has no assertions.
        p = EuclideanPoint([49.4362, 111.12, 322.687])
        l = Line(coords=[[47.9082, 110.024, 323.994],[56.0636, 112.369, 318.703]])
        l2 = Line(coords=[[49.4362, 111.12, 322.687],[56.0636, 112.369, 318.703]])
        print(p.distance(l))
        print(p.distance(l2))

    def test_foot_point1(self):
        # Soak run of get_foot_point on a fixed point/line configuration.
        for i in range(5000):
            p = EuclideanPoint([1,2,3])
            l = Line(coords=[[2,3,4],[5,6,7]])
            p.get_foot_point(l)

    def test_foot_point2(self):
        # NOTE(review): identical to test_foot_point1 -- duplicated on purpose?
        for i in range(5000):
            p = EuclideanPoint([1,2,3])
            l = Line(coords=[[2,3,4],[5,6,7]])
            p.get_foot_point(l)

    def test_foot_point_right(self):
        # NOTE(review): ans1 and ans2 come from two identical calls, so this
        # only verifies that get_foot_point is deterministic, not correct.
        for i in range(5000):
            p = EuclideanPoint([rand(10),rand(10),rand(10)])
            line = Line(coords=[[rand(10),rand(10),rand(10)],[rand(10),rand(10),rand(10)]])
            ans1 = p.get_foot_point(line)
            ans2 = p.get_foot_point(line)
            for i in range(0,3):
                self.assertTrue(math.fabs(ans1._pos[i] - ans2._pos[i]) < 0.0000001)


if __name__ == "__main__":
    unittest.main()
|
111896
|
import sys
import os

# Project root: four directories up from this file.
root = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__)
            )
        )
    )
)

sys.argv[0] = os.path.realpath(sys.argv[0])

# Record the received argv for inspection by an outer test harness.
# Fix: use a context manager so the handle is closed even if a write fails
# (the original open/write/close sequence leaked on error).
with open(os.path.join(root, "argv.txt"), "w") as fp:
    fp.write(repr(sys.argv))
    fp.write('\n')
|
111911
|
from functools import cmp_to_key
import sublime
import sublime_plugin
from sublime import Region
class MoveTextHorizCommand(sublime_plugin.TextCommand):
    """Base command: move the text in each non-empty selection one character left or right."""

    def move_text_horiz(self, edit, direction, selections=None):
        # direction is -1 (left) or +1 (right), in characters.
        selections = selections or list(self.view.sel())
        # NOTE(review): `direction > 1` is never true for the +1/-1 callers
        # below, so selections are never reversed -- was `direction > 0` meant?
        if direction > 1:
            selections.reverse()
        for region in selections:
            if region.empty():
                continue
            orig_region = region
            # Where the selection will sit after the move.
            sel_region = Region(region.begin() + direction, region.end() + direction)
            # Skip moves that would run past either end of the buffer.
            if sel_region.a < 0 or sel_region.b > self.view.size():
                continue
            if direction < 0:
                # Moving left: rewrite [begin-1, end) as selection text plus
                # the character that used to precede it.
                dest_region = Region(region.begin() + direction, region.end())
                move_text = self.view.substr(region) + self.view.substr(Region(region.begin() + direction, region.begin()))
            else:
                # Moving right: rewrite [begin, end+1) as the following
                # character plus the selection text.
                dest_region = Region(region.begin(), region.end() + direction)
                move_text = self.view.substr(Region(region.end(), region.end() + direction)) + self.view.substr(region)
            # Remove selection from RegionSet
            self.view.sel().subtract(orig_region)
            # Replace the selection with transformed text
            self.view.replace(edit, dest_region, move_text)
            # Add the new selection
            self.view.sel().add(sel_region)
class MoveTextLeftCommand(MoveTextHorizCommand):
    """Move the selected text one character to the left."""

    def run(self, edit):
        self.move_text_horiz(edit, -1)


class MoveTextRightCommand(MoveTextHorizCommand):
    """Move the selected text one character to the right."""

    def run(self, edit):
        self.move_text_horiz(edit, 1)
class MoveTextVertCommand(sublime_plugin.TextCommand):
    """Base command: move the line (or selected text) containing each region up/down one row."""

    def __init__(self, view):
        super(MoveTextVertCommand, self).__init__(view)
        # Remembered caret column for chained vertical moves (see below).
        view.move_text_vert_column = None

    def move_text_vert(self, region, edit, direction):
        # direction: -1 moves up one row, +1 moves down one row.
        orig_region = region
        select_begin = None
        if region.empty():
            # Caret only: move the whole line and remember the caret column so
            # the caret can be restored inside the moved line.
            row, col = self.view.rowcol(region.begin())
            select_begin = col
            region = self.view.full_line(region.begin())
        move_text = self.view.substr(region)
        # calculate number of characters to the left
        row, col = self.view.rowcol(region.begin())
        # if the last command was a vertical move, use that column
        # the column is stored on the view - each command has its own instance,
        # and we don't want two buffers to modify the same object (e.g. MoveTextVertCommand)
        cmd, _, _ = self.view.command_history(0, True)
        if cmd != 'move_text_up' and cmd != 'move_text_down':
            self.view.move_text_vert_column = col
        elif self.view.move_text_vert_column:
            col = self.view.move_text_vert_column
        # Clamp the destination row to the buffer bounds.
        dest_row = row + direction
        max_row = self.view.rowcol(self.view.size())[0]
        if dest_row < 0:
            dest_row = 0
        elif dest_row > max_row:
            dest_row = max_row
        self.view.sel().subtract(orig_region)
        self.view.replace(edit, region, '')
        # starting at the destination row at col 0, count off "col" characters
        # it's possible that there aren't enough characters in the destination row,
        # so stop if we end up on the wrong row, or past the buffer
        dest_point = self.view.text_point(dest_row, 0)
        if dest_point is None:
            dest_point = self.view.size()
        else:
            dest_line = self.view.line(dest_point)
            if dest_point + col > dest_line.b:
                dest_point = dest_line.b
            else:
                dest_point = dest_point + col
        self.view.insert(edit, dest_point, move_text)
        # Return the region to select afterwards: the moved text, or just the
        # caret position when the input region was empty.
        if select_begin is None:
            sel_region = Region(dest_point, dest_point + len(move_text))
        else:
            sel_region = Region(dest_point + select_begin)
        return sel_region
class MoveTextUpCommand(MoveTextVertCommand):
    """Move each selection's text up one line."""

    def run(self, edit):
        # NOTE(review): near-duplicate of MoveTextDownCommand.run; only the
        # direction and iteration order differ (downward moves run last-to-first
        # so earlier edits do not shift pending regions).
        regions = list(self.view.sel())
        new_regions = []
        for region in regions:
            new_regions.append(self.move_text_vert(region, edit, -1))
        self.view.sel().add_all(new_regions)
        try:
            region = new_regions[0]
            self.view.show(region.begin())
        except IndexError:
            pass


class MoveTextDownCommand(MoveTextVertCommand):
    """Move each selection's text down one line."""

    def run(self, edit):
        new_regions = []
        for region in reversed(self.view.sel()):
            new_regions.append(self.move_text_vert(region, edit, 1))
        self.view.sel().add_all(new_regions)
        try:
            region = new_regions[0]
            self.view.show(region.begin())
        except IndexError:
            pass
|
111914
|
from bitmovin_api_sdk.encoding.outputs.generic_s3.generic_s3_api import GenericS3Api
from bitmovin_api_sdk.encoding.outputs.generic_s3.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.outputs.generic_s3.generic_s3_output_list_query_params import GenericS3OutputListQueryParams
|
111930
|
from a10sdk.common.A10BaseClass import A10BaseClass


class AdminLockout(A10BaseClass):
    """Class Description::
    Admin user lockout configuration.
    Class admin-lockout supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param duration: {"description": "Admin user lockout duration, in minutes, by default 10 (Admin user lockout duration in minutes, 0 means forever)", "format": "number", "default": 10, "optional": true, "maximum": 1440, "minimum": 0, "type": "number"}
    :param threshold: {"description": "Admin user lockout threshold, by default 5", "format": "number", "default": 5, "optional": true, "maximum": 10, "minimum": 1, "type": "number"}
    :param enable: {"default": 0, "optional": true, "type": "number", "description": "Enable admin user lockout", "format": "flag"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param reset_time: {"description": "After how long to reset the lockout counter, in minutes, by default 10 (Time in minutes after which to reset the lockout counter)", "format": "number", "default": 10, "optional": true, "maximum": 1440, "minimum": 1, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/admin-lockout`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # No required fields for this endpoint.
        self.required = []
        self.b_key = "admin-lockout"
        self.a10_url = "/axapi/v3/admin-lockout"
        self.DeviceProxy = ""
        # Fields default to "" and are overwritten by any keyword arguments
        # supplied by the caller (see loop below).
        self.duration = ""
        self.threshold = ""
        self.enable = ""
        self.uuid = ""
        self.reset_time = ""

        for keys, value in kwargs.items():
            setattr(self, keys, value)
|
111987
|
import numpy as np
from numba import njit, prange
# consav
from consav import linear_interp # for linear interpolation
from consav import golden_section_search # for optimization in 1D
# local modules
import utility
# a. define objective function
@njit
def obj_bellman(c,m,interp_w,par):
    """ evaluate bellman equation (negated, for use with a minimizer) """
    # a. end-of-period assets
    a = m-c
    # b. continuation value: interpolate interp_w on the asset grid at a
    w = linear_interp.interp_1d(par.grid_a,interp_w,a)
    # c. total value
    value_of_choice = utility.func(c,par) + w
    return -value_of_choice # we are minimizing
# b. solve bellman equation
@njit(parallel=True)
def solve_bellman(t,sol,par):
    """solve bellman equation using nvfi, filling sol.c[t] and sol.v[t] in place"""
    # unpack (this helps numba optimize)
    v = sol.v[t]
    c = sol.c[t]
    # loop over outer states
    for ip in prange(par.Np): # in parallel
        # loop over cash-on-hand
        for im in range(par.Nm):
            # a. cash-on-hand
            m = par.grid_m[im]
            # b. optimal choice
            # lower bound is effectively 1e-8 -- presumably to keep c > 0; the
            # upper bound consumes all cash-on-hand
            c_low = np.fmin(m/2,1e-8)
            c_high = m
            c[ip,im] = golden_section_search.optimizer(obj_bellman,c_low,c_high,args=(m,sol.w[ip],par),tol=par.tol)
            # note: the above finds the minimum of obj_bellman in range [c_low,c_high] with a tolerance of par.tol
            # and arguments (except for c) as specified
            # c. optimal value (undo the sign flip used for minimization)
            v[ip,im] = -obj_bellman(c[ip,im],m,sol.w[ip],par)
|
112001
|
import os
from retriever.engines import choose_engine
from retriever.lib.defaults import SCRIPT_WRITE_PATH
from retriever.lib.rdatasets import create_rdataset, update_rdataset_catalog
from retriever.lib.repository import check_for_updates
from retriever.lib.scripts import SCRIPT_LIST, name_matches
from retriever.lib.socrata import find_socrata_dataset_by_id, create_socrata_dataset
def download(dataset, path='./', quiet=False, sub_dir='', debug=False, use_cache=True):
    """Download scripts for retriever."""
    args = {
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'sub_dir': sub_dir,
        'quiet': quiet
    }
    engine = choose_engine(args)
    engine.use_cache = use_cache

    # Refresh the script catalog when it is missing or empty.
    script_list = SCRIPT_LIST()
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()
    scripts = name_matches(script_list, args['dataset'])
    if scripts:
        for script in scripts:
            print("=> Downloading", script.name)
            try:
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
            except Exception as e:
                # Best effort: report and continue unless debugging.
                print(e)
                if debug:
                    raise
    elif args['dataset'].startswith('socrata') and (scripts is None):
        # NOTE(review): this branch (and the rdataset one) requires scripts to
        # be None; if name_matches returns an empty list instead, these
        # branches are unreachable -- confirm name_matches' contract.
        socrata_id = args['dataset'].split('-', 1)[1]
        resource = find_socrata_dataset_by_id(socrata_id)
        if "error" in resource.keys():
            if resource["datatype"][0] == "map":
                print("{} because map type datasets are not supported".format(
                    resource["error"]))
            else:
                print("{} because it is of type {} and not tabular".format(
                    resource["error"], resource["datatype"][1]))
        elif len(resource.keys()) == 0:
            return
        else:
            print("=> Downloading", args['dataset'])
            name = f"socrata-{socrata_id}"
            create_socrata_dataset(engine, name, resource)
    elif (scripts is None) and (args['dataset'].startswith('rdataset')):
        # Dataset name format: "rdataset-<package>-<dataset>"
        print("=> Downloading", args['dataset'])
        rdataset = args['dataset'].split('-')
        update_rdataset_catalog()
        package, dataset_name = rdataset[1], rdataset[2]
        create_rdataset(engine, package, dataset_name)
    else:
        message = "Run retriever.datasets() to see the list of currently " \
                  "available datasets."
        raise ValueError(message)
    return engine
|
112009
|
import os

# Pin GPU selection before TensorFlow initializes.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"

import numpy as np
from model import IMSEG
import tensorflow as tf
import h5py

# Command-line configuration.
# NOTE(review): several bracketed defaults in the help strings disagree with
# the actual defaults (e.g. learning_rate 0.0001 vs "[0.0002]", real_size 32
# vs "[64]") -- confirm which is intended.
flags = tf.app.flags
flags.DEFINE_integer("epoch", 0, "Epoch to train [0]")
flags.DEFINE_integer("iteration", 0, "Iteration to train. Either epoch or iteration need to be zero [0]")
flags.DEFINE_integer("pretrain_iters", 2000, "Iteration for supervised training [1000]")
flags.DEFINE_integer("retrain_iters", 4, "Set to positive number N for doing one supervised PASS (training all shapes in supervision_list) every N iterations. Set to 0 for fully supervised training. Set to negative number -N for doing one supervised PASS every N epochs [4]")
flags.DEFINE_float("learning_rate", 0.0001, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_string("dataset", "03001627_vox", "The name of dataset")
flags.DEFINE_integer("real_size", 32, "output point-value voxel grid size in training [64]")
flags.DEFINE_integer("points_per_shape", 8192, "num of points per shape [32768]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("data_dir", "./data", "Root directory of dataset [data]")
flags.DEFINE_string("supervision_list", "obj_train_list.txt", "A list of objects for supervised training")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("recon", False, "(in testing mode) True for outputing reconstructed shape with colored segmentation [False]")
flags.DEFINE_boolean("pointcloud", False, "(in testing mode) True for outputing point cloud with colored segmentation [False]")
flags.DEFINE_boolean("mesh", False, "(in testing mode) True for outputing mesh with colored segmentation [False]")
flags.DEFINE_boolean("iou", False, "(in testing mode) True for outputing IOU for test shapes [False]")
flags.DEFINE_boolean("enhance_vertical", False, "True for applying data enhancement by moving model in vertical direction [False]")
flags.DEFINE_boolean("supervised", False, "True for supervised training, False for unsupervised [False]")
flags.DEFINE_boolean("L1reg", False, "True for adding L1 regularization at layer 3 [False]")
FLAGS = flags.FLAGS

# ShapeNet category id -> human-readable name.
ID2name = {
    '02691156': 'airplane',
    '02773838': 'bag',
    '02954340': 'cap',
    '02958343': 'car',
    '03001627': 'chair',
    '03261776': 'earphone',
    '03467517': 'guitar',
    '03624134': 'knife',
    '03636649': 'lamp', # lamp - missing one part
    '03642806': 'laptop',
    '03790512': 'motorbike',
    '03797390': 'mug',
    '03948459': 'pistol',
    '04099429': 'rocket',
    '04225987': 'skateboard',
    '04379243': 'table' # table - missing one part
}

# ShapeNet category id -> number of segmentation parts used for that category.
ID2Partnum = {'02691156': 4,
              '02773838': 2,
              '02954340': 2,
              '02958343': 4,
              '03001627': 4,
              '03261776': 3,
              '03467517': 3,
              '03624134': 2,
              '03636649': 4,
              '03642806': 2,
              '03790512': 6,
              '03797390': 2,
              '03948459': 3,
              '04099429': 3,
              '04225987': 3,
              '04379243': 3 }
def main(_):
    """Build the IMSEG model and run training or the selected test outputs."""
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    #run_config = tf.ConfigProto(gpu_options=gpu_options)
    # Let TF grow GPU memory on demand instead of reserving it all up front.
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth=True

    with tf.Session(config=run_config) as sess:
        imseg = IMSEG(
            sess,
            FLAGS.real_size,
            FLAGS.points_per_shape,
            FLAGS.supervised,
            FLAGS.L1reg,
            supervision_list = FLAGS.supervision_list,
            is_training = FLAGS.train,
            dataset_name=FLAGS.dataset,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            data_dir=FLAGS.data_dir)

        if FLAGS.train:
            imseg.train(FLAGS)
        else:
            # The test-mode flags are independent; any combination may run.
            if FLAGS.recon:
                imseg.test_dae(FLAGS) #output reconstructed shape with colored segmentation
            if FLAGS.pointcloud:
                imseg.test_pointcloud(FLAGS) #output point cloud with colored segmentation
            if FLAGS.mesh:
                imseg.test_obj(FLAGS) #output mesh with colored segmentation
            if FLAGS.iou:
                imseg.test_pcSeg(FLAGS) #output IOU for test shapes


if __name__ == '__main__':
    tf.app.run()
|
112015
|
import json
import decimal
from datetime import datetime
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders decimal.Decimal as int (whole values) or float."""

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # Bug fix: `o % 1 > 0` misclassified negative fractional values
            # (Decimal("-1.5") % 1 == Decimal("-0.5"), which is not > 0), so
            # they were truncated to int. Compare the remainder against zero.
            if o % 1 != 0:
                return float(o)
            return int(o)
        # Anything else falls through to the base class (raises TypeError).
        return super(DecimalEncoder, self).default(o)
# Rename keys from old to new
def map_keys(items):
    """
    Args:
        items: List of dict items to be mapped
    return:
        items: List of dict items whose keys have been renamed according to map_table below
    """
    map_table = {
        "pckg": "package",
        "lyrVrsn": "layerVersion",
        "pckgVrsn": "packageVersion",
        "rgn": "region",
        "dplySts": "deployStatus",
        "rqrmntsTxt": "dependencies",
        "arn": "arn",
        "exDt": "expiryDate",
        "rqrmntsHsh": "requirementsHash",
    }
    new_items = []
    for item in items:
        new_item = {}
        for k in item.keys():
            # Bug fix: these branches are now a single if/elif/else chain. The
            # original used two separate `if` statements, so "rqrmntsTxt" fell
            # through into the final `else` and its split list was immediately
            # overwritten by the raw newline-separated string.
            if k == "rqrmntsTxt":
                # Dependencies are stored newline-separated; expose them as a list.
                new_item[map_table[k]] = item[k].split("\n")
            elif k == "exDt":
                # Expiry is a unix timestamp; expose it as an ISO-8601 string.
                new_item[map_table[k]] = datetime.fromtimestamp(item[k]).isoformat()
            else:
                new_item[map_table[k]] = item[k]
        new_items.append(new_item)
    return new_items
|
112021
|
import inspect
from typing import Set
from sonosco.common.constants import COLLECTIONS, PRIMITIVES, CLASS_MODULE_FIELD, CLASS_NAME_FIELD, SERIALIZED_FIELD
def get_constructor_args(cls) -> Set[str]:
    """Return the names of ``cls.__init__``'s arguments, excluding ``self``.

    E.g.::

        class Bar:
            def __init__(self, arg1, arg2): ...

        get_constructor_args(Bar)  # {'arg1', 'arg2'}

    Args:
        cls (object):
    Returns: set containing names of constructor arguments
    """
    spec = inspect.getfullargspec(cls.__init__)
    positional = spec.args[1:]  # drop `self`
    return set(positional)
def get_class_by_name(name: str) -> type:
    """
    Returns type object of class specified by name
    Args:
        name: full name of the class (with packages)
    Returns: class object
    """
    root, *rest = name.split('.')
    obj = __import__(root)
    # Walk the remaining dotted path attribute by attribute.
    for attr in rest:
        obj = getattr(obj, attr)
    return obj
def is_serialized_collection_of_serializables(obj: any) -> bool:
    """True for a non-empty serialized collection whose every element is a serialized dataclass."""
    if not is_serialized_collection(obj) or len(obj) == 0:
        return False
    return all(is_serialized_dataclass(element) for element in obj)
# TODO: will match collections of types as well; tighten this check.
def is_serialized_collection_of_callables(obj: any) -> bool:
    """True for a non-empty serialized collection whose every element is a serialized type."""
    if not is_serialized_collection(obj) or len(obj) == 0:
        return False
    return all(is_serialized_type(element) for element in obj)
def is_serialized_collection(obj: object) -> bool:
    """
    Checks if object is a collection
    Args:
        obj: any python object
    Returns: True if the object's exact type is listed in COLLECTIONS
    """
    # Deliberately uses type(), not isinstance(): subclasses do not match.
    return type(obj) in COLLECTIONS
def is_serialized_primitive(obj: object) -> bool:
    """
    Checks if object is a primitive
    Args:
        obj: any python object
    Returns: True if the object's exact type is listed in PRIMITIVES
    """
    # Deliberately uses type(), not isinstance(): subclasses do not match.
    return type(obj) in PRIMITIVES
def is_serialized_type(obj: object) -> bool:
    """
    Checks if object is a serialized type marker
    Args:
        obj: any python object
    Returns: True if obj is a plain dict carrying both the class-module and class-name fields
    """
    if type(obj) is not dict:
        return False
    return CLASS_MODULE_FIELD in obj and CLASS_NAME_FIELD in obj
def is_serialized_dataclass(obj: object) -> bool:
    """
    Checks if object is a serialized dataclass
    Args:
        obj: any python object
    Returns: True if obj is a plain dict carrying the class-module, class-name
        and serialized-content fields
    """
    if type(obj) is not dict:
        return False
    required = (CLASS_MODULE_FIELD, CLASS_NAME_FIELD, SERIALIZED_FIELD)
    return all(field in obj for field in required)
def raise_unsupported_data_type():
    """
    Raises TypeError for values the serializer cannot handle
    Returns:
    """
    # Bug fix: the two implicitly-concatenated string literals were missing a
    # separating space, producing "...and typesobjects can be serialized".
    raise TypeError("Unsupported data type. Currently only primitives, lists of primitives and types "
                    "objects can be serialized")
|
112022
|
import librosa
from pydub import AudioSegment
import numpy as np
import os
def match_target_amplitude(sound, target_dBFS=-10):
    """Return `sound` gain-adjusted so its loudness (dBFS) equals target_dBFS."""
    gain = target_dBFS - sound.dBFS
    return sound.apply_gain(gain)
new_folder = 'listening_test'
# NOTE(review): os.mkdir raises FileExistsError when re-run -- confirm that is intended.
os.mkdir(new_folder)

walked_os = list(os.walk('.')) # We are going to add files here and we do not want to walk them too
for path, subdirs, files in walked_os:
    for name in files:
        filename = os.path.join(path, name)
        if not filename.endswith('.wav'):
            continue
        # Load, resample to 48 kHz and scale to 32-bit ints for pydub.
        sound, fs = librosa.core.load(filename)
        resampled_sound = librosa.core.resample(sound, fs, 48000)
        shifted_sound = (resampled_sound * (2 ** 31 - 1)).astype(np.int32)
        sound = AudioSegment(shifted_sound.tobytes(),
                             frame_rate=48000,
                             sample_width=4, # 4 bytes, so 32 bit sample
                             channels=1) # mono
        # Half-second fades at both ends, then loudness normalization.
        sound = sound.fade_in(500)
        sound = sound.fade_out(500)
        normalized_sound = match_target_amplitude(sound)
        # NOTE(review): splitting on '\\' is Windows-specific; use os.sep or
        # pathlib for portability -- confirm the target platform.
        input_path_aslist = filename.split('\\')
        output_path_aslist = [input_path_aslist[0], new_folder, *input_path_aslist[1:]]
        if not os.path.exists(os.path.join(*output_path_aslist[:-1])):
            os.mkdir(os.path.join(*output_path_aslist[:-1]))
        output_name = os.path.join(*output_path_aslist)
        normalized_sound.export(output_name, format='wav')
|
112047
|
def print_line():
    """Print a 60-character horizontal rule."""
    print(60 * "-")


def print_full_header(build_step_name):
    """Print a rule followed by a banner naming the build step."""
    print_line()
    banner = " Build Step /// {}".format(build_step_name)
    print(banner)


def print_footer():
    """Print the closing horizontal rule."""
    print_line()


def log(level, data):
    """Print a message prefixed with its severity level."""
    message = "{0}: {1}".format(level, data)
    print(message)
|
112065
|
import argparse
import os
from bs4 import BeautifulSoup
def print_differences(pred_xml, gold_xml):
    """Print tag-level false positives and false negatives between a
    prediction XML file and its gold-standard counterpart.

    Tags are compared as (TYPE, start, end, text) tuples taken from the
    children of the <TAGS> element.
    """
    print(os.path.basename(pred_xml))
    sets = []
    for xml_path in [pred_xml, gold_xml]:
        # Fix: read via a context manager; the original `open(...).read()`
        # leaked the file handle.
        with open(xml_path, 'r') as handle:
            soup = BeautifulSoup(handle.read(), 'xml')
        items = set()
        for tag in soup.find('TAGS').findChildren():
            items.add((tag.get('TYPE'), tag.get('start'), tag.get('end'), tag.get('text')))
        sets.append(items)
    # Predicted but not in gold.
    false_positives = sets[0] - sets[1]
    if len(false_positives) > 0:
        print(' false positives:')
        for fp in false_positives:
            print(' -', fp)
    # In gold but not predicted.
    false_negatives = sets[1] - sets[0]
    if len(false_negatives) > 0:
        print(' false negatives:')
        for fn in false_negatives:
            print(' -', fn)
    print('-' * 100)
def main():
    """Compare prediction and gold annotations, for a single pair of files or
    for every .xml file in a directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('pred',
                        help='the predictions file or directory')
    parser.add_argument('gold', help='the gold file or directory')
    args = parser.parse_args()

    if not os.path.isdir(args.pred):
        print_differences(args.pred, args.gold)
        return

    # Directory mode: pair files by name across the two directories.
    for name in os.listdir(args.pred):
        if name.endswith('.xml'):
            print_differences(os.path.join(args.pred, name),
                              os.path.join(args.gold, name))


if __name__ == '__main__':
    main()
|
112066
|
from xpath_helper import xh, filter
# Tests for xpath_helper filter combinators. `html_doc` is a pytest fixture
# (presumably an lxml document supplied by conftest.py -- confirm).
def test_and_operator(html_doc):
    h1_path = xh.get_element_by_tag("h1", filter.and_operator(
        filter.value_contains("motherfudging"), filter.value_contains("website")))
    elements = html_doc.xpath(str(h1_path))
    assert len(elements) != 0
    assert "The " in elements[0].text


def test_or(html_doc):
    h1_path = xh.get_element_by_tag("h1", filter.value_contains(
        "motherfudging").or_operator(filter.value_equals("motherfudging")))
    elements = html_doc.xpath(str(h1_path))
    assert len(elements) != 0
    assert "The " in elements[0].text


def test_empty(html_doc):
    # empty() clears the filter in place, after which it matches everything.
    aFilter = filter.has_attribute("Toto")
    h1_path = xh.get_element_by_tag("h1", aFilter)
    elements = html_doc.xpath(str(h1_path))
    assert len(elements) == 0
    aFilter.empty()
    h1_path = xh.get_element_by_tag("h1", aFilter)
    elements = html_doc.xpath(str(h1_path))
    assert len(elements) != 0


def test_isEmpty(html_doc):
    assert filter.has_attribute("Toto").is_empty() == False
    assert filter.is_empty() == True
# Attribute-based filter tests (presence, containment and comparisons).
def test_has_attribute(html_doc):
    body_path = xh.get_element_by_tag("body", filter.has_attribute("data-new-gr-c-s-check-loaded"))
    elements = html_doc.xpath(str(body_path))
    assert len(elements) != 0


def test_attribute_contains(html_doc):
    body_path = xh.get_element_by_tag("body", filter.attribute_contains("data-new-gr-c-s-check-loaded", "8"))
    elements = html_doc.xpath(str(body_path))
    assert len(elements) != 0


def test_attribute_equals(html_doc):
    body_path = xh.get_element_by_tag("body", filter.attribute_equals("data-new-gr-c-s-check-loaded", "8.884.0"))
    elements = html_doc.xpath(str(body_path))
    assert len(elements) != 0


def test_attribute_not_equals(html_doc):
    body_path = xh.get_element_by_tag("body", filter.attribute_not_equals("data-new-gr-c-s-check-loaded", "toto"))
    elements = html_doc.xpath(str(body_path))
    assert len(elements) != 0


def test_attribute_less_than(html_doc):
    li_path = xh.get_element_by_tag("li", filter.attribute_less_than("data-number", 21)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_attribute_less_thanOrEqualsTo(html_doc):
    li_path = xh.get_element_by_tag("li", filter.attribute_less_than_or_equal_to("data-number", 20)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_attribute_greater_than(html_doc):
    li_path = xh.get_element_by_tag("li", filter.attribute_greater_than("data-number", 24)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_attribute_greater_than_or_equal_to(html_doc):
    li_path = xh.get_element_by_tag("li", filter.attribute_greater_than_or_equal_to("data-number", 25)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0
# Element-value filter tests (containment, equality and comparisons).
def test_value_contains(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_contains("Stuff doesn't weigh a ton (in fact it'")
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_value_equals(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_equals(20)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_value_not_equals(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_greater_than(14).and_operator(filter.value_not_equals(20))
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0
    assert elements[0].text == "15"


def test_value_less_than(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_less_than(16)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_value_less_thanOrEqualsTo(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_less_than_or_equal_to(15)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_value_greater_than(html_doc):
    li_path = xh.get_element_by_tag("li", filter.value_greater_than(19)
    )
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0


def test_value_greater_thanOrEqualsTo(html_doc):
    li_path = xh.get_element_by_tag(
        "li", filter.value_greater_than_or_equal_to(20))
    elements = html_doc.xpath(str(li_path))
    assert len(elements) != 0
# Positional selection and negation filter tests.
def test_get(html_doc):
    p_path = xh.get_element_by_tag(
        "body"
    ).get_element_by_tag("p", filter.get(2))
    elements = html_doc.xpath(str(p_path))
    assert len(elements) != 0
    assert "You probably build websites using vim" in elements[0].text


def test_get_first(html_doc):
    p_path = xh.get_element_by_tag(
        "body").get_element_by_tag("p", filter.get_first())
    elements = html_doc.xpath(str(p_path))
    assert len(elements) != 0
    assert "For real" in elements[0].text


def test_get_last(html_doc):
    p_path = xh.get_element(filter.attribute_equals(
        "class", "tleft")).get_element_by_tag("p", filter.get_last())
    elements = html_doc.xpath(str(p_path))
    assert len(elements) != 0
    assert "He's happy" in elements[0].text


def test_not(html_doc):
    p_path = xh.get_element_by_tag("body").get_element_by_tag(
        "p", filter.not_operator(filter.attribute_equals("class", "st")))
    elements = html_doc.xpath(str(p_path))
    assert len(elements) != 0
    assert "For real" not in elements[0].text
|
112122
|
import json

# Parse GeoJson data
jsdata = """{
"type": "Feature",
"id": "OpenLayers.Feature.Vector_314",
"properties": {
},
"geometry": {
"type": "Point",
"coordinates": [
97.03125,
39.7265625
]
},
"crs": {
"type": "name",
"properties": {
"name": "urn: ogc: def: crs: OGC: 1.3:CRS84"
}
}
}"""
# Try to eval() the data
# WARNING(security): eval() executes arbitrary code; never use it on
# untrusted input. Kept only to contrast with json.loads below.
point = eval(jsdata)
print(point["geometry"])
# Use the json module (the safe way to parse JSON)
print(json.loads(jsdata))
# Parse and then dump GeoJSON
pydata = json.loads(jsdata)
print(json.dumps(pydata))
|
112150
|
import errno
import itertools
import os
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
from loguru import logger
from tqdm import tqdm
import slp.util.system as system
import slp.util.types as types
from slp.config.nlp import SPECIAL_TOKENS
from slp.data.transforms import HuggingFaceTokenizer, SpacyTokenizer, ToTokenIds
def create_vocab(
    corpus: Union[List[str], List[List[str]]],
    vocab_size: int = -1,
    special_tokens: Optional[SPECIAL_TOKENS] = None,
) -> Dict[str, int]:
    """Create the vocabulary based on tokenized input corpus

    * Injects special tokens in the vocabulary
    * Calculates the occurence count for each token
    * Limits vocabulary to vocab_size most common tokens

    Args:
        corpus (Union[List[str], List[List[str]]]): The tokenized corpus as a list of sentences or a list of tokenized sentences
        vocab_size (int): [description]. Limit vocabulary to vocab_size most common tokens.
            Defaults to -1 which keeps all tokens.
        special_tokens Optional[SPECIAL_TOKENS]: Special tokens to include in the vocabulary. Defaults to None.

    Returns:
        Dict[str, int]: Dictionary of all accepted tokens and their corresponding occurence counts

    Examples:
        >>> create_vocab(["in", "a", "galaxy", "far", "far", "away"])
        {'far': 2, 'away': 1, 'galaxy': 1, 'a': 1, 'in': 1}
        >>> create_vocab(["in", "a", "galaxy", "far", "far", "away"], vocab_size=3)
        {'far': 2, 'a': 1, 'in': 1}
        >>> create_vocab(["in", "a", "galaxy", "far", "far", "away"], vocab_size=3, special_tokens=slp.config.nlp.SPECIAL_TOKENS)
        {'[PAD]': 0, '[MASK]': 0, '[UNK]': 0, '[BOS]': 0, '[EOS]': 0, '[CLS]': 0, '[SEP]': 0, 'far': 2, 'a': 1, 'in': 1}
    """
    if isinstance(corpus[0], list):
        # Flatten a list of tokenized sentences into a single token stream.
        corpus = list(itertools.chain.from_iterable(corpus))
    freq = Counter(corpus)

    extra_tokens = [] if special_tokens is None else special_tokens.to_list()

    if vocab_size < 0:
        vocab_size = len(freq)
    take = min(vocab_size, len(freq))
    # BUGFIX: log the number of tokens actually kept (take), not the
    # requested vocab_size, which may exceed the corpus vocabulary.
    logger.info(f"Keeping {take} most common tokens out of {len(freq)}")

    def take0(x: Tuple[Any, Any]) -> Any:
        """Take first tuple element"""
        return x[0]

    common_words = list(map(take0, freq.most_common(take)))
    # Special tokens are prepended below; drop them from the common words so
    # they are not listed twice.
    common_words = list(set(common_words) - set(extra_tokens))
    words = extra_tokens + common_words
    # NOTE: len(common_words) <= take <= vocab_size, therefore len(words) can
    # never exceed vocab_size + len(extra_tokens). The previous truncation
    # `words[: vocab_size + len(extra_tokens)]` was a no-op and was removed.

    def token_freq(t):
        """Occurrence count for a token (special tokens are reported as 0)."""
        return 0 if t in extra_tokens else freq[t]

    vocab = dict(zip(words, map(token_freq, words)))
    logger.info(f"Vocabulary created with {len(vocab)} tokens.")
    logger.info(f"The 10 most common tokens are:\n{freq.most_common(10)}")
    return vocab
class EmbeddingsLoader(object):
    def __init__(
        self,
        embeddings_file: str,
        dim: int,
        vocab: Optional[Dict[str, int]] = None,
        extra_tokens: Optional[SPECIAL_TOKENS] = None,
    ) -> None:
        """Load word embeddings in text format

        Args:
            embeddings_file (str): File where embeddings are stored (e.g. glove.6B.50d.txt)
            dim (int): Dimensionality of embeddings
            vocab (Optional[Dict[str, int]]): Load only embeddings in vocab. Defaults to None.
            extra_tokens (Optional[slp.config.nlp.SPECIAL_TOKENS]): Create random embeddings for these special tokens.
                Defaults to None.
        """
        self.embeddings_file = embeddings_file
        self.vocab = vocab
        self.cache_ = self._get_cache_name()
        self.dim_ = dim
        self.extra_tokens = extra_tokens

    def __repr__(self):
        """String representation of class"""
        return f"{self.__class__.__name__}({self.embeddings_file}, {self.dim_})"

    def in_accepted_vocab(self, word: str) -> bool:
        """Check if word exists in given vocabulary

        Args:
            word (str): word from embeddings file

        Returns:
            bool: Word exists (always True when no vocab was given)
        """
        return True if self.vocab is None else word in self.vocab

    def _get_cache_name(self) -> str:
        """Create a cache file name to avoid reloading the embeddings

        Cache name is something like glove.6B.50d.1000.p,
        where 1000 is the size of the vocab provided in __init__

        Returns:
            str: Cache file name
        """
        head, tail = os.path.split(self.embeddings_file)
        filename, _ = os.path.splitext(tail)
        # BUGFIX: `filename` was computed but never used — every embeddings
        # file previously mapped to the same literal "(unknown)" cache file,
        # contradicting the documented naming scheme above.
        if self.vocab is not None:
            cache_name = os.path.join(head, f"{filename}.{len(self.vocab)}.p")
        else:
            cache_name = os.path.join(head, f"{filename}.p")
        logger.info(f"Cache: {cache_name}")
        return cache_name

    def _dump_cache(self, data: types.Embeddings) -> None:
        """Save loaded embeddings to cache as a pickle

        Saves a tuple of (word2idx, idx2word, embeddings)

        Args:
            data (types.Embeddings): (word2idx, idx2word, embeddings) tuple
        """
        system.pickle_dump(data, self.cache_)

    def _load_cache(self) -> types.Embeddings:
        """Load Embeddings from cache

        Returns:
            types.Embeddings: (word2idx, idx2word, embeddings) tuple
        """
        return cast(types.Embeddings, system.pickle_load(self.cache_))

    def augment_embeddings(
        self,
        word2idx: Dict[str, int],
        idx2word: Dict[int, str],
        embeddings: List[np.ndarray],
        token: str,
        emb: Optional[np.ndarray] = None,
    ) -> Tuple[Dict[str, int], Dict[int, str], List[np.ndarray]]:
        """Create a random embedding for a special token and append it to the embeddings array

        Args:
            word2idx (Dict[str, int]): Current word2idx map
            idx2word (Dict[int, str]): Current idx2word map
            embeddings (List[np.ndarray]): Embeddings array as list of embeddings
            token (str): The special token (e.g. [PAD])
            emb (Optional[np.ndarray]): Optional value for the embedding to be appended.
                Defaults to None, where a random embedding is created.

        Returns:
            Tuple[Dict[str, int], Dict[int, str], List[np.ndarray]]: (word2idx, idx2word, embeddings) tuple
        """
        word2idx[token] = len(embeddings)
        idx2word[len(embeddings)] = token
        if emb is None:
            emb = np.random.uniform(low=-0.05, high=0.05, size=self.dim_)
        embeddings.append(emb)
        return word2idx, idx2word, embeddings

    @system.timethis(method=True)
    def load(self) -> types.Embeddings:
        """Read the word vectors from a text file

        * Read embeddings
        * Filter with given vocabulary
        * Augment with special tokens

        Returns:
            types.Embeddings: (word2idx, idx2word, embeddings) tuple
        """
        # in order to avoid this time consuming operation, cache the results
        try:
            cache = self._load_cache()
            logger.info("Loaded word embeddings from cache.")
            return cache
        except OSError:
            # BUGFIX: the missing file is the *cache*, not the embeddings
            # file — report the right path.
            logger.warning(f"Didn't find embeddings cache file {self.cache_}")
            logger.warning("Loading embeddings from file.")
        # create the necessary dictionaries and the word embeddings matrix
        if not os.path.exists(self.embeddings_file):
            logger.critical(f"{self.embeddings_file} not found!")
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), self.embeddings_file)
        logger.info(f"Indexing file {self.embeddings_file} ...")
        # create the 2D array, which will be used for initializing
        # the Embedding layer of a NN.
        # We reserve the first row (idx=0), as the word embedding,
        # which will be used for zero padding (word with id = 0).
        if self.extra_tokens is not None:
            word2idx, idx2word, embeddings = self.augment_embeddings(
                {},
                {},
                [],
                self.extra_tokens.PAD.value,  # type: ignore
                emb=np.zeros(self.dim_),
            )
            for token in self.extra_tokens:  # type: ignore
                logger.debug(f"Adding token {token.value} to embeddings matrix")
                if token == self.extra_tokens.PAD:
                    continue
                word2idx, idx2word, embeddings = self.augment_embeddings(
                    word2idx, idx2word, embeddings, token.value
                )
        else:
            word2idx, idx2word, embeddings = self.augment_embeddings(
                {}, {}, [], "[PAD]", emb=np.zeros(self.dim_)
            )
        # First pass: count lines so tqdm can show progress.
        with open(self.embeddings_file, "r") as f:
            num_lines = sum(1 for line in f)
        with open(self.embeddings_file, "r") as f:
            index = len(embeddings)
            for line in tqdm(
                f, total=num_lines, desc="Loading word embeddings...", leave=False
            ):
                # skip the first row if it is a header
                if len(line.split()) < self.dim_:
                    continue
                values = line.rstrip().split(" ")
                word = values[0]
                if word in word2idx:
                    continue
                if not self.in_accepted_vocab(word):
                    continue
                vector = np.asarray(values[1:], dtype=np.float32)
                idx2word[index] = word
                word2idx[word] = index
                embeddings.append(vector)
                index += 1
        logger.info(f"Loaded {len(embeddings)} word vectors.")
        embeddings_out = np.array(embeddings, dtype="float32")
        # write the data to a cache file
        self._dump_cache((word2idx, idx2word, embeddings_out))
        return word2idx, idx2word, embeddings_out
class WordCorpus(object):
    def __init__(
        self,
        corpus: List[str],
        limit_vocab_size: int = 30000,
        word2idx: Optional[Dict[str, int]] = None,
        idx2word: Optional[Dict[int, str]] = None,
        embeddings: Optional[np.ndarray] = None,
        embeddings_file: Optional[str] = None,
        embeddings_dim: int = 300,
        lower: bool = True,
        special_tokens: Optional[SPECIAL_TOKENS] = SPECIAL_TOKENS,  # type: ignore
        prepend_bos: bool = False,
        append_eos: bool = False,
        lang: str = "en_core_web_md",
        max_length: int = -1,
        **kwargs,
    ):
        """Load corpus embeddings, tokenize in words using spacy and convert to ids

        This class handles the handling of a raw corpus. It handles:

        * Tokenization into words (spacy)
        * Loading of pretrained word embedding
        * Calculation of word frequencies / corpus statistics
        * Conversion to token ids

        You can pass either:

        * Pass an embeddings file to load pretrained embeddings and create the word2idx mapping
        * Pass already loaded embeddings array and word2idx. This is useful for the dev / test splits
          where we want to pass the train split embeddings / word2idx.

        Args:
            corpus (List[List[str]]): Corpus as a list of sentences
            limit_vocab_size (int): Upper bound for number of most frequent tokens to keep. Defaults to 30000.
            word2idx (Optional[Dict[str, int]]): Mapping of word to indices. Defaults to None.
            idx2word (Optional[Dict[int, str]]): Mapping of indices to words. Defaults to None.
            embeddings (Optional[np.ndarray]): Embeddings array. Defaults to None.
            embeddings_file (Optional[str]): Embeddings file to read. Defaults to None.
            embeddings_dim (int): Dimension of embeddings. Defaults to 300.
            lower (bool): Convert strings to lower case. Defaults to True.
            special_tokens (Optional[SPECIAL_TOKENS]): Special tokens to include in the vocabulary.
                Defaults to slp.config.nlp.SPECIAL_TOKENS.
            prepend_bos (bool): Prepend Beginning of Sequence token for seq2seq tasks. Defaults to False.
            append_eos (bool): Append End of Sequence token for seq2seq tasks. Defaults to False.
            lang (str): Spacy language, e.g. el_core_web_sm, en_core_web_sm etc. Defaults to "en_core_web_md".
            max_length (int): Crop sequences above this length. Defaults to -1 where sequences are left unaltered.
        """
        # FIXME: Extract super class to avoid repetition
        self.corpus_ = corpus
        self.max_length = max_length
        self.tokenizer = SpacyTokenizer(
            lower=lower,
            prepend_bos=prepend_bos,
            append_eos=append_eos,
            specials=special_tokens,
            lang=lang,
        )
        logger.info(f"Tokenizing corpus using spacy {lang}")
        self.tokenized_corpus_ = [
            self.tokenizer(s)
            for s in tqdm(self.corpus_, desc="Tokenizing corpus...", leave=False)
        ]
        # When a word2idx is provided (dev/test splits) keep the full vocab;
        # otherwise limit it to the most common tokens.
        self.vocab_ = create_vocab(
            self.tokenized_corpus_,
            vocab_size=limit_vocab_size if word2idx is None else -1,
            special_tokens=special_tokens,
        )
        self.word2idx_, self.idx2word_, self.embeddings_ = None, None, None
        # self.corpus_indices_ = self.tokenized_corpus_
        if word2idx is not None:
            logger.info("Word2idx was already provided. Going to use it.")
        if embeddings_file is not None and word2idx is None:
            logger.info(
                f"Going to load {len(self.vocab_)} embeddings from {embeddings_file}"
            )
            loader = EmbeddingsLoader(
                embeddings_file,
                embeddings_dim,
                vocab=self.vocab_,
                extra_tokens=special_tokens,
            )
            word2idx, idx2word, embeddings = loader.load()
        if embeddings is not None:
            self.embeddings_ = embeddings
        if idx2word is not None:
            self.idx2word_ = idx2word
        if word2idx is not None:
            self.word2idx_ = word2idx
        logger.info("Converting tokens to ids using word2idx.")
        self.to_token_ids = ToTokenIds(
            self.word2idx_,
            specials=SPECIAL_TOKENS,  # type: ignore
        )
        self.corpus_indices_ = [
            self.to_token_ids(s)
            for s in tqdm(
                self.tokenized_corpus_,
                desc="Converting tokens to token ids...",
                leave=False,
            )
        ]
        # Drop vocab entries that have no pretrained embedding.
        logger.info("Filtering corpus vocabulary.")
        updated_vocab = {}
        for k, v in self.vocab_.items():
            if k in self.word2idx_:
                updated_vocab[k] = v
        logger.info(
            f"Out of {len(self.vocab_)} tokens {len(self.vocab_) - len(updated_vocab)} were not found in the pretrained embeddings."
        )
        self.vocab_ = updated_vocab

    @property
    def vocab_size(self) -> int:
        """Retrieve vocabulary size for corpus

        Returns:
            int: vocabulary size
        """
        sz: int = (
            self.embeddings.shape[0] if self.embeddings is not None else len(self.vocab_)
        )
        return sz

    @property
    def frequencies(self) -> Dict[str, int]:
        """Retrieve word occurence counts

        Returns:
            Dict[str, int]: word occurence counts
        """
        return self.vocab_

    @property
    def vocab(self) -> Set[str]:
        """Retrieve set of words in vocabulary

        Returns:
            Set[str]: set of words in vocabulary
        """
        return set(self.vocab_.keys())

    @property
    def embeddings(self) -> np.ndarray:
        """Retrieve embeddings array

        Returns:
            np.ndarray: Array of pretrained word embeddings
        """
        return cast(np.ndarray, self.embeddings_)

    @property
    def word2idx(self) -> Dict[str, int]:
        """Retrieve word2idx mapping

        Returns:
            Dict[str, int]: word2idx mapping
        """
        return cast(Dict[str, int], self.word2idx_)

    @property
    def idx2word(self) -> Dict[int, str]:
        """Retrieve idx2word mapping

        Returns:
            Dict[int, str]: idx2word mapping
        """
        return cast(Dict[int, str], self.idx2word_)

    @property
    def tokenized(self) -> List[List[str]]:
        """Retrieve tokenized corpus

        Returns:
            List[List[str]]: Tokenized corpus
        """
        return self.tokenized_corpus_

    @property
    def indices(self) -> List[List[int]]:
        """Retrieve corpus as token indices

        Returns:
            List[List[int]]: Token indices for corpus
        """
        return self.corpus_indices_

    @property
    def raw(self) -> List[str]:
        """Retrieve raw corpus

        Returns:
            List[str]: Raw Corpus
        """
        return self.corpus_

    def __len__(self) -> int:
        """Number of samples in corpus

        Returns:
            int: Corpus length
        """
        return len(self.corpus_indices_)

    def __getitem__(self, idx) -> List[int]:
        """Get ith element in corpus as token indices

        Args:
            idx (List[int]): index in corpus

        Returns:
            List[int]: List of token indices for sentence
        """
        out: List[int] = (
            self.corpus_indices_[idx]
            if self.max_length <= 0
            else self.corpus_indices_[idx][: self.max_length]
        )
        return out
class HfCorpus(object):
    def __init__(
        self,
        corpus: List[str],
        lower: bool = True,
        tokenizer_model: str = "bert-base-uncased",
        add_special_tokens: bool = True,
        special_tokens: Optional[SPECIAL_TOKENS] = SPECIAL_TOKENS,  # type: ignore
        max_length: int = -1,
        **kwargs,
    ):
        """Process a corpus using hugging face tokenizers

        Select one of hugging face tokenizers and process corpus

        Args:
            corpus (List[str]): List of sentences
            lower (bool): Convert strings to lower case. Defaults to True.
            tokenizer_model (str): Hugging face model to use. Defaults to "bert-base-uncased".
            add_special_tokens (bool): Add special tokens in sentence during tokenization. Defaults to True.
            special_tokens (Optional[SPECIAL_TOKENS]): Special tokens to include in the vocabulary.
                Defaults to slp.config.nlp.SPECIAL_TOKENS.
            max_length (int): Crop sequences above this length. Defaults to -1 where sequences are left unaltered.
        """
        self.corpus_ = corpus
        self.max_length = max_length
        logger.info(
            f"Tokenizing corpus using hugging face tokenizer from {tokenizer_model}"
        )
        self.tokenizer = HuggingFaceTokenizer(
            lower=lower, model=tokenizer_model, add_special_tokens=add_special_tokens
        )
        # The HF tokenizer maps straight to ids; tokens are recovered by
        # detokenizing the id sequences afterwards.
        self.corpus_indices_ = [
            self.tokenizer(s)
            for s in tqdm(
                self.corpus_, desc="Converting tokens to indices...", leave=False
            )
        ]
        self.tokenized_corpus_ = [
            self.tokenizer.detokenize(s)
            for s in tqdm(
                self.corpus_indices_,
                desc="Mapping indices to tokens...",
                leave=False,
            )
        ]
        self.vocab_ = create_vocab(
            self.tokenized_corpus_,
            vocab_size=-1,
            special_tokens=special_tokens,
        )

    @property
    def vocab_size(self) -> int:
        """Retrieve vocabulary size

        Returns:
            int: Vocabulary size
        """
        sz: int = self.tokenizer.vocab_size
        return sz

    @property
    def frequencies(self) -> Dict[str, int]:
        """Retrieve wordpieces occurence counts

        Returns:
            Dict[str, int]: wordpieces occurence counts
        """
        return self.vocab_

    @property
    def vocab(self) -> Set[str]:
        """Retrieve set of words in vocabulary

        Returns:
            Set[str]: set of words in vocabulary
        """
        return set(self.vocab_.keys())

    @property
    def embeddings(self) -> None:
        """Unused. Defined for compatibility"""
        return None

    @property
    def word2idx(self) -> None:
        """Unused. Defined for compatibility"""
        return None

    @property
    def idx2word(self) -> None:
        """Unused. Defined for compatibility"""
        return None

    @property
    def tokenized(self) -> List[List[str]]:
        """Retrieve tokenized corpus

        Returns:
            List[List[str]]: tokenized corpus
        """
        return self.tokenized_corpus_

    @property
    def indices(self) -> List[List[int]]:
        """Retrieve corpus as token indices

        Returns:
            List[List[int]]: Token indices for corpus
        """
        return self.corpus_indices_

    @property
    def raw(self) -> List[str]:
        """Retrieve raw corpus

        Returns:
            List[str]: Raw Corpus
        """
        return self.corpus_

    def __len__(self) -> int:
        """Number of samples in corpus

        Returns:
            int: Corpus length
        """
        return len(self.corpus_indices_)

    def __getitem__(self, idx) -> List[int]:
        """Get ith element in corpus as token indices

        Args:
            idx (List[int]): index in corpus

        Returns:
            List[int]: List of token indices for sentence
        """
        out: List[int] = (
            self.corpus_indices_[idx]
            if self.max_length <= 0
            else self.corpus_indices_[idx][: self.max_length]
        )
        return out
class TokenizedCorpus(object):
    def __init__(
        self,
        corpus: Union[List[str], List[List[str]]],
        word2idx: Optional[Dict[str, int]] = None,
        special_tokens: Optional[SPECIAL_TOKENS] = SPECIAL_TOKENS,  # type: ignore
        max_length: int = -1,
        **kwargs,
    ):
        """Wrap a corpus that's already tokenized

        Args:
            corpus (Union[List[str], List[List[str]]]): List of tokens or List of lists of tokens
            word2idx (Optional[Dict[str, int]]): Token to index mapping. Defaults to None.
            special_tokens (Optional[SPECIAL_TOKENS], optional): Special Tokens. Defaults to SPECIAL_TOKENS.
            max_length (int): Crop sequences above this length. Defaults to -1 where sequences are left unaltered.
        """
        self.corpus_ = corpus
        self.tokenized_corpus_ = corpus
        self.max_length = max_length
        self.vocab_ = create_vocab(
            self.tokenized_corpus_,
            vocab_size=-1,
            special_tokens=special_tokens,
        )
        if word2idx is not None:
            logger.info("Converting tokens to ids using word2idx.")
            self.word2idx_ = word2idx
        else:
            logger.info(
                "No word2idx provided. Will convert tokens to ids using an iterative counter."
            )
            # Assign ids in vocab iteration order.
            self.word2idx_ = dict(zip(self.vocab_.keys(), itertools.count()))
        self.idx2word_ = {v: k for k, v in self.word2idx_.items()}
        self.to_token_ids = ToTokenIds(
            self.word2idx_,
            specials=SPECIAL_TOKENS,  # type: ignore
        )
        # Accept either a single token list or a list of tokenized sentences.
        if isinstance(self.tokenized_corpus_[0], list):
            self.corpus_indices_ = [
                self.to_token_ids(s)
                for s in tqdm(
                    self.tokenized_corpus_,
                    desc="Converting tokens to token ids...",
                    leave=False,
                )
            ]
        else:
            self.corpus_indices_ = self.to_token_ids(self.tokenized_corpus_)  # type: ignore

    @property
    def vocab_size(self) -> int:
        """Retrieve vocabulary size

        Returns:
            int: Vocabulary size
        """
        return len(self.vocab_)

    @property
    def frequencies(self) -> Dict[str, int]:
        """Retrieve wordpieces occurence counts

        Returns:
            Dict[str, int]: wordpieces occurence counts
        """
        return self.vocab_

    @property
    def vocab(self) -> Set[str]:
        """Retrieve set of words in vocabulary

        Returns:
            Set[str]: set of words in vocabulary
        """
        return set(self.vocab_.keys())

    @property
    def embeddings(self) -> None:
        """Unused. Kept for compatibility"""
        return None

    @property
    def word2idx(self) -> Dict[str, int]:
        """Retrieve word2idx mapping

        Returns:
            Dict[str, int]: word2idx mapping
        """
        return self.word2idx_

    @property
    def idx2word(self) -> Dict[int, str]:
        """Retrieve idx2word mapping

        Returns:
            Dict[int, str]: idx2word mapping
        """
        return self.idx2word_

    @property
    def tokenized(self) -> Union[List[str], List[List[str]]]:
        """Retrieve tokenized corpus

        Returns:
            List[List[str]]: Tokenized corpus
        """
        return self.tokenized_corpus_

    @property
    def indices(self) -> Union[List[int], List[List[int]]]:
        """Retrieve corpus as token indices

        Returns:
            List[List[int]]: Token indices for corpus
        """
        return self.corpus_indices_

    @property
    def raw(self) -> Union[List[str], List[List[str]]]:
        """Retrieve raw corpus

        Returns:
            List[str]: Raw Corpus
        """
        return self.corpus_

    def __len__(self) -> int:
        """Number of samples in corpus

        Returns:
            int: Corpus length
        """
        return len(self.corpus_indices_)

    def __getitem__(self, idx) -> List[int]:
        """Get ith element in corpus as token indices

        Args:
            idx (List[int]): index in corpus

        Returns:
            List[int]: List of token indices for sentence
        """
        out: List[int] = (
            self.corpus_indices_[idx]
            if self.max_length <= 0
            else self.corpus_indices_[idx][: self.max_length]
        )
        return out
if __name__ == "__main__":
    # Smoke test: build both corpus flavours over a toy dataset.
    sentences = [
        "the big",
        "brown fox",
        "jumps over",
        "the lazy dog",
        "supercalifragilisticexpialidocious",
    ]
    word_corpus = WordCorpus(
        sentences,
        embeddings_file="./cache/glove.6B.50d.txt",
        embeddings_dim=50,
        lower=True,
        prepend_bos=True,
        append_eos=True,
    )
    hugging_face_corpus = HfCorpus(sentences)
|
112206
|
import os
import sys
import argparse
import numpy
import json
import pprint
import subprocess
import errno
import socket
import serial
import time
from datetime import datetime
from collections import OrderedDict
# IoT2 imports
import iot2_settings
import setup_result_collection
import iot2_measure_static_flash_and_ram
#######################################################################
# GLOBALS #
#######################################################################
# iot2 special msgs for result collection
# Sentinel strings printed by the device firmware over serial; matched
# verbatim by the collector loops below.
IOT2_BEGIN_BENCHMARK = "[IoT2] benchmark: START" # this message is needed to set an event and start the tcp driver
# Marks the point after which every line is a metric result.
IOT2_START_COLLECTING_RESULTS_MSG = "[IoT2] collect_results: START"
# Terminates the result stream (see collect_benchmark_results).
IOT2_END_COLLECTING_RESULTS_MSG = "[IoT2] collect_results: END"
#######################################################################
# FUNCTIONS #
#######################################################################
def set_bench_event(bench_event):
    """Signal that the benchmark has started by setting the given event."""
    bench_event.set()
def collect_benchmark_results(ser, metric_dict, iteration):
    """Read metric lines from the serial port until the end marker arrives.

    Each line has the form "...-><name>:<values>" where <values> is either a
    single number or comma-separated "<sub>=<val>" pairs. Results are
    appended into metric_dict, one list entry per benchmark iteration.

    Args:
        ser: open serial port; readline() is assumed to return str
            (py2 pyserial or a text-wrapped port) — TODO confirm for py3/bytes.
        metric_dict: OrderedDict accumulating results across iterations.
        iteration: current iteration index.
            NOTE(review): currently unused in the body.

    Returns:
        The updated metric_dict.
    """
    ser_input = ser.readline()
    ser_input = ser_input.rstrip('\r\n')
    # read metrics until end msg is sent
    while ser_input != IOT2_END_COLLECTING_RESULTS_MSG:
        metric_result = ser_input.split("->")
        metric_name, metric_values = metric_result[1].split(":")
        #print("-"*80)
        #print(metric_name)
        #print("-" * 80)
        #print(metric_values)
        #print("-"*80)
        submetrics = metric_values.split(",")
        # if there is only a single value, just store it in the dictionay directly
        if len(submetrics) == 1:
            # there is only 1 value which is the metric result
            # handle the priv code calculation
            if metric_name == "TotalPriv_code":
                # Value is reported as an exponent: compute 2**v / 1024
                # (presumably bytes -> KiB — TODO confirm against firmware).
                metric_val = int(pow(2, int(float(submetrics[0])))/float(1024))
            # all other metrics
            else:
                metric_val = int(float(submetrics[0]))
            metric_dict.setdefault(metric_name, []).append(metric_val)
        # there are multiple submetrics, add the recursively to the dictionary
        else:
            # check if this is the first iteration or not
            if metric_name in metric_dict.keys():
                submetric_dict = metric_dict[metric_name]
            else:
                # create dictionary if this is the first iteration
                submetric_dict = OrderedDict()
            # add submetrics to its own dictionary
            for submetric in submetrics:
                submetric_name, submetric_val = submetric.split("=")
                submetric_val = int(submetric_val, 10)
                submetric_dict.setdefault(submetric_name, []).append(submetric_val)
            # add all to the submetrics to global metric dictionary
            metric_dict[metric_name] = submetric_dict
        # receive next result
        ser_input = ser.readline()
        ser_input = ser_input.rstrip('\r\n')
    return metric_dict
def iot2_serial_collector(user_port, iterations, bench_config_name, benchmark_name, append_results, iot2_synch_obj=None):
    """Run `iterations` benchmark runs over serial and persist results as JSON.

    Opens the serial port, waits for the device's start marker, delegates
    metric parsing to collect_benchmark_results(), and finally writes the
    accumulated metric dictionary to a per-benchmark JSON file.

    Args:
        user_port: serial device path (e.g. /dev/ttyACM0).
        iterations: number of benchmark iterations to record.
        bench_config_name: key into iot2_settings.BUILD_OPTIONS used to build
            the results path.
        benchmark_name: base name of the output JSON file.
        append_results: when truthy, load the existing JSON file and append
            to it instead of starting fresh.
        iot2_synch_obj: optional synchronisation object shared with a TCP
            driver process (benchmark-status flags). Defaults to None.
    """
    global IOT2_BEGIN_BENCHMARK
    res_file = str(iot2_settings.RESULTS_DIR_PATH +
                   iot2_settings.BUILD_OPTIONS[bench_config_name][iot2_settings.BENCH_TYPE] +
                   iot2_settings.BUILD_OPTIONS[bench_config_name][iot2_settings.BENCH_CONFIG_RES_DIR] +
                   iot2_settings.METRICS_RESULTS_DIR) + benchmark_name + iot2_settings.JSON_EXT
    # Non-OS benchmarks announce themselves with the boot banner instead.
    if iot2_settings.OS_BENCHMARKS == 0:
        IOT2_BEGIN_BENCHMARK = "Welcome to IoT2"
    try:
        ser = serial.Serial(port=user_port, baudrate=9600, dsrdtr=0, rtscts=0)
        ser.flushInput()
        ser.flushOutput()
        iterations_cntr = 0
        print(res_file)
        # check if we should append or create new file
        if append_results:
            with open(res_file, 'r') as append_fd:
                metric_dict = json.load(append_fd)
        else:
            metric_dict = OrderedDict()
        while iterations_cntr < iterations:
            ser.flushInput()
            ser.flushOutput()
            print("-"*80)
            print("ITERATION (%d/%d) for BENCHMARK: %s" % (iterations_cntr, iterations, benchmark_name))
            print("-"*80)
            # reset ser_input
            ser_input = ""
            t_arr = []
            # benchmark might use the serial, keep receiving messages until result collection starts
            while IOT2_START_COLLECTING_RESULTS_MSG not in ser_input:
                ser_input = ser.readline()
                ser_input = ser_input.rstrip('\r\n')
                # Strip non-ASCII characters (serial line noise).
                ser_input = ''.join([i if ord(i) < 128 else '' for i in ser_input])
                # if benchmark started, set the signal for the tcp process to start
                # used the below solution to avoid serial bugs
                if IOT2_BEGIN_BENCHMARK in ser_input:
                    print("BENCHMARK STATUS IS SET")
                    if iot2_synch_obj:
                        iot2_synch_obj.set_benchmark_status()
                # check if tcp process started, if se reset benchmark status for the next iteration
                if iot2_synch_obj:
                    if iot2_synch_obj.get_tcp_proc_flag() == 1 and iot2_synch_obj.get_benchmark_status() == 1:
                        iot2_synch_obj.reset_benchmark_status()
                        print("BENCHMARK STATUS RESET AFTER TCP FLAG IS SET")
                # Echo each received line with a wall-clock timestamp.
                m_seconds = "%.4f" % (time.time())
                input_time = "%s.%s" % (datetime.now().strftime('%H:%M:%S'), m_seconds[-4:])
                print("%s,[IoT2]>> %s" % (input_time, ser_input))
            # start recording the received results
            metric_dict = collect_benchmark_results(ser, metric_dict, iterations_cntr)
            print("-"*80)
            print("[*] serial runner finished, closing...")
            print("-"*80)
            # update counter
            iterations_cntr += 1
        #print("#"*80)
        #print(metric_dict)
        # write results to a file
        with open(res_file, 'w') as fd:
            json.dump(metric_dict, fd, sort_keys=True, indent=4, ensure_ascii=False)
        #print("#"*80)
        print("="*80)
        print("[+] Results written to %s" % res_file)
        print("="*80)
        return
    except KeyboardInterrupt:
        print("[-] CTRL-C pressed...")
        ser.close()
        print("[IoT2]>> Closing....")
        return
#######################################################################
# MAIN #
#######################################################################
#iot2_serial_collector('/dev/ttyACM0', 1)
|
112241
|
import web
import gevent
import gevent.monkey
import gevent.pywsgi
from db import xiamidb
import time
import traceback
import os
from libXiami import User
gevent.monkey.patch_all()
class Main:
    """web.py handler: landing page (GET) and account registration (POST)."""

    def GET(self):
        # Serve the landing page (py2 `file` builtin, kept for compatibility
        # with the rest of this py2 module).
        return file("static/index.html").read()

    def POST(self):
        """Log the user in, persist the account and stream back a summary."""
        args = web.input()
        email = args.pop("email")
        password = args.pop("password")
        user = User()
        user.login(email,password)
        if user.islogined:
            xiamidb.add(user["user_id"],email,password)
            account = xiamidb.get(user["user_id"])
            account["last"] = time.time()
            account["cookie"] = user.dumpCookie()
            account["email"] = email
            if "savepw" in args:
                account["password"] = password
            else:
                # BUGFIX: the original line contained an unresolved
                # "<PASSWORD>" placeholder (a syntax error). When the user
                # opts out of saving the password, store an empty string so
                # checkin() skips password re-login (it tests truthiness).
                account["password"] = ""
            if "notifyme" in args:
                account["notify"] = True
            else:
                account["notify"] = False
            account["errcount"] = 0
            account["nexttime"] = account["last"]
            account["days"] = int(user["sign"]["persist_num"])
            xiamidb.update(**account)
            yield "email:%s\n"%email
            yield "uid:%s\n"%account['uid']
            yield "cookie:\n%s"%account['cookie']
            yield "days:%s\n"%account["days"]
            yield '\xe7\x99\xbb\xe8\xae\xb0\xe6\x88\x90\xe5\x8a\x9f\xef\xbc\x8c\xe8\xaf\xb7\xe5\x85\xb3\xe9\x97\xad\xe9\xa1\xb5\xe9\x9d\xa2'
        else:
            yield '\xe6\x97\xa0\xe6\xb3\x95\xe8\x8e\xb7\xe5\x8f\x96\xe7\x94\xa8\xe6\x88\xb7\xe4\xbf\xa1\xe6\x81\xaf\xef\xbc\x8c\xe8\xaf\xb7\xe6\xa3\x80\xe6\x9f\xa5\xe5\xb8\x90\xe5\x8f\xb7\xe6\x88\x96\xe5\xaf\x86\xe7\xa0\x81'
class Ctrl:
    """Plain-text status dashboard listing every tracked account."""

    def GET(self):
        yield "uid\temail\t\t\tdays\tlastcheck\tnextcheck\terrcount\tstatus\n"
        for account in xiamidb.scan():
            # Report times relative to "now" (negative = past, positive = future).
            account["last"] -= time.time()
            account["nexttime"] -= time.time()
            yield "%(uid)s\t%(email)s\t%(days)s\t%(last)s\t%(nexttime)s\t%(errcount)s\t%(status)s\n"%account
class StaticFile:
    """web.py handler serving files from ./static."""

    def GET(self, filename):
        """Return the requested static file, or "404" when absent/forbidden.

        SECURITY FIX: `filename` comes straight from the URL pattern
        "/(.*)"; the original code joined it into "static/%s" unchecked,
        allowing "../" path traversal out of the static directory.
        """
        base = os.path.abspath("static")
        path = os.path.abspath(os.path.join("static", filename))
        # Reject any path that resolves outside the static directory.
        if not path.startswith(base + os.sep):
            return "404"
        if os.path.exists(path):
            # open() instead of the py2-only file() builtin (same behavior).
            return open(path, "rb")
        return "404"
def checkin(account):
    """Perform the daily check-in for one account and persist the outcome.

    Re-uses the stored cookie; falls back to a password login only when the
    cookie session is stale and a password was saved (truthiness test).
    Status strings are Chinese unicode literals stored on the account.
    """
    user = User()
    user.loadCookie(account["cookie"])
    if not user.islogined and account["password"]:
        user.login(account["email"],account["password"])
    if user.islogined:
        if not user.ischeckined:
            days = user.checkin()
            if days:
                account["days"] = days
            else:
                # Check-in failed: count the error and retry in 10 minutes.
                account["errcount"] += 1
                account["nexttime"] = time.time() + 600
                account["status"] = u'\u7b7e\u5230\u5931\u8d25'
        else:
            # Already checked in today: retry in an hour.
            # NOTE(review): `days` is computed but never stored here —
            # possibly `account["days"] = days` was intended. Verify.
            days = int(user["sign"]["persist_num"])
            account["nexttime"] = time.time() + 3600
            account["status"] = u'\u6ca1\u6709\u5f02\u5e38'
    else:
        # Login failed: count the error and retry in 10 minutes.
        account["errcount"] += 1
        account["nexttime"] = time.time() + 600
        account["status"] = u'\u767b\u5f55\u5931\u8d25'
    account["last"] = time.time()
    xiamidb.update (**account)
def daemon():
    """Background worker: forever check in accounts that are due.

    Picks accounts with fewer than 3 consecutive errors whose next check time
    has passed. NOTE: Python 2 syntax (print statement) — py2-only module.
    """
    while True:
        accounts = xiamidb.scan(where="errcount<3 and nexttime<%s"%time.time())
        for account in accounts:
            try:
                checkin(account)
            except:
                # Deliberate best-effort: log the traceback, keep the daemon alive.
                traceback.print_exc()
                print "checkin error"
            time.sleep(5)
        time.sleep(5)
# web.py routing table: alternating (URL regex, handler class name) pairs.
# The catch-all "/(.*)" must stay last so it doesn't shadow /Ctrl.
urls = (
    "/", "Main",
    "/Ctrl","Ctrl",
    "/(.*)","StaticFile",
)
def serve_forever():
    """Spawn the check-in daemon and serve the web UI on port 8888 (blocks)."""
    gevent.spawn(daemon)
    print web
    application = web.application(urls, globals()).wsgifunc()
    print 'Serving on 8888'
    server = gevent.pywsgi.WSGIServer(('', 8888), application)
    server.serve_forever()

if __name__ == "__main__":
    serve_forever()
|
112242
|
from ..helpers import nativestr
class BFInfo(object):
    """Parsed BF.INFO reply (Bloom filter statistics)."""

    capacity = None
    size = None
    filterNum = None
    insertedNum = None
    expansionRate = None

    def __init__(self, args):
        # The reply is a flat [key1, value1, key2, value2, ...] list.
        response = {nativestr(key): val for key, val in zip(args[::2], args[1::2])}
        self.capacity = response["Capacity"]
        self.size = response["Size"]
        self.filterNum = response["Number of filters"]
        self.insertedNum = response["Number of items inserted"]
        self.expansionRate = response["Expansion rate"]
class CFInfo(object):
    """Parsed CF.INFO reply (Cuckoo filter statistics)."""

    size = None
    bucketNum = None
    filterNum = None
    insertedNum = None
    deletedNum = None
    bucketSize = None
    expansionRate = None
    maxIteration = None

    def __init__(self, args):
        # The reply is a flat [key1, value1, key2, value2, ...] list.
        response = {nativestr(key): val for key, val in zip(args[::2], args[1::2])}
        self.size = response["Size"]
        self.bucketNum = response["Number of buckets"]
        self.filterNum = response["Number of filters"]
        self.insertedNum = response["Number of items inserted"]
        self.deletedNum = response["Number of items deleted"]
        self.bucketSize = response["Bucket size"]
        self.expansionRate = response["Expansion rate"]
        self.maxIteration = response["Max iterations"]
class CMSInfo(object):
    """Parsed CMS.INFO reply (count-min sketch statistics)."""

    width = None
    depth = None
    count = None

    def __init__(self, args):
        # The reply is a flat [key1, value1, key2, value2, ...] list.
        response = {nativestr(key): val for key, val in zip(args[::2], args[1::2])}
        self.width = response["width"]
        self.depth = response["depth"]
        self.count = response["count"]
class TopKInfo(object):
    """Parsed TOPK.INFO reply (top-k sketch parameters)."""

    k = None
    width = None
    depth = None
    decay = None

    def __init__(self, args):
        # The reply is a flat [key1, value1, key2, value2, ...] list.
        response = {nativestr(key): val for key, val in zip(args[::2], args[1::2])}
        self.k = response["k"]
        self.width = response["width"]
        self.depth = response["depth"]
        self.decay = response["decay"]
class TDigestInfo(object):
    """Parsed TDIGEST.INFO reply (t-digest statistics)."""

    compression = None
    capacity = None
    mergedNodes = None
    unmergedNodes = None
    mergedWeight = None
    unmergedWeight = None
    totalCompressions = None

    def __init__(self, args):
        # The reply is a flat [key1, value1, key2, value2, ...] list.
        response = {nativestr(key): val for key, val in zip(args[::2], args[1::2])}
        self.compression = response["Compression"]
        self.capacity = response["Capacity"]
        self.mergedNodes = response["Merged nodes"]
        self.unmergedNodes = response["Unmerged nodes"]
        self.mergedWeight = response["Merged weight"]
        self.unmergedWeight = response["Unmerged weight"]
        self.totalCompressions = response["Total compressions"]
|
112244
|
from jarbas_hive_mind.slave.terminal import HiveMindTerminal, \
HiveMindTerminalProtocol
from jarbas_hive_mind.message import HiveMessageType, HiveMessage
from jarbas_hive_mind.nodes import HiveMindNodeType
from ovos_utils.log import LOG
from ovos_utils.messagebus import Message, get_mycroft_bus
import json
class HiveMindSlaveProtocol(HiveMindTerminalProtocol):
    """Terminal protocol that mirrors connection lifecycle events onto the
    local mycroft bus before delegating to the base implementation."""

    platform = "HiveMindSlaveV0.2"

    @property
    def bus(self):
        return self.factory.bus

    def onConnect(self, response):
        server_id = response.headers["server"]
        self.bus.emit(Message("hive.mind.connected", {"server_id": server_id}))
        super().onConnect(response)

    def onOpen(self):
        self.bus.emit(Message("hive.mind.websocket.open"))
        super().onOpen()

    def onClose(self, wasClean, code, reason):
        details = {"wasClean": wasClean, "reason": reason, "code": code}
        self.bus.emit(Message("hive.mind.client.closed", details))
        super().onClose(wasClean, code, reason)
class HiveMindSlave(HiveMindTerminal):
    """HiveMind terminal that bridges a local mycroft bus to a hivemind.

    Local bus messages are forwarded to the hivemind connection when this
    client is their destination, and hivemind bus messages are injected
    back into the local bus.
    """
    protocol = HiveMindSlaveProtocol
    node_type = HiveMindNodeType.SLAVE

    def __init__(self, bus=None, *args, **kwargs):
        super(HiveMindSlave, self).__init__(*args, **kwargs)
        # mycroft_ws
        self.bus = bus or get_mycroft_bus()
        self.register_mycroft_messages()

    # mycroft methods
    def register_mycroft_messages(self):
        # "message" fires for every local bus message, so each one is
        # considered for forwarding to the hivemind.
        self.bus.on("message", self.handle_outgoing_mycroft)
        self.bus.on("hive.send", self.handle_send)

    def shutdown(self):
        # Detach the listeners registered in register_mycroft_messages().
        self.bus.remove("message", self.handle_outgoing_mycroft)
        self.bus.remove("hive.send", self.handle_send)

    def handle_send(self, message):
        """Route an explicit "hive.send" bus request to a hive primitive.

        message.data must carry "msg_type" (a HiveMessageType value) and
        "payload" (the message to deliver).
        """
        msg_type = message.data["msg_type"]
        pload = message.data["payload"]
        if msg_type == HiveMessageType.BUS:
            self.send_to_hivemind_bus(pload)
        elif msg_type == HiveMessageType.PROPAGATE:
            self.interface.propagate(pload)
        elif msg_type == HiveMessageType.BROADCAST:
            # Ignore silently, if a Master is connected to bus it will
            # handle it
            pass
        elif msg_type == HiveMessageType.ESCALATE:
            self.interface.escalate(pload)
        else:
            LOG.error("Unknown HiveMind protocol msg_type")

    def handle_outgoing_mycroft(self, message=None):
        """Forward a local bus message upstream when this peer is its target."""
        if not self.client:
            return  # not connected to hivemind yet
        # forward internal messages to connections if they are the target
        # Normalize dict -> json string -> Message before inspecting it.
        if isinstance(message, dict):
            message = json.dumps(message)
        if isinstance(message, str):
            message = Message.deserialize(message)
        if message.msg_type == "complete_intent_failure":
            message.msg_type = "hive.complete_intent_failure"
        message.context = message.context or {}
        message.context["source"] = self.client.peer
        peer = message.context.get("destination")
        # Only forward when the message is addressed to this connection.
        if peer and peer == self.client.peer:
            msg = HiveMessage(HiveMessageType.BUS,
                              source_peer=self.client.peer,
                              payload=message.serialize())
            self.interface.send(msg)

    # parsed protocol messages
    def handle_incoming_mycroft(self, payload):
        """ HiveMind is sending a mycroft bus message"""
        # you are a slave_connection, just signal it in the bus
        self.bus.emit(Message("hive.message.received", payload))
        # and then actually inject it no questions asked
        # TODO make behaviour configurable
        super().handle_incoming_message(payload)

    # websocket handlers
    def on_binary(self, payload):
        # TODO receive binary file
        LOG.info("[BINARY MESSAGE]")

    def send_to_hivemind_bus(self, payload):
        """Send payload upstream and echo the send on the local bus."""
        super().send_to_hivemind_bus(payload)
        self.bus.emit(Message("hive.message.sent",
                              {"payload": payload}))
|
112277
|
import cv2
import os
import re
import numpy as np
from multiprocessing import Pool
import pickle
# Dataset layout: RGB videos, masked depth maps and skeleton files live under
# root_folder and share the "SsssCcccPpppRrrrAaaa" naming scheme.
root_folder = './NTURGBD'
rgb_folder = os.path.join(root_folder, './nturgb+d_rgb')
depth_folder = os.path.join(root_folder, './nturgb+d_depth_masked')
skeleton_folder = os.path.join(root_folder, './nturgb+d_skeletons')

# One tag per video, e.g. "S001C002P003R002A013".
tags = os.listdir(rgb_folder)
tags = [f.split('_')[0] for f in tags]

# (setup, camera) pair per tag; videos recorded with the same setup and
# camera share one RGB->depth homography.
video_set = []
# BUG FIX: raw string, so "\d" is a regex digit class instead of a
# deprecated string escape (a SyntaxWarning/DeprecationWarning otherwise).
compiled_regex = re.compile(r'.*S(\d{3})C(\d{3})P(\d{3})R(\d{3})A(\d{3}).*')
for t in tags:
    match = compiled_regex.match(t)
    setup, camera, performer, replication, action = [*map(int, match.groups())]
    video_set.append((setup, camera))
def process_video_set(target_video_set):
    """Estimate the RGB->depth homography for one (setup, camera) pair.

    Randomly subsamples about half of the matching videos, collects the
    per-joint (color, depth) pixel coordinates from the skeleton files,
    and fits a RANSAC homography mapping RGB coordinates onto depth
    coordinates.  Returns (target_video_set, H).
    """
    print("Starting {}".format(target_video_set))
    rgb = []
    d = []
    for i in range(len(tags)):
        # Keep only videos of this (setup, camera) pair; ~50% random subsample.
        if video_set[i] != target_video_set or np.random.rand() > 0.5:
            continue
        with open(os.path.join(skeleton_folder, tags[i] + '.skeleton'), 'r') as fd:
            data = fd.readlines()
        # Sequential parse of the NTU skeleton format: frame count, then per
        # frame a body count, per body a header line plus joint count and
        # joint lines.  data.pop(0) consumes the file line by line in exactly
        # that order, so statement order here is load-bearing.
        joint_data = []
        for frame_idx in range(int(data.pop(0))):
            for body_idx in range(int(data.pop(0))):
                body = data.pop(0)  # body header line (ids/flags), unused
                for joint_idx in range(int(data.pop(0))):
                    line = data.pop(0).split()
                    # Only the first tracked body contributes correspondences.
                    if body_idx == 0:
                        joint_data.append((frame_idx, body_idx, joint_idx, line[:7]))
        depth = []
        color = []
        for joint in joint_data:
            x = np.array(joint[3], dtype=np.float32)
            # Per NTU joint line layout: fields 3:5 are depth-map (x, y),
            # fields 5:7 are color-frame (x, y).
            depth.append(x[3:5])
            color.append(x[5:7])
        if len(depth) == 0:
            assert len(color) == 0
            continue
        d.append(np.stack(depth))
        rgb.append(np.stack(color))
    rgb = np.concatenate(rgb).astype(np.float32)
    d = np.concatenate(d).astype(np.float32)
    # Robust fit: maps RGB pixel coordinates to depth pixel coordinates.
    H, _ = cv2.findHomography(rgb, d, cv2.RANSAC)
    print("Finishing {}".format(target_video_set))
    return (target_video_set, H)
def process_tag(arg):
    """Warp every RGB frame of one video into depth-image coordinates.

    `arg` is a (tag, homography) pair; warped frames are written as JPEGs
    under nturgb+d_rgb_warped_correction/<tag>/.
    """
    tag, H = arg
    print("Starting {}".format(tag))
    target_folder = os.path.join(root_folder, './nturgb+d_rgb_warped_correction', tag)
    os.makedirs(target_folder, exist_ok=True)
    vidcap = cv2.VideoCapture(os.path.join(rgb_folder, tag + '_rgb.avi'))
    counter = 1
    while True:
        success, image = vidcap.read()
        if not success:
            break
        # 512x424 is the depth sensor resolution.
        warped_image = cv2.warpPerspective(image, H, (512, 424))
        save_image_fname = os.path.join(target_folder, 'WRGB-{}.jpg'.format(str(counter).zfill(8)))
        cv2.imwrite(save_image_fname, warped_image)
        counter += 1
    print("Finishing {} with {} frames".format(tag, counter))
if __name__ == '__main__':
    unique_video_set = set(video_set)
    processNum = 16
    pool = Pool(processNum)

    print("Calculating the Homography...")
    return_value = pool.map(process_video_set, list(unique_video_set))
    homography_dict = {d[0]: d[1] for d in return_value}
    pickle.dump(homography_dict, open('homography_dict_correction.pkl', 'wb'))

    print("Warping RGB...")
    print("Before tags num: {}".format(len(tags)))
    exclude_list = open('./NTU_RGBD_samples_with_missing_skeletons.txt', 'r').readlines()
    exclude_list = [l.strip() for l in exclude_list]
    excluded = set(exclude_list)  # O(1) membership tests
    new_tags = [t for t in tags if t not in excluded]
    print("After tags num: {}".format(len(new_tags)))
    homography_dict = pickle.load(open('homography_dict_correction.pkl', 'rb'))

    # BUG FIX: the original looked up video_set.index(k) (an index into the
    # *unfiltered* tags list) and used it to index the *filtered* new_tags
    # list — misaligning tags after filtering and warping only one video per
    # (setup, camera) pair.  Instead, pair every surviving tag with the
    # homography of its own (setup, camera).
    args = []
    for tag, vs in zip(tags, video_set):
        if tag in excluded:
            continue  # sample with missing skeletons
        if vs in homography_dict:
            args.append((tag, homography_dict[vs]))
    pool.map(process_tag, args)
|
112278
|
import numpy as np
import hetu as ht
from hetu import gpu_links as gpu_op
def test_adamw():
    """Compare gpu_op.adamw_update against a NumPy reference AdamW step."""
    ctx = ht.gpu(0)
    shape = (500,400)
    param = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    grad = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    m = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    v = np.random.uniform(0, 10, size=shape).astype(np.float32)  # second moment is non-negative
    lr = 1e-2
    beta1 = 0.9
    beta2 = 0.99
    # Bias-correction factors as if 10 optimizer steps had already been taken.
    beta1t = beta1**10
    beta2t = beta2**10
    eps = 1e-7
    weight_decay = 0.1
    print("Prev param:")
    print(param)
    print("Prev m:")
    print(m)
    print("Prev v:")
    print(v)
    arr_param = ht.array(param, ctx)
    arr_grad = ht.array(grad, ctx)
    arr_m = ht.array(m, ctx)
    arr_v = ht.array(v, ctx)
    # In-place GPU update of param / m / v.
    gpu_op.adamw_update(arr_param, arr_grad, arr_m, arr_v, lr, beta1, beta2, beta1t, beta2t, eps, weight_decay)
    re_param = arr_param.asnumpy()
    re_m = arr_m.asnumpy()
    re_v = arr_v.asnumpy()
    # NumPy reference implementation of Adam with decoupled weight decay.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    mc = m / (1 - beta1t)
    vc = v / (1 - beta2t)
    update = mc / (np.sqrt(vc) + eps)
    param = param - lr * (update + weight_decay * param)
    print("Cur param:")
    print(re_param)
    print(param)
    print("Cur m:")
    print(re_m)
    print(m)
    print("Cur v:")
    print(re_v)
    print(v)
    np.testing.assert_allclose(re_param, param, atol=1e-5)
    np.testing.assert_allclose(re_m, m, atol=1e-5)
    np.testing.assert_allclose(re_v, v, atol=1e-5)
def test_lamb():
    """Compare gpu_op.lamb_update against a NumPy reference LAMB step.

    LAMB scales the Adam-style update by the trust ratio
    ||param|| / ||update||.
    """
    ctx = ht.gpu(0)
    shape = (4,5)
    param = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    grad = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    m = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    v = np.random.uniform(0, 10, size=shape).astype(np.float32)  # second moment is non-negative
    lr = 1e-2
    beta1 = 0.9
    beta2 = 0.99
    # Bias-correction factors as if 10 optimizer steps had already been taken.
    beta1t = beta1**10
    beta2t = beta2**10
    eps = 1e-7
    weight_decay = 0.1
    print("Prev param:")
    print(param)
    print("Prev m:")
    print(m)
    print("Prev v:")
    print(v)
    arr_param = ht.array(param, ctx)
    arr_grad = ht.array(grad, ctx)
    arr_m = ht.array(m, ctx)
    arr_v = ht.array(v, ctx)
    # In-place GPU update of param / m / v.
    gpu_op.lamb_update(arr_param, arr_grad, arr_m, arr_v, lr, beta1, beta2, beta1t, beta2t, eps, weight_decay)
    re_param = arr_param.asnumpy()
    re_m = arr_m.asnumpy()
    re_v = arr_v.asnumpy()
    # NumPy reference implementation.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    mc = m / (1 - beta1t)
    vc = v / (1 - beta2t)
    update = mc / (np.sqrt(vc) + eps)
    # Trust ratio: norm of the (pre-update) parameters over norm of the update.
    norm2_param = np.sqrt(np.sum(np.power(param, 2)))
    norm2_update = np.sqrt(np.sum(np.power(update, 2)))
    param = param - lr * norm2_param / norm2_update * (update + weight_decay * param)
    print("Cur param:")
    print(re_param)
    print(param)
    print("Cur m:")
    print(re_m)
    print(m)
    print("Cur v:")
    print(re_v)
    print(v)
    np.testing.assert_allclose(re_param, param, atol=1e-5)
    np.testing.assert_allclose(re_m, m, atol=1e-5)
    np.testing.assert_allclose(re_v, v, atol=1e-5)
def test_adamw_sparse():
    """Compare gpu_op.adamw_update with IndexedSlices gradients against NumPy.

    Duplicate row indices in the sparse gradient must have their gradients
    summed before the dense reference update is applied.
    """
    ctx = ht.gpu(0)
    shape = (500, 400)
    l = np.random.randint(0,500,size=(100))
    indices = np.array(l)
    param = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    grad = np.random.uniform(-10, 10, size=(indices.shape[0], shape[1])).astype(np.float32)
    m = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    v = np.random.uniform(0, 10, size=shape).astype(np.float32)
    lr = 1e-2
    beta1 = 0.9
    beta2 = 0.99
    # Bias-correction factors as if 10 optimizer steps had already been taken.
    beta1t = beta1**10
    beta2t = beta2**10
    eps = 1e-7
    weight_decay = 0.1
    print("Prev param:")
    print(param)
    print("Prev m:")
    print(m)
    print("Prev v:")
    print(v)
    print("Indices:")
    print(indices)
    print("Grad:")
    print(grad)
    arr_param = ht.array(param, ctx)
    arr_indices = ht.array(indices, ctx)
    arr_value = ht.array(grad, ctx)
    arr_grad = ht.IndexedSlices(indices = arr_indices, values = arr_value, dense_shape = shape)
    arr_m = ht.array(m, ctx)
    arr_v = ht.array(v, ctx)
    gpu_op.adamw_update(arr_param, arr_grad, arr_m, arr_v, lr, beta1, beta2, beta1t, beta2t, eps, weight_decay)
    re_param = arr_param.asnumpy()
    re_m = arr_m.asnumpy()
    re_v = arr_v.asnumpy()
    # numpy deduplicate: sum the gradient rows of repeated indices.
    d = dict()
    for i in l:
        d[i]=[]
    for i, g in zip(l, grad):
        d[i].append(g)
    for key in d.keys():
        # BUG FIX: copy before accumulating; "g0 += ..." otherwise modifies
        # the first stored gradient row (a view into `grad`) in place.
        g0 = d[key][0].copy()
        for i in range(1, len(d[key])):
            g0 += d[key][i]
        d[key] = g0
    grad_new = []
    l_new = []
    for key in d.keys():
        l_new.append(key)
        grad_new.append(d[key])
    grad_new = np.array(grad_new)
    # Dense reference AdamW update applied only to the touched rows.
    for idx, g in zip(l_new, grad_new):
        m[idx] = beta1 * m[idx] + (1 - beta1) * g
        v[idx] = beta2 * v[idx] + (1 - beta2) * g * g
        mc_idx = m[idx] / (1 - beta1t)
        vc_idx = v[idx] / (1 - beta2t)
        update = mc_idx / (np.sqrt(vc_idx) + eps)
        param[idx] = param[idx] - lr * (update + weight_decay * param[idx])
    print("Cur param:")
    print(re_param)
    print(param)
    print("Cur m:")
    print(re_m)
    print(m)
    print("Cur v:")
    print(re_v)
    print(v)
    np.testing.assert_allclose(re_param, param, atol=1e-5)
    np.testing.assert_allclose(re_m, m, atol=1e-5)
    np.testing.assert_allclose(re_v, v, atol=1e-5)
def test_lamb_sparse():
    """Compare gpu_op.lamb_update with IndexedSlices gradients against NumPy.

    Gradients of duplicate indices are summed first; the trust ratio uses
    only the indexed parameter rows.
    """
    ctx = ht.gpu(0)
    shape = (500, 400)
    l = np.random.randint(0,500,size=(100))
    indices = np.array(l)
    param = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    grad = np.random.uniform(-10, 10, size=(indices.shape[0], shape[1])).astype(np.float32)
    m = np.random.uniform(-10, 10, size=shape).astype(np.float32)
    v = np.random.uniform(0, 10, size=shape).astype(np.float32)
    lr = 1e-2
    beta1 = 0.9
    beta2 = 0.99
    # Bias-correction factors as if 10 optimizer steps had already been taken.
    beta1t = beta1**10
    beta2t = beta2**10
    eps = 1e-7
    weight_decay = 0.1
    print("Prev param:")
    print(param)
    print("Prev m:")
    print(m)
    print("Prev v:")
    print(v)
    print("Indices:")
    print(indices)
    print("Grad:")
    print(grad)
    arr_param = ht.array(param, ctx)
    arr_indices = ht.array(indices, ctx)
    arr_value = ht.array(grad, ctx)
    arr_grad = ht.IndexedSlices(indices = arr_indices, values = arr_value, dense_shape = shape)
    arr_m = ht.array(m, ctx)
    arr_v = ht.array(v, ctx)
    gpu_op.lamb_update(arr_param, arr_grad, arr_m, arr_v, lr, beta1, beta2, beta1t, beta2t, eps, weight_decay)
    re_param = arr_param.asnumpy()
    re_m = arr_m.asnumpy()
    re_v = arr_v.asnumpy()
    # numpy deduplicate: sum the gradient rows of repeated indices.
    d = dict()
    for i in l:
        d[i]=[]
    for i, g in zip(l, grad):
        d[i].append(g)
    for key in d.keys():
        # BUG FIX: copy before accumulating; "g0 += ..." otherwise modifies
        # the first stored gradient row (a view into `grad`) in place.
        g0 = d[key][0].copy()
        for i in range(1, len(d[key])):
            g0 += d[key][i]
        d[key] = g0
    grad_new = []
    l_new = []
    for key in d.keys():
        l_new.append(key)
        grad_new.append(d[key])
    grad_new = np.array(grad_new)
    # Per-row Adam-style updates for the touched rows.
    updates = []
    for idx, g in zip(l_new, grad_new):
        m[idx] = beta1 * m[idx] + (1 - beta1) * g
        v[idx] = beta2 * v[idx] + (1 - beta2) * g * g
        mc_idx = m[idx] / (1 - beta1t)
        vc_idx = v[idx] / (1 - beta2t)
        update = mc_idx / (np.sqrt(vc_idx) + eps)
        updates.append(update)
    updates = np.array(updates)
    param_indexed = []
    for idx in l_new:
        param_indexed.append(param[idx])
    param_indexed = np.array(param_indexed)
    norm2_param = np.sqrt(np.sum(np.power(param_indexed, 2)))  # only use indexed params to calculate norm2
    norm2_update = np.sqrt(np.sum(np.power(updates, 2)))
    for idx, u in zip(l_new, updates):
        param[idx] = param[idx] - lr * norm2_param / norm2_update * (u + weight_decay * param[idx])
    print("Cur param:")
    print(re_param)
    print(param)
    print("Cur m:")
    print(re_m)
    print(m)
    print("Cur v:")
    print(re_v)
    print(v)
    np.testing.assert_allclose(re_param, param, atol=1e-5)
    np.testing.assert_allclose(re_m, m, atol=1e-5)
    np.testing.assert_allclose(re_v, v, atol=1e-5)
# Guarded so importing this module does not immediately trigger GPU work.
if __name__ == '__main__':
    test_adamw()
    test_lamb()
    test_adamw_sparse()
    test_lamb_sparse()
|
112345
|
import unittest
import json
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from QueryBioLinkExtended import QueryBioLinkExtended as QBLEx
def get_from_test_file(key):
    """Return the cached JSON value for *key* from query_test_data.json.

    Returns None when the file does not contain valid JSON.  A missing key
    still raises KeyError, and a missing file still raises OSError
    (unchanged caller-visible behavior).
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original leaked the handle on any error other than bad JSON).
    with open('query_test_data.json', 'r') as f:
        test_data = f.read()
    try:
        test_data_dict = json.loads(test_data)
        return test_data_dict[key]
    except ValueError:
        return None
class QueryBioLinkExtendedTestCase(unittest.TestCase):
    """Each QBLEx getter is compared against cached JSON in query_test_data.json."""

    def _assert_entity_matches(self, extended_info_json, key):
        # Shared check factored out of four duplicated test bodies: the
        # result must exist and, unless the upstream lookup failed
        # ("UNKNOWN"), must equal the cached reference payload.
        self.assertIsNotNone(extended_info_json)
        if extended_info_json != "UNKNOWN":
            self.assertEqual(json.loads(extended_info_json),
                             json.loads(get_from_test_file(key)))

    def test_get_anatomy_entity(self):
        self._assert_entity_matches(QBLEx.get_anatomy_entity('UBERON:0004476'),
                                    'UBERON:0004476')

    def test_get_phenotype_entity(self):
        self._assert_entity_matches(QBLEx.get_phenotype_entity('HP:0011515'),
                                    'HP:0011515')

    def test_get_disease_entity(self):
        self._assert_entity_matches(QBLEx.get_disease_entity('DOID:3965'),
                                    'DOID:3965')

    def test_get_bio_process_entity(self):
        self._assert_entity_matches(QBLEx.get_bio_process_entity('GO:0097289'),
                                    'GO:0097289')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
112355
|
from PIL import Image, ImageChops, ImageOps, ImageEnhance
class ScreenshotOperations:
    """Screenshot transforms; every method returns a new PIL image:

    - change_contour: convert to grayscale (the original docstring's
      "changes contours" was misleading — no contour detection happens)
    - change_contrast: scale contrast by a float/int factor
    - change_brightness: scale brightness by a float/int factor
    - invert_image: invert image colors
    """
    # NOTE: the original no-op __init__ was removed; the implicit default
    # constructor behaves identically.

    def change_contour(self, image):
        """Return a grayscale copy of *image*."""
        return ImageOps.grayscale(image)

    def change_contrast(self, image, contrast):
        """Return *image* with contrast scaled by *contrast* (1.0 = unchanged)."""
        return ImageEnhance.Contrast(image).enhance(float(contrast))

    def change_brightness(self, image, brightness):
        """Return *image* with brightness scaled by *brightness* (1.0 = unchanged)."""
        return ImageEnhance.Brightness(image).enhance(float(brightness))

    def invert_image(self, image):
        """Return *image* with colors inverted."""
        return ImageOps.invert(image)
|
112373
|
from flourish import Flourish
from flourish.generators.base import SourceGenerator
from flourish.source import SourceFile
import pytest
class TestFlourishPaths:
    """Path resolution and filter enumeration against the tests/source fixtures."""

    @classmethod
    def setup_class(cls):
        # BUG FIX: `pytest.warns(None)` has been removed in pytest 7 (warns
        # now requires an expected warning class), so suppress the expected
        # fixture warnings explicitly instead.
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cls.flourish = Flourish('tests/source')

    def test_homepage_resolves(self):
        assert self.flourish.resolve_path('homepage') == '/'

    def test_homepage_resolves_even_with_arguments(self):
        assert self.flourish.resolve_path('homepage', tag='series') == '/'

    def test_tag_index_with_arguments_resolves(self):
        assert(self.flourish.resolve_path('tags-tag-page', tag='series') ==
               '/tags/series/')
        assert(self.flourish.resolve_path('tags-tag-page', tag='css') ==
               '/tags/css/')

    def test_tag_index_without_arguments_raises(self):
        with pytest.raises(KeyError):
            _ = self.flourish.resolve_path('tags-tag-page')

    def test_homepage_has_one_valid_filter(self):
        assert self.flourish.all_valid_filters_for_path('homepage') == [
            {}
        ]

    def test_post_detail_has_many_valid_filters(self):
        assert self.flourish.all_valid_filters_for_path('source') == [
            {'slug': 'basic-page'},
            {'slug': 'markdown-page'},
            {'slug': 'nothing'},
            {'slug': 'series/index'},
            {'slug': 'series/part-one'},
            {'slug': 'series/part-three'},
            {'slug': 'series/part-two'},
            {'slug': 'thing-one'},
            {'slug': 'thing-two'},
        ]

    def test_tag_index_has_many_valid_filters(self):
        assert self.flourish.all_valid_filters_for_path('tags-tag-page') == [
            {'tag': 'basic-page'},
            {'tag': 'basically'},
            {'tag': 'first'},
            {'tag': 'index'},
            {'tag': 'one'},
            {'tag': 'second'},
            {'tag': 'series'},
            {'tag': 'three'},
            {'tag': 'two'},
        ]

    def test_tag_post_detail_resolves_to_many_with_only_one_source_each(self):
        _filters = self.flourish.all_valid_filters_for_path('tag-post-detail')
        assert _filters == [
            {'tag': 'basic-page', 'slug': 'basic-page'},
            {'tag': 'basically', 'slug': 'thing-one'},
            {'tag': 'basically', 'slug': 'thing-two'},
            {'tag': 'first', 'slug': 'thing-one'},
            {'tag': 'index', 'slug': 'series/index'},
            {'tag': 'one', 'slug': 'series/part-one'},
            {'tag': 'one', 'slug': 'thing-one'},
            {'tag': 'second', 'slug': 'thing-two'},
            {'tag': 'series', 'slug': 'series/index'},
            {'tag': 'series', 'slug': 'series/part-one'},
            {'tag': 'series', 'slug': 'series/part-three'},
            {'tag': 'series', 'slug': 'series/part-two'},
            {'tag': 'three', 'slug': 'series/part-three'},
            {'tag': 'two', 'slug': 'series/part-two'},
            {'tag': 'two', 'slug': 'thing-two'},
        ]
        # as the filters include `slug` (which is unique),
        # each should only match one source
        for _filter in _filters:
            assert self.flourish.sources.filter(**_filter).count() == 1

    def test_year_index(self):
        _filters = self.flourish.all_valid_filters_for_path('year-index')
        assert _filters == [
            {'year': '2015'},
            {'year': '2016'},
        ]
        sources = self.flourish.sources
        assert sources.filter(**_filters[0]).count() == 1   # 2015
        assert sources.filter(**_filters[1]).count() == 7   # 2016
        assert sources.filter(**_filters[0]).count() == 1   # 2015

    def test_month_index(self):
        _filters = self.flourish.all_valid_filters_for_path('month-index')
        assert _filters == [
            {'month': '12', 'year': '2015'},
            {'month': '02', 'year': '2016'},
            {'month': '06', 'year': '2016'},
        ]
        sources = self.flourish.sources
        assert sources.filter(**_filters[0]).count() == 1   # 2015/12
        assert sources.filter(**_filters[1]).count() == 1   # 2016/02
        assert sources.filter(**_filters[2]).count() == 6   # 2016/06

    def test_day_index(self):
        _filters = self.flourish.all_valid_filters_for_path('day-index')
        assert _filters == [
            {'day': '25', 'month': '12', 'year': '2015'},
            {'day': '29', 'month': '02', 'year': '2016'},
            {'day': '04', 'month': '06', 'year': '2016'},
            {'day': '06', 'month': '06', 'year': '2016'},
        ]
        sources = self.flourish.sources
        assert sources.filter(**_filters[0]).count() == 1   # 2015/12/25
        assert sources.filter(**_filters[1]).count() == 1   # 2016/02/29
        assert sources.filter(**_filters[2]).count() == 5   # 2016/06/04
        assert sources.filter(**_filters[3]).count() == 1   # 2016/06/06

    def test_no_such_keyword_has_no_filters(self):
        assert self.flourish.all_valid_filters_for_path('no-such-keyword') \
            == []

    def test_not_configured_has_no_filters(self):
        with pytest.raises(SourceFile.DoesNotExist):
            _ = self.flourish.all_valid_filters_for_path('awooga')

    def test_paths_for_sources(self):
        assert [
            '/basic-page',
            '/markdown-page',
            '/nothing',
            '/thing-one',
            '/thing-two',
            '/series/part-one',
            '/series/part-three',
            '/series/part-two',
            '/series/',
        ] == [source.path for source in self.flourish.sources.all()]

    def test_lookup_path_handler(self):
        paths = (
            ('/', ('homepage', {})),
            ('/tags/first/', ('tags-tag-page', {'tag': 'first'})),
            ('/index.atom', ('atom-feed', {})),
            ('/thing-one', ('source', {'slug': 'thing-one'})),
        )
        for path, args in paths:
            matches = self.flourish.get_handler_for_path(path)
            assert matches[0] == args
        assert [] == self.flourish.get_handler_for_path('/rabble')

    def test_lookup_path_handler_wildcard(self):
        expected = [
            ('tags-tag-page', {'tag': 'first'}),
            ('tag-post-detail', {'slug': 'thing-one', 'tag': 'first'}),
            ('tags-atom-feed', {'tag': 'first'}),
        ]
        assert expected == self.flourish.get_handler_for_path('/tags/first/?')

    def test_lookup_path_handler_wildcard_submatches(self):
        expected = [
            ('year-index', {'year': '2016'}),
            ('month-index', {'month': '02', 'year': '2016'}),
            ('month-index', {'month': '06', 'year': '2016'}),
            ('day-index', {'day': '29', 'month': '02', 'year': '2016'}),
            ('day-index', {'day': '04', 'month': '06', 'year': '2016'}),
            ('day-index', {'day': '06', 'month': '06', 'year': '2016'}),
        ]
        assert expected == self.flourish.get_handler_for_path('/2016?')
class TestFlourishSourcesPath:
    """Source path generation with different prefix tokens in the path spec."""

    @staticmethod
    def _make_flourish():
        # BUG FIX: `pytest.warns(None)` has been removed in pytest 7; the
        # expected fixture warnings are suppressed explicitly instead
        # (shared helper for the three tests below).
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return Flourish('tests/source')

    def test_category_prefixed_sources(self):
        _flourish = self._make_flourish()
        _flourish.add_path(
            SourceGenerator(
                path = '/#category/#slug',
                name = 'source',
            ),
        )
        assert [
            '/static/basic-page',
            '/post/markdown-page',
            None,
            '/thing/thing-one',
            '/thing/thing-two',
            '/article/series/part-one',
            '/article/series/part-three',
            '/article/series/part-two',
            '/article/series/',
        ] == [source.path for source in _flourish.sources.all()]

    def test_invalid_prefixed_sources(self):
        _flourish = self._make_flourish()
        _flourish.add_path(
            SourceGenerator(
                path = '/#page_type/#slug',
                name = 'source',
            ),
        )
        assert [
            None,
            None,
            None,
            '/post/thing-one',
            '/post/thing-two',
            '/post/series/part-one',
            '/post/series/part-three',
            '/post/series/part-two',
            '/series_index/series/',
        ] == [source.path for source in _flourish.sources.all()]
        # FIXME catch third warning

    def test_multiple_option_prefixed_sources(self):
        _flourish = self._make_flourish()
        _flourish.add_path(
            SourceGenerator(
                path = '/#tag/#slug',
                name = 'source',
            ),
        )
        assert [
            '/basic-page/basic-page',
            None,
            None,
            '/basically/thing-one',
            '/basically/thing-two',
            '/series/series/part-one',
            '/three/series/part-three',
            '/series/series/part-two',
            '/series/series/',
        ] == [source.path for source in _flourish.sources.all()]
        # FIXME catch third warning
|
112399
|
from state import called
def setup():
    # Package-level setup fixture: records invocation order in the shared list.
    called.append('test_pak1.setup')
def teardown():
    # Package-level teardown fixture: records invocation order in the shared list.
    called.append('test_pak1.teardown')
def test_one_one():
    # Records that this test ran (order is asserted elsewhere).
    called.append('test_pak1.test_one_one')
def test_one_two():
    # Records that this test ran (order is asserted elsewhere).
    called.append('test_pak1.test_one_two')
|
112405
|
from django import template
register = template.Library()  # template-tag registry for this module's filters
@register.filter
def get_responses(responses, pk):
    # Narrow the related queryset to answers for question `pk`.
    # NOTE(review): assumes `responses` exposes a reverse relation named
    # "response" with an `answer_to` FK — confirm against the model definitions.
    return responses.response.filter(answer_to__pk = pk)
@register.filter
def is_response(responses, pk):
    """Return True if any response in *responses* selected option *pk*."""
    # int() on both sides tolerates str/int mixes coming from templates.
    return any(int(r.answer) == int(pk) for r in responses)
|
112409
|
import argparse
import gym
import numpy as np
import os
import torch
import BCQ
import BEAR
import utils
def train_PQL_BEAR(state_dim, action_dim, max_action, device, args):
    """Train a PQL-BEAR policy offline, evaluating every args.eval_freq steps.

    Phase 1 trains the state VAE used as a behavior-density filter; phase 2
    runs BEAR policy training and saves evaluation returns to ./results.
    """
    print("Training BEARState\n")
    log_name = f"{args.dataset}_{args.seed}"
    # Initialize policy
    policy = BEAR.BEAR(2, state_dim, action_dim, max_action, delta_conf=0.1, use_bootstrap=False,
                       version=args.version,
                       lambda_=0.0,
                       threshold=0.05,
                       mode=args.mode,
                       num_samples_match=args.num_samples_match,
                       mmd_sigma=args.mmd_sigma,
                       lagrange_thresh=args.lagrange_thresh,
                       use_kl=(True if args.distance_type == "KL" else False),
                       use_ensemble=(False if args.use_ensemble_variance == "False" else True),
                       kernel_type=args.kernel_type,
                       use_state_filter=True, actor_lr=args.actor_lr, beta=args.beta,
                       n_action=args.n_action, n_action_execute=args.n_action_execute,
                       backup=args.backup, ql_noise=args.ql_noise, vmin=args.vmin
                       )
    # Load buffer (bootstrap_dim matches BEAR's ensemble bootstrapping).
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)
    replay_buffer.load(f"./buffers/{args.dataset}", args.load_buffer_size, bootstrap_dim=4)
    evaluations = []
    # Phase 1: VAE pretraining.
    training_iters = 0
    while training_iters < args.max_vae_trainstep:
        vae_loss = policy.train_vae(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        print(f"Training iterations: {training_iters}")
        print("VAE loss", vae_loss)
        training_iters += args.eval_freq
    # Choose the filtering threshold b: a percentile of the VAE test loss
    # when automatic, otherwise the hardcoded args.beta.
    if args.automatic_beta:
        test_loss = policy.test_vae(replay_buffer, batch_size=100000)
        beta = np.percentile(test_loss, args.beta_percentile)
        policy.beta = beta
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_bpercentile{args.beta_percentile}"
        print("Test vae",args.beta_percentile,"percentile:", beta)
    else:
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_beta{str(args.beta)}"
    if args.backup == "QL":
        hp_setting += f"_ql{args.ql_noise}"
    # Phase 2: policy training with periodic evaluation.
    training_iters = 0
    while training_iters < args.max_timesteps:
        # (the unused `pol_vals` assignment was removed)
        policy.train(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        evaluations.append(eval_policy(policy, args.env, args.seed))
        np.save(f"./results/PQL_BEAR_{hp_setting}_{log_name}", evaluations)
        training_iters += args.eval_freq
        print(f"Training iterations: {training_iters}")
def train_PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args):
    """Train a PQL-BCQ policy offline, evaluating every args.eval_freq steps.

    Phase 1 trains the state VAE filter; phase 2 runs BCQ policy training
    and saves evaluation returns to ./results.
    """
    # For saving files
    log_name = f"{args.dataset}_{args.seed}"
    print("=== Start Train ===\n")
    print("Args:\n",args)
    # Initialize policy
    policy = BCQ.PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args.discount, args.tau, args.lmbda, args.phi,
                         n_action=args.n_action, n_action_execute=args.n_action_execute,
                         backup=args.backup, ql_noise=args.ql_noise,
                         actor_lr=args.actor_lr, beta=args.beta, vmin=args.vmin)
    # Load buffer
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)
    replay_buffer.load(f"./buffers/{args.dataset}", args.load_buffer_size)
    evaluations = []
    filter_scores = []
    # Phase 1: VAE pretraining.
    training_iters = 0
    while training_iters < args.max_vae_trainstep:
        vae_loss = policy.train_vae(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        print(f"Training iterations: {training_iters}. State VAE loss: {vae_loss:.3f}.")
        training_iters += args.eval_freq
    # Choose the filtering threshold b: a percentile of the VAE test loss
    # when automatic, otherwise the hardcoded args.beta.
    if args.automatic_beta:
        test_loss = policy.test_vae(replay_buffer, batch_size=100000)
        beta = np.percentile(test_loss, args.beta_percentile)
        policy.beta = beta
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_bpercentile{args.beta_percentile}"
        print("Test vae",args.beta_percentile,"percentile:", beta)
    else:
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_beta{str(args.beta)}"
    if args.backup == "QL":
        hp_setting += f"_ql{args.ql_noise}"
    # Start training
    # BUG FIX: the log line previously claimed "BCQState_..." while results
    # are actually saved below as "PQL_..." — print the real path.
    print("Log files at:", f"./results/PQL_{hp_setting}_{log_name}")
    training_iters = 0
    while training_iters < args.max_timesteps:
        policy.train(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        evaluations.append(eval_policy(policy, args.env, args.seed, eval_episodes=20))
        np.save(f"./results/PQL_{hp_setting}_{log_name}", evaluations)
        training_iters += args.eval_freq
        print(f"Training iterations: {training_iters}")
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Roll out `policy` for `eval_episodes` episodes; return the mean return."""
    eval_env = gym.make(env_name)
    # Offset keeps the evaluation env distinct from the training seed.
    eval_env.seed(seed + 100)

    def _rollout():
        total = 0.
        state = eval_env.reset()
        done = False
        while not done:
            action = policy.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(action)
            total += reward
        return total

    avg_reward = sum(_rollout() for _ in range(eval_episodes)) / eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", default="Hopper-v2")  # OpenAI gym environment name (need to be consistent with the dataset name)
    parser.add_argument("--dataset", default="d4rl-hopper-medium-v0")  # D4RL dataset name
    parser.add_argument("--seed", default=1, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--eval_freq", default=1e4, type=float)  # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=5e5,
                        type=int)  # Max time steps to run environment or train for (this defines buffer size)
    parser.add_argument("--max_vae_trainstep", default=2e5, type=int)
    # BCQ parameter
    parser.add_argument("--batch_size", default=100, type=int)  # Mini batch size for networks
    # BUG FIX: discount/tau/lmbda had no type=, so values passed on the
    # command line arrived as *strings* and broke arithmetic downstream.
    parser.add_argument("--discount", default=0.99, type=float)  # Discount factor
    parser.add_argument("--tau", default=0.005, type=float)  # Target network update rate
    parser.add_argument("--lmbda", default=0.75, type=float)  # Weighting for clipped double Q-learning in BCQ
    parser.add_argument("--phi", default=0.1, type=float)  # Max perturbation hyper-parameter for BCQ
    parser.add_argument("--load_buffer_size", default=1000000, type=int)  # number of samples to load into the buffer
    parser.add_argument("--actor_lr", default=1e-3, type=float)  # learning rate of actor
    parser.add_argument("--n_action", default=100, type=int)  # number of sampling action for policy (in backup)
    parser.add_argument("--n_action_execute", default=100, type=int)  # number of sampling action for policy (in execution)
    # BEAR parameter
    parser.add_argument("--bear", action="store_true")  # If true, use BEAR
    parser.add_argument("--version", default='0',
                        type=str)  # Basically whether to do min(Q), max(Q), mean(Q)
    parser.add_argument('--mode', default='hardcoded',  # hardcoded
                        type=str)  # Whether to do automatic lagrange dual descent or manually tune coefficient of the MMD loss (prefered "auto")
    parser.add_argument('--num_samples_match', default=5, type=int)  # number of samples to do matching in MMD
    parser.add_argument('--mmd_sigma', default=20.0, type=float)  # The bandwidth of the MMD kernel parameter default 10
    parser.add_argument('--kernel_type', default='laplacian',
                        type=str)  # kernel type for MMD ("laplacian" or "gaussian")
    parser.add_argument('--lagrange_thresh', default=10.0,
                        type=float)  # What is the threshold for the lagrange multiplier
    parser.add_argument('--distance_type', default="MMD", type=str)  # Distance type ("KL" or "MMD")
    parser.add_argument('--use_ensemble_variance', default='False', type=str)  # Whether to use ensemble variance or not
    # Our parameter
    parser.add_argument("--backup", type=str, default="QL")  # "QL": q learning (Q-max) back up, "AC": actor-critic backup
    parser.add_argument("--ql_noise", type=float, default=0.15)  # Noise of next action in QL
    # NOTE(review): type=bool makes ANY non-empty CLI string (including
    # "False") parse as True; only the default is reliable.  Left as-is to
    # avoid changing the CLI, but consider store_true/store_false flags.
    parser.add_argument("--automatic_beta", type=bool, default=True)  # If true, use percentile for b (beta is the b in paper)
    parser.add_argument("--beta_percentile", type=float, default=2.0)  # Use x-Percentile as the value of b
    parser.add_argument("--beta", default=-0.4, type=float)  # hardcoded b, only effective when automatic_beta = False
    parser.add_argument("--vmin", default=0, type=float)  # min value of the environment. Empirically I set it to be the min of 1000 random rollout.
    args = parser.parse_args()

    print("---------------------------------------")
    if args.bear:
        print(f"Setting: Training PQL-BEAR, Env: {args.env}, Seed: {args.seed}")
    else:
        print(f"Setting: Training PQL-BCQ, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")
    if not os.path.exists("./models"):
        os.makedirs("./models")

    env = gym.make(args.env)
    # Seed everything for reproducibility.
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    max_state = float(env.observation_space.high[0])
    if max_state == np.inf:
        max_state = None  # unbounded observation space
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if args.bear:
        train_PQL_BEAR(state_dim, action_dim, max_action, device, args)
    else:
        train_PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args)
|
112519
|
import enum
from typing import Any, Optional, Union, cast
import numpy as np
import scipy.special
import sklearn.metrics as skm
from . import util
from .util import TaskType
class PredictionType(enum.Enum):
    # How classifier outputs in y_pred should be interpreted.
    LOGITS = 'logits'
    PROBS = 'probs'
def calculate_rmse(
    y_true: np.ndarray, y_pred: np.ndarray, std: Optional[float]
) -> float:
    """Return the RMSE between targets and predictions.

    If `std` is given, the (standardized-space) RMSE is rescaled back to
    the original target scale by multiplying with the targets' std.
    """
    # Equivalent to skm.mean_squared_error(...) ** 0.5 (uniform averaging
    # over all elements), but without the sklearn round-trip.
    diff = np.asarray(y_true, dtype=float) - np.asarray(y_pred, dtype=float)
    rmse = float(np.sqrt(np.mean(diff ** 2)))
    if std is not None:
        rmse *= std
    return rmse
def _get_labels_and_probs(
    y_pred: np.ndarray, task_type: TaskType, prediction_type: Optional[PredictionType]
) -> tuple[np.ndarray, Optional[np.ndarray]]:
    """Turn raw classifier output into hard labels plus (optional) probabilities.

    If *prediction_type* is None, *y_pred* is assumed to already contain
    labels and is returned unchanged alongside ``None``.
    """
    assert task_type in (TaskType.BINCLASS, TaskType.MULTICLASS)
    if prediction_type is None:
        return y_pred, None
    is_binary = task_type == TaskType.BINCLASS
    if prediction_type == PredictionType.LOGITS:
        # Map logits onto [0, 1]: sigmoid for binary, softmax otherwise.
        if is_binary:
            probs = scipy.special.expit(y_pred)
        else:
            probs = scipy.special.softmax(y_pred, axis=1)
    elif prediction_type == PredictionType.PROBS:
        probs = y_pred
    else:
        util.raise_unknown('prediction_type', prediction_type)
    assert probs is not None
    # Threshold at 0.5 for binary; argmax across classes for multiclass.
    if is_binary:
        labels = np.round(probs)
    else:
        labels = probs.argmax(axis=1)
    return labels.astype('int64'), probs
def calculate_metrics(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    task_type: Union[str, TaskType],
    prediction_type: Optional[Union[str, PredictionType]],
    y_info: dict[str, Any],
) -> dict[str, Any]:
    """Compute task-appropriate evaluation metrics.

    Regression yields ``{'rmse': ...}``; classification yields sklearn's
    per-class report, with ``roc_auc`` added for the binary case.

    Example: calculate_metrics(y_true, y_pred, 'binclass', 'logits', {})
    """
    task_type = TaskType(task_type)
    prediction_type = (
        None if prediction_type is None else PredictionType(prediction_type)
    )
    if task_type != TaskType.REGRESSION:
        labels, probs = _get_labels_and_probs(y_pred, task_type, prediction_type)
        report = skm.classification_report(y_true, labels, output_dict=True)
        result = cast(dict[str, Any], report)
        if task_type == TaskType.BINCLASS:
            result['roc_auc'] = skm.roc_auc_score(y_true, probs)
    else:
        assert prediction_type is None
        assert 'std' in y_info
        result = {'rmse': calculate_rmse(y_true, y_pred, y_info['std'])}
    return result
|
112578
|
from __future__ import absolute_import
from webfriend.rpc import Base
class Console(Base):
    """
    RPC binding for the Chrome DevTools Protocol 'Console' domain.

    See: https://chromedevtools.github.io/devtools-protocol/tot/Console
    """
    # Protocol domain name; used by the Base class when building RPC method
    # names (e.g. 'Console.enable').
    domain = 'Console'
112607
|
from dipdup.models import Transaction
from dipdup.context import HandlerContext
import tzprofiles_indexer.models as models
from tzprofiles_indexer.types.tzprofile.parameter.default import DefaultParameter
from tzprofiles_indexer.types.tzprofile.storage import TzprofileStorage
from tzprofiles_indexer.handlers import resolve_profile
async def on_update(
    ctx: HandlerContext,
    tzprofile_update: Transaction[DefaultParameter, TzprofileStorage],
) -> None:
    """Re-resolve a profile's claims when its tzprofile contract is called."""
    profile = await models.TZProfile.get(account=tzprofile_update.storage.owner)
    if profile.contract == tzprofile_update.data.target_address:
        # Wipe all derived state before re-resolving the claim set.
        profile.valid_claims = []
        profile.invalid_claims = []
        profile.errored = False
        for attribute in (
            'alias', 'description', 'logo', 'website', 'twitter',
            'domain_name', 'discord', 'github', 'ethereum',
        ):
            setattr(profile, attribute, None)
        await resolve_profile(tzprofile_update.storage.claims, profile)
        await profile.save()
|
112631
|
from setuptools import setup
# pip moved its entry point to pip._internal in pip 10; fall back for
# older installs. NOTE(review): importing pip programmatically is
# officially unsupported and may break on future pip versions.
try:
    from pip import main as pipmain
except ImportError:
    from pip._internal import main as pipmain
# install_requires sucks, I don't know why and I really
# don't care, so I do this:
# NOTE(review): this runs `pip install -r requirements.txt` as a side
# effect of merely importing/executing setup.py.
pipmain(["install", "-r", "requirements.txt"])
# Command-line entry points shipped with the package.
scripts = ["scripts/squid-dl", "scripts/squidson"]
setup(
    name="squid-dl",
    version="0.9",
    author="<NAME> (@tuxlovesyou)",
    description="Massively parallel YouTube downloader using yt-dlp",
    author_email="<EMAIL>",
    url="https://github.com/tuxlovesyou/squid-dl",
    packages=["squid_dl"],
    include_package_data=True,
    scripts=scripts,
)
|
112738
|
import numpy as np
from conftest import EPS
from testutils import (
CLUSTER_LABEL_FIRST_CLUSTER,
CLUSTER_LABEL_NOISE,
assert_cluster_labels,
assert_label_of_object_is_among_possible_ones,
assert_two_objects_are_in_same_cluster,
insert_objects_then_assert_cluster_labels,
reflect_horizontally
)
def test_new_single_object_is_labeled_as_noise(incdbscan4, object_far_away):
    """A lone inserted object cannot reach min_pts, so it must stay noise."""
    clusterer = incdbscan4
    clusterer.insert(object_far_away)
    assert_cluster_labels(clusterer, object_far_away, CLUSTER_LABEL_NOISE)
def test_new_object_far_from_cluster_is_labeled_as_noise(
        incdbscan4,
        blob_in_middle,
        object_far_away):
    """An object outside EPS range of an existing cluster remains noise."""
    for batch in (blob_in_middle, object_far_away):
        incdbscan4.insert(batch)
    assert_cluster_labels(incdbscan4, object_far_away, CLUSTER_LABEL_NOISE)
def test_new_border_object_gets_label_from_core(incdbscan4):
    """A new object within EPS of a core object joins that core's cluster.

    Fix: removed two leftover debug ``print`` calls that cluttered test
    output and added no assertion value.
    """
    cluster = np.array([
        [1., 1.],
        [0., 1.],
        [1., 0.],
        [0., 0.],
    ])
    new_border_object = np.array([[1 + EPS, 1]])
    incdbscan4.insert(cluster)
    incdbscan4.insert(new_border_object)
    assert_two_objects_are_in_same_cluster(
        incdbscan4, cluster[[0]], new_border_object)
def test_labels_are_noise_only_until_not_enough_objects_in_cluster(
        incdbscan4,
        blob_in_middle):
    """All objects stay noise until min_pts accumulate, then all flip at once."""
    for count in range(1, len(blob_in_middle) + 1):
        incdbscan4.insert(blob_in_middle[[count - 1]])
        label = (
            CLUSTER_LABEL_FIRST_CLUSTER if count >= incdbscan4.min_pts
            else CLUSTER_LABEL_NOISE
        )
        assert_cluster_labels(incdbscan4, blob_in_middle[:count], label)
def test_more_than_two_clusters_can_be_created(incdbscan4, blob_in_middle):
    """Three well-separated blobs get consecutive cluster labels."""
    objects = blob_in_middle
    label = CLUSTER_LABEL_FIRST_CLUSTER
    for _ in range(3):
        insert_objects_then_assert_cluster_labels(incdbscan4, objects, label)
        # Shift the next blob far enough away to form a new cluster.
        objects = objects + 10
        label = label + 1
def test_two_clusters_can_be_born_at_the_same_time(
        incdbscan4,
        point_at_origin):
    """One insertion at the origin can turn two noise groups into clusters."""
    cluster_1 = np.array([
        [EPS * 1, 0],
        [EPS * 2, 0],
        [EPS * 2, 0],
    ])
    cluster_2 = reflect_horizontally(cluster_1)
    # Neither side alone has enough objects: both groups start as noise.
    for pre_cluster in (cluster_1, cluster_2):
        incdbscan4.insert(pre_cluster)
    for pre_cluster in (cluster_1, cluster_2):
        assert_cluster_labels(incdbscan4, pre_cluster, CLUSTER_LABEL_NOISE)
    new_object = point_at_origin
    incdbscan4.insert(new_object)
    # The two freshly born clusters must carry the two consecutive labels,
    # in whichever order the implementation assigned them.
    label_1 = incdbscan4.get_cluster_labels(cluster_1[[0]])[0]
    assert_cluster_labels(incdbscan4, cluster_1, label_1)
    label_2 = CLUSTER_LABEL_FIRST_CLUSTER + 1 - label_1
    assert_cluster_labels(incdbscan4, cluster_2, label_2)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        new_object,
        {label_1, label_2}
    )
def test_absorption_with_noise(incdbscan3, point_at_origin):
    """A former noise object is absorbed once a nearby object becomes core."""
    label = CLUSTER_LABEL_FIRST_CLUSTER
    chain = np.array([
        [EPS, 0],
        [EPS * 2, 0],
        [EPS * 3, 0],
    ])
    insert_objects_then_assert_cluster_labels(incdbscan3, chain, label)
    lone = np.array([[0, EPS]])
    insert_objects_then_assert_cluster_labels(
        incdbscan3, lone, CLUSTER_LABEL_NOISE)
    # The origin bridges the lone object into the existing cluster.
    insert_objects_then_assert_cluster_labels(
        incdbscan3, point_at_origin, label)
    assert_cluster_labels(incdbscan3, lone, label)
def test_merge_two_clusters(incdbscan3, point_at_origin):
    """Inserting a bridging object merges two adjacent clusters into one."""
    right_side = np.array([
        [EPS, 0],
        [EPS * 2, 0],
        [EPS * 3, 0],
        [EPS * 4, 0],
    ])
    right_label = CLUSTER_LABEL_FIRST_CLUSTER
    insert_objects_then_assert_cluster_labels(
        incdbscan3, right_side, right_label)
    left_side = reflect_horizontally(right_side)
    left_label = right_label + 1
    insert_objects_then_assert_cluster_labels(
        incdbscan3, left_side, left_label)
    # The merged cluster keeps the larger of the two labels.
    merged_label = max(right_label, left_label)
    insert_objects_then_assert_cluster_labels(
        incdbscan3, point_at_origin, merged_label)
    for side in (right_side, left_side):
        assert_cluster_labels(incdbscan3, side, merged_label)
def test_merger_and_creation_can_happen_at_the_same_time(
        incdbscan4,
        point_at_origin,
        hourglass_on_the_right):
    """One insertion can merge two clusters on one side and create a new
    cluster on the other.

    Fix: the bridge-point assertion previously listed
    ``bottom_right_expected_label`` twice, collapsing the set to a single
    label; the bridge point may legitimately belong to either the top or
    the bottom cluster (cf. the analogous left-side assertion in
    test_two_mergers_can_happen_at_the_same_time).
    """
    # Insert objects to the right
    hourglass = hourglass_on_the_right
    top_right = hourglass[:3]
    top_right_expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    bottom_right = hourglass[-3:]
    bottom_right_expected_label = top_right_expected_label + 1
    bridge_point = hourglass[[3]]
    incdbscan4.insert(top_right)
    incdbscan4.insert(bridge_point)
    incdbscan4.insert(bottom_right)
    assert_cluster_labels(incdbscan4, top_right, top_right_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, bottom_right_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point,
        {top_right_expected_label, bottom_right_expected_label}
    )
    merged_cluster_expected_label = \
        incdbscan4.get_cluster_labels(bridge_point)[0]
    # Insert objects to the left
    left_pre_cluster = np.array([
        [-EPS, 0],
        [-EPS * 2, 0],
        [-EPS * 2, 0],
    ])
    left_cluster_expected_label = bottom_right_expected_label + 1
    insert_objects_then_assert_cluster_labels(
        incdbscan4,
        left_pre_cluster,
        CLUSTER_LABEL_NOISE
    )
    # Insert object to the center
    new_object = point_at_origin
    incdbscan4.insert(new_object)
    assert_cluster_labels(
        incdbscan4, top_right, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, bridge_point, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, left_pre_cluster, left_cluster_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        new_object,
        {merged_cluster_expected_label, left_cluster_expected_label}
    )
def test_two_mergers_can_happen_at_the_same_time(
        incdbscan4,
        point_at_origin,
        hourglass_on_the_right):
    """One insertion can trigger a merger on both sides simultaneously.

    Fix: the right-side bridge-point assertion previously listed
    ``bottom_right_expected_label`` twice; like the left-side assertion,
    it should accept either the top or the bottom cluster's label.
    """
    # Insert objects to the right
    top_right = hourglass_on_the_right[:3]
    top_right_expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    bottom_right = hourglass_on_the_right[-3:]
    bottom_right_expected_label = top_right_expected_label + 1
    bridge_point_right = hourglass_on_the_right[[3]]
    incdbscan4.insert(top_right)
    incdbscan4.insert(bridge_point_right)
    incdbscan4.insert(bottom_right)
    assert_cluster_labels(incdbscan4, top_right, top_right_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, bottom_right_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_right,
        {top_right_expected_label, bottom_right_expected_label}
    )
    # Insert objects to the left
    hourglass_on_the_left = reflect_horizontally(hourglass_on_the_right)
    top_left = hourglass_on_the_left[:3]
    top_left_expected_label = bottom_right_expected_label + 1
    bottom_left = hourglass_on_the_left[-3:]
    bottom_left_expected_label = top_left_expected_label + 1
    bridge_point_left = hourglass_on_the_left[[3]]
    incdbscan4.insert(top_left)
    incdbscan4.insert(bridge_point_left)
    incdbscan4.insert(bottom_left)
    assert_cluster_labels(incdbscan4, top_left, top_left_expected_label)
    assert_cluster_labels(incdbscan4, bottom_left, bottom_left_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_left,
        {top_left_expected_label, bottom_left_expected_label}
    )
    # Insert object to the center
    new_object = point_at_origin
    incdbscan4.insert(new_object)
    assert_cluster_labels(
        incdbscan4,
        np.vstack([top_right, bottom_right]),
        bottom_right_expected_label
    )
    assert_cluster_labels(
        incdbscan4,
        np.vstack([top_left, bottom_left]),
        bottom_left_expected_label
    )
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_right,
        {bottom_left_expected_label, bottom_right_expected_label}
    )
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_left,
        {top_left_expected_label, bottom_left_expected_label}
    )
def test_object_is_core_if_it_has_more_than_enough_neighors(
        incdbscan3,
        point_at_origin):
    """An object surrounded by min_pts+ neighbors becomes core on insertion."""
    neighbors = np.array([
        [0, EPS],
        [0, -EPS],
        [EPS, 0],
        [-EPS, 0],
    ])
    incdbscan3.insert(neighbors)
    incdbscan3.insert(point_at_origin)
    for objects in (neighbors, point_at_origin):
        assert_cluster_labels(incdbscan3, objects, CLUSTER_LABEL_FIRST_CLUSTER)
|
112770
|
from rest_framework import serializers
from users.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializes a User together with the requesting client's IP address."""

    remote_addr = serializers.SerializerMethodField()

    class Meta:
        model = User
        fields = [
            'id',
            'username',
            'first_name',
            'last_name',
            'email',
            'remote_addr',
        ]

    def get_remote_addr(self, obj):
        # The view injects the current request into the serializer context.
        request = self.context['request']
        return request.META['REMOTE_ADDR']
|
112780
|
import flask
import numpy as np
import os
import requests
import sys
from cv2 import cv2 as cv
from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error
import struct
from threading import Thread
import imagehash
from PIL import Image
class Request():
    """A frame-processing job: frame id, requested method, result checksum."""

    def __init__(self, frame, method):
        self.frame = frame
        self.method = method
        # Empty until the processed image's hash has been computed.
        self.checksum = ""

    def update_checksum(self, checksum):
        """Record the checksum of the processed frame."""
        self.checksum = checksum

    def get_frame(self):
        """Return the frame identifier this request refers to."""
        return self.frame

    def get_method(self):
        """Return the processing method requested for this frame."""
        return self.method

    def get_checksum(self):
        """Return the processed frame's checksum ("" if not yet processed)."""
        return self.checksum
# Identifier of this replica within the multicast group.
replica_number = 1
host = "localhost"
# Multicast group address/port shared by all replicas and the sequencer.
multicast_group = "172.16.31.10"
multicast_port = 20000
#sequencer_port = 20000
timeout = 3
# UDP receive buffer size in bytes.
buf = 1024
app = flask.Flask(__name__)
# seq_num -> Request objects received but not yet delivered in order.
requests_awaiting = {}
# Sequence numbers already delivered by this replica.
requests_finished = []
# frame_num -> list of replica ids that reported each status.
success_req = {}
delivered_req = {}
fail_req = {}
# TODO : Figure out how to synchronize sequence count between sequencer and implementation
seq_count = 1
@app.route('/getUDPPort', methods=['GET'])
def getUDPPort():
    """Expose the UDP port this replica's multicast socket is bound to."""
    _, bound_port = serv.get_port()
    return str(bound_port)
@app.route('/getJob/<seq_num>', methods=['GET'])
def publishFrame(seq_num):
    """Serve the processed frame for *seq_num*, or the failure image."""
    print(str(seq_num))
    job_path = "../python/jobs/f" + str(seq_num) + ".jpg"
    if not os.path.isfile(job_path):
        return flask.send_file("images/color_fail.jpg", mimetype='image/jpg')
    return flask.send_file(job_path, mimetype='image/jpg')
def getFrame(frame_num):
    """Fetch frame *frame_num* from the web service as a raw uint8 buffer."""
    img_url = "http://localhost:8080/rest/openiss/getStaticFrame/" + str(frame_num)
    payload = requests.get(img_url).content
    return np.frombuffer(payload, dtype=np.uint8)
def deliverFrame(frame_num):
    """Multicast a 'delivered' notification for *frame_num* to the group."""
    destination = (multicast_group, multicast_port)
    udp_string = str(frame_num) + ",delivered," + str(replica_number)
    sender = socket(AF_INET, SOCK_DGRAM)
    sender.sendto(udp_string.encode(), destination)
    print("Sending %s ..." % udp_string)
    sender.close()
def processFrame(frame_num):
    """Dispatch the awaiting request for *frame_num* to its processing routine."""
    requested = requests_awaiting[frame_num].get_method()
    if requested == "canny":
        doCanny(frame_num)
    elif requested == "contour":
        doContour(frame_num)
    else:
        # Unknown method: report and drop the request.
        print("Method called does not exist on web service! Skipping...")
        requests_awaiting.pop(frame_num, None)
def checkRequestsAwaiting():
    """Deliver every consecutively numbered request starting at seq_count."""
    global seq_count
    while seq_count in requests_awaiting:
        current = seq_count
        deliverFrame(current)
        requests_awaiting.pop(current, None)
        requests_finished.append(current)
        seq_count = current + 1
def addToSharedQueues(frame_num, method, replica_num):
    """Record that *replica_num* reported status *method* for *frame_num*.

    "success" and "fail" go to their dedicated queues; any other method
    (in practice "delivered") is recorded in delivered_req.

    Fixes: the previous ``global success_req, seq_count`` declaration was
    misleading — neither name is reassigned here (dicts are mutated in
    place), and ``seq_count`` was never touched at all. The triplicated
    "create list if missing, then append" boilerplate is collapsed into
    a single ``setdefault`` call.
    """
    if method == "success":
        queue = success_req
    elif method == "fail":
        queue = fail_req
    else:
        queue = delivered_req
    queue.setdefault(frame_num, []).append(replica_num)
def doCanny(seq_num):
    """Run Canny edge detection on frame *seq_num*, save it, store checksum."""
    raw = getFrame(seq_num)
    img = cv.imdecode(raw, cv.IMREAD_UNCHANGED)
    if img is None:
        print("Error loading image")
        return
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray, 50, 150, 3, L2gradient=False)
    edges = cv.cvtColor(edges, cv.COLOR_GRAY2BGR)
    print("Saving canny...")
    file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
    sys.stdout.flush()
    cv.imwrite(file_name, edges)
    # Perceptual hash of the saved result, used as the frame checksum.
    checksum = imagehash.average_hash(Image.open(file_name))
    requests_awaiting[seq_num].checksum = checksum
def doContour(seq_num):
    """Threshold frame *seq_num* to a binary image, save it, store checksum."""
    raw = getFrame(seq_num)
    img = cv.imdecode(raw, cv.IMREAD_UNCHANGED)
    if img is None:
        print("Error loading image")
        return
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, binary = cv.threshold(gray, 100, 255, cv.THRESH_BINARY)
    print("Saving contour...")
    binary = cv.cvtColor(binary, cv.COLOR_GRAY2BGR)
    file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
    cv.imwrite(file_name, binary)
    # Perceptual hash of the saved result, used as the frame checksum.
    checksum = imagehash.average_hash(Image.open(file_name))
    requests_awaiting[seq_num].checksum = checksum
class UDPServer():
    """Listens on the multicast group for frame jobs and replica status
    messages, and drives in-order processing/delivery of frames."""
    def __init__(self):
        # Join the multicast group on all interfaces and allow address reuse
        # so multiple replicas can bind the same port on one host.
        self._running = True
        self.sock = socket(AF_INET, SOCK_DGRAM)
        self.buf = buf
        self.timeout = timeout
        # mreq structure: group address + local interface (INADDR_ANY).
        self.group = inet_aton(multicast_group) + inet_aton("0.0.0.0")
        self.sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, self.group)
        self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sock.bind(("", multicast_port))
    def terminate(self):
        # Stop the receive loop; shutdown unblocks a pending recvfrom.
        self._running = False
        self.sock.shutdown(0)
        self.sock.close()
    def is_running(self):
        return self._running
    def get_port(self):
        # Returns (address, port) of the bound socket.
        return self.sock.getsockname()
    def run(self):
        """Receive loop: datagrams are 'seq,method[,replica]' strings."""
        global seq_count
        while True:
            try:
                print("Waiting to receive data...")
                sys.stdout.flush()
                data,address = self.sock.recvfrom(self.buf)
                if data:
                    strings = data.decode('utf-8')
                    seq_num = int(strings.split(',')[0])
                    method = strings.split(',')[1]
                    print("Message:", method, seq_num, "Address: ", address)
                    # Status report from another replica: record it, but
                    # ignore our own messages echoed back by multicast.
                    if(method == "success" or method == "fail" or method == "delivered"):
                        replica_num = int(strings.split(',')[2])
                        if replica_num != replica_number:
                            addToSharedQueues(seq_num, method, replica_num)
                    # New job we have not seen (and not already delivered):
                    # process it, then deliver any now-contiguous frames.
                    elif(seq_num >= seq_count and seq_num not in requests_finished and seq_num not in requests_awaiting):
                        requests_awaiting[seq_num] = Request(seq_num, method)
                        processFrame(seq_num)
                        checkRequestsAwaiting()
                    else:
                        print("Packet with sequence number ", seq_num, " already received!")
                    sys.stdout.flush()
            except socket_error:
                # Socket was shut down (see terminate()); exit the loop.
                self.sock.close()
                break
# Main execution
# Start the multicast receive loop on a background thread, then run the
# Flask API on the main thread. NOTE(review): the thread starts at import
# time, not only under __main__ — confirm that is intended.
serv = UDPServer()
t = Thread(target=serv.run)
t.start()
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8001)
# If here, ctrl+c was called
serv.terminate()
t.join()
sys.exit()
|
112796
|
from deploy.src.sagemaker_deploy_spec import SageMakerDeploySpec
import unittest
class DeploySpecTestCase(unittest.TestCase):
    """Sanity checks for SageMakerDeploySpec argument parsing."""

    REQUIRED_ARGS = ["--region", "us-west-2", "--model_name_1", "model-test"]

    def test_minimum_required_args(self):
        # Construction raises if the inputs are incorrect, so simply
        # building the spec is the assertion here.
        SageMakerDeploySpec(self.REQUIRED_ARGS)
|
112860
|
from typing import Optional, Union
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2_as_graph,
)
from tensorflow.keras import Sequential, Model
import keras_flops.flops_registory
def get_flops(model: Union[Model, Sequential], batch_size: Optional[int] = None) -> int:
    """
    Calculate FLOPS for tf.keras.Model or tf.keras.Sequential .
    Ignore operations used in only training mode such as Initialization.
    Use tf.profiler of tensorflow v1 api.

    batch_size defaults to 1 when not given; the reported FLOPS scale
    with the batch size baked into the input signature.
    """
    if not isinstance(model, (Sequential, Model)):
        # NOTE(review): KeyError kept (rather than TypeError) so existing
        # callers that catch it keep working.
        raise KeyError(
            "model arguments must be tf.keras.Model or tf.keras.Sequential instanse"
        )
    effective_batch = 1 if batch_size is None else batch_size
    # Freeze the model into a constant graph so the profiler only sees
    # inference-time operations.
    specs = [
        tf.TensorSpec([effective_batch] + inp.shape[1:], inp.dtype)
        for inp in model.inputs
    ]
    concrete = tf.function(model).get_concrete_function(specs)
    frozen_func, _ = convert_variables_to_constants_v2_as_graph(concrete)
    # Count floating-point operations with the v1 profiler.
    run_meta = tf.compat.v1.RunMetadata()
    options = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
    profile = tf.compat.v1.profiler.profile(
        graph=frozen_func.graph, run_meta=run_meta, cmd="scope", options=options
    )
    return profile.total_float_ops
|
112867
|
from collections import Counter, defaultdict, OrderedDict
from sklearn.neighbors.kde import KernelDensity
import itertools
import numpy as np
import os
import pysam
import random as rnd
import sys
import matplotlib
matplotlib.use('Agg') # required if X11 display is not present
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from arcsv.constants import CIGAR_SOFT_CLIP
from arcsv.conditional_mappable_model import process_aggregate_mapstats
from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, \
add_time_checkpoint, normpdf, is_read_through, len_without_gaps
from arcsv.invertedreads import get_inverted_pair
from arcsv.pecluster import process_discordant_pair
from arcsv.softclip import process_softclip
from arcsv.splitreads import parse_splits, splits_are_mirrored
def extract_approximate_library_stats(opts, bam, rough_insert_median):
    """Estimate per-library insert-size statistics by sampling random chunks.

    Repeatedly samples random windows of the chromosome, completes read
    pairs within each window, and accumulates insert lengths and read
    lengths until opts['approx_stats_nreads'] pairs are seen per library.

    Returns (insert_mean, insert_sd, insert_pmf, insert_lower,
    insert_upper, rlen_medians); means are medians and sds are robust,
    so they tolerate outlier inserts.
    """
    reads_per_chunk = int(np.floor(opts['approx_stats_nreads'] / opts['approx_stats_nchunks']))
    # lib_patterns, lib_stats = parse_library_stats(meta)
    # maps read groups matching lib_patterns to indices in lib_stats
    # lib_dict = {}
    # MULTILIB
    nlib = opts['nlib']
    insert_len = [[] for i in range(nlib)]
    read_len_shorter = [[] for i in range(nlib)]
    read_len_longer = [[] for i in range(nlib)]
    chrom_name = opts['chromosome']
    chrom_size = get_chrom_size_from_bam(chrom_name, bam)
    # Windows much larger than the insert size, so that conditioning on the
    # mate mapping inside the window introduces negligible bias.
    chunk_size = 10 * opts['insert_max_mu_multiple'] * rough_insert_median
    rough_insert_max = opts['insert_max_mu_multiple'] * rough_insert_median
    reads_processed = [0 for i in range(nlib)]
    chunks_processed = 0
    # MINOR reads_per_chunk should mean completed
    while min(reads_processed) < opts['approx_stats_nreads']:
        # extract random chunk
        start = np.random.randint(0, chrom_size - chunk_size)
        end = start + chunk_size
        # parse reads
        seen_aln = {}
        chunk_reads_seen = 0
        # NOTE(review): `alns` is fetched and sorted but never used — the
        # loop below re-fetches. Looks like dead code; confirm before removing.
        alns = list(bam.fetch_unsorted(chrom_name, start, end))
        if bam.num_bam > 1:
            alns.sort(key=lambda a: a.pos)
        for aln in list(bam.fetch_unsorted(chrom_name, start, end)):
            # conditioning on mate position introduces slight bias,
            # but insignificant if chunk_size >> insert size
            if not_primary(aln) or aln.is_duplicate or aln.is_unmapped or \
               aln.mpos < start or aln.mpos >= end or aln.mate_is_unmapped:
                continue
            if aln.qname not in seen_aln:
                # First read of a pair: remember it (up to the per-chunk cap).
                if chunk_reads_seen < reads_per_chunk:
                    seen_aln[aln.qname] = aln
                    chunk_reads_seen += 1
                    continue
                else:
                    continue
            # pair completed
            mate = seen_aln[aln.qname]
            pair = (aln, mate)
            del seen_aln[aln.qname]
            lib_idx = 0         # get_lib_idx(aln.get_tag('RG'), lib_dict, lib_patterns)
            process_insert_len(pair, insert_len[lib_idx], opts['min_mapq_reads'],
                               opts['read_len'], maximum_insert_size=rough_insert_max)
            process_read_len(pair, read_len_shorter[lib_idx], read_len_longer[lib_idx])
            reads_processed[lib_idx] += 1
            if min(reads_processed) % 200000 == 0 and opts['verbosity'] > 0:
                print('[library_stats] processed {0} reads ({1} chunks) for each lib'.
                      format(min(reads_processed), chunks_processed))
        chunks_processed += 1
    # Robust location/scale plus a kernel-smoothed pmf per library.
    insert_mean = [np.median(il) for il in insert_len]
    insert_sd = [robust_sd(il) for il in insert_len]
    insert_lower = [np.percentile(il, 0.15) for il in insert_len]
    insert_upper = [np.percentile(il, 99.85) for il in insert_len]
    insert_pmf = [pmf_kernel_smooth(il, 0, opts['insert_max_mu_multiple'] * mu,
                                    opts['max_kde_samples'])
                  for (il, mu) in zip(insert_len, insert_mean)]
    rlen_short = [round(np.median(rl)) for rl in read_len_shorter]
    rlen_long = [round(np.median(rl)) for rl in read_len_longer]
    rlen_medians = list(zip(rlen_short, rlen_long))
    return insert_mean, insert_sd, insert_pmf, insert_lower, insert_upper, rlen_medians
# parse a single bam file, extracting breakpoints,
# insert size distribution, and/or visualization tracks in bed/bigwig format
def parse_bam(opts, reference_files, bamfiles):
    """Parse BAM file(s) over the target region and collect SV evidence.

    Gathers, per library: softclip positions, split reads, insert-size
    distributions, mappability stats and (optionally) discordant pairs.
    Also records sequence/physical coverage estimates into ``opts``.

    Returns a 10-tuple (softclips, splits, mapstats, rlen_medians,
    insert_len_dist, insert_mean, insert_sd, discordant_pairs,
    min_concordant_insert, max_concordant_insert); the last three are
    None when opts['do_pecluster'] is False.
    """
    chrom_name = opts['chromosome']
    start, end = opts['region_start'], opts['region_end']
    outdir = opts['outdir']
    min_mapq_reads = opts['min_mapq_reads']
    nlib = opts['nlib']         # MULTILIB
    # lib_patterns, lib_stats = parse_library_stats(meta)
    # lib_dict = {}
    bam = BamGroup(bamfiles)
    opts['read_len'] = bam_read_len(bam)
    # bam_has_unmapped = has_unmapped_records(bam)
    # if opts['verbosity'] > 0:
    #     if bam_has_unmapped:
    #         print('[parse_bam] bam file DOES contain unmapped records')
    #     else:
    #         print('[parse_bam] bam file DOES NOT contain unmapped records')
    # --- Pass 1: approximate library stats from random sampling ---
    if opts['verbosity'] > 0:
        print('\n[parse_bam] extracting approximate library stats')
    rough_insert_median = get_rough_insert_median(opts, bam)
    if opts['verbosity'] > 0:
        print('[parse_bam] read_len: {0}; rough_insert_median: {1}'.
              format(opts['read_len'], rough_insert_median))
    als = extract_approximate_library_stats(opts, bam, rough_insert_median)
    mean_approx, sd_approx, pmf_approx, qlower, qupper, rlen_medians = als
    # Dump each library's smoothed insert pmf for later inspection.
    for i in range(len(pmf_approx)):
        with open(os.path.join(outdir, 'logging', '{0}_insert_pmf.txt'
                               .format(opts['library_names'][i])), 'w') as f:
            for j in range(len(pmf_approx[i])):
                f.write('{0}\t{1}\n'.format(j, pmf_approx[i][j]))
    if opts['verbosity'] > 0:
        print('[parse_bam] library stats:\n\tmu = {0}\n\tsigma = {1}'
              .format(mean_approx, sd_approx))
    add_time_checkpoint(opts, 'lib. stats')

    def get_lr_cutoff(opts, pmf, do_min=False):
        # Find the insert size where the likelihood ratio vs the pmf's mode
        # matches what a +/- opts['insert_cutoff'] sigma cutoff would give
        # under a normal distribution. do_min scans up from 0 for the lower
        # cutoff; otherwise scans down from the pmf's upper end.
        cutoff_normal_equivalent = opts['insert_cutoff']
        lr_cutoff = normpdf(0) - normpdf(cutoff_normal_equivalent)
        mode = max(pmf)
        logmode = np.log(mode)
        which_mode = [i for i in range(len(pmf)) if pmf[i] == mode]
        cutoff = None
        if do_min:
            for i in range(1, len(pmf)):
                if pmf[i] != 0 and logmode - np.log(pmf[i]) < lr_cutoff:
                    cutoff = i - 1
                    break
        else:
            for i in range(len(pmf) - 2, -1, -1):
                if pmf[i] != 0 and logmode - np.log(pmf[i]) < lr_cutoff:
                    cutoff = i + 1
                    break
        if opts['verbosity'] > 0:
            print('[insert_cutoff] lr_cutoff is {0}'.format(lr_cutoff))
            print('[insert_cutoff] mode (log) {0} at {1}'.format(logmode, which_mode))
            # NOTE(review): `i` here is the leftover loop index; if no
            # cutoff was found this prints the last index tried — confirm.
            print('[insert_cutoff] cutoff ratio (log) {0} at {1}'.
                  format(logmode - np.log(pmf[i]), cutoff))
        return cutoff
    min_concordant_insert = [get_lr_cutoff(opts, pmf, do_min=True) for pmf in pmf_approx]
    max_concordant_insert = [get_lr_cutoff(opts, pmf) for pmf in pmf_approx]
    if opts['verbosity'] > 0:
        print('[parse_bam] insert size cutoffs:')
        print('[parse_bam]' + '\n'
              .join(['{0}-{1}'.format(min_concordant_insert[i], max_concordant_insert[i])
                     for i in range(len(mean_approx))]))
        print('[parse_bam] equivalent to mu +/- 3 sigma in normal:\n\t{0}\n\t{1}\n'
              .format(qlower, qupper))
    # --- Pass 2: full parse of the region, pairing reads by qname ---
    seen_aln = {}
    nreads, npairs = 0, 0
    num_read_through = 0
    insert_len = [[] for i in range(nlib)]
    softclips = [(defaultdict(list), defaultdict(list)) for i in range(nlib)]
    splits = [[] for i in range(nlib)]
    if opts['do_pecluster']:
        discordant_pairs = [OrderedDict() for i in range(nlib)]
    if not opts['use_mate_tags']:       # need to estimate mappability proportions
        mapstats = [defaultdict(int) for i in range(nlib)]
    else:
        mapstats = None
    if opts['verbosity'] > 0:
        print('[parse_bam] starting alignment parsing. . .')
    alignments = bam.fetch_unsorted(chrom_name, start, end)
    for aln in alignments:
        if not_primary(aln) or aln.is_unmapped or aln.is_duplicate:
            continue
        nreads += 1
        if opts['verbosity'] > 0 and nreads % (1000000) == 0:
            print('[parse_bam] %d reads processed' % nreads)
        # TODO this can be done cleaner -- check for is_unmapped above
        #      and use handle_unpaired for everything with mate_is_unmapped
        if aln.qname not in seen_aln:
            # read is not going to pair, so handle now
            if aln.mate_is_unmapped or aln.rname != aln.mrnm:
                handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats)
            # waiting for this read's pair
            else:
                seen_aln[aln.qname] = aln
            continue
        # Completed a pair!
        npairs += 1
        mate = seen_aln[aln.qname]
        pair = (aln, mate)
        del seen_aln[aln.qname]
        # Read-through pairs (overlapping reads from a tiny insert) are noise.
        if opts['filter_read_through'] and is_read_through(opts, pair):
            num_read_through += 1
            continue
        # MULTILIB
        lib_idx = 0
        # handle softclip information, insert len, mapping stats, splits/discordants
        if not opts['use_mate_tags']:
            process_aggregate_mapstats(pair, mapstats[lib_idx],
                                       min_mapq_reads, opts['max_pair_distance'])
        ilen = process_insert_len(pair, insert_len[lib_idx],
                                  opts['min_mapq_reads'], opts['read_len'])
        if opts['do_pecluster']:
            process_discordant_pair(pair[0], pair[1], chrom_name,
                                    discordant_pairs[lib_idx], min_mapq_reads,
                                    ilen, min_concordant_insert[lib_idx],
                                    max_concordant_insert[lib_idx],
                                    opts['library_is_rf'])
        if any(op == CIGAR_SOFT_CLIP for (op, oplen) in
               itertools.chain(aln.cigartuples, mate.cigartuples)):
            if opts['do_splits']:
                a1_split = process_splits(pair[0], splits[lib_idx],
                                          bam, min_mapq=min_mapq_reads,
                                          mate=pair[1])
                a2_split = process_splits(pair[1], splits[lib_idx],
                                          bam, min_mapq=min_mapq_reads,
                                          mate=pair[0])
            else:
                a1_split, a2_split = False, False
            # if we found the same breakpoint in both reads,
            # it's quite likely that the reads were overlapping due to a short insert
            if a1_split and a2_split and splits_are_mirrored(splits[lib_idx][-1],
                                                             splits[lib_idx][-2]):
                if opts['verbosity'] > 1:
                    print('[bamparser] mirrored split: {0} {1} {2}'.
                          format(chrom_name, splits[lib_idx][-1].bp2, pair[0].qname))
                del splits[lib_idx][-1]
            process_softclip(opts, pair, (a1_split, a2_split), softclips[lib_idx], lib_idx)
    # handle unpaired reads
    if opts['verbosity'] > 0:
        print('[parse_bam] handling unpaired reads')
    for aln in seen_aln.values():
        handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats)
    if any(len(ins) == 0 for ins in insert_len):  # MULTILIB should only fail if all()
        print('Error: region specified contains no reads!')
        sys.exit(1)
    # report stats
    if opts['verbosity'] > 0:
        print('[parse_bam] processed a total of {0} reads'.format(nreads))
        if opts['filter_read_through']:
            print('[parse_bam] found {0} read-through pairs out of {1} total'
                  .format(num_read_through, npairs))
    add_time_checkpoint(opts, 'parse bam')
    # compute insert length distributions and save plots
    if opts['verbosity'] > 1:
        print('[parse_bam] observed insert size min:')
        print('\n'.join([str(min(insert_len[i])) for i in range(nlib)]))
        print('\n'.join([str(Counter(sorted(insert_len[i]))) for i in range(nlib)]))
        print('[parse_bam] insert 25-50-75 percentiles by library:')
        percentiles = [np.percentile(ins, (25, 50, 75)) for ins in insert_len]
        print(''.join(['{0}: {1}\n'.
                       format(opts['library_names'][l], tuple(percentiles[l]))
                       for l in range(nlib)]))
    if opts['verbosity'] > 0:
        print('[parse_bam] computing insert length pmfs')
    insert_mean = [np.median(il) for il in insert_len]
    insert_sd = [robust_sd(il) for il in insert_len]
    max_mult = opts['insert_max_mu_multiple']
    insert_len_dist = [pmf_kernel_smooth(insert_len[i], 0,
                                         max_mult * mu, opts['max_kde_samples'])
                       for (i, mu) in zip(range(nlib), insert_mean)]
    if opts['verbosity'] > 1:
        for i in range(nlib):
            print('[parse_bam] lib {0} mu {1} sigma {2}'
                  .format(i, insert_mean[i], insert_sd[i]))
    # insert dist plots
    plot_insert_dist(opts, insert_len_dist, outdir)
    # compute average coverage
    # MULTILIB this needs adjusting -- keeping track of nreads from each bamgroup
    region_len = len_without_gaps(chrom_name, start, end, reference_files['gap'])
    opts['seq_coverage'] = [nreads * opts['read_len'] / (nlib * region_len) for _ in range(nlib)]
    opts['phys_coverage'] = [npairs * m / region_len for m in insert_mean]
    opts['max_pecluster_size'] = [pc * opts['pecluster_size_coverage_ratio']
                                  for pc in opts['phys_coverage']]
    if opts['verbosity'] > 0:
        print('[parse_bam] average sequence coverage: %.1fx' % opts['seq_coverage'][0])
        print('[parse_bam] average physical coverage: %.1fx' % opts['phys_coverage'][0])
    if opts['do_pecluster']:
        return (softclips, splits, mapstats, rlen_medians, insert_len_dist,
                insert_mean, insert_sd,
                discordant_pairs, min_concordant_insert, max_concordant_insert)
    else:
        return (softclips, splits, mapstats, rlen_medians, insert_len_dist,
                insert_mean, insert_sd,
                None, None, None)
def process_coverage(aln, coverage):
    """Increment per-base counts for every reference position *aln* covers."""
    for position in aln.get_reference_positions():
        coverage[position] += 1
def process_inverted(pair, inverted_pairs, bam):
    """Collect read pairs aligned in the same orientation (inversion signal).

    Returns 1 when the pair was recorded, 0 otherwise. See invertedreads.py.
    """
    if pair[0].is_reverse == pair[1].is_reverse:
        inverted_pairs.append(get_inverted_pair(pair, bam))
        return 1
    return 0
def process_hanging(anchor_aln, hanging_plus, hanging_minus):
    """Record the anchor position of a read whose mate failed to map.

    Minus-strand anchors contribute their 3' reference end; plus-strand
    anchors contribute their 5' reference start.
    """
    if anchor_aln.is_reverse:
        hanging_minus.add(anchor_aln.reference_end)
    else:
        hanging_plus.add(anchor_aln.reference_start)
def process_splits(aln, splits, bam, min_mapq, mate):
    """Append the parsed split-read record for *aln*; return 1 if one was found."""
    parsed = parse_splits(aln, bam, min_mapq, mate)
    if parsed is None:
        return 0
    splits.append(parsed)
    return 1
def process_insert_len(pair, len_array, min_mapq, read_len,
                       truncate=True, maximum_insert_size=float('inf'),
                       lib_is_rf=False, lib_insert_is_inner=False):
    """Compute the insert length implied by a read pair on the same chromosome.

    Parameters:
        pair: 2-tuple of aligned reads on the same chromosome.
        len_array: list collecting valid insert lengths (mutated in place).
        min_mapq: minimum mapping quality required of both reads.
        read_len: untrimmed read length; 0 disables the trimming adjustment.
        truncate: if True, only lengths in [0, maximum_insert_size] are
            appended to len_array.
        maximum_insert_size: upper bound applied when truncate is True.
            (Default was `np.Inf`, an alias removed in NumPy 2.0; the
            equivalent built-in `float('inf')` is used instead.)
        lib_is_rf: library orientation is reverse-forward (mate-pair style).
        lib_insert_is_inner: insert is measured between the inner read edges.

    Returns:
        None if the pair is invalid (same orientation or low mapq);
        otherwise the computed insert length (returned even when it is
        not appended because of truncation).
    """
    if pair[0].is_reverse == pair[1].is_reverse or \
       min(pair[0].mapq, pair[1].mapq) < min_mapq:
        return None
    which_minus = 0 if pair[0].is_reverse else 1
    # FR libraries lead with the forward-strand read; RF libraries the reverse.
    which_first = which_minus if lib_is_rf else (1 - which_minus)
    which_last = 1 - which_first
    if lib_insert_is_inner:
        ilen = pair[which_last].reference_start - pair[which_first].reference_end
    else:
        ilen = pair[which_last].reference_end - pair[which_first].reference_start
    # adjust for read trimming
    if read_len != 0:
        ilen += 2 * read_len - pair[0].query_length - pair[1].query_length
    # adjust for soft-clipping of 5' end (3' end of MP)
    ilen += pair[which_first].query_alignment_start + \
        pair[which_last].query_length - pair[which_last].query_alignment_end
    if (not truncate) or (ilen <= maximum_insert_size and ilen >= 0):
        len_array.append(ilen)
    return ilen
def process_insert_viz(pair, insert_plus, insert_minus, library_info):
    """Record one insert-length observation at each end of a proper pair.

    Returns 1 when the pair was recorded, 0 for same-orientation pairs.
    """
    r1, r2 = pair
    if r1.is_reverse == r2.is_reverse:
        return 0
    minus_idx = 0 if r1.is_reverse else 1
    first_idx = minus_idx if library_info['is_rf'] else 1 - minus_idx
    first = pair[first_idx]
    last = pair[1 - first_idx]
    if library_info['inner_insert']:
        insert_len = last.reference_start - first.reference_end
        insert_len -= last.query_alignment_start
        insert_len -= last.query_length - last.query_alignment_end
    else:
        insert_len = last.reference_end - first.reference_start
        insert_len += last.query_length - last.query_alignment_end
        insert_len += first.query_alignment_start
    rl = library_info['readlen']
    if rl != 0:
        # adjust for read trimming
        insert_len += 2 * rl - r1.query_length - r2.query_length
    insert_plus.add(first.reference_start, insert_len)
    insert_minus.add(last.reference_end, insert_len)
    return 1
def handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats):
    """Process a read with no usable mate: update mapping stats and, if the
    read is soft-clipped, collect split and softclip evidence."""
    pair = (aln, None)
    lib_idx = 0  # MULTILIB
    if not opts['use_mate_tags']:
        process_aggregate_mapstats(pair, mapstats[lib_idx],
                                   opts['min_mapq_reads'], opts['max_pair_distance'])
    is_clipped = any(op == CIGAR_SOFT_CLIP for (op, _) in aln.cigartuples)
    if is_clipped:
        if opts['do_splits']:
            has_split = process_splits(aln, splits[lib_idx], bam,
                                       min_mapq=opts['min_mapq_reads'], mate=None)
        else:
            has_split = False
        process_softclip(opts, pair, (has_split, False), softclips[lib_idx], lib_idx)
# assume no hard-clipping so sequence length is calculated correctly by pysam
def process_read_len(pair, len_short_array, len_long_array):
    """Append the shorter and longer query length of a read pair to the
    respective collection arrays."""
    first_len, second_len = (aln.query_length for aln in pair)
    if first_len > second_len:
        first_len, second_len = second_len, first_len
    len_short_array.append(first_len)
    len_long_array.append(second_len)
# abstraction for a group of bam files
class BamGroup:
    """Present several BAM files as a single read source.

    Fetches are chained across all files; reference/metadata queries are
    delegated to the first file (assumes all files share one header).
    """
    def __init__(self, bamfiles):
        # One pysam handle per input path.
        self.bamlist = [pysam.AlignmentFile(bf) for bf in bamfiles]
    def fetch_unsorted(self, *o1, **o2):
        """Chain fetches from all files; the combined stream is NOT coordinate-sorted."""
        return itertools.chain.from_iterable(b.fetch(*o1, **o2) for b in self.bamlist)
    def fetch_sorted(self, *o1, **o2):
        # NOTE(review): not implemented; raising the `Warning` class (rather
        # than NotImplementedError) is unusual -- confirm callers expect this.
        raise Warning('fetch_sorted not implemented')
        # fs = [b.fetch(*o1, **o2) for b in self.bamlist]
    def getrname(self, *o1, **o2):
        # Delegates to the first file's header.
        return self.bamlist[0].getrname(*o1, **o2)
    def gettid(self, *o1, **o2):
        return self.bamlist[0].gettid(*o1, **o2)
    @property
    def references(self):
        return self.bamlist[0].references
    @property
    def nreferences(self):
        return self.bamlist[0].nreferences
    @property
    def lengths(self):
        return self.bamlist[0].lengths
    @property
    def num_bam(self):
        # Number of BAM files in the group.
        return len(self.bamlist)
def pmf_kernel_smooth(a, xmin, xmax, max_kde_samples):
    """Estimate a discrete pmf over the integers xmin..xmax from samples `a`
    using a Gaussian kernel density estimate, normalized to sum to 1.

    NOTE(review): raising the `Warning` class on empty input is unusual --
    confirm callers expect an exception rather than a warning here.
    """
    if len(a) == 0:
        raise Warning('[pmf_kernel_smooth] array is empty -- there are no insert lengths!')
    # Subsample for speed; assumes `rnd` is `random` imported elsewhere -- TODO confirm.
    if len(a) > max_kde_samples:
        a = rnd.sample(a, max_kde_samples)
    # Silverman's rule of thumb to choose bandwidth
    a_trunc = np.matrix([x for x in a if x >= xmin and x <= xmax]).T
    pct = np.percentile(a_trunc, (25, 75))
    IQR = pct[1] - pct[0]
    # Bandwidth floored at 1.0; presumably .785 scales the IQR to a robust
    # sigma estimate (Silverman-style) -- verify against the intended formula.
    bw = max(1.0, .785 * IQR / a_trunc.shape[0]**(1/5))
    kde = KernelDensity(kernel='gaussian', bandwidth=bw, rtol=1e-6).fit(a_trunc)
    # Evaluate the density at each integer in [xmin, xmax], then normalize.
    pmf = np.exp(kde.score_samples(np.matrix(np.linspace(xmin, xmax, xmax-xmin+1)).T))
    S = sum(pmf)
    return [p/S for p in pmf]
# def has_unmapped_records(bam, pairs_to_check=10):
# alignments = bam.fetch_unsorted()
# # find several reads with mates unmapped
# hanging = []
# for aln in alignments:
# if not (aln.is_unmapped or aln.is_supplementary or
# aln.is_secondary or aln.is_duplicate) and \
# aln.mate_is_unmapped:
# hanging.append(aln)
# if len(hanging) >= pairs_to_check:
# break
# # do all hanging reads have mates?
# for aln in hanging:
# alns = bam.fetch_unsorted(bam.getrname(aln.rname), aln.mpos, aln.mpos + 1)
# if any([a.is_unmapped and a.qname == aln.qname for a in alns]):
# continue
# else:
# return False
# return True
def bam_read_len(bam, reads_to_check=1000):
    """Estimate the read length as the maximum query length over roughly the
    first `reads_to_check` mapped, non-hard-clipped reads.

    Parameters:
        bam: object exposing `fetch_unsorted()` yielding alignments.
        reads_to_check: approximate number of eligible reads to inspect.

    Returns:
        The maximum observed query length, or -inf if no eligible read
        was found.
    """
    # `np.Inf` was removed in NumPy 2.0; the built-in float is equivalent.
    rlen = float('-inf')
    nreads = 0
    for aln in bam.fetch_unsorted():
        # Skip unmapped reads and hard-clipped records, whose query_length
        # would understate the true read length.
        if aln.is_unmapped or 'H' in aln.cigarstring:
            continue
        rlen = max(rlen, aln.query_length)
        nreads += 1
        if nreads > reads_to_check:
            break
    return rlen
def get_rough_insert_median(opts, bam, pairs_to_check=10000):
    """Scan the bam for roughly the first `pairs_to_check` valid pairs and
    return the median (untruncated) insert length as a rough estimate.

    Pairs are matched by qname across the unsorted stream; a read failing
    the mapq/unmapped/non-primary checks invalidates its pair.
    """
    # check min_mapq, neither unmapped, neither supp
    ilen = []
    seen = {}
    rej = set()
    for aln in bam.fetch_unsorted():
        if aln.qname in seen:
            if aln.mapq < opts['min_mapq_reads'] or aln.is_unmapped or not_primary(aln):
                del seen[aln.qname]
            else:
                pair = (aln, seen[aln.qname])
                # truncate=False: collect every length; the median is robust
                # to outliers anyway.
                process_insert_len(pair, ilen, opts['min_mapq_reads'],
                                   opts['read_len'], truncate=False)
                del seen[aln.qname]
        else:
            if aln.mapq < opts['min_mapq_reads'] or aln.is_unmapped or not_primary(aln):
                # NOTE(review): `rej` is populated but never read, so a
                # rejected first mate's qname can still enter `seen` via a
                # later record -- confirm whether this is intended.
                rej.add(aln.qname)
            else:
                seen[aln.qname] = aln
        if len(ilen) >= pairs_to_check:
            break
    return np.median(ilen)
def plot_insert_dist(opts, insert_len_dists, outdir):
    """Write one PDF per library plotting its insert length distribution."""
    for lib_idx in range(opts['nlib']):
        lib_name = opts['library_names'][lib_idx]
        outfile = os.path.join(outdir, 'insert_' + lib_name + '.pdf')
        pdf = PdfPages(outfile)
        plt.figure()
        plt.plot(insert_len_dists[lib_idx])
        plt.title(lib_name)
        pdf.savefig()
        plt.close()
        pdf.close()
|
112868
|
from models.resnext.resnext101_regular import ResNeXt101
from models.vgg import VGGNet
from models.fcn import ResNextDecoderAtt
def resskspp():
    """Build a SKSPP decoder model with a ResNeXt101 backbone."""
    backbone = ResNeXt101()
    return ResNextDecoderAtt(pretrained_net=backbone, type='res')
def vggskspp():
    """Build a SKSPP decoder model with a VGG backbone."""
    backbone = VGGNet()
    return ResNextDecoderAtt(pretrained_net=backbone, type='vgg')
if __name__ == '__main__':
    # Only submodules of `models` are imported above (via `from models...`),
    # so the package name itself is unbound here; bind it before introspection
    # to avoid a NameError.
    import models
    model_names = sorted(
        name for name in models.__dict__
        if name.islower() and not name.startswith("__")
        and callable(models.__dict__[name])
    )
    print(model_names)
|
112891
|
# Each contact record is a group of identifiers; records sharing any
# identifier belong to the same connected component.
contacts = [['p1', 'e1'], ['p2', 'e2'], ['p2', 'e3'], ['e3', 'p4'],
            ['e2', 'p5'], ['p3', 'e4', 'p6'], ['e4'], ['p6', 'e5']]
visited = [False] * len(contacts)
# On Python 3, map() returns a one-shot iterator, so the original
# `contacts = map(set, contacts)` would be exhausted after a single pass;
# materialize the sets into a list so dfs() can enumerate them repeatedly.
contacts = [set(record) for record in contacts]


def dfs(node, index, temp):
    """Depth-first merge of every unvisited record sharing an identifier
    with `node`.

    Appends the labels ('c<i>') of newly reached records to `temp` and
    returns the union of identifiers in the component.
    """
    visited[index] = True
    result = node
    for i, item in enumerate(contacts):
        if not visited[i] and not result.isdisjoint(item):
            temp.append('c' + str(i + 1))
            result.update(dfs(item, i, temp))
    return result


def merge_contacts():
    """Group the contact records into connected components.

    Returns a list of label groups, one per component, e.g. [['c1'], ...].
    """
    ans = []
    merged_identifiers = []
    for i, node in enumerate(contacts):
        if not visited[i]:
            temp = ['c' + str(i + 1)]
            merged_identifiers.append(list(dfs(node, i, temp)))
            ans.append(temp)
    return ans


if __name__ == '__main__':
    # print() is a function on Python 3 (the original used the Python 2
    # print statement).
    print(merge_contacts())
|
112903
|
from __future__ import absolute_import
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from twisted.logger import Logger
import datetime
import uuid
import os
import tempfile
# Certificate validity windows ("ish" because they ignore leap years).
ONE_DAY = datetime.timedelta(1, 0, 0)
THIRTYISH_YEARS = datetime.timedelta(30 * 365, 0, 0)
TENISH_YEARS = datetime.timedelta(10 * 365, 0, 0)
# Various exportable constants that the tests can (and should!) use.
# All files live in a fresh temporary directory created at import time.
CERT_DIR = tempfile.mkdtemp()
ROOT_CERT_PATH = os.path.join(CERT_DIR, 'root_cert.pem')
ROOT_KEY_PATH = os.path.join(CERT_DIR, 'root_cert.key')
DEFAULT_CERT_PATH = os.path.join(CERT_DIR, 'DEFAULT.pem')
DEFAULT_KEY_PATH = os.path.join(CERT_DIR, 'DEFAULT.key')
HTTP2BIN_CERT_PATH = os.path.join(CERT_DIR, 'http2bin.org.pem')
HTTP2BIN_KEY_PATH = os.path.join(CERT_DIR, 'http2bin.org.key')
# A list of tuples that controls what certs get built and signed by the root.
# Each tuple is (hostname, cert_path).
# We'll probably never need the easy extensibility this provides.
_CERTS = [
    (u'localhost', DEFAULT_CERT_PATH),
    (u'http2bin.org', HTTP2BIN_CERT_PATH),
]
_LOGGER = Logger()
def _build_root_cert():
    """
    Builds a single root certificate that can be used to sign the others.
    If a root certificate and key already exist on disk, they are reloaded
    instead of being regenerated.
    Returns a tuple of (certificate, key) for the CA, which can be used to
    build the leaves.
    """
    if os.path.isfile(ROOT_CERT_PATH) and os.path.isfile(ROOT_KEY_PATH):
        _LOGGER.info("Root already exists, not regenerating.")
        with open(ROOT_CERT_PATH, 'rb') as f:
            certificate = x509.load_pem_x509_certificate(
                f.read(), default_backend()
            )
        with open(ROOT_KEY_PATH, 'rb') as f:
            key = serialization.load_pem_private_key(
                f.read(), password=None, backend=default_backend()
            )
        return certificate, key
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend()
    )
    public_key = private_key.public_key()
    builder = x509.CertificateBuilder()
    # Self-signed root: subject and issuer carry the same name.
    builder = builder.subject_name(x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, u'txsni signing service'),
    ]))
    builder = builder.issuer_name(x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, u'txsni signing service'),
    ]))
    # Backdate by a day to tolerate minor clock skew.
    builder = builder.not_valid_before(datetime.datetime.today() - ONE_DAY)
    builder = builder.not_valid_after(
        datetime.datetime.today() + THIRTYISH_YEARS
    )
    builder = builder.serial_number(int(uuid.uuid4()))
    builder = builder.public_key(public_key)
    # Don't allow intermediates.
    builder = builder.add_extension(
        x509.BasicConstraints(ca=True, path_length=0), critical=True,
    )
    certificate = builder.sign(
        private_key=private_key, algorithm=hashes.SHA256(),
        backend=default_backend()
    )
    # Write it out: key and certificate in separate PEM files.
    with open(ROOT_KEY_PATH, 'wb') as f:
        f.write(
            private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            )
        )
    with open(ROOT_CERT_PATH, 'wb') as f:
        f.write(
            certificate.public_bytes(serialization.Encoding.PEM)
        )
    _LOGGER.info("Built root certificate.")
    return certificate, private_key
def _build_single_leaf(hostname, certfile, ca_cert, ca_key):
    """
    Builds a single leaf certificate, signed by the CA's private key.
    The private key and the certificate are written, in that order, into
    `certfile`. Skips generation if `certfile` already exists.
    """
    if os.path.isfile(certfile):
        _LOGGER.info("{hostname} already exists, not regenerating",
                     hostname=hostname)
        return
    leaf_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend()
    )
    builder = (
        x509.CertificateBuilder()
        .subject_name(x509.Name([
            x509.NameAttribute(NameOID.COMMON_NAME, hostname),
        ]))
        .issuer_name(ca_cert.subject)
        .not_valid_before(datetime.datetime.today() - ONE_DAY)
        .not_valid_after(datetime.datetime.today() + TENISH_YEARS)
        .serial_number(int(uuid.uuid4()))
        .public_key(leaf_key.public_key())
        # Leaves may not sign further certificates.
        .add_extension(
            x509.BasicConstraints(ca=False, path_length=None), critical=True,
        )
        .add_extension(
            x509.SubjectAlternativeName([
                x509.DNSName(hostname)
            ]),
            critical=True,
        )
    )
    certificate = builder.sign(
        private_key=ca_key, algorithm=hashes.SHA256(),
        backend=default_backend()
    )
    # Write key followed by certificate into the same PEM file.
    with open(certfile, 'wb') as f:
        f.write(
            leaf_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            )
        )
        f.write(
            certificate.public_bytes(serialization.Encoding.PEM)
        )
    _LOGGER.info("Built certificate for {hostname}", hostname=hostname)
def _build_certs():
    """
    Builds all certificates: first the signing root, then every leaf
    listed in _CERTS.
    """
    root_cert, root_key = _build_root_cert()
    for leaf_hostname, leaf_certfile in _CERTS:
        _build_single_leaf(leaf_hostname, leaf_certfile, root_cert, root_key)
# Allow running this module directly to pre-generate all test certificates.
if __name__ == '__main__':
    _build_certs()
|
112949
|
from super_gradients.common.factories.base_factory import BaseFactory
from super_gradients.training.losses import LOSSES
class LossesFactory(BaseFactory):
    """Factory resolving configured loss names to the classes registered in LOSSES."""
    def __init__(self):
        # Delegate all lookup logic to BaseFactory, backed by the LOSSES registry.
        super().__init__(LOSSES)
|
112971
|
from typing import Iterator
import os
import shutil
import json
import logging
import docker
from ..core.build import BuildInstructions
from ..exceptions import ImageBuildFailed
# Module-level logger at DEBUG so build activity is available to any handler.
logger = logging.getLogger(__name__)  # type: logging.Logger
logger.setLevel(logging.DEBUG)
class BuildManager(object):
    """
    Registry of Docker image build instructions, with helpers to build,
    uninstall, pull and push those images through a docker client.
    """
    def __init__(self, client_docker: docker.DockerClient):
        # Docker client used for all image operations.
        self.__docker = client_docker
        # Maps image name -> BuildInstructions.
        self.__blueprints = {}
    def __getitem__(self, name: str) -> BuildInstructions:
        """
        Retrieves the build instructions associated for a named Docker image.
        Parameter:
            name: the name of the Docker image.
        Raises:
            KeyError: if no build instructions for the named image have been
                registered with this manager.
        """
        return self.__blueprints[name]
    def __iter__(self) -> Iterator[BuildInstructions]:
        """
        Returns an iterator over all of the build instructions that are
        registered with this server.
        """
        return self.__blueprints.values().__iter__()
    def register(self, blueprint: BuildInstructions) -> None:
        """
        Attempts to register a blueprint for a given Docker image with this
        manager. An existing blueprint with the same name is overwritten.
        """
        self.__blueprints[blueprint.name] = blueprint
    add = register
    def deregister(self, blueprint: BuildInstructions) -> None:
        """
        Attempts to deregister a given blueprint from this manager.
        """
        del self.__blueprints[blueprint.name]
    remove = deregister
    def is_installed(self, name: str) -> bool:
        """
        Indicates a given Docker image is installed on this server.
        Parameters:
            name: the name of the Docker image.
        Returns:
            `True` if installed; `False` if not.
        """
        assert name is not None
        try:
            self.__docker.images.get(name)
            return True
        except docker.errors.ImageNotFound:
            return False
    def build(self,
              name: str,
              force: bool = False,
              quiet: bool = False
              ) -> None:
        """
        Constructs a Docker image, given by its name, using the set of build
        instructions associated with that image.
        Parameters:
            name: the name of the Docker image.
            force: if `True`, the image will be rebuilt, regardless of whether
                or not it is already installed on the server. If `False` and
                a (possibly outdated) version of the image has already been
                built, then the build will be skipped.
            quiet: used to enable and disable output from the Docker build
                process.
        Raises:
            ImageBuildFailed: if no 'Successfully built' line appears in the
                Docker build output stream.
        """
        logger.debug("request to build image: %s", name)
        instructions = self[name]
        if instructions.depends_on:
            logger.info("building dependent image: %s",
                        instructions.depends_on)
            # Recursively build the base image first.
            self.build(instructions.depends_on, force=force, quiet=quiet)
        if not force and self.is_installed(instructions.name):
            return
        if not quiet:
            logger.info("building image: %s", name)
        context = instructions.abs_context
        # Copy the Dockerfile into the build context under a temporary name,
        # removed again in the `finally` block below.
        tf = os.path.join(context, '.Dockerfile')
        try:
            success = False
            shutil.copy(instructions.filename_abs, tf)
            response = self.__docker.api.build(path=context,
                                               dockerfile='.Dockerfile',
                                               tag=name,
                                               # pull=force,
                                               buildargs=instructions.arguments,
                                               target=instructions.build_stage,
                                               decode=True,
                                               rm=True)
            # NOTE(review): `List` in the type comment below assumes
            # `typing.List` is visible to the type checker; it is not
            # imported in this module -- confirm.
            log = [] # type: List[str]
            # Stream the decoded build output; success is detected by
            # scanning the log text.
            for line in response:
                if 'stream' in line:
                    line_msg = line['stream'].rstrip()
                    log.append(line_msg)
                    if not quiet:
                        print(line_msg)
                    if line_msg.startswith('Successfully built'):
                        success = True
            if not success:
                raise ImageBuildFailed(name, log)
            if success and not quiet:
                logger.info("built image: %s", name)
            return
        finally:
            # Always clean up the temporary Dockerfile from the context.
            if os.path.exists(tf):
                os.remove(tf)
    def uninstall(self,
                  name: str,
                  force: bool = False,
                  noprune: bool = False
                  ) -> None:
        """
        Attempts to uninstall a given Docker image.
        Parameters:
            name: the name of the Docker image.
            force: a flag indicating whether or not an exception should be
                thrown if the image associated with the given build
                instructions is not installed. If `True`, no exception
                will be thrown; if `False`, exception will be thrown.
            noprune: a flag indicating whether or not dangling image layers
                should also be removed.
        Raises:
            docker.errors.ImageNotFound: if the image associated with the given
                instructions can't be found.
        """
        try:
            self.__docker.images.remove(image=name,
                                        force=force,
                                        noprune=noprune)
        except docker.errors.ImageNotFound as e:
            if force:
                return
            raise e
    def download(self,
                 name: str,
                 force: bool = False
                 ) -> bool:
        """
        Attempts to download a given Docker image. If `force=True`, then any
        previously installed version of the image (described by the
        instructions) will be replaced by the image on DockerHub.
        Parameters:
            name: the name of the Docker image.
        Returns:
            `True` if successfully downloaded, otherwise `False`.
        """
        try:
            self.__docker.images.pull(name)
            return True
        except docker.errors.NotFound:
            print("Failed to locate image on DockerHub: {}".format(name))
            return False
    def upload(self, name: str) -> bool:
        """
        Attempts to upload a given Docker image from this server to DockerHub.
        Parameters:
            name: the name of the Docker image.
        Returns:
            `True` if successfully uploaded, otherwise `False`.
        """
        try:
            # Stream push progress lines and echo them to stdout.
            out = self.__docker.images.push(name, stream=True)
            for line in out:
                line = line.strip().decode('utf-8')
                jsn = json.loads(line)
                if 'progress' in jsn:
                    line = "{}. {}.".format(jsn['status'], jsn['progress'])
                    print(line, end='\r')
                elif 'status' in jsn:
                    print(jsn['status'])
            print('uploaded image to DockerHub: {}'.format(name))
            return True
        except docker.errors.NotFound:
            print("Failed to push image ({}): not installed.".format(name))
            return False
|
113003
|
import os
import json
from ..base_service import BaseService
from ..exceptions import NodeServerConnectionError, NodeServerTimeoutError
from ..settings import SERVER_TEST_TIMEOUT
class EchoService(BaseService):
    """
    A basic service which will return the value of the parameter
    `echo` as the response.
    Internally, NodeServer uses this service to test if the server
    is running as expected.
    """
    path_to_source = os.path.join(os.path.dirname(__file__), 'echo.js')
    timeout = SERVER_TEST_TIMEOUT
    expected_output = '__NODE_SERVER_RUNNING__'
    @classmethod
    def warn_if_not_configured(cls):
        # The echo service requires no user configuration, so never warn.
        pass
    def test(self):
        """Return True iff the node server echoes the expected sentinel back."""
        self.ensure_loaded()
        payload = {'data': json.dumps({'echo': self.expected_output})}
        try:
            response = self.get_server().send_request_to_service(
                self.get_name(),
                timeout=self.timeout,
                ensure_started=False,
                data=payload,
            )
        except (NodeServerConnectionError, NodeServerTimeoutError):
            return False
        if response.status_code != 200:
            return False
        return response.text == self.expected_output
|
113100
|
import sys
import os
import pycuber as pc
from pycuber.solver import CFOPSolver
# Scramble a fresh cube with a random formula, then solve it with CFOP.
cube = pc.Cube()
scramble = pc.Formula().random()
cube(scramble)
solver = CFOPSolver(cube)
solution = solver.solve(suppress_progress_messages=True)
print(solution)
|
113129
|
import argparse
import glob
import os
import random
import logging
import numpy as np
import math
from tqdm import tqdm
import time
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
from transformers import DataCollatorForLanguageModeling
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as ptl
from pytorch_lightning.logging.test_tube import TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateLogger
# Configure root logging once at import time so preprocessing/training
# progress messages are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# DONE: reproduce RoBERTa numbers on the Longformer corpus
# DONE: testing ddp single machine
# DONE: testing ddp multiple machines
# DONE: testing resume from checkpoint
# TODO: try on a TPU-pod
# TODO: run on beaker on ai2-server1/2
# Optional TPU support: torch_xla is only importable in TPU environments,
# so record its availability instead of failing at import time.
try:
    import torch_xla.core.xla_model as xm
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True
class MMapTextDataset(Dataset):
    """Dataset over a flat np.memmap of uint16 token ids, yielding fixed-size
    chunks wrapped with <s>/</s>. Also hosts the static preprocessing helpers
    that build the memmap cache from raw text files (see raw_text_to_mmap).
    """
    def __init__(self, mmap_filename, chunk_size, bos_token_id, eos_token_id):
        # `chunk_size - 2` to reserve space for <s> and </s>
        self.num_instances = np.memmap(mmap_filename, mode='r', dtype=np.uint16).shape[0] // (chunk_size - 2)
        # defer loading the token_ids memmap until after the first __getitem__ call.
        # when spawning new processes for ddp, there is a hard limit in python < 3.8 that
        # pickle files need to be < 4GB. By waiting until after the first __getitem__ we
        # don't have to pickle the memmap
        self.token_ids = None
        self._mmap_filename = mmap_filename
        self._chunk_size = chunk_size
        self._bos_token_id = bos_token_id
        self._eos_token_id = eos_token_id
    def __len__(self):
        # Number of full (chunk_size - 2)-token windows in the memmap.
        return self.num_instances
    def __getitem__(self, i):
        if self.token_ids is None:
            # Lazily open the memmap in the worker process (see __init__ note).
            self.token_ids = np.memmap(self._mmap_filename, mode='r', dtype=np.uint16)
        from_index = i * (self._chunk_size - 2)
        to_index = (i + 1) * (self._chunk_size - 2)
        # Wrap the raw token window with BOS/EOS to form a full instance.
        data = np.concatenate(([self._bos_token_id], self.token_ids[from_index:to_index], [self._eos_token_id]))
        return torch.tensor(data, dtype=torch.long)
    # ========================= preprocessing code ========================= #
    @staticmethod
    def _process_file(full_fname):
        "Step 1: tokenize an input text file then save token ids into `np.memmap` shards of size `args.shard_size`"
        # NOTE(review): relies on a module-global `args` (and on
        # MMapTextDataset.tokenizer set by raw_text_to_mmap) -- confirm both
        # are initialized before worker processes call this.
        fname = full_fname.split('/')[-1]
        log_filename = f'{args.input_dir}/logs-{args.shard_size}/{fname}.log'
        if os.path.isfile(log_filename):
            logging.info(f'Skipping {full_fname} ...')
            return # log file already exists. Skip current file.
        logging.info(f'Processing {full_fname} ...')
        with open(full_fname, 'r') as fin:
            token_list = []
            shard_count = 0
            tokens_count = 0
            def _write_shard():
                # Flush the accumulated token ids into one memmap shard file.
                if len(token_list) == 0:
                    return
                if token_list[-1] != MMapTextDataset.tokenizer.sep_token_id: # handle a rare case
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
                shared_filename = f'{args.input_dir}/shards-{args.shard_size}/{fname}-{shard_count}.bin'
                logging.info(f'Writing {len(token_list)} tokens to shared {shared_filename}')
                fp = np.memmap(shared_filename, dtype=np.uint16, mode='w+', shape=len(token_list))
                fp[:] = token_list[:]
                del fp # flush and close file
            for line in tqdm(fin):
                line = line.strip()
                if line == '': # drop empty lines
                    continue
                tokens = MMapTextDataset.tokenizer.encode(line, add_special_tokens=False) # `__getitem__` adds special tokens
                token_list.extend(tokens)
                if len(token_list) > args.shard_size:
                    _write_shard()
                    tokens_count += len(token_list)
                    token_list = []
                    shard_count += 1
                else:
                    # Documents (lines) are separated with sep_token in the stream.
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
            _write_shard()
            tokens_count += len(token_list)
        # Writing the log file marks this input file as done, enabling resume.
        with open(log_filename, 'w') as f:
            f.write(f'Generated {tokens_count} tokens in {shard_count + 1} shards')
    @staticmethod
    def _combine_shards(output_fname, shards_list):
        "Step 2: combining memmap shards into one `train.bin` or `val.bin` file"
        # First pass: compute the total length so the output can be preallocated.
        total_size = 0
        for filename in shards_list:
            total_size += np.memmap(filename, mode='r', dtype=np.uint16).shape[0]
        logging.info(f'Writing {total_size} tokens to {output_fname}')
        all_token_ids = np.empty(total_size, dtype=np.uint16)
        last_token_index = 0
        for filename in tqdm(shards_list):
            shared = np.memmap(filename, mode='r', dtype=np.uint16)
            all_token_ids[last_token_index:last_token_index+len(shared)] = shared[:]
            last_token_index += len(shared)
        fp = np.memmap(output_fname, dtype=np.uint16, mode='w+', shape=total_size)
        fp[:] = all_token_ids[:]
        del fp
    @staticmethod
    def raw_text_to_mmap(args):
        """This is the main preprocessing function. It processes all the text files in `args.input_dir` and
        outputs two np.memmap files, one for training and one for validation with ratio `args.train_dev_split`.
        Processing each input file involves tokenizing it, sharding it into shards of size `args.shard_size`,
        then writing each shard as an np.memmap file. The stream of tokens in the memmap file represents documents
        separated with `tokenizer.sep_token`. In `__getitem__`, the `tokenizer.bos_token` and `tokenizer.eos_token`
        are added. The reason for not adding them at preprocessing time is to allow different sequence lengths
        later on. Notice that this is the "FULL-SENTENCES" setting in the RoBERTa paper, Table2.
        """
        MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
        assert len(MMapTextDataset.tokenizer) < 65535 # will use uint16 to store token ids
        all_files = glob.glob(f'{args.input_dir}/*.txt')
        if os.path.exists(f'{args.input_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'):
            logger.info("Cache already exists. Remove the cache directory to regenerate")
            return
        try:
            os.mkdir(f'{args.input_dir}/cache/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/shards-{args.shard_size}/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/logs-{args.shard_size}/') # log progress to be able to resume
        except FileExistsError:
            pass
        # STEP1: tokenizing and saving to shards
        if args.num_preprocessing_workers > 1:
            from multiprocessing.pool import Pool
            with Pool(args.num_preprocessing_workers) as p:
                list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files)))
        else:
            [MMapTextDataset._process_file(f) for f in tqdm(all_files)]
        # STEP2: shuffling shards and combining them into train.bin and val.bin files
        all_shards = glob.glob(f'{args.input_dir}/shards-{args.shard_size}/*.bin')
        random.shuffle(all_shards) # shuffling based on shards not individual lines
        val_shards_count = int(args.train_dev_split * len(all_shards))
        val_shards = all_shards[:val_shards_count]
        train_shards = all_shards[val_shards_count:]
        # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we need to
        # update the dataset to read from multiple shards directly
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/val.bin', val_shards)
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/train.bin', train_shards)
        # Free the tokenizer; it is only needed during preprocessing.
        del MMapTextDataset.tokenizer
# ========================= end preprocessing code ========================= #
class Pretrainer(ptl.LightningModule):
def __init__(self, hparams):
super().__init__()
self.args = hparams
self.hparams = self.args
self.model = AutoModelForMaskedLM.from_pretrained(args.model)
self.config = self.model.config
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
self.pad_token_id = tokenizer.pad_token_id
self.eos_token_id = tokenizer.eos_token_id
self.bos_token_id = tokenizer.bos_token_id
logger.info(f'Creating dataset cache from dir {self.args.input_dir}. This could be slow the first time.')
MMapTextDataset.raw_text_to_mmap(args)
# TODO: add support for other objective functions (whole word masking, BART objectives)
self.data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=self.args.mlm_prob
)
self.start_time = 0
def to(self, *args, **kwargs):
param_count_before_to = len(list(self.parameters()))
super().to(*args, **kwargs)
if self.trainer.use_tpu:
# need to re-tie the weights after moving to XLA!
self.model.tie_weights()
if 'roberta' in self.args.model:
self.model.lm_head.bias = self.model.lm_head.decoder.bias
param_count_after_to = len(list(self.parameters()))
assert param_count_before_to == param_count_after_to
def forward(self, input_ids=None, labels=None):
# get the padding mask - 1 for NOT masked, 0 for MASKED/PAD
attention_mask = (input_ids != self.pad_token_id).int()
# output is loss, prediction_scores, hidden_states
output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
return output[0] # loss
def training_step(self, batch, batch_nb):
loss = self(**batch)
input_ids = batch['input_ids']
tensorboard_logs = {
'input_size': input_ids.numel(),
'mlm_loss': loss,
'mlm_bpc': loss/math.log(2),
'mlm_perplexity': torch.exp(loss),
'token_per_step': input_ids.numel() * self.args.grad_accum * self.trainer.world_size,
}
if self.start_time != 0:
elapsed_time = time.time() - self.start_time
tensorboard_logs['second_per_batch'] = elapsed_time
self.start_time = time.time()
if self.on_gpu:
tensorboard_logs['memory'] = torch.cuda.memory_allocated(loss.device) / 1024 ** 3
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_nb):
# TODO: log how long evaluation takes
self.start_time = 0 # reset training_step timer
loss = self(**batch)
tensorboard_logs = {
'val_mlm_loss': loss.detach(),
}
return {'val_loss': tensorboard_logs["val_mlm_loss"], 'log': tensorboard_logs}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in x['log']]).mean()
if self.use_ddp:
# TODO: PTL is already doing this. Is it still needed here?
# https://github.com/PyTorchLightning/pytorch-lightning/blob/0.8.5/pytorch_lightning/metrics/converters.py#L251
torch.distributed.all_reduce(avg_loss, op=torch.distributed.ReduceOp.SUM)
avg_loss /= torch.distributed.get_world_size()
elif self.use_tpu:
avg_loss = xm.all_reduce(xm.REDUCE_SUM, avg_loss) / xm.xrt_world_size()
logs = {'val_mlm_loss': avg_loss}
return {'log': logs, 'progress_bar': logs, "val_loss": avg_loss}
def configure_optimizers(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.args.train_steps
)
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
def _get_loader(self, fname, is_train):
dataset = MMapTextDataset(fname, chunk_size=self.args.seqlen,
bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id)
# TODO: consider `replace_sampler_ddp=True` and removing the following if statement
if self.trainer.use_ddp:
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=is_train)
shuffle = False
elif self.trainer.use_tpu:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=xm.xrt_world_size(),
rank=xm.get_ordinal(),
shuffle=is_train,
)
shuffle = False
else:
sampler = None
shuffle = is_train
loader = DataLoader(
dataset,
batch_size=self.args.batch_size,
shuffle=shuffle,
sampler=sampler,
num_workers=self.args.num_workers,
collate_fn=self.data_collator,
drop_last=is_train,
)
return loader
def train_dataloader(self):
return self._get_loader(f'{self.args.input_dir}/cache/train.bin', True)
def val_dataloader(self):
return self._get_loader(f'{self.args.input_dir}/cache/val.bin', False)
def grad_norm(self, norm_type):
# Override PTL `grad_norm` function to only return `total_grad_norm` instead norms of individual params
# TODO: grad_norm reporting needs to take fp16 loss scale into account
parameters = [p for p in self.parameters() if p.grad is not None]
device = parameters[0].device
total_norm = torch.zeros([], device=device if parameters else None)
norm_type = float(norm_type)
for p in parameters:
param_norm = p.grad.data.pow(norm_type).sum()
total_norm.add_(param_norm)
total_norm = (total_norm ** (1.0 / norm_type))
return {'total_grad_norm': total_norm}
@staticmethod
def add_args(parser):
    """Register every CLI argument on ``parser`` and return it."""
    parser.add_argument("--seed", type=int, default=3)

    # Dataset. Some of these params are only useful when generating the dataset cache
    parser.add_argument("--input_dir", type=str, default='/net/nfs.corp/s2-research/beltagy/longformer/data/')
    # Used only at the preprocessing phase
    parser.add_argument("--train_dev_split", type=float, default=0.05)
    parser.add_argument("--shard_size", type=int, default=1024 ** 3 // 4)  # 250MB
    parser.add_argument("--num_preprocessing_workers", type=int, default=1)
    # Used only at the training phase
    parser.add_argument("--seqlen", type=int, default=512)
    parser.add_argument("--mlm_prob", type=float, default=0.15)

    # HF model loading
    parser.add_argument("--tokenizer", type=str, default='roberta-base')
    parser.add_argument("--model", type=str, default='roberta-base')

    # Checkpointing and logging
    parser.add_argument("--save_dir", type=str, default='/runs/')
    parser.add_argument("--save_prefix", type=str, default='test',
                        help="path of output directory is --save_dir/--save_prefix")
    parser.add_argument("--resume", type=str, default=None,  # It is better to use a different output dir.
                        help="Path to a checkpoint to load model weights and training state. It overwrites args")
    parser.add_argument("--resume_model_only", type=str, default=None,
                        help="Path to a checkpoint to load model weights but not training state")
    parser.add_argument("--log_rate", type=int, default=10)
    parser.add_argument("--disable_checkpointing", type=bool, default=False)

    # Training hyperparams
    parser.add_argument("--lr", type=float, default=1e-5)
    parser.add_argument("--train_steps", type=int, default=3000, help='# training grad. updates')
    parser.add_argument("--warmup_steps", type=int, default=1000, help='# warmup grad. updates')
    parser.add_argument("--val_every", type=int, default=1000, help='# training grad. updates between evaluations')
    parser.add_argument("--val_batches", type=int, default=1000, help='# evaluation **batches**')
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--adam_epsilon", type=float, default=1e-6)
    parser.add_argument("--grad_clip", type=float, default=0)  # TODO: test this with fp16. Likely not working

    # RoBERTa's tokens_per_step = 2^18 = 512(seqlen) x 1(gpu_count) x 32(batch_size) x 16(grad_accum)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--grad_accum", type=int, default=1)

    # Compute resources
    parser.add_argument("--fp16", type=bool, default=False)
    parser.add_argument("--num_workers", type=int, default=0)
    parser.add_argument("--gpu_count", type=int, default=1,  # `--gpus` is reserved for internal use by PTL
                        help="Number of gpus. This respects `CUDA_VISIBLE_DEVICES`")

    # For multi-node training, use the PyTorch launch script. The script and instructions can be found here:
    # https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
    # To run PTL in a mode compatible with the launch script, two things are needed:
    #   - pass the argument `--use_env` to `torch.distributed.launch`
    #   - make sure `--nproc_per_node` matches `--gpu_count` and `--nnodes` matches `--node_count`.
    # For example, to run on 2 nodes, 3 gpus each, the command line on node rank 1 would be like:
    #   >>>> python -m torch.distributed.launch  \
    #               --use_env  --nnodes 2  --nproc_per_node 3  \
    #               --node_rank 1  --master_addr s2-server4  --master_port 12343  \
    #               scripts/pretrain.py  \
    #               --gpu_count 2  --node_count 2  \
    #               --input_dir my_data_dir --save_prefix test_multinode
    parser.add_argument("--node_count", type=int, default=1,
                        help="Number of nodes. It needs to match --nnodes of torch.distributed.launch")
    parser.add_argument("--tpu_core_count", type=int, default=None)

    return parser
def main(args):
    """Seed all RNGs, build (or resume) the Pretrainer, and launch training."""
    random.seed(args.seed * 10)
    np.random.seed(args.seed * 100)
    torch.manual_seed(args.seed * 1000)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed * 10000)

    if args.resume_model_only is not None:
        # Load weights only; full training state resume goes through
        # `resume_from_checkpoint` below.
        pretrainer = Pretrainer.load_from_checkpoint(args.resume_model_only, args)
    else:
        pretrainer = Pretrainer(args)

    # logger here is a SummaryWriter for tensorboard
    # it is used by the trainer, and certain return variables
    # from the model are automatically logged
    logger = TestTubeLogger(
        save_dir=args.save_dir,
        name=args.save_prefix,
        version=0  # always use version=0
    )

    checkpoint_callback = ModelCheckpoint(
        # model saved to filepath/prefix_....
        filepath=os.path.join(args.save_dir, args.save_prefix, 'checkpoint'),
        prefix='',
        save_top_k=1,
        save_last=True,
        verbose=True,
        monitor='val_loss',
        mode='min',
        period=-1,  # to allow multiple checkpoints per epoch
    )

    args.val_every *= args.grad_accum  # PTL is expecting number of batches_per_gpu

    trainer = ptl.Trainer(
        gpus=args.gpu_count,
        num_nodes=args.node_count,
        num_tpu_cores=args.tpu_core_count,
        distributed_backend='ddp' if (args.gpu_count > 1 or args.node_count > 1) else None,
        replace_sampler_ddp=False,  # samplers are built manually in _get_loader
        track_grad_norm=2,
        max_epochs=10000, min_epochs=0, max_steps=args.train_steps,  # run for many epochs, but stop after max_steps
        val_check_interval=args.val_every, limit_val_batches=args.val_batches,
        early_stop_callback=None,
        row_log_interval=args.log_rate,
        progress_bar_refresh_rate=args.log_rate,
        logger=logger,
        checkpoint_callback=checkpoint_callback if not args.disable_checkpointing else None,
        accumulate_grad_batches=args.grad_accum,
        resume_from_checkpoint=args.resume,
        gradient_clip_val=args.grad_clip,
        precision=16 if args.fp16 else 32, amp_level='O2',
        num_sanity_val_steps=2,
        callbacks=[LearningRateLogger()],
    )
    trainer.fit(pretrainer)
if __name__ == "__main__":
    # Build the CLI from the model's own argument registry, then train.
    parser = Pretrainer.add_args(argparse.ArgumentParser(description="pretrain"))
    args = parser.parse_args()
    main(args)
|
113138
|
from decimal import Decimal
import operator
# Valid integer range for Cardano metadata values (and [0, _MAXINT] for keys).
_MININT = -(2 ** 64 - 1)
_MAXINT = 2 ** 64 - 1


class Metadata(dict):
    """
    Represents Cardano transaction metadata. Inherits from :class:`dict` but passes all keys
    and values through validity check.

    :param mapping: a sequence of (key, value) pairs
    """

    # Maps serialized type tags to the callable that deserializes a value of
    # that type. "list" unwraps each single-entry {"type": value} element.
    TYPE_RESOLVERS = {
        "string": lambda s: s,
        "int": lambda i: Metadata.deserialize_int(i),
        "bytes": bytes.fromhex,
        "list": lambda l: [
            Metadata.TYPE_RESOLVERS[k](v)
            for k, v in map(
                operator.itemgetter(0),
                map(list, map(operator.methodcaller("items"), l)),
            )
        ],
        "map": lambda m: Metadata.deserialize_map(m),
    }

    @staticmethod
    def validate_key(key):
        """
        Checks if the key is allowed, i.e. is an :class:`int` and within the allowed range.
        Raises :class:`KeyError` otherwise.
        """
        if not isinstance(key, int):
            raise KeyError("Metadata keys must be integers")
        if key < 0:
            raise KeyError("Metadata key {:d} is negative".format(key))
        if key > _MAXINT:
            raise KeyError("Metadata key {:d} is over 2^64-1".format(key))
        return key

    @staticmethod
    def validate_value(val):
        """
        Checks if the value is allowed, i.e. is one of :class:`int`, :class:`str`, :class:`bytes`,
        :class:`bytearray`, :class:`list` or :class:`dict`.
        Raises :class:`TypeError` otherwise. Also raises :class:`ValueError` if the value is of
        proper type but exceeds range limit for Cardano metadata.
        """
        if isinstance(val, (str, bytes, bytearray)):
            if len(val) > 64:
                raise ValueError(
                    "The string {} is too long ({:d} > 64)".format(val, len(val))
                )
        elif isinstance(val, int):
            if val < _MININT:
                raise ValueError("Int {:d} is less than -2^64-1".format(val))
            elif val > _MAXINT:
                raise ValueError("Int {:d} is over 2^64-1".format(val))
        elif isinstance(val, (list, tuple)):
            # FIX: validate each element recursively. Previously lists were
            # accepted unchecked (unlike dicts), letting e.g. over-long
            # strings inside a list slip through validation.
            for item in val:
                Metadata.validate_value(item)
        elif isinstance(val, dict):
            for k, v in val.items():
                Metadata.validate_value(k)
                Metadata.validate_value(v)
        else:
            raise TypeError(
                "Metadata values must be of int, str, bytes, bytearray, lists of thereof or another Metadata instances, not {}".format(
                    str(type(val))
                )
            )
        return val

    @staticmethod
    def serialize_value(val):
        """
        Serializes Python value to an object that can be passed to transaction as a metadata value.
        The returned object is a mapping which contains both the type name and the value.
        Raises :class:`RuntimeError` when a value of unrecognized type has been passed.
        """
        if isinstance(val, int):
            return {"int": val}
        elif isinstance(val, str):
            return {"string": val}
        elif isinstance(val, (bytes, bytearray)):
            return {"bytes": val.hex()}
        elif isinstance(val, list):
            return {"list": [Metadata.serialize_value(i) for i in val]}
        elif isinstance(val, dict):
            return {
                "map": [
                    {"k": Metadata.serialize_value(k), "v": Metadata.serialize_value(v)}
                    for k, v in val.items()
                ]
            }
        # This should never happen
        raise RuntimeError(
            "Found unserializable value of {} (type {})".format(val, str(type(val)))
        )

    def serialize(self):
        """
        Returns serialized form of the metadata, which can be passed to the transaction.
        """
        return {str(k): Metadata.serialize_value(v) for k, v in self.items()}

    @staticmethod
    def deserialize_item(key, vdata):
        # A serialized value is a single-entry dict: {"typename": value}.
        if len(vdata) > 1:
            raise ValueError(
                "The value dict for key {:s} has {:d} members while only one is permitted".format(
                    key, len(vdata)
                )
            )
        typename = list(vdata.keys()).pop()
        val = list(vdata.values()).pop()
        return {int(key): Metadata.TYPE_RESOLVERS[typename](val)}

    @staticmethod
    def deserialize_int(i):
        # Ints may arrive as Decimal from some JSON decoders.
        if isinstance(i, int):
            return i
        elif isinstance(i, Decimal):
            return int(i.quantize(1))
        raise TypeError("Got int serialized as {} of value {}".format(str(type(i)), i))

    @staticmethod
    def deserialize_map(themap):
        data = {}
        for m in themap:
            ktype, kval = list(m["k"].items()).pop()
            vtype, vval = list(m["v"].items()).pop()
            key = Metadata.TYPE_RESOLVERS[ktype](kval)
            # dict keys must be hashable; convert mutable containers.
            if isinstance(key, list):
                key = tuple(key)
            elif isinstance(key, dict):
                key = ImmutableDict(key)
            data[key] = Metadata.TYPE_RESOLVERS[vtype](vval)
        return data

    @staticmethod
    def deserialize(txdata):
        """
        Deserializes transaction metadata :class:`dict` and returns :class:`Metadata` instance.

        :param txdata: the transaction data
        """
        data = {}
        for key, vdata in txdata.items():
            data.update(Metadata.deserialize_item(key, vdata))
        return Metadata(data.items())

    def __init__(self, *args, **kwargs):
        if len(args) > 0:
            for k, v in args[0]:
                Metadata.validate_key(k)
                Metadata.validate_value(v)
        super(Metadata, self).__init__(*args, **kwargs)

    def __setitem__(self, key, val):
        return super(Metadata, self).__setitem__(
            Metadata.validate_key(key), Metadata.validate_value(val)
        )
class ImmutableDict(dict):
    """
    A read-only flavor of ``dict``: every mutating method raises
    :class:`RuntimeError`, and a hash is derived from the sorted items so
    instances can be used as mapping keys.
    """

    def __hash__(self):
        # Hash a canonical "k=v|k=v|..." string built from key-sorted items.
        ordered = sorted(self.items(), key=lambda kv: kv[0])
        joined = "|".join("{}={}".format(k, v) for k, v in ordered)
        return hash(joined)

    def __setitem__(self, key, value):
        raise RuntimeError("ImmutableDict doesn't allow changes")

    def __delitem__(self, key):
        raise RuntimeError("ImmutableDict doesn't allow changes")

    def clear(self):
        raise RuntimeError("ImmutableDict doesn't allow changes")

    def pop(self, key):
        raise RuntimeError("ImmutableDict doesn't allow changes")

    def popitem(self):
        raise RuntimeError("ImmutableDict doesn't allow changes")

    def update(self, *args, **kwargs):
        raise RuntimeError("ImmutableDict doesn't allow changes")
|
113159
|
import torch
from torch.autograd import Variable
from .convert import to_var
from .vocab import PAD_TOKEN, SOS_TOKEN, EOS_TOKEN
def pad(tensor, length, dtype=None):
    """Right-pad ``tensor`` with zeros along dim 0 up to ``length``.

    The zero filler is allocated on the GPU (``.cuda()``), matching the
    original behavior; callers therefore need a CUDA device.

    :param tensor: a Tensor (or legacy ``Variable``) of shape ``(n, ...)``
    :param length: target size of dim 0; if ``n >= length`` the input is
        returned unchanged
    :param dtype: dtype of the zero filler (``None`` -> torch default)
    """
    # FIX: the original duplicated identical logic in two branches, one for
    # `Variable` and one for plain tensors. `Variable` is a Tensor subclass
    # in modern PyTorch (cat/size/zeros behave the same), so a single code
    # path suffices.
    if length > tensor.size(0):
        filler = torch.zeros(
            length - tensor.size(0), *tensor.size()[1:], dtype=dtype).cuda()
        return torch.cat([tensor, filler])
    return tensor
def pad_and_pack(tensor_list):
    """Pad every tensor in ``tensor_list`` to the longest length along dim 0
    and stack them into one batch tensor.

    Returns ``(packed, length_list)`` where ``length_list`` holds the
    original dim-0 sizes.
    """
    lengths = [t.size(0) for t in tensor_list]
    longest = max(lengths)
    padded_tensors = [pad(t, longest) for t in tensor_list]
    return torch.stack(padded_tensors, 0), lengths
def pad_tokens(tokens, max_sentence_length=30):
    """Truncate/pad a token list to exactly ``max_sentence_length`` entries:
    up to ``max_sentence_length - 1`` tokens, one EOS, then PAD filler."""
    limit = max_sentence_length - 1
    n_valid_tokens = len(tokens)
    if n_valid_tokens > limit:
        tokens = tokens[:limit]
    # Negative counts multiply to an empty list, so the truncated case
    # naturally gets no padding.
    padding = [PAD_TOKEN] * (max_sentence_length - n_valid_tokens - 1)
    return tokens + [EOS_TOKEN] + padding
def pad_conversation(conversation, max_sentence_length=30):
    """Apply :func:`pad_tokens` to every sentence of a conversation."""
    return [
        pad_tokens(sentence, max_sentence_length=max_sentence_length)
        for sentence in conversation
    ]
def pad_sentences(conversations, max_sentence_length=30, max_conversation_length=10):
    """Truncate conversations to ``max_conversation_length`` sentences and pad
    each sentence to ``max_sentence_length`` tokens.

    :return: ``(sentences, sentence_length)`` where ``sentences`` is
        [n_conversations, n_sentence (various), max_sentence_length] and
        ``sentence_length`` is [n_conversations, n_sentence (various)].
    """
    all_padded_sentences = []
    all_sentence_length = []

    for conversation in conversations:
        if len(conversation) > max_conversation_length:
            conversation = conversation[:max_conversation_length]
        sentence_length = [min(len(sentence) + 1, max_sentence_length)  # +1 for EOS token
                           for sentence in conversation]
        all_sentence_length.append(sentence_length)

        # FIX: forward max_sentence_length — the original called
        # pad_conversation(conversation) and silently used the default 30
        # regardless of the parameter passed in here.
        sentences = pad_conversation(conversation,
                                     max_sentence_length=max_sentence_length)
        all_padded_sentences.append(sentences)

    # [n_conversations, n_sentence (various), max_sentence_length]
    sentences = all_padded_sentences
    # [n_conversations, n_sentence (various)]
    sentence_length = all_sentence_length
    return sentences, sentence_length
|
113223
|
from __future__ import annotations
import math
class Hysteresis:
    """Quantize floats to ints with hysteresis: the reported integer only
    switches to a neighboring value once the input has moved ``amount``
    closer to that neighbor, suppressing flicker near the midpoint."""

    def __init__(self, amount: float, initial_value: float):
        self._amount = amount
        self._at = round(initial_value)

    def __call__(self, value: float) -> int:
        lo = math.floor(value)
        hi = math.ceil(value)
        # Jumped outside the current bracket entirely: snap to nearest.
        if self._at not in (lo, hi):
            self._at = round(value)
        dist_lo = abs(value - lo)
        dist_hi = abs(value - hi)
        # Switch down/up only once the margin exceeds the hysteresis amount.
        if dist_lo + self._amount < dist_hi and self._at == hi:
            self._at = lo
        if dist_hi + self._amount < dist_lo and self._at == lo:
            self._at = hi
        return self._at
if __name__ == '__main__':
    # Demo: quantize a rising-then-falling sequence; note the output sticks
    # to its previous integer until the 0.2 hysteresis margin is exceeded.
    h = Hysteresis(0.2, 0)
    for v in [0.5, 0.7, 1.0, 1.2, 1.5, 1.3, 1.5, 1.7, 1.5]:
        print(v, h(v))
|
113229
|
from __future__ import annotations
# Declarative schema of JSON-RPC content types used by this package.
contenttypes = {
    'Call': {
        'jsonrpc': 'Text',
        'method': 'Text',
        'params': 'Map',
        'id': 'Integer',
    },
    # A request is either a single Call or a batch (list) of Calls.
    'Request': ['Call', ['Call']],
    'Response': {},
    'Provider': {
        'type': ['http', 'websocket'],
        'url': 'Text',
        'session_kwargs': 'Map',
    },
}

#
# # rpc call schema
#

# Fields of an RPC block object that are hex-encoded quantities.
rpc_block_quantities = [
    'baseFeePerGas',
    'number',
    'difficulty',
    'totalDifficulty',
    'size',
    'gasLimit',
    'gasUsed',
    'timestamp',
]

# Fields of an RPC log object that are hex-encoded quantities.
rpc_log_quantities = [
    'logIndex',
    'transactionIndex',
    'blockNumber',
]

# Fields of an RPC transaction object that are hex-encoded quantities.
rpc_transaction_quantities = [
    'blockNumber',
    'gas',
    'gasPrice',
    'maxFeePerGas',
    'maxPriorityFeePerGas',
    'nonce',
    'transactionIndex',
    'value',
    'v',
]

# Fields of an RPC transaction receipt that are hex-encoded quantities.
rpc_transaction_receipt_quantities = [
    'transactionIndex',
    'blockNumber',
    'cumulativeGasUsed',
    'effectiveGasPrice',
    'gasUsed',
    'quantity',
    'status',
]

# RPC methods whose entire result is a single hex-encoded quantity.
rpc_result_scalar_quantities = [
    'eth_blockNumber',
    'eth_getUncleCountByBlockHash',
    'eth_getUncleCountByBlockNumber',
    'eth_newFilter',
    'eth_newBlockFilter',
    'eth_newPendingTransactionFilter',
]

# RPC methods returning a map: which of its fields are quantities.
rpc_result_map_quantities = {
    'eth_getBlockByHash': rpc_block_quantities,
    'eth_getBlockByNumber': rpc_block_quantities,
    'eth_getUncleByBlockHashAndIndex': rpc_block_quantities,
    'eth_getUncleByBlockNumberAndIndex': rpc_block_quantities,
}

# RPC methods returning a list of maps: quantity fields of each element.
rpc_result_list_map_quantities = {
    'eth_getLogs': rpc_log_quantities,
}

# For each batch constructor: maps its plural batch-argument names to the
# corresponding singular per-call argument name.
rpc_constructor_batch_inputs = {
    'eth_get_block_by_hash': {'block_hashes': 'block_hash'},
    'eth_get_block_by_number': {'block_numbers': 'block_number'},
    'eth_get_uncle_count_by_block_hash': {'block_hashes': 'block_hash'},
    'eth_get_uncle_count_by_block_number': {'block_numbers': 'block_number'},
    'eth_compile_lll': {'codes': 'code'},
    'eth_compile_solidity': {'codes': 'code'},
    'eth_compile_serpent': {'codes': 'code'},
    'eth_get_logs': {
        'topic_lists': 'topic',
        'addresses': 'addresses',
        'block_hashes': 'block_hash',
    },
    'web3_sha3': {'datas': 'data'},
    'eth_call': {
        'to_addresses': 'to_address',
        'block_numbers': 'block_number',
        'function_parameter_list': 'function_parameters',
    },
    'eth_estimate_gas': {
        'to_addresses': 'to_address',
        'block_numbers': 'block_number',
        'function_parameter_list': 'function_parameters',
    },
    'eth_get_balance': {
        'addresses': 'address',
        'block_numbers': 'block_number',
    },
    'eth_get_storage_at': {
        'addresses': 'address',
        'positions': 'position',
        'block_numbers': 'block_number',
    },
    'eth_get_code': {'addresses': 'address', 'block_numbers': 'block_number'},
    'eth_send_raw_transaction': {'datas': 'data'},
    'eth_get_transaction_count': {
        'addresses': 'address',
        'block_numbers': 'block_number',
    },
    'eth_get_transaction_by_hash': {'transaction_hashes': 'transaction_hash'},
    'eth_get_transaction_by_block_hash_and_index': {
        'block_hashes': 'block_hash',
        'indices': 'index',
    },
    'eth_get_transaction_by_block_number_and_index': {
        'block_numbers': 'block_number',
        'indices': 'index',
    },
    'eth_get_transaction_receipt': {'transaction_hashes': 'transaction_hash'},
    'eth_get_block_transaction_count_by_hash': {'block_hashes': 'block_hash'},
    'eth_get_block_transaction_count_by_number': {
        'block_numbers': 'block_number'
    },
}
|
113245
|
from app import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from hashlib import md5
"""This is the model for Upvote class"""
class Upvote(db.Model):
__tablename__ = "upvotes"
vote_id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
question_id = db.Column(db.Integer,db.ForeignKey('questions.question_id'))
answer_id = db.Column(db.Integer,db.ForeignKey('answers.answer_id'))
comment_id = db.Column(db.Integer, db.ForeignKey('comments.comment_id'))
def __repr__(self):
return '<Upvote %r>' % (self.vote_id)
"""This is the model for Downvote class"""
class Downvote(db.Model):
__tablename__ = "downvotes"
vote_id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
question_id = db.Column(db.Integer,db.ForeignKey('questions.question_id'))
answer_id = db.Column(db.Integer,db.ForeignKey('answers.answer_id'))
comment_id = db.Column(db.Integer, db.ForeignKey('comments.comment_id'))
def __repr__(self):
return '<downVote %r>' % (self.vote_id)
|
113254
|
from __future__ import print_function
import unittest
import numpy as np
from simpegEM1D import (
GlobalEM1DProblemFD, GlobalEM1DSurveyFD,
get_vertical_discretization_frequency
)
from SimPEG import (
regularization, Inversion, InvProblem,
DataMisfit, Utils, Mesh, Maps, Optimization,
Tests
)
np.random.seed(41)
class GlobalEM1DFD(unittest.TestCase):
    """Derivative and adjoint sanity checks for the stitched frequency-domain
    EM1D problem on a layered log-conductivity model."""

    def setUp(self, parallel=True):
        # Three-frequency sounding system; vertical cell sizes derived from
        # the frequencies and a 10 ohm-m background.
        frequency = np.array([900, 7200, 56000], dtype=float)
        hz = get_vertical_discretization_frequency(
            frequency, sigma_background=1./10.
        )
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        mesh = Mesh.TensorMesh([hx, hz], x0='00')
        # Three-layer conductivity structure by depth.
        inds = mesh.gridCC[:, 1] < 25
        inds_1 = mesh.gridCC[:, 1] < 50
        sigma = np.ones(mesh.nC) * 1./100.
        sigma[inds_1] = 1./10.
        sigma[inds] = 1./50.
        sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten()
        mSynth = np.log(sigma_em1d)  # model is log-conductivity
        x = mesh.vectorCCx
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        mapping = Maps.ExpMap(mesh)
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="Hz",
            field_type='secondary',
            topo=topo
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=mapping, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        survey.makeSyntheticData(mSynth)

        # Now set up the problem to do some minimization
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis

    def test_misfit(self):
        # Check Jvec against a finite-difference of dpred.
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test: w^T (J v) should equal v^T (J^T w).
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        # Derivative check of the data misfit functional.
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
class GlobalEM1DFD_Height(unittest.TestCase):
    """Same derivative/adjoint checks as GlobalEM1DFD, but for a half-space
    model that jointly inverts conductivity and sensor height."""

    def setUp(self, parallel=True):
        frequency = np.array([900, 7200, 56000], dtype=float)
        hz = np.r_[1.]  # single layer (half-space; see half_switch below)
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        e = np.ones(n_sounding)
        # Model: per-sounding log-conductivity followed by per-sounding height.
        mSynth = np.r_[e*np.log(1./100.), e*20]
        x = np.arange(n_sounding)
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        # Split the model vector into its two physical blocks.
        wires = Maps.Wires(('sigma', n_sounding),('h', n_sounding))
        expmap = Maps.ExpMap(nP=n_sounding)
        sigmaMap = expmap * wires.sigma
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="ppm",
            field_type='secondary',
            topo=topo,
            half_switch=True
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=sigmaMap, hMap=wires.h, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        survey.makeSyntheticData(mSynth)

        # Now set up the problem to do some minimization
        mesh = Mesh.TensorMesh([int(n_sounding * 2)])
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        # Perturb the synthetic model so derivative tests start off-optimum.
        self.m0 = mSynth * 1.2
        self.survey = survey
        self.dmis = dmis

    def test_misfit(self):
        # Check Jvec against a finite-difference of dpred.
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test: w^T (J v) should equal v^T (J^T w).
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        # Derivative check of the data misfit functional.
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
if __name__ == '__main__':
    # Run both test cases via the standard unittest runner.
    unittest.main()
|
113256
|
from gensim.models import Word2Vec, KeyedVectors
# Class for a memory-friendly iterator over the dataset
# Class for a memory-friendly iterator over the dataset
class MemoryFriendlyFileIterator(object):
    """Iterate over a text file line by line, yielding each line split on
    whitespace, without loading the whole file into memory. The iterator is
    reusable: each ``__iter__`` call opens the file afresh."""

    def __init__(self, filename):
        self.filename = filename

    def __iter__(self):
        # FIX: use a context manager — the original `for line in open(...)`
        # leaked an open file handle on every iteration pass.
        with open(self.filename) as handle:
            for line in handle:
                yield line.split()
def create_word2vec_embedding_from_dataset(
        dataset, dim_rho=300, min_count=1, sg=1,
        workers=25, negative_samples=10, window_size=4, iters=50,
        embedding_file_path=None, save_c_format_w2vec=False, debug_mode=False) -> KeyedVectors:
    """
    Creates a Word2Vec embedding from dataset file or a list of sentences.

    If a file path is given, the file must be composed
    by a sequence of sentences separated by \\n.
    If the dataset is big, prefer using its file path.

    Parameters:
    ===
        dataset (str or list of str): txt file containing the dataset or a list of sentences
        dim_rho (int): dimensionality of the word embeddings
        min_count (int): minimum term frequency (to define the vocabulary)
        sg (int): whether to use skip-gram
        workers (int): number of CPU cores
        negative_samples (int): number of negative samples
        window_size (int): window size to determine context
        iters (int): number of iterations
        embedding_file_path (str): optional. File to save the word embeddings
        save_c_format_w2vec (bool): whether to save embeddings as word2vec C format (BIN and TXT files)
        debug_mode (bool): whether or not to log function's operations to the console. By default, no logs are made

    Returns:
    ===
        Word2VecKeyedVectors: mapping between words and their vector representations.
        Example:
            { 'water': nd.array([0.024187922, 0.053684134, 0.034520667, ... ]) }
    """
    assert isinstance(dataset, str) or isinstance(dataset, list), \
        'dataset must be file path or list of sentences'
    if isinstance(dataset, str):
        assert isinstance(embedding_file_path, str), \
            'if dataset is a file path, an output embeddings file path must be given'
    if save_c_format_w2vec:
        assert isinstance(embedding_file_path, str), \
            'if save_c_format_w2vec is True, an output embeddings file path must be given'

    if debug_mode:
        print('Creating memory-friendly iterator...')
    sentences = MemoryFriendlyFileIterator(dataset) if isinstance(
        dataset, str) else [document.split() for document in dataset]

    if debug_mode:
        print('Training Word2Vec model with dataset...')
    # NOTE(review): `size` and `iter` are pre-4.0 gensim keyword names
    # (renamed to `vector_size`/`epochs` in gensim 4) — confirm the pinned
    # gensim version before upgrading.
    model = Word2Vec(
        sentences,
        min_count=min_count,
        sg=sg,
        size=dim_rho,
        iter=iters,
        workers=workers,
        negative=negative_samples,
        window=window_size)
    embeddings = model.wv

    if embedding_file_path is not None:
        if debug_mode:
            print('Saving word-vector mappings to file...')
        embeddings.save(embedding_file_path)
        if save_c_format_w2vec:
            if debug_mode:
                print('Saving BIN/TXT original C Word2vec files...')
            embeddings.save_word2vec_format(
                f'{embedding_file_path}.bin', binary=True)
            embeddings.save_word2vec_format(
                f'{embedding_file_path}.txt', binary=False)

    return embeddings
|
113258
|
import prefect
from prefect import task, Flow
from prefect.run_configs import DockerRun
from prefect.storage import GitHub
@task
def hello_task():
    """Minimal Prefect task: emit a greeting through the flow-run logger."""
    logger = prefect.context.get("logger")
    logger.info("Hello world!")
# Flow definition: code is pulled from GitHub at run time and executed
# inside a Docker container running the latest Prefect image.
with Flow(
    "hello-flow",
    storage=GitHub(repo="kvnkho/demos", path="prefect/docker_error.py", add_default_labels=False),
    run_config=DockerRun(
        image="prefecthq/prefect:latest",
        labels=["docker"],  # routes runs to agents carrying this label
    )
) as flow:
    hello = hello_task()

if __name__ == '__main__':
    # Register (not run) the flow with the 'Test' project on the backend.
    flow.register(project_name='Test')
|
113306
|
import FINE as fn
import pandas as pd
import numpy as np
"""
Here we are testing differnt inputs for time-invariant conversion factors that are
not covered in the minimal test system or other tests.
"""
def create_core_esm():
    """
    We create a core esm that only consists of a source and a sink in one location.
    """
    numberOfTimeSteps = 4
    hoursPerTimeStep = 2190  # 4 steps x 2190 h = one year

    # Create an energy system model instance
    esM = fn.EnergySystemModel(
        locations={"ElectrolyzerLocation"},
        commodities={"electricity", "hydrogen"},
        numberOfTimeSteps=numberOfTimeSteps,
        commodityUnitsDict={
            "electricity": r"kW$_{el}$",
            "hydrogen": r"kW$_{H_{2},LHV}$",
        },
        hoursPerTimeStep=hoursPerTimeStep,
        costUnit="1 Euro",
        lengthUnit="km",
        verboseLogLevel=2,
    )

    # Source: electricity can be bought without a capacity limit.
    esM.add(
        fn.Source(
            esM=esM,
            name="Electricity market",
            commodity="electricity",
            hasCapacityVariable=False,
        )
    )

    # Sink: fixed hydrogen demand of 1 kW per hour in every time step.
    demand = pd.Series(np.array([1.0, 1.0, 1.0, 1.0])) * hoursPerTimeStep
    esM.add(
        fn.Sink(
            esM=esM,
            name="Industry site",
            commodity="hydrogen",
            hasCapacityVariable=False,
            operationRateFix=demand,
        )
    )
    return esM
def test_conversion_factors_as_series():
    """
    Input as pandas.Series for one location.
    """
    esM = create_core_esm()
    esM.add(
        fn.Conversion(
            esM=esM,
            name="Electrolyzers_VarConvFac",
            physicalUnit=r"kW$_{el}$",
            commodityConversionFactors=pd.Series(
                [0.7, -1], index=["hydrogen", "electricity"]
            ),  # Here we add a Series of time invariant conversion factors.
            hasCapacityVariable=True,
            investPerCapacity=1000,  # euro/kW
            opexPerCapacity=500 * 0.025,
            interestRate=0.08,
            capacityMax=1000,
            economicLifetime=10,
            locationalEligibility=pd.Series([1], ["ElectrolyzerLocation"]),
        )
    )

    # optimize — solving must succeed; assertions are implicit in FINE's
    # verbose validation of the model setup.
    esM.optimize(timeSeriesAggregation=False, solver="glpk")
|
113313
|
import os, hashlib, binascii as ba
import base64, re
import time, math
from colors import *
# from functools import lru_cache
from numba import jit
from cachetools.func import *
from cachy import *
def iif(a, b, c):
    """Inline-if helper: return ``b`` when ``a`` is truthy, else ``c``."""
    if a:
        return b
    return c
import json
def obj2json(obj):
    """Pretty-print ``obj`` as JSON: 2-space indent, sorted keys, and raw
    (non-escaped) unicode."""
    options = dict(ensure_ascii=False, sort_keys=True, indent=2)
    return json.dumps(obj, **options)
@stale_cache(ttr=1, ttl=30)
def readfile(fn, mode='rb', *a, **kw):
    """Return the entire contents of ``fn`` (cached: refresh after 1s, live
    for 30s). Text modes decode as UTF-8; binary modes return bytes."""
    if 'b' in mode:
        with open(fn, mode, *a, **kw) as handle:
            return handle.read()
    with open(fn, mode, *a, encoding='utf8', **kw) as handle:
        return handle.read()
def writefile(fn, data, mode='wb', encoding='utf8', *a, **kw):
    """Write ``data`` to ``fn``. Binary modes write raw bytes; text modes
    encode with ``encoding`` (UTF-8 by default)."""
    if 'b' in mode:
        with open(fn, mode, *a, **kw) as handle:
            handle.write(data)
    else:
        with open(fn, mode, *a, encoding=encoding, **kw) as handle:
            handle.write(data)
def removefile(fn):
    """Best-effort delete of ``fn``; failures are logged, never raised."""
    try:
        os.remove(fn)
    except Exception as e:
        # Deliberate swallow: callers treat removal as fire-and-forget.
        print_err(e)
        print_err('failed to remove', fn)
    else:
        return
import threading
def dispatch(f):
    # Run `f` asynchronously on the shared executor; returns a Future.
    # NOTE(review): `tpe` is presumably a ThreadPoolExecutor provided by one
    # of the star-imports above (cachy/colors) — confirm.
    return tpe.submit(f)
    # t = AppContextThreadMod(target=f, daemon=True)
    # # t = threading.Thread(target=f, daemon=True)
    # t.start()
def dispatch_with_retries(f):
    """Run ``f`` on the shared executor, retrying every 0.5 s until it stops
    raising; logs each retry and the eventual success. Returns a Future."""
    n = 0  # attempt counter shared with the worker via closure
    def wrapper():
        nonlocal n
        while 1:
            try:
                f()
            except Exception as e:
                print_err(e)
                n += 1
                time.sleep(0.5)
                print_up(f'{f.__name__}() retry #{n}')
            else:
                print_down(f'{f.__name__}() success on attempt #{n}')
                break
    return tpe.submit(wrapper)
def init_directory(d):
    """Create directory ``d``, logging (not raising) if it already exists."""
    try:
        os.mkdir(d)
    except FileExistsError as e:
        print_err('directory {} already exists.'.format(d), e)
    else:
        print_info('directory {} created.'.format(d))
def key(d, k):
    """Return ``d[k]`` when present, otherwise ``None``."""
    return d[k] if k in d else None
def intify(s, name=''):
    """Coerce ``s`` to int, returning 0 when conversion fails.

    :param name: debug label, kept for backward compatibility (used only by
        the commented-out diagnostic below).
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. int() raises only these two.
        if s:
            # print_err('intifys',s,name)
            pass
    return 0
def floatify(s):
    """Coerce ``s`` to float, returning 0.0 when conversion fails."""
    try:
        return float(s)
    except (TypeError, ValueError):
        # FIX: narrowed from a bare `except:` (see intify above).
        if s:
            pass
    return 0.
def get_environ(k):
    """Look up environment variable ``k`` (name is uppercased first);
    returns ``None`` when unset."""
    return os.environ.get(k.upper())
def clip(a, b):
    """Return a function clamping its argument into the interval [a, b]."""
    def _clip(c):
        bounded_below = max(a, c)
        return min(b, bounded_below)
    return _clip

clip01 = clip(0, 1)  # convenience clamp to the unit interval
import zlib
def calculate_checksum(bin):
    """Adler-32 checksum of ``bin`` as 4 big-endian bytes."""
    return zlib.adler32(bin).to_bytes(4, 'big')


def calculate_checksum_base64(bin):
    """The 4-byte checksum, base64-encoded as an ASCII string."""
    raw = calculate_checksum(bin)
    return base64.b64encode(raw).decode('ascii')


def calculate_checksum_base64_replaced(bin):
    """URL-safe variant of the base64 checksum: '+'→'-' and '/'→'_'."""
    encoded = calculate_checksum_base64(bin)
    return encoded.replace('+', '-').replace('/', '_')


def calculate_etag(bin):
    """HTTP ETag for a payload; alias of the URL-safe checksum."""
    return calculate_checksum_base64_replaced(bin)
# pw hashing
def bytes2hexstr(b):
    """bytes -> lowercase hex string."""
    return ba.b2a_hex(b).decode('ascii')


def hexstr2bytes(h):
    """lowercase/uppercase hex string -> bytes."""
    return ba.a2b_hex(h.encode('ascii'))


# https://nitratine.net/blog/post/how-to-hash-passwords-in-python/
def get_salt():
    """32 cryptographically random bytes for PBKDF2 salting."""
    return os.urandom(32)


def get_random_hex_string(b=8):
    """Random uppercase hex string of ``2*b`` characters."""
    return base64.b16encode(os.urandom(b)).decode('ascii')


def hash_pw(salt, string):
    """PBKDF2-HMAC-SHA256 (100k rounds) digest of an ASCII password."""
    return hashlib.pbkdf2_hmac(
        'sha256',
        string.encode('ascii'),
        salt,
        100000,
    )


# input string, output hash and salt
def hash_w_salt(string):
    """Hash ``string`` with a fresh random salt; returns (hash_hex, salt_hex)."""
    fresh_salt = get_salt()
    digest = hash_pw(fresh_salt, string)
    return bytes2hexstr(digest), bytes2hexstr(fresh_salt)


# input hash,salt,string, output comparison result
def check_hash_salt_pw(hashstr, saltstr, string):
    """True when ``string`` hashes to ``hashstr`` under ``saltstr``."""
    candidate = hash_pw(hexstr2bytes(saltstr), string)
    return candidate == hexstr2bytes(hashstr)
def timethis(stmt):
    """Micro-benchmark ``stmt`` with timeit.

    Names prefixed with '$' inside ``stmt`` are imported from __main__ for
    the timed run; the '$' markers are then stripped from the statement.
    """
    import re, timeit
    print('timing', stmt)
    broken = re.findall(f'\$([a-zA-Z][0-9a-zA-Z_\-]*)', stmt)
    stmt = stmt.replace('$','')
    setup = f"from __main__ import {','.join(broken)}"
    exec(setup)  # preheat
    exec(stmt)
    # autorange picks the repetition count; the callback prints per-run avg.
    timeit.Timer(stmt,
        setup=setup
    ).autorange(
        lambda a,b:print(f'{a} in {b:.4f}, avg: {b/a*1000_000:.4f}us'))
# if __name__ == '__main__':
# k = time.time()
# def hello():
# if time.time() - k < 2:
# raise Exception('nah')
#
# dispatch_with_retries(hello)
# time.sleep(4)
if __name__ == '__main__':
    # Ad-hoc micro-benchmark of the checksum helper on a ~2.2 MB payload.
    # NOTE(review): no '$' prefix on `toenc`/the function name means timethis
    # builds an empty `from __main__ import` — confirm this path still works.
    toenc = b"r12uf-398gy309ghh123r1"*100000
    timethis('calculate_checksum_base64_replaced(toenc)')
# everything time related
import datetime

# Short aliases for the datetime family used throughout this module.
dtdt = datetime.datetime
dtt = datetime.time
dtd = datetime.date
dtn = dtdt.now
dttz = datetime.timezone
dttd = datetime.timedelta

# default time parsing
def dtdt_from_stamp(stamp):
    # Parse an ISO-8601 timestamp string into a datetime.
    return dtdt.fromisoformat(stamp)
dfs = dtdt_from_stamp

def dfshk(stamp):
    # Parse and attach the module-wide working timezone (UTC+8, defined near
    # the bottom of this module). replace() assumes the stamp is naive.
    return dfs(stamp).replace(tzinfo=working_timezone)
# proper time formatting
# input: string iso timestamp
# output: string formatted time
def format_time(dtdt, s):
    # NOTE: the parameter shadows the module-level `dtdt` alias; here it is
    # an actual datetime instance, formatted with strftime pattern `s`.
    return dtdt.strftime(s)

# default time formatting
def format_time_iso(dtdt):
    # Seconds precision, truncated to 19 chars to drop any tz suffix.
    return dtdt.isoformat(timespec='seconds')[:19]
fti = format_time_iso

# Convenience formatters that take an ISO timestamp *string*.
format_time_datetime = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M')
format_time_datetime_second = lambda s: format_time(dfs(s), '%Y-%m-%d %H:%M:%S')
format_time_dateonly = lambda s: format_time(dfs(s), '%Y-%m-%d')
format_time_timeonly = lambda s: format_time(dfs(s), '%H:%M')
def days_since(ts):
    """Whole days elapsed from ISO timestamp *ts* until now (HK time)."""
    elapsed = dtn(working_timezone) - dfshk(ts)
    return elapsed.days
def days_between(ts0, ts1):
    """Absolute difference in day-counts between two ISO timestamps."""
    d0 = days_since(ts0)
    d1 = days_since(ts1)
    return abs(d0 - d1)
def seconds_since(ts):
    """Seconds elapsed from ISO timestamp *ts* until now (HK time)."""
    elapsed = dtn(working_timezone) - dfshk(ts)
    return elapsed.total_seconds()
def cap(x, mi, ma):
    """Clamp *x* into [mi, ma]; the upper bound wins if mi > ma."""
    lower_bounded = max(x, mi)
    return min(lower_bounded, ma)
working_timezone = dttz(dttd(hours=+8)) # Hong Kong (fixed UTC+8, no DST)
gmt_timezone = dttz(dttd(hours=0)) # GMT (fixed UTC+0)
def time_iso_now(dt=0):  # dt in seconds
    """ISO timestamp for the current HK time shifted by *dt* seconds."""
    shifted = dtn(working_timezone) + dttd(seconds=dt)
    return format_time_iso(shifted)
|
113319
|
from __future__ import print_function # Python2 support
import josepy
from cryptography.hazmat.primitives import serialization
def jwk_to_pem(pkey_jwk):
    """Convert a JWK-encoded RSA private key to PEM encoding.

    LetsEncrypt uses RSA private keys as account keys; certbot stores
    them as JWK (JSON Web Key) strings, while many non-certbot clients
    expect PEM. This helper supports such migrations.

    :param pkey_jwk: JSON Web Key(jwk) encoded RSA Private Key
    :type pkey_jwk: string
    :return: PEM encoded RSA Private Key
    :rtype: string
    """
    loaded = josepy.JWKRSA.json_loads(pkey_jwk)
    return loaded.key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
def pem_to_jwk(pkey_pem, format="string"):
    """Convert a PEM-encoded RSA private key to a JWK.

    LetsEncrypt uses RSA private keys as account keys; certbot stores
    them as JWK (JSON Web Key) strings, while many non-certbot clients
    store them PEM-encoded. This helper supports such migrations.

    :param pkey_pem: PEM encoded RSA Private Key
    :type pkey_pem: string
    :param format: Should the format be the JWK as a dict or JSON?, default string
    :type format: string, optional
    :return: JSON Web Key(jwk) encoded RSA Private Key
    :rtype: string or dict
    """
    if format not in ("string", "dict"):
        raise ValueError("`format` must be one of: string, dict")
    loaded = josepy.JWKRSA.load(pkey_pem)
    if format == "string":
        return loaded.json_dumps()
    # format == "dict" — use to_json(): fields_to_partial_json() would
    # omit the `kty` Key Identifier.
    return loaded.to_json()
# Certbot stores account data on disk using this pattern:
#   /etc/letsencrypt/accounts/##ACME_SERVER##/directory/##ACCOUNT##
# Each ACCOUNT folder has three files:
#   /private_key.json - JWK encoded RSA Private Key
#   /meta.json - metadata
#   /regr.json - registration information
# This example is only concerned with the `/private_key.json` file.
if __name__ == "__main__":
    import sys
    import json

    argv = sys.argv
    argc = len(argv)
    if argc == 2:
        # Convert the certbot JWK account key to PEM and display it.
        jwk_source = open(argv[1]).read()
        print(jwk_to_pem(jwk_source))
    elif argc == 3 and argv[2] == "roundtrip":
        # JWK -> PEM -> JWK must parse back to the original JSON.
        jwk_source = open(argv[1]).read()
        pem_result = jwk_to_pem(jwk_source)
        jwk_result = pem_to_jwk(pem_result)
        assert json.loads(jwk_result) == json.loads(jwk_source)
        print(pem_result)
        print("> roundtrip >")
        print(jwk_result)
    else:
        # Usage help for any other invocation.
        print("Error.")
        print("Invoke this script with a single argument: the path to a certbot key.")
        print(
            " python pem_conversion.py /etc/letsencrypt/accounts/acme-v02.api.letsencrypt.org/directory/##ACCOUNT##/private_key.json"
        )
        print(
            "Optional: add the string 'roundtrip' after the key to perform a roundtrip"
        )
        print(
            " python pem_conversion.py /etc/letsencrypt/accounts/acme-v02.api.letsencrypt.org/directory/##ACCOUNT##/private_key.json roundtrip"
        )
|
113380
|
import pytest
from django_oso.models import AuthorizedModel, authorize_model
from django_oso.oso import Oso, reset_oso
from django.core.management import call_command
from app.models import Post, User
@pytest.fixture(autouse=True)
def reset():
    """Reset oso's policy state before every test in this module."""
    reset_oso()
@pytest.fixture
def users():
    """Provide a manager plus one user reporting to that manager."""
    manager, _created = User.objects.get_or_create(username="manager")
    user, _created = User.objects.get_or_create(username="user", manager=manager)
    return {"user": user, "manager": manager}
@pytest.fixture
def posts(users):
    """Create a public and a private post for both the user and the manager.

    Returns a dict keyed '<access>_<creator>_post'.
    """
    created = {}
    for who in ("user", "manager"):
        for level in ("public", "private"):
            post, _new = Post.objects.get_or_create(
                contents=f"{level} {who} post",
                access_level=level,
                creator=users[who],
            )
            created[f"{level}_{who}_post"] = post
    return created
@pytest.mark.django_db
def test_user_access_to_posts(users, posts):
    """A plain user sees both public posts plus their own private one."""
    visible = Post.objects.authorize(None, actor=users["user"], action="GET")
    assert visible.count() == 3
    for key in ("public_user_post", "private_user_post", "public_manager_post"):
        assert posts[key] in visible
@pytest.mark.django_db
def test_manager_access_to_posts(users, posts):
    """A manager sees all four posts, including the report's private one."""
    visible = Post.objects.authorize(
        None, actor=users["manager"], action="GET"
    )
    assert visible.count() == 4
    for key in ("public_user_post", "private_user_post",
                "public_manager_post", "private_manager_post"):
        assert posts[key] in visible
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.