188519
from collections import namedtuple
from primitives import primitives
# ----- Expression types -----
fundef = namedtuple('fundef', ('name', 'argnames', 'body'))
vardef = namedtuple('vardef', ('name', 'exp'))
call = namedtuple('call', ('funname', 'args'))
kyif = namedtuple('kyif', ('cond', 'iftrue', 'iffalse'))
begin = namedtuple('begin', ('exps',))
kyfun = namedtuple('kyfun', ('argnames', 'body', 'env'))
gradfun = namedtuple('gradfun', ('fun', 'argnum', 'env'))
grad = namedtuple('grad', ('funname', 'argnum'))
tape = namedtuple('tape', ('value', 'env'))
primitive = namedtuple('primitive', ('fun', 'grad'))
# ----- Evaluator -----
def kyeval(exp, env):
if isinstance(exp, str): # variable lookup
return env[exp] if exp in env else globalenv[exp]
elif isinstance(exp, vardef):
env[exp.name] = kyeval(exp.exp, env)
elif isinstance(exp, fundef):
env[exp.name] = kyfun(exp.argnames, exp.body, env)
elif isinstance(exp, grad):
return gradfun(env[exp.funname], exp.argnum, env)
elif isinstance(exp, kyif):
return kyeval(exp.iftrue if kyeval(exp.cond, env) else exp.iffalse, env)
elif isinstance(exp, call):
return kyapply(kyeval(exp.funname, env), [kyeval(arg, env) for arg in exp.args])
elif isinstance(exp, begin):
return [kyeval(subexp, env) for subexp in exp.exps][-1]
else:
return exp
def kyapply(fun, args):
localenv = {'outgrad' : kyfun((), 0.0, {})}
if isinstance(fun, kyfun):
localenv.update(fun.env)
localenv.update(zip(fun.argnames, args))
return kyeval(fun.body, localenv)
elif isinstance(fun, gradfun):
args[fun.argnum] = tape(args[fun.argnum], localenv)
getval(kyapply(fun.fun, args), 1.0, localenv)
return kyapply(localenv['outgrad'], ())
elif any([isinstance(arg, tape) for arg in args]):
argvals = [getval(arg, grad, localenv) for arg, grad in zip(args, fun.grad)]
localenv.update({'arg_' + str(i) : val for i, val in enumerate(argvals)})
localenv['result'] = kyapply(fun, argvals)
return tape(localenv['result'], localenv)
else:
return fun.fun(*args)
def getval(arg, grad, localenv):
if isinstance(arg, tape):
arg.env['outgrad'] = kyfun((), call('add',
(call(kyfun((), grad, localenv), ()), call(arg.env['outgrad'], ()))), {})
return arg.value
else:
return arg
# ----- Parser -----
def parse(string):
s_list = string.replace('(', ' ( ').replace(')', ' ), ').split()
s_list = [s if s in ['(', '),'] else "'" + s + "'," for s in s_list]
tuples = eval("".join(['("begin", '] + s_list + [')']))
return kyexp(tuples)
def kyexp(obj):
tag = obj[0]
if isinstance(obj, str):
return int(obj) if obj.isdigit() else obj
elif tag == 'def' and isinstance(obj[1], tuple):
        return fundef(obj[1][0], obj[1][1:], begin(tuple(map(kyexp, obj[2:]))))
elif tag == 'def':
return vardef(obj[1], kyexp(obj[2]))
elif tag == 'grad':
return grad(obj[1], int(obj[2]))
elif tag == 'if':
return kyif(*map(kyexp, obj[1:4]))
elif tag == 'begin':
        return begin(tuple(map(kyexp, obj[1:])))
else:
return call(tag, tuple(map(kyexp, obj[1:])))
globalenv = {name : primitive(val[0], [parse(s) for s in val[1]])
             for name, val in primitives.items()}
# ----- Python interface -----
def get_function(string, fun_name, global_vars={}):
env = global_vars.copy()
kyeval(parse(string), env)
return lambda *args : kyapply(env[fun_name], list(args))
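# Hedged usage sketch: assumes the (unshown) primitives module provides 'mul'.
# `grad` differentiates a defined function with respect to argument 0.
# dsquare = get_function(
#     "(def (square x) (mul x x)) (def dsquare (grad square 0))", 'dsquare')
# dsquare(3.0)  # expected to evaluate d(x*x)/dx at 3.0, i.e. 6.0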
188597
import gc
import pytest
from msl.loadlib import Client64
from msl.examples.loadlib import Cpp64
from conftest import skipif_no_server32
@skipif_no_server32
def test_unclosed_pipe_warning_1(recwarn):
# recwarn is a built-in pytest fixture that records all warnings emitted by test functions
    # The following warnings should not be written to stderr for the unclosed subprocess PIPEs:
# sys:1: ResourceWarning: unclosed file <_io.BufferedReader name=3>
# sys:1: ResourceWarning: unclosed file <_io.BufferedReader name=4>
Cpp64()
gc.collect()
assert recwarn.list == []
@skipif_no_server32
def test_unclosed_pipe_warning_2(recwarn):
for _ in range(3):
cpp = Cpp64()
out, err = cpp.shutdown_server32()
for _ in range(10):
out.close()
err.close()
del cpp
gc.collect()
assert recwarn.list == []
def test_unraisable_exception_warning():
# The point of this test is to verify that the PytestUnraisableExceptionWarning
# does not get written to the terminal at the end of this test.
#
# This test will always pass (so it is deceptive) but what the user must
# pay attention to is whether a warning message similar to the following
# is displayed in the "warnings summary" of pytest:
#
# Exception ignored in: <function Client64.__del__ at 0x000001BFBD402B80>
# Traceback (most recent call last):
# File "...client64.py", line 368, in __del__
# if self._conn is not None:
# AttributeError: 'DivZero' object has no attribute '_conn'
#
# For more details see:
# https://docs.pytest.org/en/stable/usage.html#warning-about-unraisable-exceptions-and-unhandled-thread-exceptions
class DivZero(Client64):
def __init__(self):
1/0
with pytest.raises(ZeroDivisionError):
DivZero()
188640
import abc
class Rule(abc.ABC):
@abc.abstractmethod
def check_status(self):
pass
@abc.abstractmethod
def take_action(self):
pass
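# Illustrative sketch of a concrete Rule; the class name and its behavior are
# assumptions, not part of the original module.
class PrintingRule(Rule):
    def check_status(self):
        # Report whether the rule's condition currently holds.
        return True
    def take_action(self):
        # React to the condition; here we just log it.
        print("condition met, taking action")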
188666
from __future__ import print_function
import networkx as nx
import argparse
import multiprocessing
from rdkit import Chem
NUM_PROCESSES = 8
def get_arguments():
parser = argparse.ArgumentParser(description='Convert an rdkit Mol to nx graph, preserving chemical attributes')
    parser.add_argument('smiles', type=str, help='The input file containing SMILES strings representing the input molecules.')
parser.add_argument('nx_pickle', type=str, help='The output file containing sequence of pickled nx graphs')
parser.add_argument('--num_processes', type=int, default=NUM_PROCESSES, help='The number of concurrent processes to use when converting.')
return parser.parse_args()
def mol_to_nx(mol):
G = nx.Graph()
for atom in mol.GetAtoms():
G.add_node(atom.GetIdx(),
atomic_num=atom.GetAtomicNum(),
formal_charge=atom.GetFormalCharge(),
chiral_tag=atom.GetChiralTag(),
hybridization=atom.GetHybridization(),
num_explicit_hs=atom.GetNumExplicitHs(),
is_aromatic=atom.GetIsAromatic())
for bond in mol.GetBonds():
G.add_edge(bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx(),
bond_type=bond.GetBondType())
return G
def nx_to_mol(G):
mol = Chem.RWMol()
atomic_nums = nx.get_node_attributes(G, 'atomic_num')
chiral_tags = nx.get_node_attributes(G, 'chiral_tag')
formal_charges = nx.get_node_attributes(G, 'formal_charge')
node_is_aromatics = nx.get_node_attributes(G, 'is_aromatic')
node_hybridizations = nx.get_node_attributes(G, 'hybridization')
num_explicit_hss = nx.get_node_attributes(G, 'num_explicit_hs')
node_to_idx = {}
for node in G.nodes():
a=Chem.Atom(atomic_nums[node])
a.SetChiralTag(chiral_tags[node])
a.SetFormalCharge(formal_charges[node])
a.SetIsAromatic(node_is_aromatics[node])
a.SetHybridization(node_hybridizations[node])
a.SetNumExplicitHs(num_explicit_hss[node])
idx = mol.AddAtom(a)
node_to_idx[node] = idx
bond_types = nx.get_edge_attributes(G, 'bond_type')
for edge in G.edges():
first, second = edge
ifirst = node_to_idx[first]
isecond = node_to_idx[second]
bond_type = bond_types[first, second]
mol.AddBond(ifirst, isecond, bond_type)
Chem.SanitizeMol(mol)
return mol
def do_all(smiles, validate=False):
mol = Chem.MolFromSmiles(smiles.strip())
can_smi = Chem.MolToSmiles(mol)
G = mol_to_nx(mol)
if validate:
mol = nx_to_mol(G)
new_smi = Chem.MolToSmiles(mol)
        assert new_smi == can_smi
return G
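# Round-trip sketch (benzene as an assumed example input):
# G = do_all('c1ccccc1', validate=True)
# G.number_of_nodes()  # 6 aromatic carbons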
def main():
args = get_arguments()
i = open(args.smiles)
p = multiprocessing.Pool(args.num_processes)
    results = p.map(do_all, i)  # iterate the file directly; xreadlines() is Python 2 only
    o = open(args.nx_pickle, 'wb')  # gpickle output is binary
for result in results:
nx.write_gpickle(result, o)
o.close()
if __name__ == '__main__':
main()
188692
from DaPy.core import SeriesSet, is_iter, Series
from DaPy.matlib import describe
from collections import namedtuple
from operator import itemgetter
from warnings import warn
__all__ = ['_label', 'score_binary_clf']
_binary_perf_result = namedtuple('binary_clf', ['TP', 'FN', 'FP', 'TN'])
def plot_reg(y_hat, y, res):
try:
from matplotlib import pyplot as plt
except ImportError:
warn('DaPy uses `matplotlib` to draw pictures, try: pip install matplotlib.')
return None
plt.subplot(311)
plt.title('Prediction of Model')
plt.xlabel('Samples')
plt.ylabel('Prediction')
plt.plot(y.T.tolist()[0], color='blue', alpha=0.65, label='Actual')
plt.plot(y_hat.tolist()[0], color='red', alpha=0.7, label='Predict')
plt.legend()
plt.subplot(312)
plt.title('Distribution of Residual')
plt.xlabel('Residual')
plt.ylabel('Frequency')
plt.hist(res, max(10, len(y_hat) // 5), color='blue', alpha=0.6)
plt.subplot(313)
plt.title('Residual')
plt.xlabel('Samples')
plt.ylabel('Residual')
sigma = [describe(res.T.tolist()[0]).Sn] * y_hat.shape[1]
plt.plot(res, color='blue', alpha=0.6)
plt.plot([0] * y_hat.shape[1], color='black', linestyle='--', alpha=0.5)
plt.plot(sigma, color='black', alpha=0.25, linestyle='--')
    plt.plot([-x for x in sigma], color='black', alpha=0.25, linestyle='--')
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95,
wspace=0.2, hspace=0.8)
return plt
188703
class AtomClass:
def __init__(self, Velocity, Element = 'C', Mass = 12.0):
self.Velocity = Velocity
self.Element = Element
self.Mass = Mass
def Momentum(self):
return self.Velocity * self.Mass
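# Usage sketch: with the default carbon mass of 12.0,
# AtomClass(Velocity=2.5).Momentum() returns 30.0.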
188710
import pandas as pd
class Bookkeeper:
def __init__(self):
# Instantiate a new data frame.
self.metric = pd.DataFrame(
columns=["Strategy Name", "Percent Change", "Trades Made"]
)
def log(self):
# Log the data frame while ignoring the index.
print(self.metric.to_string(index=False))
    def handler(self, data):
        # Append new data to the data frame.
        # DataFrame.append was removed in pandas 2.0, so build a one-row
        # frame and concatenate instead.
        row = pd.DataFrame(
            [{
                "Strategy Name": data[0],
                "Percent Change": "{:.2f}%".format(data[1]),
                "Trades Made": data[2],
            }]
        )
        self.metric = pd.concat([self.metric, row], ignore_index=True)
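# Usage sketch (values are illustrative):
# book = Bookkeeper()
# book.handler(("momentum", 4.2, 17))
# book.log()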
188746
from . import _TestHarness, HarnessConfig
import json
from tempfile import NamedTemporaryFile
from pydantic_cli.examples.simple_with_json_config import Opts, runner
class TestExample(_TestHarness[Opts]):
CONFIG = HarnessConfig(Opts, runner)
def _util(self, d, more_args):
with NamedTemporaryFile(mode="w", delete=True) as f:
json.dump(d, f)
f.flush()
args = ["--json-training", str(f.name)] + more_args
self.run_config(args)
def test_simple_json(self):
opt = Opts(
hdf_file="/path/to/file.hdf5",
max_records=12,
min_filter_score=1.024,
alpha=1.234,
beta=9.854,
)
self._util(opt.dict(), [])
def test_simple_partial_json(self):
d = dict(max_records=12, min_filter_score=1.024, alpha=1.234, beta=9.854)
self._util(d, ["--hdf_file", "/path/to/file.hdf5"])
188763
import unittest
from tir import Webapp
class TSSMANAGERMONITOR(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Webapp address and browser name
        cls.oHelper = Webapp()
        # Using the test user
        cls.oHelper.SetTIRConfig("User", "teste")
        cls.oHelper.SetTIRConfig("Password", "<PASSWORD>")
        # Initialization parameters
        cls.oHelper.SetupTSS("TSSMONITOR", "SPED")
def test_TSSMANAGERMONITOR01_CT001(self):
self.oHelper.SetButton("Eventos")
self.oHelper.SetButton("NF-e")
self.oHelper.SetButton("Cancelar")
self.oHelper.SetButton("Fiscal")
self.oHelper.SetButton("NFS-e")
self.oHelper.SetButton("Cancelar")
self.oHelper.SetButton("NF-e")
self.oHelper.SetButton("Cancelar")
self.oHelper.SetButton("CT-e")
self.oHelper.SetButton("Cancelar")
self.oHelper.SetButton("Documentos")
self.oHelper.SetButton("EDI")
self.oHelper.SetButton("Sair")
self.oHelper.SetButton("Sim")
self.oHelper.AssertTrue()
    @classmethod
    def tearDownClass(cls):
        cls.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
188767
from random import randint
from typing import Any
from typing import Dict
from retrying import retry
import apysc as ap
from apysc._expression import var_names
class TestTimerEvent:
def on_timer(self, e: ap.Event, options: Dict[str, Any]) -> None:
"""
The handler would be called from a timer.
Parameters
----------
e : Event
Event instance.
options : dict
Optional arguments dictionary.
"""
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test___init__(self) -> None:
timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33)
event: ap.TimerEvent = ap.TimerEvent(this=timer)
assert event.variable_name.startswith(f'{var_names.TIMER_EVENT}_')
assert event._this == timer
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_this(self) -> None:
timer: ap.Timer = ap.Timer(handler=self.on_timer, delay=33)
event: ap.TimerEvent = ap.TimerEvent(this=timer)
assert event.this == timer
188783
import pyb
import stm
@micropython.asm_thumb
def _write_packet(r0, r1, r2): # uart(r0) buf(r1) len(r2)
movw(r3, 0xffff) # uart(r0) &= 0x7fffffff
movt(r3, 0x7fff) #
and_(r0, r3) #
# Disable the Receiver
ldr(r3, [r0, stm.USART_CR1]) # uart->CR1 &= ~USART_CR1_RE
mov(r4, 0x04) #
bic(r3, r4) #
str(r3, [r0, stm.USART_CR1]) #
add(r2, r2, r1) # buf_end(r2) = &buf(r1)[len(r2)]
sub(r2, 1) # buf_end--
# loop
label(loop)
cmp(r1, r2)
bhi(endloop) # branch if buf > buf_end
# Wait for the Transmit Data Register to be Empty
mov(r4, 0x80) # while ((uart->SR & USART_SR_TXE) == 0) {
# wait_txe # ;
label(wait_txe) #
ldr(r3, [r0, stm.USART_SR]) #
tst(r3, r4) #
beq(wait_txe) # }
# Disable interrupts from the time that we write the last character
# until the tx complete bit is set. This ensures that we re-enable
# the Rx as soon as possible after the last character has left
cmp(r1, r2)
bne(write_dr) # if buf == buf_end
cpsid(i) # disable_irq
# write_dr
label(write_dr)
# Write one byte to the UART
ldrb(r3, [r1, 0]) # uart->DR = *buf++
add(r1, 1) #
str(r3, [r0, stm.USART_DR]) #
b(loop)
# endloop
label(endloop)
# Wait for Transmit Complete (i.e the last bit of transmitted data has left the shift register)
mov(r4, 0x40) # while ((uart->SR & USART_SR_TC) == 0) {
# wait_tx_complete # ;
label(wait_tx_complete) #
ldr(r3, [r0, stm.USART_SR]) #
tst(r3, r4) #
    beq(wait_tx_complete)       # }
# Re-enable the receiver
ldr(r3, [r0, stm.USART_CR1]) # uart->CR1 |= USART_CR1_RE
mov(r4, 0x04) #
orr(r3, r4) #
str(r3, [r0, stm.USART_CR1]) #
cpsie(i) # enable_irq
def inspect(f, nbytes=16):
import stm
import array
import ubinascii
@micropython.asm_thumb
def dummy():
pass
if type(f) != type(dummy):
raise ValueError('expecting an inline-assembler function')
baddr = bytes(array.array('O', [f]))
addr = baddr[0] | baddr[1] << 8 | baddr[2] << 16 | baddr[3] << 24
print('function object at: 0x%08x' % addr)
print('number of args: %u' % stm.mem32[addr + 4])
code_addr = stm.mem32[addr + 8]
print('machine code at: 0x%08x' % code_addr)
print('----------')
print('import binascii')
print("with open('code.bin', 'wb') as f:")
hex_str = ubinascii.hexlify(bytearray([stm.mem8[code_addr + i] for i in range(nbytes)]))
print(" f.write(binascii.unhexlify(%s))" % hex_str)
print('----------')
def test():
inspect(_write_packet, 64)
pyb.repl_uart(pyb.UART(4, 115200))
uart = pyb.UART(6, 1000000)
buf = bytearray(b'123456')
_write_packet(stm.USART6 | 0x80000000, buf, len(buf))
test()
188786
import json
import os
import socket
import sys
import time
import subprocess
import re
import html
from collections import deque
from ipykernel.kernelbase import Kernel
from dyalog_kernel import __version__
from notebook.services.config import ConfigManager
if sys.platform.lower().startswith('win'):
from winreg import *
handShake1 = b'\x00\x00\x00\x1cRIDESupportedProtocols=2'
handShake2 = b'\x00\x00\x00\x17RIDEUsingProtocol=2'
BUFFER_SIZE = 1024
DYALOG_HOST = '127.0.0.1'
DYALOG_PORT = 4502
TCP_TIMEOUT = 0.1
# incremented to find the first available port
_port = DYALOG_PORT
# number of seconds to wait for the initial RIDE handshake; slower systems may need a larger value to give Dyalog a chance to start
RIDE_INIT_CONNECT_TIME_OUT = 3 # seconds
SUSPEND = False # Can be set by %suspend on/off.
# debugging flag
DYALOGJUPYTERKERNELDEBUG = os.environ.get("DYALOGJUPYTERKERNELDEBUG", False)
dq = deque()
def debug(s):
if DYALOGJUPYTERKERNELDEBUG:
writeln(s)
def writeln(s):
tmp_stdout = sys.stdout
sys.stdout = sys.__stdout__
print(s)
sys.stdout = tmp_stdout
class DyalogKernel(Kernel):
implementation = 'Dyalog'
implementation_version = __version__
language = 'APL'
language_version = '0.1'
language_info = {
'name': 'APL',
'mimetype': 'text/apl',
'file_extension': '.apl'
}
banner = "Dyalog APL kernel"
connected = False
    # To avoid unneeded receive requests, cap the session print width here
RIDE_PW = 32767
dyalog_subprocess = None
def out_error(self, s):
_content = {
'output_type': 'stream',
            'name': 'stderr',  # stdout or stderr
'text': s
}
self.send_response(self.iopub_socket, 'stream', _content)
def out_png(self, s):
_content = {
'output_type': 'display_data',
'data': {
#'text/plain' : ['multiline text data'],
'image/png': s,
#'application/json':{
# JSON data is included as-is
# 'json':'data',
#},
},
'metadata': {
'image/svg': {
'width': 120,
'height': 80,
},
},
}
self.send_response(self.iopub_socket, 'display_data', _content)
def out_html(self, s):
_content = {
# 'output_type': 'display_data',
'data': {'text/html': s},
'execution_count': self.execution_count,
'metadata': ''
# 'transient': ''
}
self.send_response(self.iopub_socket, 'execute_result', _content)
def out_result(self, s):
# injecting css: white-space:pre. Means no wrapping, RIDE SetPW will take care about line wrapping
html_start = '<span style="white-space:pre; font-family: monospace">'
html_end = '</span>'
_content = {
# 'output_type': 'display_data',
# 'data': {'text/plain': s},
'data': {'text/html': html_start + html.escape(s, False) + html_end},
'execution_count': self.execution_count,
'metadata': {},
# 'transient': ''
}
self.send_response(self.iopub_socket, 'execute_result', _content)
def out_stream(self, s):
_content = {
'output_type': 'stream',
            'name': 'stdout',  # stdout or stderr
'text': s
}
self.send_response(self.iopub_socket, 'stream', _content)
def dyalog_ride_connect(self):
timeout = time.time() + RIDE_INIT_CONNECT_TIME_OUT
while True:
self.dyalogTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dyalogTCP.settimeout(TCP_TIMEOUT)
time.sleep(0.5) # solves an issue with connecting to 17.0 on Linux
try:
self.dyalogTCP.connect((DYALOG_HOST, self._port))
break
except socket.error as msg:
# debug(msg)
self.dyalogTCP.close()
if time.time() > timeout:
break
#fcntl.fcntl(self.dyalogTCP, fcntl.F_SETFL, os.O_NONBLOCK)
received = ['', '']
self.ride_receive_wait()
if len(dq) > 0:
received = dq.pop()
if received[0] == handShake1[8:].decode("utf-8"):
# handshake1
self.dyalogTCP.sendall(handShake1)
debug("SEND " + handShake1[8:].decode("utf-8"))
# handshake2
self.ride_receive()
if len(dq) > 0:
received = dq.pop()
if received[0] == handShake2[8:].decode("utf-8"):
# handshake2
self.dyalogTCP.sendall(handShake2)
debug("SEND " + handShake2[8:].decode("utf-8"))
d = ["Identify", {"identity": 1}]
self.ride_send(d)
d = ["Connect", {"remoteId": 2}]
self.ride_send(d)
d = ["GetWindowLayout", {}]
self.ride_send(d)
d = ["SetPW", {"pw": self.RIDE_PW}]
self.ride_send(d)
self.ride_receive_wait()
dq.clear()
self.connected = True
def __init__(self, **kwargs):
# path to connection_file. In case we need it in the close future
#from ipykernel import get_connection_file
#s = get_connection_file()
# debug("########## " + str(s))
self._port = DYALOG_PORT
# lets find first available port, starting from default DYALOG_PORT (:4502)
# this makes sense only if Dyalog APL and Jupyter executables are on the same host (localhost)
if DYALOG_HOST == '127.0.0.1':
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(
(str(DYALOG_HOST).strip(), self._port))
sock.close()
#port is available
if result != 0:
break
else:
# try next port
self._port += 1
# if Dyalog APL and Jupyter executables are on the same host (localhost) let's start instance of Dyalog
if DYALOG_HOST == '127.0.0.1':
if sys.platform.lower().startswith('win'):
# Windows. Let's find an installed version to use
hkcuReg = ConnectRegistry(None, HKEY_CURRENT_USER)
dyalogKey = OpenKey(hkcuReg, r"SOFTWARE\Dyalog")
installCount = QueryInfoKey(dyalogKey)[0]
for n in range(installCount):
currInstall = EnumKey(dyalogKey, installCount - (n + 1))
if currInstall[:12] == "Dyalog APL/W":
break
lastKey = OpenKey(hkcuReg, r"SOFTWARE\\Dyalog\\" + currInstall)
dyalogPath = QueryValueEx(lastKey, "dyalog")[
0] + "\\dyalog.exe"
CloseKey(dyalogKey)
CloseKey(lastKey)
self.dyalog_subprocess = subprocess.Popen([dyalogPath, "RIDE_SPAWNED=1", 'RIDE_INIT=SERVE::' + str(
self._port).strip(), 'LOG_FILE=nul', os.path.dirname(os.path.abspath(__file__)) + '/init.dws'])
else:
# linux, darwin... etc
dyalog_env = os.environ.copy()
dyalog_env['RIDE_INIT'] = 'SERVE:*:' + str(self._port).strip()
dyalog_env['RIDE_SPAWNED'] = '1'
dyalog_env['ENABLE_CEF'] = '0'
dyalog_env['LOG_FILE'] = '/dev/null'
if sys.platform.lower() == "darwin":
for d in sorted(os.listdir('/Applications')):
                    if re.match(r'^Dyalog-\d+\.\d+\.app$', d):
dyalog = '/Applications/' + d + '/Contents/Resources/Dyalog/mapl'
else:
for v in sorted(os.listdir('/opt/mdyalog')):
                    if re.match(r'^\d+\.\d+$', v):
dyalog = '/opt/mdyalog/' + v + '/'
dyalog += sorted(os.listdir(dyalog))[-1] + '/'
dyalog += sorted(os.listdir(dyalog)
)[-1] + '/' + 'mapl'
self.dyalog_subprocess = subprocess.Popen([dyalog, '+s', '-q', os.path.dirname(os.path.abspath(
__file__)) + '/init.dws'], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, env=dyalog_env)
# disable auto closing of brackets/quotation marks. Not very useful in APL
# Pass None instead of False to restore auto-closing feature
c = ConfigManager()
c.update('notebook', {'CodeCell': {
'cm_config': {'autoCloseBrackets': False}}})
Kernel.__init__(self, **kwargs)
self.dyalog_ride_connect()
    def recv_all(self, msg_len):
        msg = b''
        while msg_len:
            part = self.dyalogTCP.recv(msg_len)
            if not part:
                # the connection was closed before the full message arrived
                break
            msg += part
            msg_len -= len(part)
        return msg
# return False if no RIDE message has been received
def ride_receive(self):
data = b''
rcv = False
while True:
try:
head = self.recv_all(8)
a, b, c, d = head[:4]
msg_len = a * 0x1000000 + b * 0x10000 + c * 0x100 + d - 8
if head[4:8] == b'RIDE':
rideMessage = self.recv_all(msg_len)
try:
rideMessage = rideMessage.decode("utf-8")
except:
debug("JSON parse error")
return False
rideMessage = rideMessage.replace('\n', '\\n')
rideMessage = rideMessage.replace('\r', '\\r')
rcv = True
try:
json_data = json.loads(rideMessage)
except:
                        # what's been received is not RIDE's standard JSON; it must be one of the first two string-type handshake messages
json_data = []
json_data.append(rideMessage)
json_data.append("String")
debug("RECV " + rideMessage)
dq.appendleft(json_data)
else:
debug("Invalid Ride message")
return False
except socket.timeout:
debug('no data')
break
return rcv
# Like ride_receive but will keep trying until it gets data
def ride_receive_wait(self):
while True:
if self.ride_receive():
break
# d is python list, json.
def ride_send(self, d):
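        # the leading 'XXXX' is a 4-byte placeholder for the big-endian
        # message length, patched into _data below before sending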
json_str = 'XXXXRIDE' + json.dumps(d, separators=(',', ':'))
# json, fix all \r and \n. They should be escaped appropriately for JSON
json_str = json_str.replace('\n', '\\n')
json_str = json_str.replace('\r', '\\r')
_data = bytearray(str.encode(json_str))
l = len(_data)
_data[0] = (l >> 24) & 0xff
_data[1] = (l >> 16) & 0xff
_data[2] = (l >> 8) & 0xff
_data[3] = l & 0xff
self.dyalogTCP.sendall(_data)
debug("SEND " + _data[8:].decode("utf-8"))
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=True):
global SUSPEND
code = code.strip()
if not silent:
if self.connected:
lines = code.split('\n')
                match = re.search(r'^%suspend\s+(\w+)$', lines[0].lower(), re.IGNORECASE)
                nsmatch = re.match(r'^\s*:namespace|:class|:interface', lines[0].lower())
if match:
suspend = match.group(1)
if suspend == 'on':
SUSPEND = True
elif suspend == 'off':
SUSPEND = False
self.ride_send(["GetSIStack", {}])
self.ride_receive_wait()
stack = dq.pop()[1].get('stack')
if stack:
self.execute_line("→\n" * len(stack))
self.ride_receive_wait()
dq.clear()
else:
self.out_error(
'JUPYTER NOTEBOOK: UNDEFINED ARGUMENT TO %suspend, USE EITHER on OR off')
lines = lines[1:]
elif re.match('^\\s*∇', lines[0]):
if not re.match('\\s*∇$', lines[-1]):
self.out_error('DEFN ERROR: Missing closing ∇')
else:
lines[0] = re.sub('^\\s*∇', '', lines[0])
lines = lines[:-1]
self.define_function(lines)
lines = []
elif lines[0].lower() == ']dinput':
self.define_function(lines[1:])
lines = []
elif nsmatch:
if not re.match(":end"+re.sub("^\\s*:",'',nsmatch.group(0)),lines[-1].lower()):
self.out_error("DEFN ERROR: No "+":End"+re.sub("^\\s*:",'',nsmatch.group(0)))
lines = []
else:
self.define_function(lines)
lines = []
try:
                    # the Windows interpreter can only handle ~125 characters at a time, so we do one line at a time
for line in lines:
line = line + '\n'
self.execute_line(line)
dq.clear()
PROMPT_AVAILABLE = False
err = False
data_collection = ''
# as long as we have queue dq or RIDE PROMPT is not available... do loop
while (len(dq) > 0 or not PROMPT_AVAILABLE):
received = ['', '']
if len(dq) == 0:
self.ride_receive_wait()
received = dq.pop()
if received[0] == 'AppendSessionOutput':
if not PROMPT_AVAILABLE:
data_collection = data_collection + \
received[1].get('result')
elif received[0] == 'SetPromptType':
pt = received[1].get('type')
if pt == 0:
PROMPT_AVAILABLE = False
elif pt == 1:
PROMPT_AVAILABLE = True
if len(data_collection) > 0:
if err:
self.out_error(data_collection)
else:
self.out_result(data_collection)
data_collection = ''
err = False
elif pt == 2:
self.execute_line("→\n")
raise ValueError(
'JUPYTER NOTEBOOK: Input through ⎕ is not supported')
elif pt == 4:
time.sleep(1)
raise ValueError(
'JUPYTER NOTEBOOK: Input through ⍞ is not supported')
elif received[0] == 'ShowHTML':
self.out_html(received[1].get('html'))
elif received[0] == 'HadError':
# in case of error, set the flag err
# it should be reset back to False only when prompt is available again.
err = True
# actually we don't want echo
elif received[0] == 'OpenWindow':
if not SUSPEND:
self.execute_line("→\n")
elif received[0] == 'EchoInput':
pass
elif received[0] == 'OptionsDialog':
self.ride_send(
["ReplyOptionsDialog", {"index": -1, "token": received[1].get('token')}])
# self.pa(received[1].get('input'))
except KeyboardInterrupt:
self.ride_send(["StrongInterrupt", {}])
if not SUSPEND:
self.execute_line("→\n")
self.out_error('INTERRUPT')
self.ride_receive_wait()
dq.clear()
except ValueError as err:
self.out_error(str(err))
self.ride_receive_wait()
dq.clear()
else:
self.out_error('Dyalog APL not connected')
reply_content = {'status': 'ok',
# The base class increments the execution count
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
return reply_content
def execute_line(self, line):
self.ride_send(["Execute", {"trace": 0, "text": line}])
def define_function(self, lines):
self.execute_line("⎕SE.Dyalog.ipyFn←''\n")
for line in lines:
quoted = "'" + line.replace("'", "''") + "'"
self.execute_line("⎕SE.Dyalog.ipyFn,←⊂," + quoted + "\n")
self.ride_receive_wait()
dq.clear()
if re.match('^\\s*:namespace|:class|:interface',lines[0].lower()):
self.execute_line("{0::'DEFN ERROR'⋄⎕FIX ⍵}⎕SE.Dyalog.ipyFn\n")
else:
self.execute_line("{''≢0⍴r←⎕FX ⍵:511 ⎕SIGNAL⍨'DEFN ERROR: Issue on line ',⍕r}⎕SE.Dyalog.ipyFn\n")
self.execute_line("⎕EX'⎕SE.Dyalog.ipyFn'\n")
self.ride_receive_wait()
while len(dq) > 0:
msg = dq.pop()
if msg == ["HadError", {"error": 511, "dmx": 0}]:
msg = dq.pop()
if msg[0] == 'AppendSessionOutput':
self.out_error(msg[1].get('result'))
def do_shutdown(self, restart):
# shutdown Dyalog executable only if Jupyter kernel has started it.
if DYALOG_HOST == '127.0.0.1':
if self.connected:
self.ride_send(["Exit", {"code": 0}])
# time.sleep(2)
# if self.dyalog_subprocess:
# self.dyalog_subprocess.kill()
self.dyalogTCP.close()
self.connected = False
return {'status': 'ok', 'restart': restart}
188800
from wtforms import Form, StringField
from wtforms.validators import InputRequired
class SimpleForm(Form):
    """ The basic test form. """
    first_name = StringField("First name", validators=[InputRequired()])
    last_name = StringField("Last name", validators=[InputRequired()])
188809
import json
import time
import requests
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from .base import Connector as ConnectorBase
class Connector(ConnectorBase):
def __init__(self, connector, message, type, request=None):
self.connector = connector
self.type = type
if settings.DEBUG:
print("TYPE: ", self.type)
self.config = self.connector.config
# self.message must be a dictionary
if message:
self.message = json.loads(message)
else:
self.message = None
self.request = request
self.message_object = None
self.rocket = None
self.room = None
def get_request_session(self):
s = requests.Session()
s.headers = {"content-type": "application/json"}
if self.connector.config.get("api_key"):
s.headers.update({"api_key": self.connector.config["api_key"]})
return s
def incoming(self):
"""
this method will process the incoming messages
        and adjust whatever is necessary to output to Rocket.Chat
"""
if self.message.get("event") == "onMessage":
if self.message.get("event") == "onMessage":
# No Group Messages
if not self.message.get("data", {}).get("isGroupMsg"):
# create message
message, created = self.register_message()
self.rocket = self.get_rocket_client()
if not self.rocket:
return HttpResponse("Rocket Down!", status=503)
# get a room
room = self.get_room()
if room:
if self.message.get("data", {}).get("isMedia"):
print("treat media")
else:
message = self.get_message_body()
deliver = self.outcome_text(room.room_id, message)
if settings.DEBUG:
print("DELIVER OF TEXT MESSAGE:", deliver.ok)
# get a room
room = self.get_room()
if self.message.get("event") == "OnQRCode":
self.rocket = self.get_rocket_client()
if not self.rocket:
return HttpResponse("Rocket Down!", status=503)
self.outcome_qrbase64(self.message["data"]["base64Qrimg"])
self.outcome_admin_message(
"Attempt: {0}".format(self.message["data"]["attempts"])
)
if self.message.get("event") == "onStateChanged":
self.outcome_admin_message(self.message.get("data"))
return JsonResponse({})
def outgo_text_message(self, message, agent_name=None):
content = message["msg"]
# message may not have an agent
if agent_name:
content = "*[" + agent_name + "]*\n" + content
payload = {"args": {"to": self.get_visitor_id(), "content": content}}
url = self.connector.config["endpoint"] + "/sendText"
content = self.joypixel_to_unicode(content)
if settings.DEBUG:
print("outgo payload", payload)
print("outgo url", url)
# TODO: self.full_simulate_typing()
session = self.get_request_session()
timestamp = int(time.time())
try:
sent = session.post(url, json=payload)
self.message_object.delivered = True
self.message_object.response[timestamp] = sent.json()
if settings.DEBUG:
print("SAVING RESPONSE: ", self.message_object.response)
# self.send_seen()
except requests.ConnectionError:
self.message_object.delivered = False
if settings.DEBUG:
print("CONNECTOR DOWN: ", self.connector)
# save message object
if self.message_object:
self.message_object.payload[timestamp] = payload
self.message_object.save()
188830
import doctest
import mock
import unittest
from test import helpers, models
from collections import defaultdict
from sqlalchemy.orm.properties import RelationshipProperty
from sir.querying import iterate_path_values
from sir.schema.searchentities import defer_everything_but, merge_paths
from sir.schema import generate_update_map, SCHEMA
from sir.trigger_generation.paths import second_last_model_in_path
class DeferEverythingButTest(unittest.TestCase):
def setUp(self):
mapper = helpers.Object()
mapper.iterate_properties = []
pk1 = helpers.Object()
pk1.name = "pk1"
pk2 = helpers.Object()
pk2.name = "pk2"
mapper.primary_key = [pk1, pk2]
self.mapper = mapper
prop = helpers.Object()
prop.columns = ""
self.prop = prop
self.mapper.iterate_properties.append(prop)
self.load = mock.Mock()
self.required_columns = ["key", "key2"]
def test_plain_column_called(self):
self.prop.key = "foo"
load = defer_everything_but(self.mapper, self.load, *self.required_columns)
load.defer.assert_called_once_with("foo")
def test_plain_column_not_called(self):
self.prop.key = "key"
load = defer_everything_but(self.mapper, self.load, *self.required_columns)
self.assertFalse(load.defer.called)
def test_id_column(self):
self.prop.key = "foo_id"
load = defer_everything_but(self.mapper, self.load,
*self.required_columns)
self.assertFalse(load.defer.called)
def test_position_column(self):
self.prop.key = "position"
load = defer_everything_but(self.mapper, self.load,
*self.required_columns)
self.assertFalse(load.defer.called)
def test_primary_key_always_loaded(self):
self.prop.key = "pk1"
load = defer_everything_but(self.mapper, self.load,
*self.required_columns)
self.assertFalse(load.defer.called)
class IteratePathValuesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
c = models.C(id=1)
c.bs.append(models.B(id=1))
c.bs.append(models.B(id=2))
cls.c = c
cls.c_path = "bs.id"
b = models.B(id=1)
b.c = models.C(id=1)
cls.b = b
cls.b_path = "c.id"
def test_one_to_many(self):
res = list(iterate_path_values(self.c_path, self.c))
self.assertEqual(res, [1, 2])
def test_attribute_without_relationship(self):
res = list(iterate_path_values("id", self.c))
self.assertEqual(res, [1])
def test_many_to_one(self):
res = list(iterate_path_values(self.b_path, self.b))
self.assertEqual(res, [1])
def test_non_sqlalchemy_paths(self):
res = list(iterate_path_values("__tablename__", self.c))
self.assertEqual(res, [models.C.__tablename__])
class MergePathsTest(unittest.TestCase):
def test_dotless_path(self):
paths = [["id"], ["name"]]
expected = {"id": "", "name": ""}
        self.assertEqual(merge_paths(paths), expected)
def test_dotted_path(self):
paths = [["rel.id"], ["rel2.rel3.id"]]
expected = {
"rel": defaultdict(set, id=""),
"rel2": defaultdict(set,
rel3=defaultdict(set,
id=""
)
)
}
self.assertEqual(dict(merge_paths(paths)), expected)
class DBTest(unittest.TestCase):
def test_non_composite_fk(self):
paths, _, models, _ = generate_update_map()
for table_paths in paths.values():
for core_name, path in table_paths:
model, _ = second_last_model_in_path(SCHEMA[core_name].model, path)
if path:
prop_name = path.split(".")[-1]
try:
prop = getattr(model, prop_name).prop
except AttributeError:
pass
else:
if isinstance(prop, RelationshipProperty):
if prop.direction.name == 'MANYTOONE':
self.assertEqual(len(prop.local_columns), 1)
def load_tests(loader, tests, ignore):
from sir import querying
tests.addTests(doctest.DocTestSuite(querying))
return tests
188836
from itertools import islice
from hwt.doc_markers import internal
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.utils.reduction import HdlStatement_merge_statement_lists, \
is_mergable_statement_list
from hwt.pyUtils.arrayQuery import areSetsIntersets, groupedby
from hwt.serializer.utils import HdlStatement_sort_key
class HwtStmIncompatibleStructure(Exception):
"""
    Statements are not comparable due to incompatible structure
"""
@internal
def checkIfIsTooSimple(proc):
"""check if process is just unconditional assignments
and it is useless to merge them"""
try:
a, = proc.statements
if isinstance(a, HdlAssignmentContainer):
return True
except ValueError:
pass
return False
@internal
def tryToMerge(procA: HdlStmCodeBlockContainer,
procB: HdlStmCodeBlockContainer):
"""
Try merge procB into procA
    :raise HwtStmIncompatibleStructure: if merge is not possible
    :attention: procA is now the result if the merge succeeded
:return: procA which is now result of merge
"""
if (areSetsIntersets(procA._outputs, procB._sensitivity) or
areSetsIntersets(procB._outputs, procA._sensitivity) or
not is_mergable_statement_list(procA.statements, procB.statements)):
raise HwtStmIncompatibleStructure()
procA.statements = HdlStatement_merge_statement_lists(
procA.statements, procB.statements)
procB.statements = None
procA._outputs.extend(procB._outputs)
procA._inputs.extend(procB._inputs)
procA._sensitivity.extend(procB._sensitivity)
return procA
@internal
def reduceProcesses(processes):
"""
Try to merge processes as much is possible
:param processes: list of processes instances
"""
    # sort to make the merge order deterministic
processes.sort(key=HdlStatement_sort_key, reverse=True)
    # now try to reduce processes with a nearly identical statement structure into one
    # to minimize the number of processes
for _, procs in groupedby(processes, lambda p: p.rank):
_procs = []
for p in procs:
if checkIfIsTooSimple(p):
yield p
else:
_procs.append(p)
procs = _procs
for iA, pA in enumerate(procs):
if pA is None:
continue
for iB, pB in enumerate(islice(procs, iA + 1, None)):
if pB is None:
continue
try:
pA = tryToMerge(pA, pB)
except HwtStmIncompatibleStructure:
continue
procs[iA + 1 + iB] = None
# procs[iA] = pA
for p in procs:
if p is not None:
yield p
188865
import tensorflow as tf
tf.random.set_seed(4323)
x = tf.random.normal([1, 3])
w = tf.random.normal([3, 2])
b = tf.random.normal([2])
y = tf.constant([0, 1])
with tf.GradientTape() as tape:
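    # w and b are plain tensors rather than tf.Variables, so they must be
    # watched explicitly for the tape to record gradients for them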
tape.watch([w, b])
logits = (x @ w + b)
loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y, logits, from_logits=True))
grads = tape.gradient(loss, [w, b])
print('w grad:', grads[0])
print('b grad:', grads[1])
188870
from opensextant import load_major_cities, load_countries, get_country, load_us_provinces, load_provinces
from unittest import TestCase
# These fundamental data sets are flat-file resources in the API; they are all variations of
# "official" gazetteer data, whereas the opensextant.gazetteer.DB class operates on the full set
# of data from the SQLite master file. The difference is 3 orders of magnitude of data.
class TestGazetteerAPI(TestCase):
def test_gazetteer_api_sample(self):
print("Most objects derive from Place class or Country class")
print("\n===========================")
print("Working with Countries")
data = load_countries()
C = get_country("FR")
print("country: ", C)
C = get_country("IZ", standard="FIPS")
print("country: ", C)
print("API country list length:", len(data))
assert len(data) > 0
print("\n===========================")
print("Working with Major Cities")
data = load_major_cities()
print("city", data[0])
print("city", data[1])
print(".....")
print("API major city count", len(data))
assert len(data) > 0
print("\n===========================")
print("Working with Provinces")
data = load_provinces()
        count = 0
        for adm1_id in data:
            print("province", data[adm1_id])
            count += 1
            if count > 10:
                break
print("....")
print("API province count", len(data))
assert len(data) > 0
print("\n===========================")
print("Working with US States only")
data = load_us_provinces()
print("province", data[0])
print("province", data[1])
print(".....")
print("API US state count", len(data))
assert len(data) > 0
188955
import os
import logging
import logging.handlers
import networkn
import client
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
log_path = os.path.join(root, 'logs')
os.makedirs(log_path, exist_ok=True)  # the file handler below fails if this directory is missing
def get_logger(name, level=logging.DEBUG):
logger = logging.getLogger(name)
logger.setLevel(level)
logger.handlers = []
formatter = logging.Formatter('%(asctime)s.%(msecs)d ' + name + ' %(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S')
handler = logging.handlers.TimedRotatingFileHandler(os.path.join(log_path, 'app.log'), when='midnight', backupCount=28)
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
188979
import inspect
import io
import json
import logging
import os
import sys
import six
def ensure_path_exists(path):
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
return path
logger = logging.getLogger(__name__)
class ConfigurationRegistry(object):
def __init__(self):
self._register = {}
def register(self, key, description=None, default=None, onchange=None, onload=None,
type=None, host=None):
"""
Register a configuration key that can be set by the user. As noted in the
class level documentation, these keys should not lead to changes in the
output of omniduct functions. The same code should generate the same results
independent of this configuration.
The arguments to this method are:
- key : A string used to identify the configuration option.
- description* : A string description of the configuration key.
- default* : A default value for the key.
- onchange* : A function to call when the parameter changes (should have
a signature accepting one variable).
- onload* : A function of no arguments to call to initialize the value
of this configuration setting
        - type : Values set must satisfy `isinstance(val, type)`.
* If not specified, these fields default to None.
"""
if key in dir(self):
raise KeyError("Key `{0}` cannot be registered as it conflicts with a method of OmniductConfiguration.".format(key))
if key in self._register:
            logger.warning("Overwriting existing key `{0}`, previously registered by {1}".format(key, self._register[key]['host']))
try:
            caller_frame = inspect.currentframe().f_back
host = inspect.getmodule(caller_frame).__name__
except:
host = 'unknown'
if default is not None and type is not None:
assert isinstance(default, type)
self._register[key] = {
'description': description,
'host': host,
'default': default,
'onchange': onchange,
'onload': onload,
'type': type,
}
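    # Hedged usage sketch (the key name and values are illustrative):
    # registry = ConfigurationRegistry()
    # registry.register('logging_level', description='Console log level',
    #                   default=20, type=int)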
def show(self):
"""
Pretty print the configuration options available to be set, as well as
their current values, descriptions and the module from which they were
registered.
"""
for key in sorted(self._register.keys()):
desc = self._register[key].get('description')
if desc is None:
desc = 'No description'
print('{0} with default = {1}'.format(key, self._register[key]['default']))
print('\t{0}'.format(desc))
print('\t({0})'.format(self._register[key]['host']))
class Configuration(ConfigurationRegistry):
"""
Configuration is a hub for storing runtime configuration settings, as
well as persisting them to disk. Ideally, it should store only configuration
options that allow omniduct to run optimally on the system (such as preferred
hostnames, servers and usernames). In particular, the same code run on different
systems should output the same results (independent of configuration); so,
for example, this configuration hub should *not* be used for things like
default plot styling, etc.
Retrieving a configuration option looks like:
>>> config.logging_level
20
Setting a configuration option looks like:
>>> config.logging_level = 10
Reviewing all available options and set values looks like:
>>> config.show()
"""
def __init__(self, *registries, **kwargs):
ConfigurationRegistry.__init__(self)
for registry in registries:
for key, props in registry.items():
self.register(key, **props)
self._config = {}
self.__config_path = kwargs.pop('config_path', None)
def __dir__(self):
return sorted(self._register.keys())
@property
def _config_path(self):
return self.__config_path
@_config_path.setter
    def _config_path(self, path):
        if path is not None:
            path = os.path.expandvars(os.path.expanduser(path))
        self.__config_path = path
        if path is not None and os.path.exists(path):
# Restore configuration
try:
self.load(force=True)
except:
raise RuntimeError(
"Configuration file at {0} cannot be loaded. Perhaps try deleting it.".format(self.__config_path))
def all(self):
"""
Return a dictionary containing all configuration keys. Note that this is
the actual dictionary storing the configuration options, so modifying
this dictionary will modify the configuration options *without* running
the standard checks.
"""
return self._config
def show(self):
"""
Pretty print the configuration options available to be set, as well as
their current values, descriptions and the module from which they were
registered.
"""
for key in sorted(self._register.keys()):
desc = self._register[key].get('description')
if desc is None:
desc = 'No description'
val = str(self._config.get(key, '<Not Set>'))
print('{0} = {1} (default = {2})'.format(key, val, self._register[key]['default']))
print('\t{0}'.format(desc))
print('\t({0})'.format(self._register[key]['host']))
def __setattr__(self, key, value):
"""
Allow setting configuration options using the standard python attribute
methods, as described in the class documentation.
Attributes prefixed with '_' are loaded from this class.
"""
if key.startswith('_'):
object.__setattr__(self, key, value)
elif key in self._register:
if self._register[key]['type'] is not None:
if not isinstance(value, self._register[key]['type']):
raise ValueError(
"{} must be in type(s) {}".format(key, self._register[key]['type']))
if self._register[key]['onchange'] is not None:
self._register[key]['onchange'](value)
self._config[key] = value
else:
raise KeyError("No such configuration key `{0}`.".format(key))
def __getattr__(self, key):
"""
Allow retrieval of configuration keys using standard python attribute
methods, as described in the class documentation.
Attributes prefixed with '_' are loaded from this class.
"""
if key.startswith('_'):
            return object.__getattribute__(self, key)
if key in self._register:
if key in self._config:
return self._config[key]
# if a lazy loader is specified, use it
if self._register[key]['default'] is None and self._register[key]['onload'] is not None:
setattr(self, key, self._register[key]['onload']())
return self._config.get(key, self._register[key]['default'])
raise AttributeError("No such configuration key `{0}`.".format(key))
def reset(self, *keys, **target_config):
"""
Reset all configuration keys specified to their default values, or values
specified in `target_config`. If both `keys` and `target_config` are
specified, `keys` acts to both filter the keys of `target_config` and add
default values as the missing keys.
>>> config.reset('logging_level')
>>> config.reset('logging_level', logging_level=10)
>>> config.reset(logging_level=10)
If no keys are specified, reset all keys:
>>> config.reset()
"""
if len(keys) == 0:
keys = set(list(self._register.keys()) + list(target_config.keys()))
target_config = self.__restrict_keys(target_config, keys)
reset_keys = [key for key in keys if key not in target_config]
for key, value in target_config.items():
self._config[key] = value
if key in self._register:
if value == self._register[key]['default']:
self._config.pop(key)
if self._register[key]['onchange'] is not None:
self._register[key]['onchange'](getattr(self, key))
else: # Allow users to delete deprecated keys
logger.warning("Added value for configuration key `{0}` which has yet to be registered.".format(key))
for key in reset_keys:
if key in self._config:
self._config.pop(key)
if key in self._register and self._register[key]['onchange'] is not None:
self._register[key]['onchange'](getattr(self, key))
def __restrict_keys(self, d, keys):
if keys is None:
return d
return {key: d[key] for key in keys if key in d}
def save(self, filename=None, keys=None, replace=None):
"""
Save the current configuration as a JSON file. Accepted arguments are:
- filename : The location of the file to be saved. If not specified,
default configuration location is used (and autoloaded on startup).
- keys : The keys to be saved. If `None`, all keys are saved (or set to
default values if missing).
        - replace : Whether the configuration file should be replaced (True), or
          simply updated (False). If False, the existing keys stored in the file are
          maintained except where they conflict with the keys specified. The
          default value is `None`, in which case it maps to `True` if keys=None,
          or `False` if specific keys are specified. (default=None)
"""
filename = filename or self._config_path
filename = os.path.join(ensure_path_exists(os.path.dirname(filename)), os.path.basename(filename))
config = {}
if replace is None:
replace = True if keys is None else False
if keys is None:
replace = True
if not replace and os.path.exists(filename):
with io.open(filename, 'r') as f:
config = json.load(f)
config.update(self.__restrict_keys(self._config, keys))
with io.open(filename, 'w') as f:
json_config = json.dumps(config, ensure_ascii=False, indent=4)
if sys.version_info.major == 2 and isinstance(json_config, six.string_types):
json_config = json_config.decode("utf-8")
f.write(json_config)
def load(self, filename=None, keys=None, replace=None, force=False):
"""
Load a configuration from the disk. Accepted arguments are:
- filename : The location of the configuration. By default, this will
point to the automatically loaded configuration file.
- keys : The keys to load from the configuration. If `None`, all keys
are loaded from config file (or set to default values if missing).
- replace : Whether the current configuration should be replaced (True), or
simply updated (False). If False, then the existing configuration will be
maintained except where conflicts exist with the keys being loaded. The
default value is `None`, in which case it maps to `True` if keys=None,
or `False` if specific keys are specified. (default=None)
        - force : Ordinarily, new configuration is run through the standard checks,
          but in some cases (such as startup) the register has yet to be filled,
          and the checks would spout spurious warnings. This allows one to bypass
          all checks.
"""
filename = filename or self._config_path
if replace is None:
replace = True if keys is None else False
if keys is None:
replace = True
with io.open(filename, 'r') as f:
config = self.__restrict_keys(json.load(f), keys)
if force:
self._config = config
else:
if replace:
self.reset(**config)
else:
self.reset(*keys, **config)
config = Configuration()
189005
from io import BytesIO
from re import match
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.lib.colors import Color
from webcolors import hex_to_rgb
import click
@click.command()
@click.argument('filename')
@click.option('-w', '--watermark', default='TEST',
help='Annotation text, use {} to include parts of file name if '
'you are using --regex')
@click.option('-r', '--regex', default='',
help='Regex to run on file name, annotation is used as template')
@click.option('-f', '--font-name', default='Helvetica-Bold',
help='Font name')
@click.option('-s', '--font-size', default=85, type=int,
help='Font size')
@click.option('-c', '--color', default='#000000',
help='Font colour')
@click.option('-o', '--opacity', default=1.0,
help='Opacity from 0 (transparent) to 1 (solid)')
@click.option('-x', default=250,
help='X coordinate')
@click.option('-y', default=250,
help='Y coordinate')
@click.option('-d', '--destination-file-name', default='',
help='Destination file, by default files are modified in place')
def annotate(filename, watermark, regex, font_name, font_size, color, opacity,
x, y, destination_file_name):
mask_stream = BytesIO()
watermark_canvas = canvas.Canvas(mask_stream, pagesize=A4)
watermark_canvas.setFont(font_name, font_size)
    r, g, b = hex_to_rgb(color)
    # webcolors returns 0-255 integers, while reportlab expects 0-1 floats
    c = Color(r / 255, g / 255, b / 255, alpha=opacity)
watermark_canvas.setFillColor(c)
if regex:
groups = match(regex, filename)
watermark = watermark.format(*groups.groups())
watermark_canvas.drawString(x, y, watermark)
watermark_canvas.save()
mask_stream.seek(0)
mask = PdfFileReader(mask_stream)
src = PdfFileReader(filename)
output = PdfFileWriter()
page = src.getPage(0)
page.mergePage(mask.getPage(0))
output.addPage(page)
for page in range(1, src.getNumPages()):
output.addPage(src.getPage(page))
if not destination_file_name:
destination_file_name = filename
with open(destination_file_name, "wb") as output_stream:
output.write(output_stream)
if __name__ == '__main__':
annotate()
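# Example invocation (file names are assumptions):
#   python annotate.py report.pdf -w DRAFT -c '#ff0000' -o 0.3 -d report-draft.pdf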
189039
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class FileBrowser(QWidget):
def __init__(self):
QWidget.__init__(self)
self.init_gui_elements()
self.construct_gui()
self.update_gui()
def init_gui_elements(self):
self.layout = QVBoxLayout()
self.file_system_model = QFileSystemModel()
self.file_tree_view = QTreeView()
def construct_gui(self):
self.setLayout(self.layout)
self.layout.addWidget(self.file_tree_view)
def update_gui(self):
self.file_system_model.setNameFilters(('*.osu', '*.osr'))
self.file_system_model.setNameFilterDisables(False)
self.file_system_model.setRootPath(QDir.currentPath())
self.file_tree_view.setDragEnabled(True)
self.file_tree_view.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.file_tree_view.setModel(self.file_system_model)
self.file_tree_view.hideColumn(1) # Hide file size column
self.file_tree_view.hideColumn(2) # Hide file type column
self.file_tree_view.setRootIndex(self.file_system_model.index(QDir.currentPath()))
self.file_tree_view.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.file_tree_view.resizeColumnToContents(0) # Resize file name column
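# Minimal usage sketch (the module itself only defines the widget):
# app = QApplication([])
# browser = FileBrowser()
# browser.show()
# app.exec_()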
189061
from helpers.test_case import AioChatTestCase, unittest_run_loop
class LoginTestCase(AioChatTestCase):
""" Testing for LoginView """
url_name = 'login'
def setUp(self):
super().setUp()
self.url = str(self.app.router[self.url_name].url_for())
@unittest_run_loop
async def test_url_reversed(self):
""" Url should be /login """
self.assertEqual(str(self.app.router[self.url_name].url_for()), '/login')
self.assertEqual(str(self.url), '/login')
@unittest_run_loop
async def test_get_method(self):
""" Should GET return 200 with login form """
response = await self.client.get(self.url)
self.assertEqual(response.status, 200)
content = await response.text()
self.assertIn('Please sign in', content)
@unittest_run_loop
async def test_user_not_found(self):
""" Should GET return 302 and msg for not found user """
response = await self.client.post(self.url, data={'username': 'abcd'}, allow_redirects=False)
self.assertEqual(response.status, 302)
        self.assertTrue(str(response.url).endswith(self.url))
response = await self.client.request('GET', self.url)
content = await response.text()
self.assertIn('Please sign in', content)
self.assertIn('User abcd not found', content)
class RegisterTestCase(AioChatTestCase):
""" Testing for RegisterView """
url_name = 'register'
def setUp(self):
super().setUp()
self.url = str(self.app.router[self.url_name].url_for())
@unittest_run_loop
async def test_url_reversed(self):
""" Url should be /register """
self.assertEqual(str(self.app.router[self.url_name].url_for()), '/register')
self.assertEqual(str(self.url), '/register')
@unittest_run_loop
async def test_get_method(self):
""" Should GET return 200 with register form """
response = await self.client.request('GET', self.url)
self.assertEqual(response.status, 200)
content = await response.text()
self.assertIn('Create account', content)
189069
from math import *
from functools import partial
from sympy import cot
from PySide6.QtWidgets import *
from PySide6.QtUiTools import *
from PySide6.QtCore import *
class Window(QMainWindow):
def __init__(self):
super().__init__()
loader = QUiLoader()
self.ui = loader.load('form.ui', None)
self.ui.show()
self.result = 0
self.lastOperand = ''
self.step = 0
self.ui.btn_0.clicked.connect(partial(self.func_num, 0))
self.ui.btn_1.clicked.connect(partial(self.func_num, 1))
self.ui.btn_2.clicked.connect(partial(self.func_num, 2))
self.ui.btn_3.clicked.connect(partial(self.func_num, 3))
self.ui.btn_4.clicked.connect(partial(self.func_num, 4))
self.ui.btn_5.clicked.connect(partial(self.func_num, 5))
self.ui.btn_6.clicked.connect(partial(self.func_num, 6))
self.ui.btn_7.clicked.connect(partial(self.func_num, 7))
self.ui.btn_8.clicked.connect(partial(self.func_num, 8))
self.ui.btn_9.clicked.connect(partial(self.func_num, 9))
self.ui.sum_btn.clicked.connect(self.sum)
self.ui.sub_btn.clicked.connect(self.sub)
self.ui.cross_btn.clicked.connect(self.cross)
self.ui.division_btn.clicked.connect(self.div)
self.ui.equal_btn.clicked.connect(self.equal)
self.ui.ac_btn.clicked.connect(self.ac)
self.ui.dot_btn.clicked.connect(self.dot_func)
self.ui.percentage_btn.clicked.connect(self.percentage_func)
self.ui.neg_pos_btn.clicked.connect(self.neg_pos)
self.ui.sin_btn.clicked.connect(self.sin_func)
self.ui.cos_btn.clicked.connect(self.cos_func)
self.ui.tan_btn.clicked.connect(self.tan_func)
self.ui.cot_btn.clicked.connect(self.cot_func)
self.ui.log_btn.clicked.connect(self.log_func)
self.ui.sqrt_btn.clicked.connect(self.sqrt_func)
def func_num(self, num):
self.ui.textBox.setText(self.ui.textBox.text() + str(num))
def dot_func(self):
if '.' not in self.ui.textBox.text():
self.ui.textBox.setText(self.ui.textBox.text() + '.')
def sum(self):
try:
self.result += float(self.ui.textBox.text())
self.lastOperand = '+'
self.step += 1
self.ui.textBox.setText('')
except:
self.ui.textBox.setText('Error')
self.result = 0
def sub(self):
try:
if self.step == 0:
self.result = float(self.ui.textBox.text())
else:
self.result -= float(self.ui.textBox.text())
self.lastOperand = '-'
self.step += 1
self.ui.textBox.setText('')
except:
self.ui.textBox.setText('Error')
self.result = 0
def cross(self):
try:
if self.step == 0:
self.result = float(self.ui.textBox.text())
else:
self.result *= float(self.ui.textBox.text())
self.lastOperand = '*'
self.step += 1
self.ui.textBox.setText('')
except:
self.ui.textBox.setText('Error')
self.result = 0
def div(self):
try:
if self.step == 0:
self.result = float(self.ui.textBox.text())
else:
self.result /= float(self.ui.textBox.text())
self.lastOperand = '/'
self.step += 1
self.ui.textBox.setText('')
except:
self.ui.textBox.setText('Error')
self.result = 0
    def equal(self):
        try:
            self.last_num = float(self.ui.textBox.text())
            if self.lastOperand == '+':
                self.result += self.last_num
            elif self.lastOperand == '-':
                self.result -= self.last_num
            elif self.lastOperand == '*':
                self.result *= self.last_num
            elif self.lastOperand == '/':
                self.result /= self.last_num
            elif self.lastOperand == '%':
                self.result = self.last_num/100
            else:
                self.result = self.last_num
            self.ui.textBox.setText(str(self.result))
        except (ValueError, ZeroDivisionError):
            self.ui.textBox.setText('Error')
        self.result = 0
        self.step = 0
        self.lastOperand = ''
def ac(self):
self.result = 0
self.ui.textBox.setText('')
self.step = 0
self.lastOperand = ''
def percentage_func(self):
self.ui.textBox.setText(str(float(self.ui.textBox.text())/100))
self.lastOperand = '%'
self.step += 1
def neg_pos(self):
if '-' in self.ui.textBox.text():
self.ui.textBox.setText(self.ui.textBox.text()[1:])
else:
self.ui.textBox.setText('-' + self.ui.textBox.text())
def sin_func(self):
self.ui.textBox.setText(str(round(sin(radians(float(self.ui.textBox.text()))), 4)))
self.lastOperand = 'sin'
self.step += 1
def cos_func(self):
self.ui.textBox.setText(str(round(cos(radians(float(self.ui.textBox.text()))), 4)))
self.lastOperand = 'cos'
self.step += 1
def tan_func(self):
self.ui.textBox.setText(str(round(tan(radians(float(self.ui.textBox.text()))), 4)))
self.lastOperand = 'tan'
self.step += 1
def cot_func(self):
self.ui.textBox.setText(str(round(cot(radians(float(self.ui.textBox.text()))), 4)))
self.lastOperand = 'cot'
self.step += 1
def log_func(self):
self.ui.textBox.setText(str(round(log(float(self.ui.textBox.text()), 10), 4)))
self.lastOperand = 'log'
self.step += 1
def sqrt_func(self):
self.ui.textBox.setText(str(round(sqrt(float(self.ui.textBox.text())), 4)))
self.lastOperand = 'sqrt'
self.step += 1
if __name__ == '__main__':
    app = QApplication([])
    window = Window()
    app.exec()
|
189078
|
import unittest
from bulletphysics import *
class TransformTest(unittest.TestCase):
def setUp(self):
self.transform = Transform( Quaternion(0.0, 1.0, 2.0, 1.0), Vector3(0.0, 1.1, 2.2))
def test_opengl(self):
matrix = [0.0] * 5
self.assertRaises(TypeError, self.transform.getOpenGLMatrix, matrix)
matrix = [0.0] * 16
self.transform.getOpenGLMatrix(matrix)
self.assertEqual(-0.67, round(matrix[0], 2))
def test_rotation(self):
self.transform.setIdentity()
q = Quaternion(1.0, 2.0, 3.0, 4.0)
self.transform.setRotation(q)
q = self.transform.getRotation()
self.assertEqual(0.2, round(q.getX(),1))
self.assertEqual(0.4, round(q.getY(),1))
self.assertEqual(0.5, round(q.getZ(),1))
self.assertEqual(0.7, round(q.getW(),1))
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
189124
|
import FWCore.ParameterSet.Config as cms
from HLTrigger.HLTfilters.hltHighLevel_cfi import hltHighLevel
hcalfilter = hltHighLevel.clone(
TriggerResultsTag = cms.InputTag('TriggerResults'),
HLTPaths = cms.vstring('user_step' )
)
hcalfilterSeq = cms.Sequence( hcalfilter )
|
189168
|
import os
import logging
import unittest
from pysnptools.util.filecache import Hashdown
import doctest
pysnptools_hashdown = Hashdown.load_hashdown(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "pysnptools.hashdown.json"
),
directory=os.environ.get("PYSNPTOOLS_CACHE_HOME", None),
)
def example_file(pattern, endswith=None):
"""
Returns the local location of a PySnpTools example file, downloading it
if needed.
    :param pattern: The name of the example file of interest. A
        `file name pattern <https://docs.python.org/3.7/library/fnmatch.html>`__
        may be given. All matching files will be downloaded (if needed) and
        the name of one will be returned.
    :type pattern: string
    :param endswith: The pattern of the file name to return. By default, when
        no `endswith` is given, the name of the first matching file will be
        returned.
    :type endswith: string
:rtype: string
By default, the local location will be under the system temp directory
(typically controlled with the TEMP environment variable).
Alternatively, the directory can be set with the PYSNPTOOLS_CACHE_HOME
environment variable.
This function knows the MD5 hash of all PySnpTools example files and uses
that content-based hash to decide if a file needs to be downloaded.
>>> from pysnptools.util import example_file # Download and return local file name
>>> # Download the phenotype file if necessary. Return its local location.
>>> pheno_fn = example_file("pysnptools/examples/toydata.phe")
>>> print('The local file name is ', pheno_fn)
The local file name is ...pysnptools/examples/toydata.phe
>>>
>>> # Download the bed,bim,&fam files if necessary. Return the location of bed file.
>>> bedfile = example_file("tests/datasets/all_chr.maf0.001.N300.*","*.bed")
>>> print('The local file name is ', bedfile)
The local file name is ...tests/datasets/all_chr.maf0.001.N300.bed
"""
return pysnptools_hashdown._example_file(pattern, endswith=endswith)
class TestExampleFile(unittest.TestCase):
def test_doc_test(self):
import pysnptools.util._example_file as example_mod
result = doctest.testmod(
example_mod, optionflags=doctest.ELLIPSIS
)
assert result.failed == 0, "failed doc test: " + __file__
def getTestSuite():
"""
set up test suite
"""
test_suite = unittest.TestSuite([])
test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))
return test_suite
if __name__ == "__main__":
# This creates a Json based on all files, but it may have the wrong line-endings and thus hash on Windows. It can be edited to files of interest
if True:
update = Hashdown(url=pysnptools_hashdown.url, file_to_hash=pysnptools_hashdown.file_to_hash, allow_unknown_files=True)
        for file0 in list(os.walk(r'D:\OneDrive\programs\pysnptools\pysnptools\examples'))[0][2]:
file = 'pysnptools/examples/'+file0
print(file)
update.file_exists(file)
update.save_hashdown("deldir/updated.hashdown.json")
if False:
Hashdown.scan_local(
r"D:\OneDrive\programs\pysnptools",
url="https://github.com/fastlmm/PySnpTools/raw/cf248cbf762516540470d693532590a77c76fba2",
).save_hashdown("deldir/pysnptools.hashdown.json")
# This fixes the hashes of the files
if False:
update = Hashdown(url=pysnptools_hashdown.url, allow_unknown_files=True)
for file in pysnptools_hashdown.file_to_hash:
print(file)
update.file_exists(file)
update.save_hashdown("deldir/updated.hashdown.json")
logging.basicConfig(level=logging.INFO)
suites = getTestSuite()
r = unittest.TextTestRunner(failfast=False)
ret = r.run(suites)
assert ret.wasSuccessful()
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
189170
|
def relax():
neopixel.setAnimation("Color Wipe", 0, 0, 20, 1)
sleep(2)
neopixel.setAnimation("Ironman", 0, 0, 255, 1)
if (i01.eyesTracking.getOpenCV().capturing):
global MoveBodyRandom
MoveBodyRandom=0
global MoveHeadRandom
MoveHeadRandom=0
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(79,100)
i01.moveArm("left",5,84,28,14)
i01.moveArm("right",5,82,28,16)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(95,90,90)
else:
global MoveBodyRandom
MoveBodyRandom=1
global MoveHeadRandom
MoveHeadRandom=1
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
#i01.moveHead(79,100)
i01.moveArm("left",5,84,28,14)
i01.moveArm("right",5,82,28,16)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(95,90,90)
|
189190
|
import yaml
import torch
import cv2
import json_tricks as json
from torchvision.utils import make_grid
def load_config(path):
with open(path, 'r') as file:
        cfg = yaml.safe_load(file)
return cfg
def draw_shape(pos, sigma_x, sigma_y, angle, size):
"""
draw (batched) gaussian with sigma_x, sigma_y on 2d grid
Args:
pos: torch.tensor (float) with shape (2) specifying center of gaussian blob (x: row, y:column)
sigma_x: torch.tensor (float scalar), scaling parameter along x-axis
sigma_y: similar along y-axis
angle: torch.tensor (float scalar) rotation angle in radians
size: int specifying size of image
        (the device is inferred from ``pos``; all tensor arguments must live on the same device)
Returns:
torch.tensor (1, 1, size, size) with gaussian blob
"""
device = pos.device
assert sigma_x.device == sigma_y.device == angle.device == device, "inputs should be on the same device!"
# create 2d meshgrid
x, y = torch.meshgrid(torch.arange(0, size), torch.arange(0, size))
x, y = x.unsqueeze(0).unsqueeze(0).to(device), y.unsqueeze(0).unsqueeze(0).to(device)
# see https://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function
a = torch.cos(angle) ** 2 / (2 * sigma_x ** 2) + torch.sin(angle) ** 2 / (2 * sigma_y ** 2)
b = -torch.sin(2 * angle) / (4 * sigma_x ** 2) + torch.sin(2 * angle) / (4 * sigma_y ** 2)
c = torch.sin(angle) ** 2 / (2 * sigma_x ** 2) + torch.cos(angle) ** 2 / (2 * sigma_y ** 2)
    # append dimensions for broadcasting
pos = pos.view(1, 1, 2, 1, 1)
a, b, c = a.view(1, 1), b.view(1, 1), c.view(1, 1)
# pixel-wise distance from center
xdist = (x - pos[:, :, 0])
ydist = (y - pos[:, :, 1])
# gaussian function
g = torch.exp((-a * xdist ** 2 - 2 * b * xdist * ydist - c * ydist ** 2))
return g
def draw_template(path, size, batch_size, device):
"""
draw template consisting of limbs defined by gaussian heatmap
Args:
template: json file defining all parts
size: int, image size (assumed quadratic), this should match the center coordinates defined in the json!
device: torch.device, either cpu or gpu
"""
with open(path, 'r') as file:
template = json.load(file)
heatmaps = []
for v in template.values():
center = torch.tensor(v['center']).to(device)
sx = torch.tensor(v['sx']).to(device)
sy = torch.tensor(v['sy']).to(device)
angle = torch.tensor(v['angle']).to(device)
heatmaps.append(draw_shape(center, sx, sy, angle, size))
heatmaps = torch.cat(heatmaps, dim=1).repeat(batch_size, 1, 1, 1)
return heatmaps
def load_anchor_points(path, device, batch_size):
"""
load anchor points from json file
change this according to your definitions
Args:
anchor_points: json file containing anchor points per part in column, row format similar to open-cv
device: torch.device, either cpu or gpu
"""
with open(path, 'r') as file:
anchor_points = json.load(file)
# assumes three anchor points for core, two (parent+child) for all others except hands and feet and head
# change this accordingly for different template definitions!
double = []
single = []
for k, v in anchor_points.items():
if k in ['left hand', 'right hand', 'left foot', 'right foot', 'head']:
single.append(v)
elif k == 'core':
triple = [v]
else:
double.append(v)
return torch.tensor(triple).to(device).float().unsqueeze(-1).unsqueeze(0).repeat(batch_size, 1, 1, 1, 1), \
torch.tensor(single).to(device).float().unsqueeze(-1).unsqueeze(0).repeat(batch_size, 1, 1, 1, 1), \
torch.tensor(double).to(device).float().unsqueeze(-1).unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
def show_images(tensor, renorm):
if renorm:
for i in range(tensor.shape[0]):
# bgr opencv to rgb
tensor[i] = renorm(tensor[i])[[2, 1, 0]]
output_grid = make_grid(tensor, nrow=6, normalize=True, scale_each=True)
return output_grid
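if __name__ == "__main__":
    # Quick smoke-test sketch for draw_shape (illustrative values, not part
    # of the original module): an anisotropic blob centred at (16, 16),
    # rotated 45 degrees, on a 32x32 grid.
    import math
    blob = draw_shape(torch.tensor([16.0, 16.0]),
                      sigma_x=torch.tensor(6.0),
                      sigma_y=torch.tensor(2.0),
                      angle=torch.tensor(math.pi / 4),
                      size=32)
    print(blob.shape)  # -> torch.Size([1, 1, 32, 32])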
|
189235
|
class Director:
__builder = None
def setBuilder(self, builder):
self.__builder = builder
def getOrden(self):
orden = Orden()
pan = self.__builder.preparaPan()
orden.setPan(pan)
carne = self.__builder.agregaCarne()
orden.setCarne(carne)
verduras = self.__builder.agregaVerduras()
orden.setVerduras(verduras)
condimentos = self.__builder.agregaCondimentos()
orden.setCondimentos(condimentos)
combo = self.__builder.hazlaCombo()
orden.setCombo(combo)
return orden
class Orden:
def __init__(self):
self.__pan = None
self.__carne = None
self.__verduras = None
self.__condimentos = None
self.__combo = None
def setPan(self, pan):
self.__pan = pan
def setCarne(self, carne):
self.__carne = carne
def setVerduras(self, verduras):
self.__verduras = verduras
def setCondimentos(self, condimentos):
self.__condimentos = condimentos
def setCombo(self, combo):
self.__combo = combo
def caracteristicas(self):
print ("Pan: %s" % self.__pan.tipo)
print ("Carne: %s" % self.__carne.porciones)
print ("Verduras: %s" % self.__verduras.si_no)
print ("Condimentos: %s" % self.__condimentos.si_no)
print ("Combo: %s" % self.__combo.si_no)
class Builder:
def preparaPan(self):pass
def agregaCarne(self):pass
def agregaVerduras(self):pass
def agregaCondimentos(self):pass
def hazlaCombo(self):pass
class Hocho1Builder(Builder):
def preparaPan(self):
pan = Pan()
pan.tipo = ' Normal '
return pan
def agregaCarne(self):
carne = Carne()
carne.porciones = ' Si '
return carne
def agregaVerduras(self):
verduras = Verduras()
verduras.si_no = " Si "
return verduras
def agregaCondimentos(self):
condimentos = Condimentos()
condimentos.si_no = " Si "
return condimentos
def hazlaCombo(self):
combo = Combo()
combo.si_no = " Si "
return combo
class Hocho2Builder(Builder):
def preparaPan(self):
pan = Pan()
pan.tipo = ' Normal '
return pan
def agregaCarne(self):
carne = Carne()
carne.porciones = ' Si '
return carne
def agregaVerduras(self):
verduras = Verduras()
verduras.si_no = " No "
return verduras
def agregaCondimentos(self):
condimentos = Condimentos()
condimentos.si_no = " Si "
return condimentos
def hazlaCombo(self):
combo = Combo()
combo.si_no = " Si "
return combo
class Burguer1Builder(Builder):
def preparaPan(self):
pan = Pan()
pan.tipo = ' Integral '
return pan
def agregaCarne(self):
carne = Carne()
carne.porciones = ' No '
return carne
def agregaVerduras(self):
verduras = Verduras()
verduras.si_no = " Si "
return verduras
def agregaCondimentos(self):
condimentos = Condimentos()
condimentos.si_no = " No "
return condimentos
def hazlaCombo(self):
combo = Combo()
combo.si_no = " Si "
return combo
class Burguer2Builder(Builder):
def preparaPan(self):
pan = Pan()
pan.tipo = ' Normal '
return pan
def agregaCarne(self):
carne = Carne()
carne.porciones = ' Si '
return carne
def agregaVerduras(self):
verduras = Verduras()
verduras.si_no = " Si "
return verduras
def agregaCondimentos(self):
condimentos = Condimentos()
condimentos.si_no = " No "
return condimentos
def hazlaCombo(self):
combo = Combo()
combo.si_no = " Si "
return combo
class Burguer3Builder(Builder):
def preparaPan(self):
pan = Pan()
pan.tipo = ' Normal '
return pan
def agregaCarne(self):
carne = Carne()
carne.porciones = ' Ninguna '
return carne
def agregaVerduras(self):
verduras = Verduras()
verduras.si_no = " Si "
return verduras
def agregaCondimentos(self):
condimentos = Condimentos()
condimentos.si_no = " Si "
return condimentos
def hazlaCombo(self):
combo = Combo()
combo.si_no = " No "
return combo
class Pan:
tipo = None
class Carne:
porciones = None
class Verduras:
si_no = None
class Condimentos:
si_no = None
class Combo:
si_no = None
def cliente():
hocho1Builder = Hocho1Builder()
hocho2Builder = Hocho2Builder()
burguer1Builder = Burguer1Builder()
burguer2Builder = Burguer2Builder()
burguer3Builder = Burguer3Builder()
director = Director()
print ("Hocho1: ")
director.setBuilder(hocho1Builder)
hocho1 = director.getOrden()
hocho1.caracteristicas()
print ("\nHocho2: ")
director.setBuilder(hocho2Builder)
hocho2 = director.getOrden()
hocho2.caracteristicas()
print ("\nBurguer1: ")
director.setBuilder(burguer1Builder)
burguer1 = director.getOrden()
burguer1.caracteristicas()
print ("\nBurguer2: ")
director.setBuilder(burguer2Builder)
burguer2 = director.getOrden()
burguer2.caracteristicas()
print ("\nBurguer3: ")
director.setBuilder(burguer3Builder)
burguer3 = director.getOrden()
burguer3.caracteristicas()
if __name__ == '__main__':
cliente()
|
189249
|
from fastapi import FastAPI
from pydantic import BaseModel, validator
from simsity.service import Service
class Params(BaseModel):
"""Parameters for the query endpoint."""
query: dict
n_neighbors: int = 5
@validator("n_neighbors")
def n_neighbors_must_be_positive(cls, value):
"""Gotta make sure they're positive."""
if value <= 0:
raise ValueError(f"we expect n_neighbors >= 0, we received {value}")
return value
def create_app(service: Service):
"""Start a small webserver with the Service."""
app = FastAPI()
@app.post("/query")
def query(params: Params):
"""The main query endpoint."""
return service.query(**params.query, n_neighbors=params.n_neighbors)
return app
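if __name__ == "__main__":
    # Minimal serving sketch (assumptions: uvicorn is installed and
    # `service` is an already-trained simsity Service; how it is built is
    # project-specific, so the placeholder below must be replaced).
    import uvicorn
    service = ...  # replace with a real, trained Service instance
    app = create_app(service)
    uvicorn.run(app, host="127.0.0.1", port=8000)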
|
189267
|
import tkinter as tk
from unittest import TestCase
class TkTestCase(TestCase):
"""A test case designed for Tkinter widgets and views"""
keysyms = {
'-': 'minus',
' ': 'space',
':': 'colon',
# For more see http://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
}
@classmethod
def setUpClass(cls):
cls.root = tk.Tk()
cls.root.wait_visibility()
@classmethod
def tearDownClass(cls):
cls.root.update()
cls.root.destroy()
def type_in_widget(self, widget, string):
widget.focus_force()
for char in string:
char = self.keysyms.get(char, char)
widget.event_generate('<KeyPress-{}>'.format(char))
widget.event_generate('<KeyRelease-{}>'.format(char))
self.root.update()
def click_on_widget(self, widget, x, y, button=1):
widget.focus_force()
self.root.update()
widget.event_generate("<ButtonPress-{}>".format(button), x=x, y=y)
widget.event_generate("<ButtonRelease-{}>".format(button), x=x, y=y)
self.root.update()
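# A minimal usage sketch (hypothetical test, not from the original file):
# exercising a plain tk.Entry with the helpers above.
class EntryTypingTest(TkTestCase):
    def test_typing(self):
        entry = tk.Entry(self.root)
        entry.pack()
        self.type_in_widget(entry, 'a-b c')
        self.assertEqual(entry.get(), 'a-b c')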
|
189299
|
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        if n < 1: return 0
        # dp[i] is the (i+1)-th ugly number; each index marks the next
        # candidate to be multiplied by 2, 3, and 5 respectively.
        dp = [1] * n
        index2 = index3 = index5 = 0
        for i in range(1, n):
            dp[i] = min(2 * dp[index2], 3 * dp[index3], 5 * dp[index5])
            # advance every pointer that produced dp[i], which skips duplicates
            if dp[i] == 2 * dp[index2]: index2 += 1
            if dp[i] == 3 * dp[index3]: index3 += 1
            if dp[i] == 5 * dp[index5]: index5 += 1
        return dp[n - 1]
|
189342
|
from .constants import EMPTY_RESULT
class UASMatcher(object):
def __init__(self, data):
self._data = data
def _match_robots(self, useragent, result):
try:
res = self._data['robots'][useragent]
result.update(res['details'])
return True
except KeyError:
return False
def _match_browser(self, useragent, result):
for test in self._data['browser']['reg']:
test_rg = test['re'].search(useragent)
if test_rg:
result.update(self._data['browser']['details'][test['details_key']])
if test_rg.lastindex and test_rg.lastindex > 0:
browser_version = test_rg.group(1)
result['ua_name'] = '%s %s' % (result['ua_family'], browser_version)
else:
result['ua_name'] = result['ua_family']
os_key = test['os_details_key']
if os_key:
result.update(self._data['os']['details'][os_key])
return True
return False
def _match_os(self, useragent, result):
for test in self._data['os']['reg']:
if test['re'].search(useragent):
result.update(self._data['os']['details'][test['details_key']])
return True
return False
def _match_device(self, useragent, result):
for test in self._data['device']['reg']:
if test['re'].search(useragent):
result.update(self._data['device']['details'][test['details_key']])
return True
# Try to match using the type
if result['typ'] in ("Other", "Library", "Validator", "Useragent Anonymizer"):
result.update(self._data['device']['details'][1])
elif result['typ'] in ("Mobile Browser", "Wap Browser"):
result.update(self._data['device']['details'][3])
else:
result.update(self._data['device']['details'][2])
return False
def match(self, useragent):
result = dict(EMPTY_RESULT)
self._match_robots(useragent, result) or \
self._match_browser(useragent, result) or \
self._match_os(useragent, result)
self._match_device(useragent, result)
return result
|
189374
|
import numpy as np
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
def get_device():
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_mnist_loaders(root, batch_size):
def _get_dataset(train):
return torchvision.datasets.MNIST(
root=root, train=train,
transform=transforms.ToTensor(),
download=True
)
train, test = _get_dataset(True), _get_dataset(False)
def _get_loader(data, shuffle):
return torch.utils.data.DataLoader(
data, batch_size=batch_size, shuffle=shuffle)
train, test = _get_loader(train, True), _get_loader(test, False)
return train, test
class MNISTModel(nn.Module):
def __init__(self, units=None):
super(MNISTModel, self).__init__()
if units is None:
units = [784, 256, 10]
layers = []
for in_features, out_features in zip(units[:-1], units[1:]):
layers.append(nn.Linear(in_features, out_features))
layers.append(nn.ReLU())
layers.pop()
self.layers = nn.ModuleList(layers)
self.to(get_device())
def forward(self, inputs):
x = inputs.reshape(-1, 28 * 28).to(get_device())
for layer in self.layers:
x = layer(x)
return x
def learn(model, trainloader, nepochs, optimizer,
loss_function=nn.functional.cross_entropy,
metrics=None, logperiod=10):
metrics = metrics or []
device = get_device()
datasize = len(trainloader)
for epoch in range(nepochs):
iters = []
losses = []
        metrics_values = [[] for _ in metrics]
for i, (inputs, labels) in enumerate(trainloader):
outputs = model(inputs)
labels = labels.to(device)
loss = loss_function(outputs, labels)
            if i % logperiod == 0:
                iters.append(datasize * epoch + i)
                losses.append(loss.item())
                for j, metric in enumerate(metrics):
                    metrics_values[j].append(metric(outputs, labels))
optimizer.zero_grad()
loss.backward()
optimizer.step()
yield iters, losses, metrics_values
def accuracy(outputs, labels):
_, predictions = torch.max(outputs, -1)
if isinstance(labels, torch.FloatTensor):
_, labels = torch.max(labels, -1)
return (labels == predictions).type(torch.float32).mean()
def nparams(model, nnz=False):
params = (p for p in model.parameters() if p.requires_grad)
return sum([np.prod(p.size() if not nnz else (p != 0).sum().item())
for p in params])
def update_line(line, newxs, newys):
xs, ys = line.get_data()
xs.extend(newxs)
ys.extend(newys)
line.set_data(xs, ys)
line.axes.relim()
line.axes.autoscale_view(True)
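if __name__ == "__main__":
    # Training-loop sketch (assumptions: MNIST is downloaded to "./data";
    # the hyperparameters are illustrative, not tuned).
    trainloader, testloader = get_mnist_loaders("./data", batch_size=64)
    model = MNISTModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for iters, losses, metric_values in learn(
            model, trainloader, nepochs=1, optimizer=optimizer,
            metrics=[accuracy]):
        print("epoch done: last loss %.4f, last accuracy %.4f"
              % (losses[-1], metric_values[0][-1]))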
|
189402
|
from experiments.localization.aachen import AachenLocalizer
class LocalizersFactory(object):
def __init__(self, cfg):
self.cfg = cfg
def get_localizer(self):
localizer = None
loc_name = self.cfg.task.task_params.dataset
if loc_name == 'aachen_v11':
localizer = AachenLocalizer(self.cfg)
return localizer
|
189407
|
from enum import IntEnum
__version__ = "1.4"
class formats(IntEnum):
i, b, blockquote, searchresult, h1, h2, h, pre, code, \
divpadding, divborder = (1 << i for i in range(11))
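if __name__ == "__main__":
    # Demo: each member is a single-bit flag, so values can be combined
    # with | and tested with & (the numbers follow from the shifts above).
    style = formats.b | formats.pre
    assert style == 2 | 128 and style & formats.b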
|
189468
|
import cfile as C
import os
import io
import autosar
innerIndentDefault=3 #default indentation (number of spaces)
def _genCommentHeader(comment):
code = C.sequence()
code.append(C.line('/*********************************************************************************************************************'))
code.append(C.line('* %s'%comment))
code.append(C.line('*********************************************************************************************************************/'))
return code
class AlarmVariable:
def __init__(self, task):
init_delay=0 #FIXME: allow users to select this at a later time
self.decl = C.variable('os_alarm_cfg_%s'%task.name, 'os_alarm_cfg_t', static=True, const=True, array='OS_NUM_ALARMS_%s'%task.name)
self.body = C.block(innerIndent=innerIndentDefault)
self.body.append(C.linecomment('OS Task, Event ID, Init Delay (ms), Period (ms)'))
for event in task.timer_events:
self.body.append(C.line('{'+'{0: >10},{1: >50},{2: >5},{3: >5}'.format(
'&m_os_task_'+task.name, 'EVENT_MASK_%s_%s'%(task.name,event.name), init_delay, event.inner.period)+'},'))
class OsTaskCfgVar:
def __init__(self, tasks):
self.decl = C.variable('os_task_cfg', 'os_task_elem_t', static=True, const=True, array='OS_NUM_TASKS')
self.body = C.block(innerIndent=innerIndentDefault)
for task in tasks:
fmt='{0: >25},{1: >15},{2: >30},{3: >30}'
if len(task.timer_events)>0:
self.body.append(C.line('{'+fmt.format(
'&m_os_task_'+task.name, task.name, '&os_alarm_cfg_%s[0]'%task.name, 'OS_NUM_ALARMS_%s'%task.name)+'},'))
else:
self.body.append(C.line('{'+fmt.format(
'&m_os_task_'+task.name, task.name, '(os_alarm_cfg_t*) 0', '0')+'},'))
class OsConfigGenerator:
def __init__(self, cfg):
self.cfg = cfg
self.static_vars={}
self.alarm_vars=[]
self.os_task_var=None
def generate(self, dest_dir='.'):
for task in self.cfg.tasks:
task.finalize()
self._create_static_vars()
self.os_task_var = OsTaskCfgVar(self.cfg.tasks)
self._generate_event_cfg_header(dest_dir)
header_file = self._generate_task_cfg_header(dest_dir)
self._generate_task_cfg_source(dest_dir, header_file)
def _create_static_vars(self):
for os_task in self.cfg.tasks:
static_var = C.variable('m_os_task_'+os_task.name, 'os_task_t', static=True)
self.static_vars[static_var.name]=static_var
if len(os_task.timer_events)>0:
self.alarm_vars.append(AlarmVariable(os_task))
def _generate_event_cfg_header(self, dest_dir, file_name='os_event_cfg.h'):
header = C.hfile(os.path.join(dest_dir, file_name))
code = header.code
code.extend(_genCommentHeader('INCLUDES'))
code.append(C.include('PlatForm_Types.h'))
code.append('')
code.extend(_genCommentHeader('PUBLIC CONSTANTS AND DATA TYPES'))
for os_task in self.cfg.tasks:
for event_mask in os_task.event_masks:
code.append(event_mask)
code.append(C.define('OS_NUM_ALARMS_%s'%os_task.name, str(len(os_task.timer_events))))
code.append(C.blank())
with io.open(header.path, 'w', newline='\n') as fp:
for line in header.lines():
fp.write(line+'\n')
def _generate_task_cfg_header(self, dest_dir, file_name = 'os_task_cfg.h'):
header = C.hfile(os.path.join(dest_dir, file_name))
code = header.code
code.extend(_genCommentHeader('INCLUDES'))
code.append(C.include('os_types.h'))
code.append(C.include('os_task.h'))
code.append('')
code.extend(_genCommentHeader('PUBLIC CONSTANTS AND DATA TYPES'))
code.append(C.define('OS_NUM_TASKS',str(len(self.cfg.tasks))+'u'))
code.append('')
code.extend(_genCommentHeader('PUBLIC VARIABLES'))
code.append(C.statement('extern os_cfg_t g_os_cfg'))
code.append('')
code.extend(_genCommentHeader('PUBLIC FUNCTION PROTOTYPES'))
for task in self.cfg.tasks:
code.append(C.statement('OS_TASK_HANDLER(%s, arg)'%task.name))
for function_name in self.cfg.mode_switch_calls:
code.append(C.statement(C.function(function_name, 'void')))
with io.open(header.path, 'w', newline='\n') as fp:
for line in header.lines():
fp.write(line+'\n')
return file_name
def _generate_task_cfg_source(self, dest_dir, header_file, file_name = 'os_task_cfg.c'):
source = C.cfile(os.path.join(dest_dir, file_name))
code = source.code
code.extend(_genCommentHeader('INCLUDES'))
code.append(C.include(header_file))
code.append(C.include('os_event_cfg.h'))
code.append('')
code.extend(_genCommentHeader('PRIVATE VARIABLES'))
for static_var in sorted(self.static_vars.values(), key=lambda x: x.name):
code.append(C.statement(static_var))
code.append('')
for alarm_var in self.alarm_vars:
code.append(C.line(str(alarm_var.decl)+' ='))
code.append(C.statement(alarm_var.body))
code.append(C.line(str(self.os_task_var.decl)+' ='))
code.append(C.statement(self.os_task_var.body))
code.append('')
code.extend(_genCommentHeader('PUBLIC VARIABLES'))
code.append(C.line('os_cfg_t g_os_cfg ='))
body = C.block(innerIndent = innerIndentDefault)
body.append(C.line('&os_task_cfg[0],'))
body.append(C.line('OS_NUM_TASKS,'))
body.append(C.line('0,'))
body.append(C.line('0'))
code.append(C.statement(body))
code.append('')
code.extend(_genCommentHeader('PUBLIC FUNCTIONS'))
for elem in self.cfg.partition.mode_switch_functions.values():
for callback_name in sorted(elem.calls.keys()):
code.extend(self._generate_mode_switch_func(callback_name, elem.calls[callback_name]))
code.append('')
with io.open(source.path, 'w', newline='\n') as fp:
for line in source.lines():
fp.write(line+'\n')
def _generate_mode_switch_func(self, callback_name, events):
code = C.sequence()
generated=set()
code.append(C.function(callback_name, 'void'))
block = C.block(innerIndent = innerIndentDefault)
for event in events:
task = self.cfg.find_os_task_by_runnable(event.runnable)
if task is not None:
if (task.name, event.name) not in generated:
block.append(C.statement(C.fcall('os_task_setEvent', params=['&m_os_task_%s'%task.name, 'EVENT_MASK_%s_%s'%(task.name, event.name)])))
generated.add((task.name, event.name))
code.append(block)
return code
|
189514
|
from .model import BaseModel
from .tiramisu import DenseUNet, DenseBlock, DenseLayer
from .tiramisu import (
ModuleName,
DEFAULT_MODULE_BANK,
UPSAMPLE2D_NEAREST,
UPSAMPLE2D_PIXELSHUFFLE,
UPSAMPLE2D_TRANPOSE,
)
__all__ = [
"BaseModel",
"DenseUNet",
"DenseBlock",
"DenseLayer",
"ModuleName",
"DEFAULT_MODULE_BANK",
"UPSAMPLE2D_NEAREST",
"UPSAMPLE2D_PIXELSHUFFLE",
"UPSAMPLE2D_TRANPOSE",
]
|
189538
|
import os
from os.path import isfile, isdir, pardir
from pathlib import Path
from typing import List, Tuple
from node_launcher.constants import NODE_LAUNCHER_RELEASE
from node_launcher.logging import log
from PySide2.QtCore import QFileSystemWatcher, Signal, QObject
from node_launcher.node_set.lib.configuration_property import ConfigurationProperty
class ConfigurationFile(QObject):
file_watcher: QFileSystemWatcher
def __init__(self, path: str, assign_op: str = '='):
super().__init__()
self.path = path
self.directory = Path(path).parent
self.assign_op = assign_op
def __repr__(self):
return f'ConfigurationFile: {self.path}'
def parse_line(self, line: str) -> Tuple[str, str]:
if line.startswith('#'):
return '', ''
key_value = line.split(self.assign_op)
key = key_value[0].strip()
if not key:
return '', ''
value = key_value[1:]
value = self.assign_op.join(value).strip()
value = value.replace('"', '')
if not value:
return '', ''
return key, value
def read(self) -> List[Tuple[str, str, str]]:
parent = os.path.abspath(os.path.join(self.path, pardir))
if not isdir(parent):
log.info(
'Creating directory',
path=parent
)
os.makedirs(parent)
if not isfile(self.path):
log.info(
'Creating file',
path=self.path
)
lines = [
'# Auto-Generated Configuration File' + os.linesep + os.linesep,
f'# Node Launcher version {NODE_LAUNCHER_RELEASE}' + os.linesep + os.linesep
]
with open(self.path, 'w') as f:
f.writelines(lines)
with open(self.path, 'r') as f:
lines = f.readlines()
parsed_lines = []
index = 0
for line in lines:
key, value = self.parse_line(line)
if key:
parsed_lines.append((str(index), key, value))
index += 1
return parsed_lines
def save(self, configurations: List[ConfigurationProperty]):
with open(self.path, 'w') as f:
lines = [f'{c.name}{self.assign_op}{c.value}{os.linesep}' for c in configurations]
f.writelines(lines)
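if __name__ == "__main__":
    # Quick parser sketch (assumption: run standalone; a bare QObject
    # needs no Qt event loop just to exercise parse_line).
    cfg = ConfigurationFile('demo.conf')
    print(cfg.parse_line('alias=my "node" name'))  # -> ('alias', 'my node name')
    print(cfg.parse_line('# a comment'))           # -> ('', '')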
|
189562
|
from pathlib import Path
def db_synchronous_on(setting: str, db_path: Path) -> str:
if setting == "on":
return "NORMAL"
if setting == "off":
return "OFF"
if setting == "full":
return "FULL"
# for now, default to synchronous=FULL mode. This can be made more
# sophisticated in the future. There are still material performance
# improvements to be had in cases where the risks are low.
# e.g.
# type = GetDriveTypeW(db_path)
# if type == DRIVE_FIXED or type == DRIVE_RAMDISK:
# return "OFF"
return "FULL"
|
189622
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.advanced_construction import SurfaceConvectionAlgorithmInsideUserCurve
log = logging.getLogger(__name__)
class TestSurfaceConvectionAlgorithmInsideUserCurve(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        os.close(self.fd)
        os.remove(self.path)
def test_create_surfaceconvectionalgorithminsideusercurve(self):
pyidf.validation_level = ValidationLevel.error
obj = SurfaceConvectionAlgorithmInsideUserCurve()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_reference_temperature_for_convection_heat_transfer = "MeanAirTemperature"
obj.reference_temperature_for_convection_heat_transfer = var_reference_temperature_for_convection_heat_transfer
# object-list
var_hc_function_of_temperature_difference_curve_name = "object-list|Hc Function of Temperature Difference Curve Name"
obj.hc_function_of_temperature_difference_curve_name = var_hc_function_of_temperature_difference_curve_name
# object-list
var_hc_function_of_temperature_difference_divided_by_height_curve_name = "object-list|Hc Function of Temperature Difference Divided by Height Curve Name"
obj.hc_function_of_temperature_difference_divided_by_height_curve_name = var_hc_function_of_temperature_difference_divided_by_height_curve_name
# object-list
var_hc_function_of_air_change_rate_curve_name = "object-list|Hc Function of Air Change Rate Curve Name"
obj.hc_function_of_air_change_rate_curve_name = var_hc_function_of_air_change_rate_curve_name
# object-list
var_hc_function_of_air_system_volume_flow_rate_divided_by_zone_perimeter_length_curve_name = "object-list|Hc Function of Air System Volume Flow Rate Divided by Zone Perimeter Length Curve Name"
obj.hc_function_of_air_system_volume_flow_rate_divided_by_zone_perimeter_length_curve_name = var_hc_function_of_air_system_volume_flow_rate_divided_by_zone_perimeter_length_curve_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].name, var_name)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].reference_temperature_for_convection_heat_transfer, var_reference_temperature_for_convection_heat_transfer)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].hc_function_of_temperature_difference_curve_name, var_hc_function_of_temperature_difference_curve_name)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].hc_function_of_temperature_difference_divided_by_height_curve_name, var_hc_function_of_temperature_difference_divided_by_height_curve_name)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].hc_function_of_air_change_rate_curve_name, var_hc_function_of_air_change_rate_curve_name)
self.assertEqual(idf2.surfaceconvectionalgorithminsideusercurves[0].hc_function_of_air_system_volume_flow_rate_divided_by_zone_perimeter_length_curve_name, var_hc_function_of_air_system_volume_flow_rate_divided_by_zone_perimeter_length_curve_name)
|
189655
|
from chef.base import ChefObject
class Environment(ChefObject):
"""A Chef environment object.
.. versionadded:: 0.2
"""
url = '/environments'
api_version = '0.10'
attributes = {
'description': str,
'cookbook_versions': dict,
'default_attributes': dict,
'override_attributes': dict,
}
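# Usage sketch (assumptions: a default ChefAPI is configured, e.g. via
# chef.autoconfigure(); the names below are illustrative):
#
#   env = Environment('production')
#   env.cookbook_versions['apache2'] = '= 1.0.0'
#   env.save()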
|
189662
|
from Cut import cut
import pytest
@pytest.mark.parametrize('value,delimiter,fields,expected', [
('A-B-C-D-E', '-', '1,5', 'A-E'),
('a,ב,c', ',', '2,3', 'ב,c'),
])
def test_cut(value, delimiter, fields, expected):
"""
Given:
        Case 1: A-B-C-D-E split on - taking fields 1 and 5
        Case 2: a,ב,c split on , taking fields 2 and 3
When:
Running Cut
Then:
Case 1: Ensure A-E is returned
Case 2: Ensure ב,c is returned
"""
assert cut(value, fields, delimiter) == expected
|
189679
|
import random
from datetime import datetime
import factory
import pytz
from factory.alchemy import SQLAlchemyModelFactory
from factory.faker import Faker
from factory.fuzzy import FuzzyChoice, FuzzyDateTime, FuzzyText
from app.enums import userenums
from app.models import rolemodels, shopmodels, usermodels
from app.service.passwordservice import get_password_hash
from . import common
class BaseFactory(SQLAlchemyModelFactory):
"""Base Factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = common.ScopedSession
sqlalchemy_session_persistence = "commit"
class TimeStampFactory(BaseFactory):
"""Timestamp Base Factory."""
created_at = FuzzyDateTime(datetime(2020, 1, 1, tzinfo=pytz.UTC))
updated_at = FuzzyDateTime(datetime(2020, 1, 1, tzinfo=pytz.UTC))
class UserFactory(TimeStampFactory, BaseFactory):
"""User Factory.
Has a default password and active status.
"""
class Meta:
model = usermodels.User
email = Faker("email")
hashed_password = get_password_hash(common.USER_PASSWORD)
first_name = Faker("first_name")
last_name = Faker("last_name")
status = userenums.UserStatus.active
# Relationships
@factory.post_generation
def roles(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of roles were passed in, use them
for role in extracted:
self.roles.append(role)
class RoleFactory(TimeStampFactory, BaseFactory):
"""Role factory."""
class Meta:
model = rolemodels.Role
name = FuzzyText()
description = FuzzyText()
class ShopFactory(TimeStampFactory, BaseFactory):
"""Shop factory."""
class Meta:
model = shopmodels.Shop
name = FuzzyText()
url = FuzzyText()
query_url = FuzzyText()
render_javascript = Faker("boolean")
listing_page_selector = {"data": "here"}
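# Usage sketch (assumption: common.ScopedSession is bound to a test
# database before any factory is invoked):
#
#   user = UserFactory(roles=[RoleFactory()])  # persisted user with one role
#   shop = ShopFactory()                       # persisted shop with fuzzy fields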
|
189680
|
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers import modes, Cipher
from cryptography.hazmat.backends import default_backend
def xor(x, y):
# assert len(x) == len(y)
a = int.from_bytes(x, "big")
b = int.from_bytes(y, "big")
return (a ^ b).to_bytes(len(x), "big")
def AES_DECRYPT(key):
cipher = Cipher(AES(key), modes.ECB(), backend=default_backend())
return cipher.decryptor().update
def AES_ENCRYPT(key):
cipher = Cipher(AES(key), modes.ECB(), backend=default_backend())
return cipher.encryptor().update
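if __name__ == "__main__":
    # Round-trip sketch for the helpers above (demo values only; raw ECB
    # is not a secure construction by itself).
    key = bytes(16)               # all-zero 128-bit key, for the demo
    block = b"sixteen byte msg"   # exactly one AES block
    ct = AES_ENCRYPT(key)(block)
    assert AES_DECRYPT(key)(ct) == block
    assert xor(block, bytes(16)) == block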
|
189687
|
from rest_framework import authentication
from rest_framework import exceptions
from dataloaderinterface.models import SiteRegistration
class UUIDAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
if request.META['REQUEST_METHOD'] != 'POST':
return None
if 'HTTP_TOKEN' not in request.META:
raise exceptions.ParseError("Registration Token not present in the request.")
elif 'sampling_feature' not in request.data:
raise exceptions.ParseError("Sampling feature UUID not present in the request.")
# Get auth_token(uuid) from header,
# get registration object with auth_token,
# get the user from that registration,
# verify sampling_feature uuid is registered by this user,
# be happy.
token = request.META['HTTP_TOKEN']
registration = SiteRegistration.objects.filter(registration_token=token).first()
if not registration:
raise exceptions.PermissionDenied('Invalid Security Token')
# request needs to have the sampling feature uuid of the registration -
if str(registration.sampling_feature.sampling_feature_uuid) != request.data['sampling_feature']:
raise exceptions.AuthenticationFailed('Site Identifier is not associated with this Token')
return None
|
189691
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.SolidMechanicsApplication
import KratosMultiphysics.PfemApplication
import MainSolid
class PfemSolution(MainSolid.Solution):
def __init__(self, Model, file_parameters = "ProjectParameters.json", file_name = None):
super(PfemSolution, self).__init__(Model, file_parameters, file_name)
#### Main internal methods ####
def _get_processes_parameters(self):
# add fluid processes
add_fluid_process = True
if self.ProjectParameters.Has("problem_data"):
if self.ProjectParameters["problem_data"].Has("domain_type"):
if(self.ProjectParameters["problem_data"]["domain_type"].GetString() != "Solid"):
add_fluid_process = False
if add_fluid_process is True:
return self._add_fluid_processes()
else:
return MainSolid.Solution._get_processes_parameters(self)
def _add_fluid_processes(self):
# get processes parameters from base class
processes_parameters = MainSolid.Solution._get_processes_parameters(self)
# add process to manage assignation of material properties to particles
# modify processes_parameters to introduce this process in the problem_process_list
# particles concept : assign initial material percent and properties vector pointer to nodes
if(processes_parameters.Has("problem_process_list")):
problem_processes = processes_parameters["problem_process_list"]
#print(" PROBLEM_PROCESSES ", processes_parameters["problem_process_list"].PrettyPrintJsonString())
extended_problem_processes = self._set_particle_properties_process(problem_processes)
processes_parameters.AddValue("problem_process_list", extended_problem_processes)
#extended_problem_processes = self._set_volume_recovery_process(problem_processes)
#processes_parameters.AddValue("problem_process_list", extended_problem_processes)
#print(" EXTENDED_PROBLEM_PROCESSES ", processes_parameters["problem_process_list"].PrettyPrintJsonString())
if(processes_parameters.Has("constraints_process_list")):
constraints_processes = processes_parameters["constraints_process_list"]
if(self.echo_level>1):
print(" CONSTRAINTS_PROCESSES ", processes_parameters["constraints_process_list"].PrettyPrintJsonString())
extended_constraints_processes = self._set_isolated_nodes_management_process(constraints_processes)
processes_parameters.AddValue("constraints_process_list", extended_constraints_processes)
extended_constraints_processes = self._set_selected_elements_management_process(constraints_processes)
processes_parameters.AddValue("constraints_process_list", extended_constraints_processes)
if(self.echo_level>1):
print(" EXTENDED_CONSTRAINTS_PROCESSES ", processes_parameters["constraints_process_list"].PrettyPrintJsonString())
if(processes_parameters.Has("loads_process_list")):
loads_processes = processes_parameters["loads_process_list"]
if(self.echo_level>1):
print(" LOADS_PROCESSES ", processes_parameters["loads_process_list"].PrettyPrintJsonString())
extended_loads_processes = self._set_volume_acceleration_process(loads_processes)
processes_parameters.AddValue("loads_process_list", extended_loads_processes)
if(self.echo_level>1):
print(" EXTENDED_LOADS_PROCESSES ", processes_parameters["loads_process_list"].PrettyPrintJsonString())
return processes_parameters
def _set_isolated_nodes_management_process(self, constraints_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "manage_isolated_nodes_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
constraints_processes.Append(default_settings)
return constraints_processes
def _set_selected_elements_management_process(self, constraints_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "manage_selected_elements_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
constraints_processes.Append(default_settings)
return constraints_processes
def _set_volume_acceleration_process(self, loads_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "assign_modulus_and_direction_to_nodes_process",
"kratos_module" : "KratosMultiphysics.SolidMechanicsApplication",
"Parameters" : {
"variable_name" : "VOLUME_ACCELERATION",
"modulus" : 9.81,
"direction" : [0.0,-1.0,0.0]
}
}
""")
if(self.ProjectParameters.Has("problem_data")):
if(self.ProjectParameters["problem_data"].Has("gravity_vector")):
import math
#get normalized direction
direction = []
scalar_prod = 0
for i in range(self.ProjectParameters["problem_data"]["gravity_vector"].size()):
direction.append( self.ProjectParameters["problem_data"]["gravity_vector"][i].GetDouble() )
scalar_prod = scalar_prod + direction[i]*direction[i]
norm = math.sqrt(scalar_prod)
self.value = []
if( norm != 0.0 ):
for j in direction:
self.value.append( j/norm )
else:
for j in direction:
self.value.append(0.0)
if(default_settings["Parameters"].Has("modulus")):
default_settings["Parameters"]["modulus"].SetDouble(norm)
if(default_settings["Parameters"].Has("direction")):
counter = 0
for i in self.value:
default_settings["Parameters"]["direction"][counter].SetDouble(i)
counter+=1
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
loads_processes.Append(default_settings)
return loads_processes
def _set_particle_properties_process(self, problem_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "assign_properties_to_nodes_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {
"fluid_mixture" : true,
"solid_mixture" : false
}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
problem_processes.Append(default_settings)
return problem_processes
def _set_volume_recovery_process(self, problem_processes):
default_settings = KratosMultiphysics.Parameters("""
{
"python_module" : "volume_recovery_process",
"kratos_module" : "KratosMultiphysics.PfemApplication",
"Parameters" : {
}
}
""")
model_part_name = self.model.GetMainModelPart().Name
default_settings["Parameters"].AddEmptyValue("model_part_name").SetString(model_part_name)
problem_processes.Append(default_settings)
return problem_processes
    @classmethod
    def _class_prefix(cls):
header = "::[--PFEM Simulation--]::"
return header
if __name__ == "__main__":
PfemSolution().Run()
|
189698
|
from icolos.core.containers.generic import GenericData
import unittest
import os
from icolos.core.containers.gmx_state import GromacsState
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.core.workflow_steps.gromacs.editconf import StepGMXEditConf
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, export_unit_test_env_vars
from icolos.utils.general.files_paths import attach_root_path
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum
class Test_Editconf(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/gromacs")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
export_unit_test_env_vars()
def setUp(self):
with open(
attach_root_path(PATHS_EXAMPLEDATA.GROMACS_HOLO_STRUCTURE_GRO), "r"
) as f:
self.struct = f.readlines()
self.topol = GromacsState()
self.topol.structures = [GenericData(_SGE.STD_STRUCTURE, file_data=self.struct)]
def test_editconf_wf_input(self):
"""
Takes input from a workflow object
"""
step_conf = {
_SBE.STEPID: "test_editconf",
_SBE.STEP_TYPE: "editconf",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2021-fosscuda-2019a-PLUMED-2.7.1-Python-3.7.2"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-d": "1.0",
"-bt": "dodecahedron",
},
}
},
}
step_editconf = StepGMXEditConf(**step_conf)
step_editconf.data.gmx_state = self.topol
step_editconf.execute()
out_path = os.path.join(self._test_dir, "confout.gro")
step_editconf.get_topol().write_structure(self._test_dir)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 2102964)
def test_editconf_external_input(self):
step_conf = {
_SBE.STEPID: "test_editconf",
_SBE.STEP_TYPE: "editconf",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2021-fosscuda-2019a-PLUMED-2.7.1-Python-3.7.2"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-d": "1.0",
"-bt": "dodecahedron",
},
}
},
}
step_editconf = StepGMXEditConf(**step_conf)
step_editconf.data.generic.add_file(
GenericData(file_name=_SGE.STD_STRUCTURE, file_data=self.struct)
)
step_editconf.execute()
out_path = os.path.join(self._test_dir, "confout.gro")
step_editconf.get_topol().write_structure(self._test_dir)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 2102964)
|
189718
|
import pexpect
def show_version(device, prompt, ip, username, password):
device_prompt = prompt
child = pexpect.spawn('telnet ' + ip)
child.expect('Username:')
child.sendline(username)
child.expect('Password:')
child.sendline(password)
child.expect(device_prompt)
child.sendline('show version | i V')
child.expect(device_prompt)
result = child.before
child.sendline('exit')
return device, result
if __name__ == '__main__':
username = 'cisco'
password = '<PASSWORD>'
print(show_version('iosv-1', 'iosv-1#', '172.16.1.225', username, password))
print(show_version('iosv-2', 'iosv-2#', '172.16.1.226', username, password))
|
189826
|
import os
import pandas as pd
from datetime import datetime
from geopy import distance
from libcity.data.dataset.trajectory_encoder.abstract_trajectory_encoder import AbstractTrajectoryEncoder
from libcity.utils import parse_time, parse_coordinate
parameter_list = ['dataset', 'min_session_len', 'min_sessions', 'traj_encoder', 'cut_method',
'window_size', 'min_checkins', 'neg_samples']
class AtstlstmEncoder(AbstractTrajectoryEncoder):
    # FIXME: there is a problem here; this needs to be reworked
def __init__(self, config):
super().__init__(config)
self.uid = 0
        self.location2id = {}  # some loc ids in the raw dataset are never used, so re-encode the used ones here
self.loc_id = 0
        self.tim_max = 0  # track the largest time encoding seen
        if self.config['cut_method'] == 'time_interval':
            # for trajectories cut by time window, the maximum time encoding is known up front
            self.tim_max = self.config['window_size'] - 1
self.feature_dict = {'current_loc': 'int', 'loc_neg': 'int',
'current_dis': 'float', 'dis_neg': 'float',
'current_tim': 'float', 'tim_neg': 'float', 'uid': 'int',
'target_loc': 'int', 'target_dis': 'float', 'target_tim': 'float'
}
parameters_str = ''
for key in parameter_list:
if key in self.config:
parameters_str += '_' + str(self.config[key])
self.cache_file_name = os.path.join(
'./libcity/cache/dataset_cache/', 'trajectory_{}.json'.format(parameters_str))
self.data_path = './raw_data/{}/'.format(self.config['dataset'])
self.geo = pd.read_csv(os.path.join(self.data_path, '{}.geo'.format(self.config['dataset'])))
def encode(self, uid, trajectories, negative_sample):
"""Encoded Method refered to the open source code
https://github.com/drhuangliwei/An-Attention-based-Spatiotemporal-LSTM-Network-for-Next-POI-Recommendation
row index is:
0 1 2 3 4
dyna_id,type,time,entity_id,location
"""
        # re-encode the uid directly
uid = self.uid
self.uid += 1
encoded_trajectories = []
for i, traj in enumerate(trajectories):
current_loc = [] # the checkin poi list
loc_distance = [] # the distance between two checkin
tim_interval = [] # the time interval between two checkin
pre_time = None
pre_lat = None
pre_lon = None
for index, row in enumerate(traj):
loc = row[4]
now_time = parse_time(row[2])
lon, lat = parse_coordinate(self.geo.loc[self.geo['geo_id'] == loc].iloc[0]['coordinates'])
if index == 0:
# for the first checkin, distance and time_interval set to a fixed value
if loc not in self.location2id:
self.location2id[loc] = self.loc_id
self.loc_id += 1
current_loc.append(self.location2id[loc])
tim_interval.append(100) # choose the same fixed value as the reference code
loc_distance.append(1)
else:
if loc not in self.location2id:
self.location2id[loc] = self.loc_id
self.loc_id += 1
current_loc.append(self.location2id[loc])
# the unit of time is second
tim_interval.append(datetime.timestamp(now_time) - datetime.timestamp(pre_time))
loc_distance.append(distance.distance((pre_lat, pre_lon), (lat, lon)).kilometers)
pre_time = now_time
pre_lat = lat
pre_lon = lon
# generate negative samples' current_loc loc_distance and tim_interval
neg_loc = []
neg_distance = []
neg_time = []
# the final checkin will be target (positive sample), so use the second last to cal neg
row = traj[-2]
loc = row[4]
pre_lon, pre_lat = parse_coordinate(self.geo.loc[self.geo['geo_id'] == loc].iloc[0]['coordinates'])
for neg in negative_sample[i]:
neg_lon, neg_lat = parse_coordinate(self.geo.loc[self.geo['geo_id'] == neg].iloc[0]['coordinates'])
if neg not in self.location2id:
self.location2id[neg] = self.loc_id
self.loc_id += 1
neg_loc.append(self.location2id[neg])
neg_time.append(tim_interval[-1]) # use target's time interval as the neg sample's
neg_distance.append(distance.distance((neg_lat, neg_lon), (pre_lat, pre_lon)).kilometers)
trace = []
target_loc = current_loc[-1]
target_dis = loc_distance[-1]
target_tim = tim_interval[-1]
trace.append(current_loc[:-1])
trace.append(neg_loc)
trace.append(loc_distance[:-1])
trace.append(neg_distance)
trace.append(tim_interval[:-1])
trace.append(neg_time)
trace.append(uid)
trace.append(target_loc)
trace.append(target_dis)
trace.append(target_tim)
encoded_trajectories.append(trace)
return encoded_trajectories
def gen_data_feature(self):
loc_pad = self.loc_id
self.pad_item = {
'current_loc': loc_pad,
'current_dis': 0.0,
'current_tim': 0.0
}
self.data_feature = {
'loc_size': self.loc_id + 1,
'uid_size': self.uid,
'loc_pad': loc_pad
}
|
189833
|
import discord
from discord.ext import commands
import json
class GuildEvents(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("GuildEvents are ready!")
@commands.Cog.listener()
async def on_guild_join(self, guild):
# send the log in support server
with open("./config.json", "r") as f:
config = json.load(f)
serverId = int(config["IDs"]["serverLogId"])
channelId = int(config["IDs"]["channelLogId"])
embed = discord.Embed(title = "I joined a new server!", color = discord.Color.red())
embed.add_field(name = "Owner:", value = f"`{guild.owner}`")
embed.add_field(name = "New Servercount:", value = f"`{len(self.client.guilds)}`")
embed.add_field(name = "New Usercount:", value = f"`{len(self.client.users)}`")
embed.add_field(name = "Name:", value = f"{str(guild.name)}")
sguild = self.client.get_guild(serverId)
for channel in sguild.channels:
if channel.id == channelId:
await channel.send(embed = embed)
break
# now send a message to the people, for the people.
em = discord.Embed(title = "<:VERIFIED_DEVELOPER:761297621502656512> Thanks for adding me to your server!",color = discord.Color.red(), description = f"""I am TheImperialGod, Lord of the empire who will bring peace to your server: {guild.name}.\n
I see that you have {guild.member_count} members, how about we try to double that in the next week!\n\nSO to get started with my power, please use `imp help`. My prefix is `imp` and `imp help` shows you all the commands.\n\n\nHere is what I can do:```diff\n+ Make your members contact you via tickets\n+ Create roles for you\n+ Can set an autorole\n+ Can do maths for you!\n+ Host giveaways\n+ Make people rich with server economy\n- Can show you information about users and the server!\n- Make you have fun with my utilities\n- Show you images and memes from reddits\n``` """)
em.set_author(name = "TheImperialGod")
em.set_footer(text = "Thanks for inviting me!", icon_url = guild.icon_url)
if guild.system_channel is not None:
await guild.system_channel.send(embed = em)
        else:
            for channel in guild.text_channels:
                try:
                    return await channel.send(embed = em)
                except discord.Forbidden:
                    continue
@commands.Cog.listener()
async def on_guild_remove(self, guild):
with open("./config.json", "r") as f:
config = json.load(f)
serverId = int(config["IDs"]["serverLogId"])
channelId = int(config["IDs"]["channelLogId"])
embed = discord.Embed(title = "I left a new server!", color = discord.Color.red())
embed.add_field(name = "Owner:", value = f"`{guild.owner}`")
embed.add_field(name = "New Servercount:", value = f"`{len(self.client.guilds)}`")
embed.add_field(name = "New Usercount:", value = f"`{len(self.client.users)}`")
embed.add_field(name = "Name:", value = f"{str(guild.name)}")
sguild = self.client.get_guild(serverId)
for channel in sguild.channels:
if channel.id == channelId:
await channel.send(embed = embed)
break
# dm the owner about it
try:
await guild.owner.send("""I hope I was of service to you in your server! I would love to hear your feedback on my top.gg page: https://top.gg/bot/768695035092271124/ """)
except:
pass
def setup(client):
client.add_cog(GuildEvents(client))
|
189854
|
from lms.lmstests.sandbox.config import celery as celery_config
from lms.lmstests.sandbox.linters import tasks as flake8_tasks
celery_app = celery_config.app
__all__ = ('flake8_tasks', 'celery_app')
|
189856
|
import PCA as pc
import numpy as np
from sklearn.decomposition import PCA
if __name__ == "__main__":
data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
pca = pc.PCA(n_components=1)
pca.fit(data,rowvar=False)
res = pca.transform(data,rowvar=False)
ratio = pca.variance_ratio(only=True)
print("各特征的权重为: ratio = ",ratio)
print("使用本库进行计算得到的PCA降维结果为: res = ", res)
pca1 = PCA(n_components=1)
res = pca1.fit_transform(data)
ratio = pca1.explained_variance_ratio_
print("各特征的权重为: ratio = ",ratio)
print("使用sklearn.decomposition.PCA 验证的结果为: res = ", res)
|
189861
|
import unittest
import os
import binascii
import blocktrail
class ApiClientTestCase(unittest.TestCase):
def setUp(self):
self.cleanup_data = {}
def tearDown(self):
#cleanup any records that were created after each test
client = self.setup_api_client(debug=False)
#webhooks
if 'webhooks' in self.cleanup_data:
count = 0
for webhook in self.cleanup_data['webhooks']:
try:
count += int(client.delete_webhook(webhook))
except Exception:
pass
def setup_api_client(self, api_key=None, api_secret=None, debug=True):
if api_key is None:
api_key = os.environ.get('BLOCKTRAIL_SDK_APIKEY', 'EXAMPLE_BLOCKTRAIL_SDK_PYTHON_APIKEY')
if api_secret is None:
api_secret = os.environ.get('BLOCKTRAIL_SDK_APISECRET', 'EXAMPLE_BLOCKTRAIL_SDK_PYTHON_APISECRET')
return blocktrail.APIClient(api_key, api_secret, debug=debug)
def test_coin_value(self):
assert 1 == blocktrail.to_satoshi(0.00000001)
assert 1.0 == blocktrail.to_btc(100000000)  # 1 BTC == 100,000,000 satoshi
assert 123456789 == blocktrail.to_satoshi(1.23456789)
assert 1.23456789 == blocktrail.to_btc(123456789)
def test_auth(self):
client = self.setup_api_client(api_secret="FAILSECRET", debug=False)
assert client.address("1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp")
with self.assertRaises(blocktrail.exceptions.InvalidCredentials):
client.verify_address("16dwJmR4mX5RguGrocMfN9Q9FR2kZcLw2z", "HPMOHRgPSMKdXrU6AqQs/i9S7alOakkHsJiqLGmInt05C<KEY>
client = self.setup_api_client(api_key="FAILKEY", debug=False)
with self.assertRaises(blocktrail.exceptions.InvalidCredentials):
client.verify_address("<KEY>", "<KEY>
client = self.setup_api_client()
try:
client.verify_address("<KEY>", "<KEY>
except blocktrail.exceptions.InvalidCredentials:
assert False, "InvalidCredentials raised"
except Exception:
assert True, "Other Exception is fine" # we're not testing verify_address, we're testing HMAC!
def test_address(self):
client = self.setup_api_client()
# address info
address = client.address("1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp")
assert address and 'address' in address
assert address['address'] == "<KEY>"
# address transactions
address_txs = client.address_transactions("1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp", limit=23)
assert address_txs and 'total' in address_txs and 'data' in address_txs
assert len(address_txs['data']) == 23
# address unconfirmed transactions
address_txs = client.address_unconfirmed_transactions("1dice8EMZ<KEY>", limit=23)
assert address_txs and 'total' in address_txs and 'data' in address_txs
# assert address_txs['total'] >= len(address_txs['data'])
# address unspent outputs
address_utxo = client.address_unspent_outputs("1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp", limit=23)
assert address_utxo and 'total' in address_utxo and 'data' in address_utxo
assert address_utxo['total'] >= len(address_utxo['data'])
verify = client.verify_address("16dwJmR4mX5RguGrocMfN9Q9FR2kZcLw2z", "<KEY>
assert verify and verify['result']
def test_block(self):
client = self.setup_api_client()
# block info
block = client.block("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf")
assert block and 'hash' in block
assert block['hash'] == "000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf"
# block info by height
block = client.block(200000)
assert block and 'hash' in block
assert block['hash'] == "000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf"
# block transactions
block_txs = client.block_transactions("000000000000034a7dedef4a161fa058a2d67a173a90155f3a2fe6fc132e0ebf", limit=23)
assert block_txs and 'total' in block_txs and 'data' in block_txs
assert len(block_txs['data']) == 23
# all blocks
blocks = client.all_blocks(page=2, limit=23)
assert blocks and 'total' in blocks and 'data' in blocks
assert len(blocks['data']) == 23
assert 'hash' in blocks['data'][0]
assert 'hash' in blocks['data'][1]
assert blocks['data'][0]['hash'] == '000000000cd339982e556dfffa9de94744a4135c53eeef15b7bcc9bdeb9c2182'
assert blocks['data'][1]['hash'] == '00000000fc051fbbce89a487e811a5d4319d209785ea4f4b27fc83770d1e415f'
# latest block
block = client.block_latest()
assert block and 'hash' in block
def test_transaction(self):
client = self.setup_api_client()
# coinbase TX
tx = client.transaction("0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098")
assert tx and 'hash' in tx and 'confirmations' in tx
assert tx['hash'] == "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
assert tx['enough_fee'] is None
# random TX 1
tx = client.transaction("c791b82ed9af681b73eadb7a05b67294c1c3003e52d01e03775bfb79d4ac58d1")
assert tx and 'hash' in tx and 'confirmations' in tx
assert tx['hash'] == "c791b82ed9af681b73eadb7a05b67294c1c3003e52d01e03775bfb79d4ac58d1"
assert tx['enough_fee'] is True
assert tx['high_priority'] is False
def test_webhooks(self):
client = self.setup_api_client()
# keep track of all webhooks created for cleanup
self.cleanup_data['webhooks'] = []
# create a webhook with a custom identifier (randomly generated)
bytes = os.urandom(10)
identifier_1 = binascii.hexlify(bytes).decode("utf-8")
result = client.setup_webhook("https://www.blocktrail.com/webhook-test", identifier_1)
assert result and 'url' in result and 'identifier' in result
assert result['url'] == "https://www.blocktrail.com/webhook-test"
assert result['identifier'] == identifier_1
webhookID1 = result['identifier']
self.cleanup_data['webhooks'].append(webhookID1)
# create a webhook without a custom identifier
result = client.setup_webhook("https://www.blocktrail.com/webhook-test")
assert result and 'url' in result and 'identifier' in result
assert result['url'] == "https://www.blocktrail.com/webhook-test"
assert len(result['identifier']) > 0
webhookID2 = result['identifier']
self.cleanup_data['webhooks'].append(webhookID2)
# get all webhooks
result = client.all_webhooks()
assert result and 'data' in result and 'total' in result
assert result['total'] >= 2
assert len(result['data']) >= 2
assert 'url' in result['data'][0]
assert 'url' in result['data'][1]
# get a single webhook
result = client.webhook(webhookID1)
assert result and 'url' in result and 'identifier' in result
assert result['url'] == "https://www.blocktrail.com/webhook-test"
assert result['identifier'] == webhookID1
# delete a webhook
assert client.delete_webhook(webhookID1)
# update a webhook
bytes = os.urandom(10)
new_identifier = binascii.hexlify(bytes).decode("utf-8")
result = client.update_webhook(webhookID2, "https://www.blocktrail.com/new-webhook-url", new_identifier)
assert result and 'url' in result and 'identifier' in result
assert result['url'] == "https://www.blocktrail.com/new-webhook-url"
assert result['identifier'] == new_identifier
webhookID2 = result['identifier']
self.cleanup_data['webhooks'].append(webhookID2)
# add webhook event subscription (address-transactions)
result = client.subscribe_address_transactions(webhookID2, "1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp", 2)
assert result
assert result['event_type'] == 'address-transactions'
assert result['address'] == '1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp'
assert result['confirmations'] == 2
# add webhook event subscription (block)
result = client.subscribe_new_blocks(webhookID2)
assert result
assert result['event_type'] == 'block'
# add webhook event subscription (transactions)
result = client.subscribe_transaction(webhookID2,
"6a46bf067704284340e64cb963d816b152643e0c156204632f58aec1d751d145", 2)
assert result
assert result['event_type'] == 'transaction'
assert result['address'] is None
assert result['transaction'] == '6a46bf067704284340e64cb963d816b152643e0c156204632f58aec1d751d145'
assert result['confirmations'] == 2
# get webhook's event subscriptions
result = client.webhook_events(webhookID2)
assert result and 'data' in result and 'total' in result
assert result['total'] == 3
assert len(result['data']) == 3
assert result['data'][0]['event_type'] == 'address-transactions'
assert result['data'][1]['event_type'] == 'block'
assert result['data'][2]['event_type'] == 'transaction'
# unsubscribe webhook event (address-transaction)
assert client.unsubscribe_address_transactions(webhookID2, "1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp")
# unsubscribe webhook event (block)
assert client.unsubscribe_new_blocks(webhookID2)
# unsubscribe webhook event (transaction)
assert client.unsubscribe_transaction(webhookID2,
"6a46bf067704284340e64cb963d816b152643e0c156204632f58aec1d751d145")
# batch create webhook events (address-transactions)
batch_data = [
{
'event_type': 'address-transactions',
'address': '18FA8Tn54Hu8fjn7kkfA<KEY>bHzo',
'confirmations': 1
},
{
'address': '1LUCKYwD6V9JHVXAFEEjyQSD4Dj5GLXmte',
'confirmations': 1
},
{
'address': '1qMBuZnrmGoAc2MWyTnSgoLuWReDHNYyF'
}
]
result = client.batch_subscribe_address_transactions(webhookID2, batch_data)
assert result
result = client.webhook_events(webhookID2)
assert result['total'] == 3
assert len(result['data']) == 3
assert result['data'][2]['address'] == batch_data[2]['address']
# cleanup - @todo needs to be put in a cleanup class and run regardless of the test progress
#assert client.delete_webhook(webhookID2)
def test_price_index(self):
client = self.setup_api_client()
result = client.price()
assert result and 'USD' in result
def test_verify_message(self):
client = self.setup_api_client()
address = "1F26pNMrywyZJdr22jErtKcjF8R3Ttt55G"
message = address
signature = "H85WKpqtNZDrajOnYDgUY+abh0KCAcOsAIOQwx2PftAbLEPRA7mzXA/CjXRxzz0MC225pR/hx02Vf2Ag2x33kU4="
result = client.verify_message(message, address, signature)
assert result
if __name__ == "__main__":
unittest.main()
|
189874
|
import logging
from .object import ObjectStore
from openpathsampling.netcdfplus.cache import LRUChunkLoadingCache
logger = logging.getLogger(__name__)
init_log = logging.getLogger('openpathsampling.initialization')
class ValueStore(ObjectStore):
"""
Store that stores a value by integer index
Usually used to save additional attributes for objects
See Also
--------
`PseudoAttribute`, `PseudoAttributeStore`
"""
def __init__(
self,
key_class,
allow_incomplete=False,
chunksize=256
):
super(ValueStore, self).__init__(None)
self.key_class = key_class
self.object_index = None
self.allow_incomplete = allow_incomplete
self.chunksize = chunksize
self.object_pos = None
self._len = 0
def to_dict(self):
return {
'key_class': self.key_class,
'allow_incomplete': self.allow_incomplete,
'chunksize': self.chunksize
}
def create_uuid_index(self):
return dict()
def register(self, storage, prefix):
super(ValueStore, self).register(storage, prefix)
self.object_pos = self.storage._objects[self.key_class].pos
def __len__(self):
return len(self.variables['value'])
# ==========================================================================
# LOAD/SAVE DECORATORS FOR CACHE HANDLING
# ==========================================================================
def load(self, idx):
pos = self.object_pos(idx)
if pos is None:
return None
if self.allow_incomplete:
# we want to load by uuid and it was not in cache.
if pos in self.index:
n_idx = self.index[pos]
else:
return None
if n_idx < 0:
return None
else:
if pos < self._len:
n_idx = pos
else:
return None
# if it is in the cache, return it
try:
obj = self.cache[n_idx]
return obj
except KeyError:
pass
obj = self.vars['value'][n_idx]
self.cache[n_idx] = obj
return obj
def __setitem__(self, idx, value):
pos = self.object_pos(idx)
if pos is None:
return
if self.allow_incomplete:
if pos in self.index:
return
n_idx = len(self.index)
self.cache.update_size(n_idx)
else:
if pos < self._len:
return
n_idx = pos
if self.allow_incomplete:
# only if partial storage is used store index and update
self.vars['index'][n_idx] = pos
self.index[pos] = n_idx
self.vars['value'][n_idx] = value
self.cache[n_idx] = value
self._len = max(self._len, n_idx + 1)
def fill_cache(self):
self.cache.load_max()
def restore(self):
if self.allow_incomplete: # only if partial storage is used
for pos, idx in enumerate(self.vars['index'][:]):
self.index[idx] = pos
self._len = len(self)
self.initialize_cache()
def initialize(self):
self.initialize_cache()
def initialize_cache(self):
self.cache = LRUChunkLoadingCache(
chunksize=self.chunksize,
variable=self.vars['value']
)
self.cache.update_size()
def __getitem__(self, item):
# enable numpy style selection of objects in the store
try:
if isinstance(item, self.key_class):
return self.load(item)
elif type(item) is list:
return [self.load(idx) for idx in item]
except KeyError:
pass
return None
def get(self, item):
if self.allow_incomplete:
try:
return self.load(item)
except KeyError:
return None
else:
return self.load(item)
|
189902
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
import time
import cv2
from tqdm import tqdm
from model_wrappers.ldf_wrapper import LDF_Wrapper
import logging
def get_mae(img1, img2):
# get mae from two gray scale images
ims = []
for pred in [img1, img2]:
pred = pred / 255
if pred.max() != pred.min(): # min-max normalise each image to [0, 1] so the MAE is scale-invariant
pred = (pred - pred.min()) / (pred.max() - pred.min())
ims.append(pred)
# return pred, gt
return np.mean(np.abs(ims[0] - ims[1]))
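# Consequence of the per-image normalisation above: identical images score
# 0.0, and a binary mask scores 1.0 against its inverse.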
def trans_to_numpy_img(tsr, shape):
"""
tsr[H, W]: model output result
shape
"""
pred = F.interpolate(tsr.unsqueeze(0), size=shape, mode='bilinear')
pred = torch.sigmoid(pred[0][0]).cpu().numpy() * 255 # H, W
return np.round(pred)
def mask2dt_bd(mask):
mask = mask.astype(np.uint8)
body = cv2.blur(mask, ksize=(5,5))
body = cv2.distanceTransform(body, distanceType=cv2.DIST_L2, maskSize=5)
body = body**0.5
tmp = body[np.where(body>0)]
if len(tmp)!=0:
body[np.where(body>0)] = np.floor(tmp/np.max(tmp)*255)
detail = mask-body
return body, detail
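# Reading: `body` is the blurred, distance-transform-weighted interior of the
# mask, and `detail` = mask - body keeps edges and thin structures, so the two
# maps can be supervised separately (as in the LDF-style decomposition).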
def get_ent(pred_mask:torch.Tensor):
"""
args:
- pred_mask[1, H, W]: output of model
returns:
- ent map[H, W]
"""
z = torch.zeros(()).to(pred_mask.device)
pred_mask = torch.sigmoid(pred_mask[0])
ent_map = torch.stack((1 - pred_mask, pred_mask), 0) # 2, H, W
# avoid log(0)
ent_map = torch.sum(-1 * torch.where(ent_map == 0., z, torch.log(ent_map)) * ent_map, 0) # H, W
return ent_map
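# Note: for a sigmoid probability p this computes the binary entropy
# H(p) = -p*ln(p) - (1-p)*ln(1-p), which peaks at p = 0.5, so the most
# uncertain pixels receive the highest score.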
def it_pse_update_conf_lite(
test_loader,
model_wrapper:LDF_Wrapper,
res_path,
cur_round,
cfg=None
):
trans_back_list = test_loader.dataset.trans_back_funcs
test_loader = tqdm(test_loader)
test_im_shape = cfg.DATA.TEST_SHAPE
# b_thres, w_thres = 0.001, 0.999
b_thres, w_thres = cfg.SOLVER.B_THRES, cfg.SOLVER.W_THRES
logging.info(res_path)
model = model_wrapper.model
model.eval() # model need to be evaluation mode
with torch.no_grad():
name2consis = {}
name2ent = {}
filter_out_file_list = []
end = time.time()
for image, shape_list, name_list in test_loader: # mask->numpy array
data_time = time.time()
pred_list = []
bn = image[0].size(0)
for i, im in enumerate(image):
im = im.cuda().float()
out_dict = model_wrapper.pred_img(im, test_im_shape)
pred = model_wrapper.output2img(out_dict['mask']).cpu().numpy()
pred = trans_back_list[i](pred) # bn, 1, h, w
pred_list.append(pred.squeeze(1)) #b, h, w
# from lib.exp_logging import get_unnorm_np_img
# debug_save_dir = 'debug_err'
# # if os.path.exists(debug_save_dir):
# os.makedirs(debug_save_dir, exist_ok=True)
# im_np = get_unnorm_np_img(im)
# for j in range(bn):
# cv2.imwrite(os.path.join(debug_save_dir, f'{name_list[j]}_{i}.png'), im_np[j])
# # import pdb; pdb.set_trace();
# cv2.imwrite(os.path.join(debug_save_dir, f'{name_list[j]}_{i}_pred.png'), pred[j].squeeze() * 255)
infer_time = time.time()
for idx in range(bn):
shape = shape_list[idx]
name = name_list[idx]
# original output
pred_o = pred_list[0][idx] # np.ndarray h, w
assert pred_o.shape == tuple(test_im_shape), pred_o.shape
# saliency region ratio
posi_ratio = np.mean(np.round(pred_o * 255) > 128)
if posi_ratio < b_thres or posi_ratio > w_thres:
# filter out image with extreme ratio
filter_out_file_list.append(f'{name}**{posi_ratio}')
continue
pred_as = np.stack([pred_list[i][idx] for i in range(len(pred_list))], axis=0) # n, h, w
pred_as_var = np.var(pred_as, axis = 0) # h, w
name2consis[name] = np.mean(pred_as_var) # 1
pred_o = cv2.resize(pred_o, dsize=tuple(shape)[::-1], interpolation=cv2.INTER_LINEAR) # cv2 resize w, h,
pred_o = np.round(pred_o * 255)
pred_as_var = cv2.resize(pred_as_var, dsize=tuple(shape)[::-1], interpolation=cv2.INTER_LINEAR) # cv2 resize w, h,
pred_as_var = np.round(pred_as_var * 255)
# if cfg.SOLVER.ONE_HOT: # not one hot here
# pred_o = (pred_o > 128).astype(np.uint8) * 255
if cfg.SOLVER.ONLY_MASK:
bd, dt = mask2dt_bd(pred_o)
else:
raise NotImplementedError("Only support only_mask schema currently")
for path, img in [(p, i) for p, i in [
(res_path["save_body_path"], bd),
(res_path["save_detail_path"], dt),
(res_path["save_mask_path"], pred_o),
(res_path["save_var_path"], pred_as_var)
] if p and p != '']:
if not os.path.exists(path):
os.makedirs(path)
cv2.imwrite(os.path.join(path, f'{name}.png'), img)
saving_time = time.time()
test_loader.set_postfix(
data_time = data_time - end,
infer_time=infer_time - data_time,
saving_time=saving_time - infer_time
)
end = time.time()
## metric to choose pseudo label
sorted_name2metric = None
name2metric = None
if cfg.SOLVER.LABEL_SELECT_STRATEGY == 'ent':
name2metric = name2ent
sorted_name2metric = sorted(name2ent.items(), key=lambda tup: tup[1])
elif cfg.SOLVER.LABEL_SELECT_STRATEGY == 'consis':
name2metric = name2consis
sorted_name2metric = sorted(name2consis.items(), key=lambda tup: tup[1])
tot_len = len(sorted_name2metric)
if cfg.SOLVER.PSE_POLICY == 'portion':
## filter image according to preset portion
portion = cfg.tgt_portion_list[cur_round]
filter_portion = cfg.SOLVER.PSE_FILTER_PORTION
train_list_len = int(tot_len * portion)
selected_win_start = int(filter_portion * train_list_len)
selected_win_end = selected_win_start + train_list_len
elif cfg.SOLVER.PSE_POLICY == 'threshold':
## filter image according to threshold
var_thres = cfg.SOLVER.VAR_THRESHOLD
new_train_lst = [tup for tup in sorted_name2metric if tup[1] <= var_thres]
train_list_len = len(new_train_lst)
selected_win_start = 0
selected_win_end = len(new_train_lst)
new_train_lst = sorted_name2metric[selected_win_start:selected_win_end]
with open(res_path["save_file_list_path"], "w") as f:
f.write('\n'.join([tup[0] for tup in sorted_name2metric[selected_win_start:selected_win_end]]))
# maybe read from var file
with open(os.path.join(res_path['cur_round_dir'], 'im_score.csv'), 'w') as f:
f.write('\n'.join([f'{tup[0]}, {tup[1]}' for tup in sorted_name2metric]))
if "filter_out_im_list_path" in res_path:
with open(res_path["filter_out_im_list_path"], "w") as f:
f.write('\n'.join(filter_out_file_list))
# just for debug
logging.info(f"write pseudo label into:")
for k, v in res_path.items():
logging.info(f"{k}: {v}")
logging.info("==" * 10)
logging.info(f"unfiltered dataset length: {tot_len}")
logging.info(f"dataset length: {train_list_len} from idx {selected_win_start} to {selected_win_end}")
return {
'pse_train_list_len': train_list_len
}
if __name__ == '__main__':
# test
# create a model
pass
|
189906
|
from django.contrib import admin
from .models import Page
# Register your models here.
admin.site.register(Page)
|
189959
|
from beanborg.rule_engine.rules import *
class My_Custom_Rule(Rule):
def __init__(self, name, context):
# invoking the __init__ of the parent class
Rule.__init__(self, name, context)
def execute(self, csv_line, tx = None, ruleDef = None ):
self.checkAccountFromTo(ruleDef)
if "Withdrawal".lower() in csv_line[self.context.tx_type_pos].lower():
cashPosting = [Posting(
account=ruleDef.account_from,
units=None,
cost=None,
price=None,
flag=None,
meta=None),
Posting(
account=ruleDef.account_to,
units=None,
cost=None,
price=None,
flag=None,
meta=None)]
return (True, tx._replace(postings=cashPosting))
return (False,tx)
|
189961
|
import argparse
import sys
from loguru import logger
from genomepy import Genome, install_genome
def parse_genome(auto_install=False, genomes_dir=None):
"""
Argparse action for command-line genome option.
Parameters
----------
auto_install : bool, optional
Install a genome if it's not found locally.
genomes_dir : str, optional
Directory to look for and/or insall genomes.
"""
class GenomeAction(argparse.Action):
def __call__(self, parser, args, name, option_string=None):
try:
genome = Genome(name, genomes_dir=genomes_dir)
except FileNotFoundError:
logger.warning(f"Genome {name} not found!")
if auto_install:
logger.info("Trying to install it automatically using genomepy...")
install_genome(name, annotation=True, genomes_dir=genomes_dir)
genome = Genome(name, genomes_dir=genomes_dir)
else:
logger.info("You can install it using `genomepy install`.")
sys.exit(1)
setattr(args, self.dest, genome)
return GenomeAction
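# Minimal usage sketch (hypothetical parser and argument names, for
# illustration only):
# parser = argparse.ArgumentParser()
# parser.add_argument("genome", action=parse_genome(auto_install=True))
# args = parser.parse_args(["hg38"])  # args.genome is then a Genome instance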
|
189966
|
import torch
import random
import dataclasses
from collections import OrderedDict
from .utils import format_belief
class NegativeSamplingDatasetWrapper(torch.utils.data.Dataset):
def __init__(self, inner, transform=None):
self.inner = inner
self.transform = transform
assert hasattr(self.inner, 'ontology')
assert self.inner.ontology is not None
self.ontology = {k: sorted(v) for k, v in self.inner.ontology.items()}
def __len__(self):
return 2 * len(self.inner)
def __getitem__(self, i):
item = self.inner[i // 2]
negative = i % 2
if negative:
negative = False
belief, response, context = item.belief, item.response, item.context
raw_belief = item.raw_belief
negative_type = random.randrange(1, 8)
use_new_belief = (negative_type // 4) % 2
modify_belief = (negative_type // 2) % 2
use_new_response = negative_type % 2
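# negative_type in 1..7 acts as a 3-bit mask over (use_new_belief,
# modify_belief, use_new_response); 0 is excluded, so at least one
# corruption is always applied to odd-indexed items.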
# sample another dialogue to draw negative components from
negative_sample = random.randrange(len(self.inner))
neg_sample = self.inner[negative_sample]
if use_new_belief:
raw_belief = neg_sample.raw_belief
negative = True
if modify_belief:
# Use different slot values
new_belief = OrderedDict()
for k, bs in item.raw_belief.items():
new_belief[k] = dict()
for k2, v in bs.items():
choices = [x for x in self.ontology[(k, k2)] if x != v]
if len(choices) == 0:
new_belief[k][k2] = v
else:
new_belief[k][k2] = random.choice(choices)
negative = True
raw_belief = new_belief
if use_new_response:
# Negative response
negative_sample = random.randrange(len(self.inner))
neg_sample = self.inner[negative_sample]
response = neg_sample.response # negative response
negative = True
belief = format_belief(raw_belief)
item = dataclasses.replace(item, context=context, belief=belief,
raw_belief=raw_belief, response=response, positive=not negative)
return self.transform(item)
class NegativeSamplerWrapper(torch.utils.data.Sampler):
def __init__(self, inner):
self.inner = inner
@property
def num_samples(self):
return 2 * self.inner.num_samples
def __iter__(self):
for index in iter(self.inner):
yield 2 * index
yield 2 * index + 1
def set_epoch(self, epoch):
if hasattr(self.inner, 'set_epoch'):
self.inner.set_epoch(epoch)
def __len__(self):
return self.num_samples
|
189967
|
from marshmallow import fields
from paramtools.schema import (
OrderedSchema,
BaseValidatorSchema,
ValueObject,
get_type,
get_param_schema,
ParamToolsSchema,
)
from paramtools import utils
class SchemaFactory:
"""
Uses data from:
- a schema definition file
- a baseline specification file
to extend:
- `schema.BaseParamSchema`
- `schema.BaseValidatorSchema`
Once this has been completed, the `load_params` method can be used to
deserialize and validate parameter data.
"""
def __init__(self, defaults):
defaults = utils.read_json(defaults)
self.defaults = {k: v for k, v in defaults.items() if k != "schema"}
self.schema = ParamToolsSchema().load(defaults.get("schema", {}))
(self.BaseParamSchema, self.label_validators) = get_param_schema(
self.schema
)
def schemas(self):
"""
For each parameter defined in the baseline specification file:
- define a parameter schema for that specific parameter
- define a validation schema for that specific parameter
Next, create a baseline specification schema class (`ParamSchema`) for
all parameters listed in the baseline specification file and a
validator schema class (`ValidatorSchema`) for all parameters in the
baseline specification file.
- `ParamSchema` reads and validates the baseline specification file
- `ValidatorSchema` reads revisions to the baseline parameters and
validates their type, structure, and whether they are within the
specified range.
`param_schema` is defined and used to read and validate the baseline
specifications file. `validator_schema` is defined to read and validate
the parameter revisions. The output from the baseline specification
deserialization is saved in the `context` attribute on
`validator_schema` and will be utilized when doing range validation.
"""
param_dict = {}
validator_dict = {}
for k, v in self.defaults.items():
fieldtype = get_type(v)
classattrs = {
"value": fieldtype,
"_auto": fields.Boolean(required=False, load_only=True),
**self.label_validators,
}
# TODO: what about case where number_dims > 0
# if not isinstance(v["value"], list):
# v["value"] = [{"value": v["value"]}]
validator_dict[k] = type(
"ValidatorItem", (OrderedSchema,), classattrs
)
classattrs = {"value": ValueObject(validator_dict[k], many=True)}
param_dict[k] = type(
"IndividualParamSchema", (self.BaseParamSchema,), classattrs
)
classattrs = {k: fields.Nested(v) for k, v in param_dict.items()}
DefaultsSchema = type("DefaultsSchema", (OrderedSchema,), classattrs)
defaults_schema = DefaultsSchema()
classattrs = {
k: ValueObject(v, many=True) for k, v in validator_dict.items()
}
ValidatorSchema = type(
"ValidatorSchema", (BaseValidatorSchema,), classattrs
)
validator_schema = ValidatorSchema()
return (
defaults_schema,
validator_schema,
self.schema,
defaults_schema.load(self.defaults),
)
|
189990
|
from setuptools import setup, find_packages
from codecs import open
import os
import re
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
requirements = f.read().splitlines()
setup(
name='utilspack',
version="0.0.1",
author='<NAME>',
author_email='<EMAIL>',
description='A box of stuff',
long_description=long_description,
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS",
"Operating System :: Unix",
],
package_dir={"utilspack": "utilspack"},
packages=find_packages(),
include_package_data=True,
)
|
190013
|
import pytest
from django_api_client.client import api_client_factory
from django_api_client.client.exceptions import APINotFound
def test_get_api_name_error():
with pytest.raises(APINotFound) as error:
api_client_factory('CRAZY NAME')
assert str(error.value) == "API name Not Found."
|
190018
|
from termcolor import colored
import logging
import time
import os
import json
from pathlib import Path
import shutil
class Logger:
def __init__(self):
self.__logger = logging.getLogger()
Logger.mkdir_if_not_exist('logs')
__file_name = 'logs/' + time.strftime('%Y-%m-%d', time.localtime(time.time())) + '.log'
__handler = logging.FileHandler(filename=__file_name, mode='a', encoding='utf-8')
__formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
__handler.setFormatter(__formatter)
self.__logger.addHandler(__handler)
self.__logger.setLevel(logging.DEBUG)
def d(self, msg, *args, **kwargs):
self.__logger.debug(msg, *args, **kwargs)
def i(self, msg, *args, **kwargs):
self.__logger.info(msg, *args, **kwargs)
def w(self, msg, *args, **kwargs):
self.__logger.warning(msg, *args, **kwargs)
def e(self, msg, *args, **kwargs):
self.__logger.error(msg, *args, **kwargs)
@staticmethod
def info(msg):
print(colored(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ":" + str(msg), 'yellow'))
@staticmethod
def success(msg):
print(colored(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ":" + str(msg), 'green'))
@staticmethod
def error(msg):
print(colored(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ":" + str(msg), 'red'))
@staticmethod
def debug(msg):
print(colored(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ":" + str(msg), 'blue'))
@staticmethod
def mkdir_if_not_exist(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def filter_unsynced_image_tags(fetched_dict=None, synced_list=None):
"""
sample data of fetched_dict:
{
"name": "gcr.io/kubernetes-helm/tiller",
"tags": [
"canary",
"test-release",
"v2.0.0",
]
}
"""
if not isinstance(fetched_dict, dict) or not isinstance(synced_list, list):
Logger.error("filter unsynced list cause some unexcept status, the fetched_dict type is: {}, synced_list type "
"is:{}".format(str(type(fetched_dict)), str(type(synced_list))))
return fetched_dict
for do_item in synced_list:
if not isinstance(do_item, dict):
continue
if do_item['name'] == fetched_dict['name']:
no_duplicate_tags = remove_duplicates_item(do_item['tags'], fetched_dict['tags'])
return {'name': fetched_dict['name'], 'tags': no_duplicate_tags}
Logger.info("can not match the same dict, direct return fetched_list, fetched_dict's name: {}".format(str(fetched_dict['name'])))
return {'name': fetched_dict['name'], 'tags': fetched_dict['tags']}
def remove_duplicates_item(fl=None, sl=None):
if not isinstance(fl, list) and not isinstance(sl, list):
return sl
for fli in fl:
if fli in sl:
sl.remove(fli)
return sl
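# Worked example (illustrative): remove_duplicates_item(['a', 'b'], ['b', 'c'])
# removes 'b' from the second list in place and returns ['c'].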
def add_synced_image_tags(success_list=None, synced_list=None):
if not isinstance(success_list, list) or not isinstance(synced_list, list):
Logger.error("add synced image error, return success_list directly.")
return success_list
temp = []
combin_synced = []
combin_success = []
for s in synced_list:
for i in success_list:
if s['name'] == i['name']:
temp.append({'name': s['name'], 'tags': list(set(s['tags']+i['tags']))})
combin_success.append(i)
combin_synced.append(s)
for i in range(len(combin_synced)):
if combin_success[i] in success_list:
success_list.remove(combin_success[i])
if combin_synced[i] in synced_list:
synced_list.remove(combin_synced[i])
temp = temp+synced_list+success_list
return remove_duplicate(temp)
def load_jsond(filename, load_after_delete=True, rm_duplicate=True):
if not os.path.exists(os.path.join(get_dir(), filename)):
Logger.error("[load_jsond] cannot find the target file at path: {}".format(filename))
return None
with open(os.path.join(get_dir(), filename), "r") as cache:
undo_task = json.load(cache)
if load_after_delete:
os.remove(os.path.join(get_dir(), filename))
if rm_duplicate:
return remove_duplicate(undo_task)
return undo_task
def save_jsond(filename, data=None, overwrite=True):
if overwrite and os.path.exists(os.path.join(get_dir(), filename)):
os.remove(os.path.join(get_dir(), filename))
with open(os.path.join(get_dir(), filename), "w") as file:
json.dump(data, file)
def update_synced_list(synced_list=None, namespace=None):
synced = load_jsond(f'{namespace}-synced.json')
save_jsond(f'{namespace}-synced.json', add_synced_image_tags(synced_list, synced))
def get_dir():
dir_path = os.path.dirname(os.path.realpath(__file__))
return Path(dir_path).parent
def remove_duplicate(data_list):
if data_list is None or len(data_list) <= 0:
return data_list
temp = []
for item in data_list:
if item not in temp:
temp.append(item)
return temp
def show_disk():
try:
total, used, free = shutil.disk_usage(r'/var/lib/docker')
Logger.info("Total: {}GB, Used: {} GB, Free: {}GB".format(str((total // (2**30))), str((used // (2**30))), str((free // (2**30)))))
except Exception:
return
def update_trigger(trigger=False, force=False):
synced = load_jsond('trigger.json', False, False)
if synced is None:
synced = {'trigger': trigger}
if isinstance(synced, list):
synced = synced[0]
if force:
synced['trigger'] = trigger
else:
if not synced['trigger']:
synced['trigger'] = trigger
save_jsond('trigger.json', synced, True)
def read_trigger():
synced = load_jsond('trigger.json', True, False)
if synced is None:
return True
else:
if 'trigger' in synced:
return synced['trigger']
else:
return True
|
190019
|
import numpy as np
import numba as nb
@nb.jit("(f8[:])(f8[:], f8[:])", nopython=True, nogil=True, cache=True)
def nb_safe_divide(a, b):
# divide each element in a by each element in b
# if element b == 0.0, return element = 0.0
c = np.zeros(a.shape[0], dtype=np.float64)
for i in range(a.shape[0]):
if b[i] != 0.0:
c[i] = a[i] / b[i]
return c
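# Quick sanity check (illustrative values):
# nb_safe_divide(np.array([1.0, 2.0]), np.array([0.0, 4.0])) -> array([0. , 0.5])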
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_average(arr, window_size):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))
# for each output element, find the mean of the last few input elements
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
out_arr[i] = np.mean(new_arr[i : i + window_size])
return out_arr
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_sd(arr, window_size):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))
# for each output element, find the mean and std of the last few
# input elements, and standardise the input element by the mean and std of the window
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
num = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size-1])
denom = np.std(new_arr[i : i + window_size-1])
if denom != 0.0:
out_arr[i] = num / denom
return out_arr
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_sd_rand(arr, window_size_rand):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size_rand-1) * arr[0], arr))
# for each output element, find the mean and std of the last few
# input elements, and standardise the input element by the mean and std of the window
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
window_size_std = 1.0
window_size = round(np.random.normal(window_size_rand, window_size_std))
num = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size-1])
denom = np.std(new_arr[i : i + window_size-1])
if denom != 0.0:
out_arr[i] = num / denom
return out_arr
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_norm(arr, window_size):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))
# for each output element, find the mean and std of the last few
# input elements, and standardise the input element by the mean and std of the window
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
num = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size])
denom = np.max(np.abs(new_arr[i : i + window_size] - np.mean(new_arr[i : i + window_size])))
if denom != 0.0:
out_arr[i] = num / denom
return out_arr
@nb.jit("(f8[:])(f8[:], i8, f8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_norm_rand(arr, window_size_rand, peturb):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size_rand-1) * arr[0], arr))
index_new = window_size_rand
# for each output element, find the mean and std of the last few
# input elements, and standardise the input element by the mean and std of the window
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
window_size_std = peturb * np.float64(window_size_rand)
window_size = round(np.random.normal(window_size_rand, window_size_std))
i_end_new = i + window_size_rand
i_start_new = i_end_new - window_size
if i_start_new < 0:
i_start_new = 0
out_arr[i] = np.mean(new_arr[i_start_new : i_end_new])
#print(out_arr[i-1:i+1])
#num = new_arr[i+window_size-1] - np.mean(new_arr[i : i + window_size])
#denom = np.max(np.abs(new_arr[i : i + window_size] - np.mean(new_arr[i : i + window_size])))
#if denom != 0.0:
# out_arr[i] = num / denom
return out_arr
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, parallel=False)
def nb_causal_rolling_average(arr, window_size):
# create an output array
out_arr = np.zeros(arr.shape[0])
# create an array from the input array, with added space for the rolling window
new_arr = np.hstack((np.ones(window_size-1) * arr[0], arr))
# for each output element, find the mean of the last few input elements
#for i in nb.prange(out_arr.shape[0]):
for i in range(out_arr.shape[0]):
out_arr[i] = np.mean(new_arr[i : i + window_size])
return out_arr
#@nb.jit("(f8[:])(f8[:], f8[:], i8, i8, f8)", nopython=True, nogil=True)
def nb_calc_sentiment_score_rand_b(sent_a, sent_b, ra_win_size_short, ra_win_size_long,peturb):
# example method for creating a stationary sentiment score based on Augmento data
# compare the raw sentiment values
sent_ratio = nb_safe_divide(sent_a, sent_b)
# smooth the sentiment ratio
sent_ratio_short = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size_short, peturb)
sent_ratio_long = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size_long, peturb)
# create a stationary(ish) representation of the smoothed sentiment ratio
sent_score = sent_ratio_short - sent_ratio_long
return sent_score
@nb.jit("(f8[:])(f8[:], f8[:], i8, i8, f8)", nopython=True, nogil=True)
def nb_calc_sentiment_score_rand_a(sent_a, sent_b, ra_win_size, std_win_size, peturb):
# example method for creating a stationary sentiment score based on Augmento data
# compare the raw sentiment values
sent_ratio = nb_safe_divide(sent_a, sent_b)
# smooth the sentiment ratio
sent_ratio_smooth = nb_causal_rolling_norm_rand(sent_ratio, ra_win_size, peturb)
# create a stationary(ish) representation of the smoothed sentiment ratio
sent_score = nb_causal_rolling_sd(sent_ratio_smooth, std_win_size)
return sent_score
@nb.jit("(f8[:])(f8[:], f8[:], i8, i8)", nopython=True, nogil=True)
def nb_calc_sentiment_score_a(sent_a, sent_b, ra_win_size, std_win_size):
# example method for creating a stationary sentiment score based on Augmento data
# compare the raw sentiment values
sent_ratio = nb_safe_divide(sent_a, sent_b)
# smooth the sentiment ratio
sent_ratio_smooth = nb_causal_rolling_average(sent_ratio, ra_win_size)
# create a stationary(ish) representation of the smoothed sentiment ratio
sent_score = nb_causal_rolling_sd(sent_ratio_smooth, std_win_size)
return sent_score
@nb.jit("(f8[:])(f8[:], f8[:], i8, i8)", nopython=True, nogil=True)
def nb_calc_sentiment_score_b(sent_a, sent_b, ra_win_size_short, ra_win_size_long):
# example method for creating a stationary sentiment score based on Augmento data
# compare the raw sentiment values
sent_ratio = nb_safe_divide(sent_a, sent_b)
# smooth the sentiment ratio
sent_ratio_short = nb_causal_rolling_average(sent_ratio, ra_win_size_short)
sent_ratio_long = nb_causal_rolling_average(sent_ratio, ra_win_size_long)
# create a stationary(ish) representation of the smoothed sentiment ratio
sent_score = sent_ratio_short - sent_ratio_long
return sent_score
@nb.jit("(f8[:])(f8[:], f8[:], i8, i8)", nopython=True, nogil=True)
def nb_calc_sentiment_score_c(sent_a, sent_b, ra_win_size, std_win_size):
# example method for creating a stationary sentiment score based on Augmento data
# compare the raw sentiment values
sent_ratio = nb_safe_divide(sent_a, sent_b)
# smooth the sentiment ratio
sent_ratio_smooth = nb_causal_rolling_average(sent_ratio, ra_win_size)
# create a stationary(ish) representation of the smoothed sentiment ratio
sent_score = nb_causal_rolling_norm(sent_ratio_smooth, std_win_size)
return sent_score
@nb.jit("(f8[:])(f8[:], f8[:], f8, f8)", nopython=True, nogil=True, cache=True)
def nb_backtest_a(price, sent_score, start_pnl, buy_sell_fee):
# example backtest with approximate model for long/short contracts
# create an array to hold our pnl, and set the first value
pnl = np.zeros(price.shape, dtype=np.float64)
pnl[0] = start_pnl
# for each step, run the market model
for i_p in range(1, price.shape[0]):
# if sentiment score is positive, simulate long position
# else if sentiment score is negative, simulate short position
# else if the sentiment score is 0.0, hold
# (note that this is a very approximate market simulation!)
n_sample_delay = 2
if i_p < n_sample_delay:
pnl[i_p] = pnl[i_p-1]
continue
if sent_score[i_p-n_sample_delay] > 0.0:
pnl[i_p] = (price[i_p] / price[i_p-1]) * pnl[i_p-1]
elif sent_score[i_p-n_sample_delay] < 0.0:
pnl[i_p] = (price[i_p-1] / price[i_p]) * pnl[i_p-1]
else:
# sentiment score is exactly 0.0: hold
pnl[i_p] = pnl[i_p-1]
# simulate a trade fee if we cross from long to short, or vice versa
if i_p > 1 and np.sign(sent_score[i_p-1]) != np.sign(sent_score[i_p-2]):
pnl[i_p] = pnl[i_p] - (buy_sell_fee * pnl[i_p])
return pnl
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, cache=True)
def moving_average(arr, window):
# output array
ma_arr = np.zeros(arr.shape[0])
# add space for rolling window
new_arr = np.hstack((np.ones(window-1) * arr[0], arr))
# standardise each point by the trailing window mean/std (a rolling z-score, despite the name)
#for i in nb.prange(arr.shape[0]):
for i in range(arr.shape[0]):
num = new_arr[i+window-1] - np.mean(new_arr[i : i+window-1])
denom = np.std(new_arr[i : i + window-1])
if denom != 0.0:
ma_arr[i] = num / denom
return ma_arr
<EMAIL>("(f8[:])(f8[:], i8)", nopython=True, nogil=True, cache=True)
#def signal_ma(positive, negative, short, long):
@nb.jit("(f8[:])(f8[:], f8[:], f8[:], f8, f8, f8)",nopython=True, nogil=True,cache=True)
def sma_crossover_backtest(price, leading_arr, lagging_arr, start_pnl, buy_sell_fee, threshold=0.0):
# create an array to hold our pnl, and set the first value
pnl = np.zeros(price.shape, dtype=np.float64)
pnl[0] = start_pnl
# BUY if Leading SMA is above Lagging SMA by some threshold.
# SELL if Leading SMA is below Lagging SMA by some threshold.
sent_signal = leading_arr - lagging_arr
# for each step, run the market model
for i_p in range(1, price.shape[0]):
if sent_signal[i_p-1] > threshold:
pnl[i_p] = (price[i_p] / price[i_p-1]) * pnl[i_p-1]
elif sent_signal[i_p-1] < threshold:
pnl[i_p] = (price[i_p-1] / price[i_p]) * pnl[i_p-1]
elif sent_signal[i_p-1] == threshold:
pnl[i_p] = pnl[i_p-1]
# simulate a trade fee if we cross from long to short, or vice versa
if i_p > 1 and np.sign(sent_signal[i_p-1]) != np.sign(sent_signal[i_p-2]):
pnl[i_p] = pnl[i_p] - (buy_sell_fee * pnl[i_p])
return pnl
<EMAIL>("(f8[:])(f8[:], f8[:], i8)", nopython=True, nogil=True, cache=True)
#def forward_volume(volume_data, price_data, threshold=2000000):
# price_rate_change = np.full(len(volume_data), np.nan)
# for i in range(len(volume_data)):
# sum_volume = 0
# for j in range(len(price_data)):
# sum_volume += price_data[j]
# if sum_volume >= threshold:
# price_rate_change[i] = (price_data[j] - price_data[i])/price_data[i]
# break
@nb.jit("(f8[:])(f8[:], f8[:], i8)", nopython=True, nogil=True, cache=True)
def forward_volume(volume_data, price_data, threshold=2000000):
price_rate_change = np.zeros(len(price_data))
for i in range((len(volume_data))):
j = i+1
sum_volume = 0.0
while (sum_volume < threshold) & (j < len(price_rate_change)):
sum_volume += volume_data[j]
if sum_volume >= threshold:
price_rate_change[i] = (price_data[j]-price_data[i])/price_data[i]
j += 1
return price_rate_change
@nb.jit("(f8[:])(f8[:], f8[:], f8)", nopython=True, nogil=True, cache=True)
def forward_volume(volume_data, price_data, threshold):
price_rate_change = np.zeros(len(price_data))
for i in range((len(volume_data))):
j = i+1
sum_volume = 0.0
while (sum_volume < threshold) & (j < len(price_rate_change)):
sum_volume += volume_data[j]
if sum_volume >= threshold:
price_rate_change[i] = (price_data[j]-price_data[i])/price_data[i]
j += 1
return price_rate_change
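# Reading: price_rate_change[i] is the relative price move from bar i to the
# first later bar by which at least `threshold` units of volume have traded,
# and stays 0.0 if the series ends before the threshold is reached.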
@nb.jit("(f8[:])(f8[:], i8)", nopython=True, nogil=True, cache=True)
def volume_normalized(volume_data, n_hours):
norm_volume = np.zeros(len(volume_data))
start = 0
for i in range(n_hours, len(volume_data) + 1, n_hours):  # include the final complete block
for j in range(start,i):
norm_volume[j] = volume_data[j]/np.sum(volume_data[start:i])
start = i
return norm_volume
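# Example (illustrative): n_hours=2, volume [1., 3., 2., 2.] ->
# [0.25, 0.75, 0.5, 0.5]; each bar is divided by its own n_hour block total.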
|
190079
|
import discord
from discord.ext import commands
import DiscordUtils
from discord.ext.commands import has_permissions, MissingPermissions
import datetime
class Snipe(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.description = (
" <:sucess:935052640449077248> Commands to snipe out messages that people try to hide"
)
self.theme_color = discord.Color.blue()
self.deleted_msgs = {}
self.edited_msgs = {}
self.snipe_limit = 20
@commands.Cog.listener()
async def on_message_delete(self, message: discord.Message):
ch_id = message.channel.id
if message.content:
if ch_id not in self.deleted_msgs:
self.deleted_msgs[ch_id] = []
self.deleted_msgs[ch_id].append(message)
if len(self.deleted_msgs[ch_id]) > self.snipe_limit:
self.deleted_msgs[ch_id].pop(0)
@commands.Cog.listener()
async def on_message_edit(
self, before: discord.Message, after: discord.Message
):
ch_id = before.channel.id
if not before.author.bot:
if before.content and after.content:
if ch_id not in self.edited_msgs:
self.edited_msgs[ch_id] = []
self.edited_msgs[ch_id].append((before, after))
if len(self.edited_msgs[ch_id]) > self.snipe_limit:
self.edited_msgs[ch_id].pop(0)
@commands.command(
name="snipe",
aliases=["sn"],
help="See recently deleted messages in the current channel",
)
@has_permissions(manage_messages=True)
async def snipe(self, ctx: commands.Context):
limit = self.snipe_limit
try:
msgs: list[discord.Message] = self.deleted_msgs[ctx.channel.id][
::-1
][:limit]
print(msgs)
embeds=[]
for msg in msgs:
snipe_embed = discord.Embed(
title="Snipe",description=f"Most recent deleted messages\n Channel {ctx.channel.mention}", color=self.theme_color
)
c = msg.created_at.timestamp()  # POSIX timestamp for Discord's <t:...> markup
snipe_embed.add_field(
name=f"{msg.author} @<t:{round(c)}>", value=f"{msg.content}", inline=False
)
embeds.append(snipe_embed)
paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
paginator.add_reaction('<:arrow_left:940845517703889016>', "first")
paginator.add_reaction('<:leftarrow:941994549935472670>', "back")
paginator.add_reaction('<:rightarrow:941994550124245013>', "next")
paginator.add_reaction('<:arrow_right:940608259075764265>', "last")
paginator.add_reaction('<:DiscordCross:940914829781270568>', "lock")
await paginator.run(embeds)
except KeyError:
await ctx.send(embed=discord.Embed(description="**There's nothing to snipe here...**\n Wait for a deleted message",color=discord.Color.dark_grey()))
@commands.command(
name="editsnipe",
aliases=["esn"],
help="See recently edited messages in the current channel",
)
@has_permissions(manage_messages=True)
async def editsnipe(self, ctx: commands.Context):
limit = self.snipe_limit
try:
msgs = self.edited_msgs[ctx.channel.id][::-1][:limit]
embeds=[]
for msg in msgs:
editsnipe_embed = discord.Embed(title="Edit Snipe",
description=f"Author • {msg[1].author} | Edited", color=self.theme_color
)
editsnipe_embed.add_field(
name='Before',
value=f"{msg[0].content}",
inline=True,
)
editsnipe_embed.add_field(
name='After',
value=f" {msg[1].content}",
inline=True,
)
editsnipe_embed.set_footer(text=f"Edited at {msg[1].created_at}")
embeds.append(editsnipe_embed)
paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, remove_reactions=True)
paginator.add_reaction('<:arrow_left:940845517703889016>', "first")
paginator.add_reaction('<:leftarrow:941994549935472670>', "back")
paginator.add_reaction('<:rightarrow:941994550124245013>', "next")
paginator.add_reaction('<:arrow_right:940608259075764265>', "last")
paginator.add_reaction('<:DiscordCross:940914829781270568>', "lock")
await paginator.run(embeds)
except KeyError:
await ctx.send(embed=discord.Embed(description="**There's nothing to snipe here...**\n Wait for a edited message",color=discord.Color.dark_grey()))
def setup(bot):
bot.add_cog(Snipe(bot))
|
190084
|
from jplephem.names import (
target_name_pairs as code_name_pairs,
target_names as code_names
)
name_codes = dict((name, code) for code, name in code_name_pairs)
def numbered_name_of(code):
"""Given a code, return a string giving both the code and name.
>>> numbered_name_of(301)
'301 Moon'
"""
name = code_names.get(code, '(Unnamed)')
return '{0} {1}'.format(code, name)
def _target_name(target):
"""Return `target` annotated for display to the user.
* A string target is quoted to make clear that it's a string, like 'comet'.
* A numeric target has its NAIF name appended, like 399 EARTH.
"""
if isinstance(target, str):
return repr(target)
name = code_names.get(target, 'UNKNOWN')
return '{0} {1}'.format(target, name)
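# Examples (following the doctest for numbered_name_of above):
# _target_name('comet') -> "'comet'"
# _target_name(301) -> '301 Moon'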
|
190097
|
import os
from .settings_dev import *
#########
# Setup #
#########
FIXTURE_DIRS = (
PROJECT_ROOT,
)
#############
# Overrides #
#############
RUN_TASKS_ASYNC = False # avoid sending celery tasks to queue -- just run inline
SUBDOMAIN_URLCONFS = {}
DEBUG = False
TESTING = True
ADMINS = (
("<NAME>", '<EMAIL>'),
)
###############
# Speed Hacks #
###############
# Reference:
# - https://docs.djangoproject.com/en/1.4/topics/testing/#speeding-up-the-tests
# - http://www.daveoncode.com/2013/09/23/effective-tdd-tricks-to-speed-up-django-tests-up-to-10x-faster/
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True
CELERY_BROKER_URL = 'memory://localhost/'
# faster collectstatic
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# Note: this is recommended by the Django docs but
# currently conflicts with some of our tests
# PASSWORD_HASHERS = (
# 'django.contrib.auth.hashers.MD5PasswordHasher',
# )
# Using the Django SQLite in-memory DB for testing is faster,
# but threaded tasks won't have access in Django <=1.7
# - https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEST_NAME
# - https://code.djangoproject.com/ticket/12118
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': None
# }
# }
# Work around for https://github.com/jamesls/fakeredis/issues/234
DJANGO_REDIS_CONNECTION_FACTORY = 'perma.tests.utils.FakeConnectionFactory'
# Use production cache setup, except with fakeredis backend
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"REDIS_CLIENT_CLASS": "fakeredis.FakeStrictRedis",
}
}
}
# Perma.cc encryption keys for communicating with Perma-Payments
# generated using perma_payments.security.generate_public_private_keys
# SECURITY WARNING: keep the production secret key secret!
PERMA_PAYMENTS_ENCRYPTION_KEYS = {
'id': 1,
'perma_secret_key': '<KEY>',
'perma_public_key': '<KEY>',
'perma_payments_public_key': '<KEY>',
}
PURCHASE_URL = '/purchase/'
PURCHASE_HISTORY_URL = '/purchase-history/'
ACKNOWLEDGE_PURCHASE_URL = '/acknowledge-purchase/'
SUBSCRIBE_URL = '/subscribe/'
CANCEL_URL = '/cancel-request/'
SUBSCRIPTION_STATUS_URL = '/subscription/'
UPDATE_URL = '/update/'
CHANGE_URL = '/change/'
# lots of subscription packages, to be thorough
TIERS = {
'Individual': [
{
'period': 'monthly',
'link_limit': 10,
'rate_ratio': 1
},{
'period': 'monthly',
'link_limit': 100,
'rate_ratio': 2.5
},{
'period': 'monthly',
'link_limit': 500,
'rate_ratio': 10
}, {
'period': 'annually',
'link_limit': 500,
'rate_ratio': 10
}
],
'Registrar': [
{
'period': 'monthly',
'link_limit': 10,
'rate_ratio': 0.1
},{
'period': 'monthly',
'link_limit': 25,
'rate_ratio': 0.25
},{
'period': 'monthly',
'link_limit': 100,
'rate_ratio': 1
},{
'period': 'monthly',
'link_limit': 500,
'rate_ratio': 5
},{
'period': 'monthly',
'link_limit': 'unlimited',
'rate_ratio': 10
},{
'period': 'annually',
'link_limit': 'unlimited',
'rate_ratio': 120
}
]
}
if os.environ.get('DOCKERIZED'):
HOST = 'web:8000'
ALLOWED_HOSTS.append('web')
REMOTE_SELENIUM_HOST = 'selenium'
WR_API = 'http://nginx/api/v1'
PLAYBACK_HOST = 'nginx:81'
else:
HOST = 'perma.test:8000'
REMOTE_SELENIUM_HOST = 'localhost'
WR_API = 'http://perma-archives.test:8089/api/v1'
PLAYBACK_HOST = 'perma-archives.test:8092'
ENABLE_SPONSORED_USERS = True
ENABLE_BONUS_LINKS = True
|
190105
|
import numpy
import cv2
def Area(rectangle):
"""
Returns rectangle area.
Args:
rectangle: Rectangle
Returns:
Area
"""
w = rectangle[2] - rectangle[0]
h = rectangle[3] - rectangle[1]
return w * h
def Max(rectangles):
"""
Returns the maximum rectangle.
Args:
rectangles: Rectangles
Returns:
Rectangle
"""
index = 0
area = -2147483648
for i in range(len(rectangles)):
box = rectangles[i]
cur = Area(box)
if (cur > area):
area = cur
index = i
return rectangles[index]
def Min(rectangles):
"""
Returns the minimum rectangle.
Args:
rectangles: Rectangles
Returns:
Rectangle
"""
index = 0
area = 2147483647
for i in range(len(rectangles)):
box = rectangles[i]
cur = Area(box)
if (cur < area):
area = cur
index = i
return rectangles[index]
def ToBox(rectangle):
"""
Returns rectangle scaled to box.
Args:
rectangle: Rectangle
Returns:
Rectangle
"""
width = rectangle[2] - rectangle[0]
height = rectangle[3] - rectangle[1]
m = max(width, height)
dx = int((m - width)/2)
dy = int((m - height)/2)
return [rectangle[0] - dx, rectangle[1] - dy, rectangle[2] + dx, rectangle[3] + dy]
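# Worked example: ToBox([0, 0, 4, 2]) pads the short side symmetrically and
# returns [0, -1, 4, 3], a square box centred on the original rectangle.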
|
190124
|
import myssl, select, handleHTTP, socket
import sys, struct, os, random, hashlib, time, threading
import json
import logging
#MAXSYN = 2 ** 15
MAXSYNBUFFER = 64
MAXSYN = 1024
#REMOTE_lines = 4
def filepath(f):
return os.path.join(os.path.split(os.path.realpath(__file__))[0], f)
def random_data(len):
d = ''
for i in range(0, len):
d += chr(random.randint(0,255))
return d
def send_all(sock, data):
bytes_sent = 0
con = 0
while 1:
r = sock.send(data[bytes_sent:])
if r < 0:
return r
bytes_sent += r
if bytes_sent == len(data):
return bytes_sent
con = con + 1
if con > 20:
raise Exception('send too many times!')
def read_all(sock):
data_len = sock.recv(2)
con = 0
if len(data_len) <= 0:
raise Exception('read_all zero data!')
data_len = struct.unpack("H",data_len)[0]
if data_len <= 0:
raise Exception('read_all data_len error!')
data = ''
while data_len > 0:
d = sock.recv(data_len)
if len(d) <= 0:
raise Exception('read_all read error!')
data += d
data_len -= len(d)
con += 1
if con > 20:
raise Exception('read too many times!')
return data
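# Framing assumed on the multiplexed link: each message is a 2-byte
# native-endian length header (struct "H") followed by that many payload
# bytes; read_all() keeps recv()ing until the advertised length has arrived
# (the header is written by the peer, e.g. in tcp_remote.run below).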
class zProxyHandle(myssl.zProxyRequestHandler):
def handle_socket5(self, sock, remote):
try:
fdset = [sock, remote]
while 1:
r, w, e = select.select(fdset, [], [])
if sock in r:
data = sock.recv(4096)
if len(data) <= 0:
break
result = send_all(remote, data)
if result < len(data):
raise Exception('failed to send all data')
if remote in r:
data = remote.recv(4096)
if len(data) <= 0:
break
result = send_all(sock, data)
if result < len(data):
raise Exception('failed to send all data')
if not len(r):
break
finally:
sock.close()
remote.close()
def socket5proxy(self):
try:
sock = self.connection
addrtype = ord(sock.recv(1))
if addrtype > 4:
return addrtype
if addrtype == 1:
addr = socket.inet_ntoa(self.rfile.read(4))
elif addrtype == 3:
addr = self.rfile.read(ord(sock.recv(1)))
elif addrtype == 4:
addr = socket.inet_ntop(socket.AF_INET6,self.rfile.read(16))
else:
# not support
logging.warn('addr_type not support')
return
port = struct.unpack('>H', self.rfile.read(2))
try:
logging.info('connecting %s:%d' % (addr, port[0]))
remote = socket.create_connection((addr, port[0]))
except socket.error, e:
logging.warn(e)
return
self.handle_socket5(sock, remote)
except socket.error, e:
logging.warn(e)
return
def handleProxy(self):
addrtype = self.socket5proxy()
if addrtype:
self.tcpproxy(addrtype)
def tcpproxy(self, addrtype):
self.tcpruning = True
try:
sock = self.connection
if addrtype == 8:
self.remote = TCP_CLIENTS.handleproxy(self)
if self.remote:
self.handle_TCP()
return
elif addrtype == 5:
addr = socket.inet_ntoa(self.rfile.read(4))
elif addrtype == 6:
addr = self.rfile.read(ord(sock.recv(1)))
elif addrtype == 7:
addr = socket.inet_ntop(socket.AF_INET6,self.rfile.read(16))
else:
# not support
logging.warn('addr_type not support')
return
port = struct.unpack('>H', self.rfile.read(2))
clientID = hashlib.sha1(str(self.client_address) + random_data(20) + str(time.time())).digest()
self.remote = TCP_CLIENTS.newproxy(clientID, addr, port[0], self)
if self.remote:
self.handle_TCP()
return
except socket.error, e:
logging.warn(e)
return
def handle_TCP(self):
try:
sock = self.connection
fset = [sock]
while self.tcpruning:
r, w, e = select.select(fset, [], [])
if sock in r:
self.remote.send(read_all(sock))
else:
break
except:
print 'handle_TCP'
print sys.exc_info()
finally:
self.destroy()
def destroy(self):
        self.tcprunning = False
self.remote.remove(self)
self.connection.close()
def send(self, data):
try:
result = send_all(self.connection, data)
if result < len(data):
raise Exception('failed to send all data')
return True
except:
print 'Hsend'
print sys.exc_info()
self.destroy()
return False
def verify(self):
global PW
if self.data[:20] == PW:
#Going up, as a proxy
self.connection.send(PW + '\x00' * random.randint(30,150))
return True
else:
#Going down, as a HTTP
return False
def log_message(self, format, *args):
s = ("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format%args))
l = open(HTTPLOG,'a+')
l.write(s)
l.close()
sys.stderr.write(s)
version_string = handleHTTP.version_string
do_HEAD = handleHTTP.send404
do_PUT = handleHTTP.send404
do_POST = handleHTTP.send404
do_DELETE = handleHTTP.send404
do_CONNECT = handleHTTP.send404
do_GET = handleHTTP.do_GET
class tcpproxyhandle:
def __init__(self):
self.clientlist = {}
def newproxy(self, clientID, addr, port, client):
try:
remote = socket.create_connection((addr, port))
client.connection.send(clientID + '\x00' * random.randint(10,80))
reID = client.connection.recv(65535)
if reID[:20] == clientID:
t = tcp_remote(remote, clientID)
t.Load(client)
t.start()
self.clientlist[clientID] = t
return t
except:
print sys.exc_info()
def handleproxy(self, client):
try:
ID = client.connection.recv(65535)[:20]
if ID in self.clientlist:
client.connection.send(ID + '\x00' * random.randint(10, 80))
t = self.clientlist[ID]
t.Load(client)
return t
except:
print sys.exc_info()
def removeID(self, ID):
if ID in self.clientlist:
del self.clientlist[ID]
class tcp_remote(threading.Thread):
def __init__(self, sock, clientID):
threading.Thread.__init__(self)
self.sock = sock
self.ID = clientID
self.clients = []
self.mutex = threading.Lock()
self.SendSYN = 0
self.RecvSYN = 0
self.SYNbuffer = {}
def run(self):
sock = self.sock
fset = [sock]
try:
while len(self.clients):
r, w, e = select.select(fset, [], [])
if sock in r:
data = sock.recv(1020)
if len(data) <= 0:
break
data = struct.pack("H",self.SendSYN) + data
self.SendSYN = (self.SendSYN + 1) % MAXSYN
data = struct.pack("H",len(data)) + data
while len(self.clients):
if random.choice(self.clients[-4:]).send(data):
break
else:
break
except:
print 'tcp_remote'
print sys.exc_info()
finally:
self.destroy()
def Load(self, client):
self.clients.append(client)
def remove(self, client):
if client in self.clients:
self.clients.remove(client)
if not len(self.clients):
self.destroy()
def send(self, data):
def _send(self, data):
result = send_all(self.sock, data)
if result < len(data):
raise Exception('failed to send all data')
self.RecvSYN = (self.RecvSYN + 1) % MAXSYN
try:
self.mutex.acquire()
syn = struct.unpack("H",data[:2])[0]
if syn == self.RecvSYN:
_send(self, data[2:])
while len(self.SYNbuffer):
if self.RecvSYN in self.SYNbuffer:
#print 'SYN out', self.RecvSYN
_send(self, self.SYNbuffer.pop(self.RecvSYN))
else:
break
else:
if len(self.SYNbuffer) >= MAXSYNBUFFER:
raise Exception('SYNbuffer overflow')
#print 'SYN need', self.RecvSYN, 'save', syn
self.SYNbuffer[syn] = data[2:]
except:
print 'Tsend'
print sys.exc_info()
self.destroy()
finally:
self.mutex.release()
def destroy(self):
TCP_CLIENTS.removeID(self.ID)
while len(self.clients):
self.clients.pop().destroy()
self.sock.close()
def main():
global PW, HTTPLOG, TCP_CLIENTS
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
with open(filepath('config.json'), 'rb') as f:
config = json.load(f)
logging.info('loading config from %s' % filepath('config.json'))
SERVER = config['server']
PORT = config['server_port']
PW = hashlib.sha1(config['password'] + "<PASSWORD>").digest()
IPv6 = int(config['ipv6'])
CRT = filepath(config['crt'])
KEY = filepath(config['key'])
TCP_CLIENTS = tcpproxyhandle()
if IPv6:
        myssl.ThreadingzProxyServer.address_family = socket.AF_INET6
HTTPLOG = filepath('http.log')
server = myssl.ThreadingzProxyServer((SERVER,PORT),
zProxyHandle,
CRT,
KEY)
logging.info("starting server at %s:%d" % tuple(server.server_address[:2]))
try:
server.serve_forever()
except socket.error, e:
logging.error(e)
server.shutdown()
server.server_close()
except KeyboardInterrupt:
server.shutdown()
server.server_close()
sys.exit(0)
if __name__ == '__main__':
main()
|
190125
|
import logging
import subprocess
import glob
import pkgutil
import hashlib
import importlib.util
from karton.core import Config, Karton, Task, Resource
from typing import List, Optional
from .__version__ import __version__
log = logging.getLogger(__name__)
def unpacker_module_worker(sample, user_config, module) -> List[Task]:
spec = importlib.util.spec_from_file_location("module.name", module)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
module = module.KartonUnpackerModule(sample=sample, config=user_config)
if module.enabled is True:
return module.main()
return []
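# The loader above only assumes that each module file exposes a
# KartonUnpackerModule class shaped roughly like this sketch (inferred from
# the call site; not defined in this file):
#
# class KartonUnpackerModule:
#     def __init__(self, sample, config):
#         self.enabled = True   # set False to have the module skipped
#     def main(self) -> list:
#         return []             # a list of karton Task objects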
class Unpacker(Karton):
"""
    A modular Karton Framework service that unpacks common packers like UPX, MPRESS and others using the Qiling Framework.
"""
identity = "karton.unpacker"
filters = [
{"type": "sample", "stage": "recognized", "kind": "runnable", "platform": "win32"},
{"type": "sample", "stage": "recognized", "kind": "runnable", "platform": "win64" },
{"type": "sample", "stage": "recognized", "kind": "runnable", "platform": "linux"}
]
@classmethod
def args_parser(cls):
parser = super().args_parser()
parser.add_argument("--modules", help="Modules Directory", type=str, required=True)
parser.add_argument("--rootfs", help="Emulator RootFS", type=str, default=None, required=False)
parser.add_argument("--emulator-timeout", help="Emulator Timeout", type=int, default=5000, required=False)
parser.add_argument("--timeout", help="Task Timeout in Seconds", type=int, default=30, required=False)
parser.add_argument("--debug", help="Debug", action='store_true', default=False, required=False)
return parser
@classmethod
def main(cls):
parser = cls.args_parser()
args = parser.parse_args()
config = Config(args.config_file)
user_config = {
'modules': args.modules,
'rootfs': args.rootfs,
'emulator_timeout': args.emulator_timeout,
'timeout': args.timeout,
'debug': args.debug
}
service = Unpacker(config=config, user_config=user_config)
service.loop()
    def __init__(self, user_config: Optional[dict] = None, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        user_config = user_config or {}
        modules = user_config.get('modules') or "modules"
self.modules = glob.glob(f'{modules}/**/*.py', recursive=True)
self.user_config = user_config
def process(self, task: Task) -> None:
sample = task.get_resource("sample")
for module in self.modules:
tasks = unpacker_module_worker(sample, self.user_config, module)
for task in tasks:
self.send_task(task)
if __name__ == "__main__":
Unpacker().loop()
|
190130
|
import os
class Resource:
"""Helper class to facilitate mounting. On assign, Seamless will mount "filename" """
def __init__(self, filename, data=None):
self.filename = filename
if data is None and os.path.exists(filename):
with open(filename) as f:
data = f.read()
self.data = data
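# A minimal usage sketch (the filename is illustrative): reads ./config.yaml
# into .data when the file exists; otherwise .data stays None.
# resource = Resource("config.yaml")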
|
190142
|
from unittest.mock import patch, Mock
from Crypto.Cipher import AES
from lxml import etree
from federation.protocols.diaspora.encrypted import pkcs7_unpad, EncryptedPayload
from federation.tests.fixtures.keys import get_dummy_private_key
def test_pkcs7_unpad():
assert pkcs7_unpad(b"foobar\x02\x02") == b"foobar"
assert pkcs7_unpad("foobar\x02\x02") == "foobar"
class TestEncryptedPayload:
@patch("federation.protocols.diaspora.encrypted.PKCS1_v1_5.new")
@patch("federation.protocols.diaspora.encrypted.AES.new")
@patch("federation.protocols.diaspora.encrypted.pkcs7_unpad", side_effect=lambda x: x)
@patch("federation.protocols.diaspora.encrypted.b64decode", side_effect=lambda x: x)
def test_decrypt(self, mock_decode, mock_unpad, mock_aes, mock_pkcs1):
mock_decrypt = Mock(return_value=b'{"iv": "foo", "key": "bar"}')
mock_pkcs1.return_value = Mock(decrypt=mock_decrypt)
mock_encrypter = Mock(return_value="<foo>bar</foo>")
mock_aes.return_value = Mock(decrypt=mock_encrypter)
doc = EncryptedPayload.decrypt(
{"aes_key": '{"iv": "foo", "key": "bar"}', "encrypted_magic_envelope": "magically encrypted"},
"private_key",
)
mock_pkcs1.assert_called_once_with("private_key")
mock_decrypt.assert_called_once_with('{"iv": "foo", "key": "bar"}', sentinel=None)
assert mock_decode.call_count == 4
mock_aes.assert_called_once_with("bar", AES.MODE_CBC, "foo")
mock_encrypter.assert_called_once_with("magically encrypted")
assert doc.tag == "foo"
assert doc.text == "bar"
def test_encrypt(self):
private_key = get_dummy_private_key()
public_key = private_key.publickey()
encrypted = EncryptedPayload.encrypt("<spam>eggs</spam>", public_key)
assert "aes_key" in encrypted
assert "encrypted_magic_envelope" in encrypted
# See we can decrypt it too
decrypted = EncryptedPayload.decrypt(encrypted, private_key)
assert etree.tostring(decrypted).decode("utf-8") == "<spam>eggs</spam>"
|
190145
|
import unittest
import mock
from reppy import ttl
class TTLPolicyBaseTest(unittest.TestCase):
'''Tests about TTLPolicyBase.'''
def test_does_not_implement_ttl(self):
'''Does not implement the ttl method.'''
with self.assertRaises(NotImplementedError):
ttl.TTLPolicyBase().ttl(object())
def test_implements_expires(self):
'''Expires is based off of ttl.'''
policy = ttl.TTLPolicyBase()
with mock.patch.object(policy, 'ttl', return_value=10):
with mock.patch.object(ttl.time, 'time', return_value=100):
self.assertEqual(policy.expires(object()), 110)
class HeaderWithDefaultPolicyTest(unittest.TestCase):
'''Tests about HeaderWithDefaultPolicy.'''
def test_no_store(self):
'''Returns the minimum when no-store present.'''
response = mock.Mock(headers={
'cache-control': 'no-store'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 10)
def test_must_revalidate(self):
'''Returns the minimum when must-revalidate present.'''
response = mock.Mock(headers={
'cache-control': 'must-revalidate'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 10)
def test_no_cache(self):
'''Returns the minimum when no-cache present.'''
response = mock.Mock(headers={
'cache-control': 'no-cache'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 10)
def test_s_maxage(self):
'''Returns the parsed s-maxage.'''
response = mock.Mock(headers={
'cache-control': 's-maxage=15'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 15)
def test_max_age(self):
'''Returns the parsed max-age.'''
response = mock.Mock(headers={
'cache-control': 'max-age=15'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 15)
def test_default_for_malformed_maxage(self):
'''Returns the default when maxage cannot be parsed.'''
response = mock.Mock(headers={
'cache-control': 'max-age=not-a-number'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 20)
def test_multiple_cache_control(self):
'''Can walk through multiple cache control configs.'''
response = mock.Mock(headers={
'cache-control': 'foo, max-age=15'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 15)
def test_expires_with_no_date(self):
'''Uses the host computer's date when the Date header is absent.'''
expires = 'Thu, 13 Oct 2016 15:50:54 GMT'
response = mock.Mock(headers={
'expires': expires
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
timestamp = ttl.parse_date(expires)
expected = 60
with mock.patch.object(ttl.time, 'time', return_value=timestamp - expected):
self.assertEqual(policy.ttl(response), expected)
def test_expires_with_malformed_date(self):
'''Uses the host computer's date when the Date header is unparseable.'''
expires = 'Thu, 13 Oct 2016 15:50:54 GMT'
response = mock.Mock(headers={
'expires': expires,
'date': 'not parseable as a date'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
timestamp = ttl.parse_date(expires)
expected = 60
with mock.patch.object(ttl.time, 'time', return_value=timestamp - expected):
self.assertEqual(policy.ttl(response), expected)
def test_expires_with_date(self):
'''Uses the Date header when present.'''
response = mock.Mock(headers={
'expires': 'Thu, 13 Oct 2016 15:50:54 GMT',
'date': 'Thu, 13 Oct 2016 15:49:54 GMT'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 60)
def test_malformed_expires(self):
'''Returns the default when the Expires header is malformed.'''
response = mock.Mock(headers={
'expires': 'not parseable as a date'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 20)
def test_cache_control_precedence(self):
'''Cache control is used before expires.'''
response = mock.Mock(headers={
'cache-control': 'max-age=30',
'expires': 'Thu, 13 Oct 2016 15:50:54 GMT',
'date': 'Thu, 13 Oct 2016 15:49:54 GMT'
})
policy = ttl.HeaderWithDefaultPolicy(20, 10)
self.assertEqual(policy.ttl(response), 30)
|
190181
|
from unittest.mock import patch
from bag_transfer.lib.clients import (ArchivesSpaceClient,
ArchivesSpaceClientError)
from django.test import TestCase
class ClientTestCase(TestCase):
@patch("requests.Session.get")
@patch("requests.Session.post")
def test_get_resources(self, mock_post, mock_get):
"""Ensure client handles requests and errors."""
return_value = {}
mock_post.return_value.status_code = 200
mock_post.return_value.text = '{"session": "12345"}'
client = ArchivesSpaceClient("baseurl", "username", "password", "repo_id")
mock_get.return_value.status_code = 200
mock_get.return_value.json.return_value = return_value
self.assertEqual(client.get_resource("1"), return_value)
mock_get.return_value.status_code = 404
mock_get.return_value.json.return_value = {"error": "foobar"}
with self.assertRaisesMessage(ArchivesSpaceClientError, "foobar"):
client.get_resource("1")
|
190201
|
from deepee import __version__, PrivacyWrapper, UniformDataLoader
import torch
import toml
from pathlib import Path
def test_version():
path = Path(__file__).resolve().parents[1] / "pyproject.toml"
pyproject = toml.loads(open(str(path)).read())
assert __version__ == pyproject["tool"]["poetry"]["version"]
def test_overfitting():
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(1, 1)
def forward(self, x):
return self.lin(x)
class DS(torch.utils.data.Dataset):
def __init__(self):
self.features = torch.linspace(0, 1, 1000).requires_grad_(True)
self.labels = torch.linspace(0, 1, 1000).requires_grad_(True)
        def __getitem__(self, idx):
            return (self.features[idx], self.labels[idx])
def __len__(self):
return len(self.features)
dl = UniformDataLoader(DS(), batch_size=2)
model = PrivacyWrapper(Model(), 2, 1.0, 1.0)
optimizer = torch.optim.Adam(model.wrapped_model.parameters(), lr=1e-2)
losses = []
for feature, label in dl:
output = model(feature[..., None])
loss = ((output - label[..., None]) ** 2).mean()
loss.backward()
model.clip_and_accumulate()
model.noise_gradient()
optimizer.step()
model.prepare_next_batch()
losses.append(loss.item())
assert min(losses) < 0.01
|
190209
|
from ..tree_layer import TreeSparseMarginalsFast
import torch
from torch.autograd import gradcheck, Variable
def test_fasttree_sparse_decode():
torch.manual_seed(42)
n_nodes = 5
tsm = TreeSparseMarginalsFast(n_nodes, max_iter=1000)
for _ in range(20):
W = torch.randn(n_nodes, n_nodes + 1).view(-1)
W = Variable(W, requires_grad=True)
res = gradcheck(tsm, (W,), eps=1e-4,
atol=1e-3)
print(res)
assert res
def test_meaning_sparse_decode():
n_nodes = 4
w = torch.zeros(n_nodes, n_nodes + 1)
w[2, 1] = 100
w = Variable(w)
tsm = TreeSparseMarginalsFast(n_nodes, verbose=3)
u = tsm(w.view(-1))
for config in tsm.status['active_set']:
assert config[1 + 2] == 1
def test_fast_tree_ignores_diag():
n_nodes = 4
# w = torch.zeros(n_nodes, n_nodes + 1)
w_init = torch.randn(n_nodes * (n_nodes + 1))
w = Variable(w_init)
tsm = TreeSparseMarginalsFast(n_nodes)
u = tsm(w.view(-1))
k = 0
for m in range(1, n_nodes + 1):
for h in range(0, n_nodes + 1):
if h == m:
w_init[k] = 0
k += 1
w = Variable(w_init)
tsm = TreeSparseMarginalsFast(n_nodes)
u_zeroed = tsm(w.view(-1))
assert (u_zeroed - u).data.norm() < 1e-12
|
190220
|
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import fasttext
def run_kfold_test(clf, x, y, k=10):
'''
Runs k-Fold test for model benchmarking.
o Inputs:
- clf is a classifier to be trained, tested and evaluated
- x is input features
- y is target labels
- k defines the number of test-splits
'''
kf = KFold(k)
results = []
for train_idx, test_idx in kf.split(x, y):
# Train and test split
x_train = x[train_idx]
x_test = x[test_idx]
y_train = y[train_idx]
y_test = y[test_idx]
# Training
clf.fit(x_train, y_train)
## Evaluation
# Train-set
pred = clf.predict(x_train)
train_f1 = f1_score(y_train, pred)
train_acc = accuracy_score(y_train, pred)
cm = confusion_matrix(y_train, pred)
train_trp = cm[1, 1] / np.sum(cm[1, :])
# Test-set
pred = clf.predict(x_test)
test_f1 = f1_score(y_test, pred)
test_acc = accuracy_score(y_test, pred)
cm = confusion_matrix(y_test, pred)
test_trp = cm[1, 1] / np.sum(cm[1, :])
print(train_acc, train_f1, train_trp, test_acc, test_f1, test_trp, sep='\t')
results.append([train_acc, train_f1, train_trp, test_acc, test_f1, test_trp, clf])
    results = np.array(results)
    return results
class skfasttext:
    def __init__(self, train_file='/tmp/fasttext_train.txt'):
        'Creates a new SciKit-Learn style wrapped FastText object'
self.train_file = train_file
self.model = None
def fit(self, x, y):
'''
        Trains a supervised classifier.
o Inputs:
- x is a list of text documents
- y is a list or array with numerical labels (0 OK and 1 hatespeech)
'''
with open(self.train_file, 'w') as f:
for xi, yi in zip(x, y):
if yi == 0:
f.write('__label__OK ')
else:
f.write('__label__vihapuhetta ')
f.write(xi.replace('\n', ' ') + '\n')
self.model = fasttext.supervised(self.train_file, 'model', label_prefix='__label__')
def predict(self, x):
'''
Predicts/classifies given samples
o Input:
- x is a list of text documents
'''
predictions = self.model.predict(x)
pred = np.zeros(len(predictions))
for i in range(0, len(predictions)):
pred[i] = self.model.labels.index(predictions[i][0])
return pred
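# A minimal usage sketch (docs as an array of text documents, labels as 0/1
# targets; both names are illustrative). run_kfold_test indexes with numpy
# arrays, so the inputs should be np.ndarray rather than plain lists:
# clf = skfasttext()
# run_kfold_test(clf, np.array(docs), np.array(labels), k=10)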
|
190249
|
from collections import defaultdict
from typing import Dict, List, Optional, Type
from nuplan.common.maps.maps_datatypes import TrafficLightStatusData, TrafficLightStatusType
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.abstract_observation import AbstractObservation
from nuplan.planning.simulation.observation.idm.idm_agent_manager import IDMAgentManager
from nuplan.planning.simulation.observation.idm.idm_agents_builder import build_idm_agents_on_map_rails
from nuplan.planning.simulation.observation.observation_type import DetectionsTracks, Observation
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
class IDMAgents(AbstractObservation):
"""
Simulate agents based on IDM policy.
"""
def __init__(
self,
target_velocity: float,
min_gap_to_lead_agent: float,
headway_time: float,
accel_max: float,
decel_max: float,
scenario: AbstractScenario,
minimum_path_length: float = 20,
planned_trajectory_samples: int = 6,
planned_trajectory_sample_interval: float = 0.5,
):
"""
Constructor for IDMAgents
:param target_velocity: [m/s] Desired velocity in free traffic
:param min_gap_to_lead_agent: [m] Minimum relative distance to lead vehicle
:param headway_time: [s] Desired time headway. The minimum possible time to the vehicle in front
:param accel_max: [m/s^2] maximum acceleration
:param decel_max: [m/s^2] maximum deceleration (positive value)
:param scenario: scenario
:param minimum_path_length: [m] The minimum path length
:param planned_trajectory_samples: number of elements to sample for the planned trajectory.
:param planned_trajectory_sample_interval: [s] time interval of sequence to sample from.
"""
self.current_iteration = 0
self._target_velocity = target_velocity
self._min_gap_to_lead_agent = min_gap_to_lead_agent
self._headway_time = headway_time
self._accel_max = accel_max
self._decel_max = decel_max
self._scenario = scenario
self._minimum_path_length = minimum_path_length
self._planned_trajectory_samples = planned_trajectory_samples
self._planned_trajectory_sample_interval = planned_trajectory_sample_interval
# Prepare IDM agent manager
self._idm_agent_manager: Optional[IDMAgentManager] = None
def reset(self) -> None:
"""Inherited, see superclass."""
self.current_iteration = 0
self._idm_agent_manager = None
def _get_idm_agent_manager(self) -> IDMAgentManager:
"""
        Create the IDM agent manager in case it does not already exist
:return: IDMAgentManager
"""
if not self._idm_agent_manager:
agents, agent_occupancy = build_idm_agents_on_map_rails(
self._target_velocity,
self._min_gap_to_lead_agent,
self._headway_time,
self._accel_max,
self._decel_max,
self._minimum_path_length,
self._scenario,
)
self._idm_agent_manager = IDMAgentManager(agents, agent_occupancy, self._scenario.map_api)
return self._idm_agent_manager
def observation_type(self) -> Type[Observation]:
"""Inherited, see superclass."""
return DetectionsTracks # type: ignore
def initialize(self) -> None:
"""Inherited, see superclass."""
pass
def get_observation(self) -> DetectionsTracks:
"""Inherited, see superclass."""
detections = self._get_idm_agent_manager().get_active_agents(
self.current_iteration, self._planned_trajectory_samples, self._planned_trajectory_sample_interval
)
return detections
def update_observation(
self, iteration: SimulationIteration, next_iteration: SimulationIteration, history: SimulationHistoryBuffer
) -> None:
"""Inherited, see superclass."""
self.current_iteration = next_iteration.index
tspan = next_iteration.time_s - iteration.time_s
traffic_light_data: List[TrafficLightStatusData] = self._scenario.get_traffic_light_status_at_iteration(
self.current_iteration
)
# Extract traffic light data into Dict[traffic_light_status, lane_connector_ids]
traffic_light_status: Dict[TrafficLightStatusType, List[str]] = defaultdict(list)
for data in traffic_light_data:
traffic_light_status[data.status].append(str(data.lane_connector_id))
ego_state, _ = history.current_state
self._get_idm_agent_manager().propagate_agents(ego_state, tspan, self.current_iteration, traffic_light_status)
|
190262
|
import starry
import matplotlib.pyplot as plt
import numpy as np
import os
# Settings
lmax = 5
res = 300
# Set up the plot
fig, ax = plt.subplots(lmax + 1, 2 * lmax + 1, figsize=(9, 6))
fig.subplots_adjust(hspace=0)
for axis in ax.flatten():
axis.set_xticks([])
axis.set_yticks([])
axis.spines["top"].set_visible(False)
axis.spines["right"].set_visible(False)
axis.spines["bottom"].set_visible(False)
axis.spines["left"].set_visible(False)
for l in range(lmax + 1):
ax[l, lmax - l].set_ylabel(
r"$l = %d$" % l,
rotation="horizontal",
labelpad=15,
y=0.35,
fontsize=11,
alpha=0.5,
)
for j, m in enumerate(range(-lmax, lmax + 1)):
if m < 0:
ax[-1, j].set_xlabel(
r"$m {=} $-$%d$" % -m,
labelpad=5,
fontsize=11,
rotation="45",
x=0.3,
alpha=0.5,
)
else:
ax[-1, j].set_xlabel(
r"$m = %d$" % m,
labelpad=5,
fontsize=11,
rotation=45,
x=0.35,
alpha=0.5,
)
# Loop over the orders and degrees
map = starry.Map(lmax, lazy=False)
for i, l in enumerate(range(lmax + 1)):
for j, m in enumerate(range(-l, l + 1)):
# Offset the index for centered plotting
j += lmax - l
# Compute the spherical harmonic
map.reset()
if l > 0:
map[l, m] = 1.0
map.show(ax=ax[i, j], grid=False)
ax[i, j].axis("on")
ax[i, j].set_rasterization_zorder(1)
# We're done
fig.savefig(
os.path.abspath(__file__).replace(".py", ".pdf"),
bbox_inches="tight",
dpi=300,
)
|
190263
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.linalg
import scipy.special
from . import thops
def nan_throw(tensor, name="tensor"):
stop = False
if ((tensor!=tensor).any()):
print(name + " has nans")
stop = True
if (torch.isinf(tensor).any()):
print(name + " has infs")
stop = True
if stop:
print(name + ": " + str(tensor))
        #raise ValueError(name + ' contains nans or infs')
class _ActNorm(nn.Module):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
    so that the per-channel output has zero mean and unit variance for that minibatch.
After initialization, `bias` and `logs` will be trained as parameters.
"""
def __init__(self, num_features, scale=1.):
super().__init__()
# register mean and scale
size = [1, num_features, 1]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.num_features = num_features
self.scale = float(scale)
# self.inited = False
self.register_buffer('is_initialized', torch.zeros(1))
def _check_input_dim(self, input):
return NotImplemented
def initialize_parameters(self, input):
# print("HOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOo")
self._check_input_dim(input)
if not self.training:
return
assert input.device == self.bias.device
with torch.no_grad():
bias = thops.mean(input.clone(), dim=[0, 2], keepdim=True) * -1.0
vars = thops.mean((input.clone() + bias) ** 2, dim=[0, 2], keepdim=True)
logs = torch.log(self.scale/(torch.sqrt(vars)+1e-6))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
# self.inited = True
self.is_initialized += 1.
def _center(self, input, reverse=False):
if not reverse:
return input + self.bias
else:
return input - self.bias
def _scale(self, input, logdet=None, reverse=False):
logs = self.logs
if not reverse:
input = input * torch.exp(logs)
else:
input = input * torch.exp(-logs)
if logdet is not None:
"""
logs is log_std of `mean of channels`
so we need to multiply timesteps
"""
dlogdet = thops.sum(logs) * thops.timesteps(input)
if reverse:
dlogdet *= -1
logdet = logdet + dlogdet
return input, logdet
def forward(self, input, logdet=None, reverse=False):
if not self.is_initialized:
self.initialize_parameters(input)
self._check_input_dim(input)
# no need to permute dims as old version
if not reverse:
# center and scale
input = self._center(input, reverse)
input, logdet = self._scale(input, logdet, reverse)
else:
# scale and center
input, logdet = self._scale(input, logdet, reverse)
input = self._center(input, reverse)
return input, logdet
class ActNorm2d(_ActNorm):
def __init__(self, num_features, scale=1.):
super().__init__(num_features, scale)
def _check_input_dim(self, input):
assert len(input.size()) == 3
assert input.size(1) == self.num_features, (
"[ActNorm]: input should be in shape as `BCT`,"
" channels should be {} rather than {}".format(
self.num_features, input.size()))
class LinearZeros(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
# set logs parameter
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class Conv2d(nn.Conv2d):
pad_dict = {
"same": lambda kernel, stride: [((k - 1) * s + 1) // 2 for k, s in zip(kernel, stride)],
"valid": lambda kernel, stride: [0 for _ in kernel]
}
@staticmethod
def get_padding(padding, kernel_size, stride):
        # make padding
if isinstance(padding, str):
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if isinstance(stride, int):
stride = [stride, stride]
padding = padding.lower()
try:
padding = Conv2d.pad_dict[padding](kernel_size, stride)
except KeyError:
raise ValueError("{} is not supported".format(padding))
return padding
def __init__(self, in_channels, out_channels,
kernel_size=[3, 3], stride=[1, 1],
padding="same", do_actnorm=True, weight_std=0.05):
padding = Conv2d.get_padding(padding, kernel_size, stride)
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, bias=(not do_actnorm))
# init weight with std
self.weight.data.normal_(mean=0.0, std=weight_std)
if not do_actnorm:
self.bias.data.zero_()
else:
self.actnorm = ActNorm2d(out_channels)
self.do_actnorm = do_actnorm
def forward(self, input):
x = super().forward(input)
if self.do_actnorm:
x, _ = self.actnorm(x)
return x
class Conv2dZeros(nn.Conv2d):
def __init__(self, in_channels, out_channels,
kernel_size=[3, 3], stride=[1, 1],
padding="same", logscale_factor=3):
padding = Conv2d.get_padding(padding, kernel_size, stride)
super().__init__(in_channels, out_channels, kernel_size, stride, padding)
# logscale_factor
self.logscale_factor = logscale_factor
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels, 1, 1)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class LinearNormInit(nn.Linear):
def __init__(self, in_channels, out_channels, weight_std=0.05):
super().__init__(in_channels, out_channels)
# init
self.weight.data.normal_(mean=0.0, std=weight_std)
self.bias.data.zero_()
class LinearZeroInit(nn.Linear):
def __init__(self, in_channels, out_channels):
super().__init__(in_channels, out_channels)
# init
self.weight.data.zero_()
self.bias.data.zero_()
class Permute2d(nn.Module):
def __init__(self, num_channels, shuffle):
super().__init__()
self.num_channels = num_channels
print(num_channels)
        self.indices = np.arange(self.num_channels - 1, -1, -1).astype(np.int64)
        self.indices_inverse = np.zeros((self.num_channels), dtype=np.int64)
print(self.indices_inverse.shape)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
if shuffle:
self.reset_indices()
def reset_indices(self):
np.random.shuffle(self.indices)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
def forward(self, input, reverse=False):
assert len(input.size()) == 3
if not reverse:
return input[:, self.indices, :]
else:
return input[:, self.indices_inverse, :]
class InvertibleConv1x1(nn.Module):
def __init__(self, num_channels, LU_decomposed=False):
super().__init__()
w_shape = [num_channels, num_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
#self.p = torch.Tensor(np_p.astype(np.float32))
#self.sign_s = torch.Tensor(np_sign_s.astype(np.float32))
self.register_buffer('p', torch.Tensor(np_p.astype(np.float32)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
self.first_pass = True
self.saved_weight = None
self.saved_dlogdet = None
def get_weight(self, input, reverse):
w_shape = self.w_shape
if not self.LU:
timesteps = thops.timesteps(input)
dlogdet = torch.slogdet(self.weight)[1] * timesteps
if not reverse:
weight = self.weight.view(w_shape[0], w_shape[1], 1)
else:
weight = torch.inverse(self.weight.double()).float()\
.view(w_shape[0], w_shape[1], 1)
return weight, dlogdet
else:
self.p = self.p.to(input.device)
self.sign_s = self.sign_s.to(input.device)
self.l_mask = self.l_mask.to(input.device)
self.eye = self.eye.to(input.device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
dlogdet = thops.sum(self.log_s) * thops.timesteps(input)
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1), dlogdet
def forward(self, input, logdet=None, reverse=False):
"""
log-det = log|abs(|W|)| * timesteps
"""
# weight, dlogdet = self.get_weight(input, reverse)
if not reverse:
weight, dlogdet = self.get_weight(input, reverse)
else:
if self.first_pass:
weight, dlogdet = self.get_weight(input, reverse)
self.saved_weight = weight
if logdet is not None:
self.saved_dlogdet = dlogdet
self.first_pass = False
else:
weight = self.saved_weight
if logdet is not None:
dlogdet = self.saved_dlogdet
nan_throw(weight, "weight")
nan_throw(dlogdet, "dlogdet")
if not reverse:
z = F.conv1d(input, weight)
if logdet is not None:
logdet = logdet + dlogdet
return z, logdet
else:
nan_throw(input, "InConv input")
z = F.conv1d(input, weight)
nan_throw(z, "InConv z")
nan_throw(logdet, "InConv logdet")
if logdet is not None:
logdet = logdet - dlogdet
return z, logdet
# Here we define our model as a class
class LSTM(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim=1, num_layers=2, dropout=0.0):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
# Define the LSTM layer
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)
# Define the output layer
self.linear = LinearZeroInit(self.hidden_dim, output_dim)
# do_init
self.do_init = True
def init_hidden(self):
# This is what we'll initialise our hidden state as
self.do_init = True
def forward(self, input):
# Forward pass through LSTM layer
        # shape of lstm_out: [batch_size, seq_len, hidden_dim]
        # shape of self.hidden: (h, c), where h and c both
        # have shape (num_layers, batch_size, hidden_dim).
if self.do_init:
lstm_out, self.hidden = self.lstm(input)
self.do_init = False
else:
lstm_out, self.hidden = self.lstm(input, self.hidden)
#self.hidden = hidden[0].to(input.device), hidden[1].to(input.device)
# Final layer
y_pred = self.linear(lstm_out)
return y_pred
# Here we define our model as a class
class GRU(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim=1, num_layers=2, dropout=0.0):
super(GRU, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
        # Define the GRU layer
self.gru = nn.GRU(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)
# Define the output layer
self.linear = LinearZeroInit(self.hidden_dim, output_dim)
# do_init
self.do_init = True
def init_hidden(self):
# This is what we'll initialise our hidden state as
self.do_init = True
def forward(self, input):
        # Forward pass through GRU layer
        # shape of gru_out: [batch_size, seq_len, hidden_dim]
        # shape of self.hidden: a single tensor of shape
        # (num_layers, batch_size, hidden_dim), unlike the LSTM's (h, c) pair.
if self.do_init:
gru_out, self.hidden = self.gru(input)
self.do_init = False
else:
gru_out, self.hidden = self.gru(input, self.hidden)
#self.hidden = hidden[0].to(input.device), hidden[1].to(input.device)
# Final layer
y_pred = self.linear(gru_out)
return y_pred
class GaussianDiag:
Log2PI = float(np.log(2 * np.pi))
@staticmethod
def likelihood(x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * (((x) ** 2) + GaussianDiag.Log2PI)
@staticmethod
def logp(x):
likelihood = GaussianDiag.likelihood(x)
return thops.sum(likelihood, dim=[1, 2])
@staticmethod
def sample(z_shape, eps_std=None, device=None):
eps_std = eps_std or 1
eps = torch.normal(mean=torch.zeros(z_shape),
std=torch.ones(z_shape) * eps_std)
eps = eps.to(device)
return eps
class StudentT:
def __init__(self, df, d):
self.df=df
self.d=d
self.norm_const = scipy.special.loggamma(0.5*(df+d))-scipy.special.loggamma(0.5*df)-0.5*d*np.log(np.pi*df)
def logp(self,x):
'''
Multivariate t-student density:
output:
the sum density of the given element
'''
#df=100
#d=x.shape[1]
#norm_const = scipy.special.loggamma(0.5*(df+d))-scipy.special.loggamma(0.5*df)-0.5*d*np.log(np.pi*df)
#import pdb; pdb.set_trace()
x_norms = thops.sum(((x) ** 2), dim=[1])
likelihood = self.norm_const-0.5*(self.df+self.d)*torch.log(1+(1/self.df)*x_norms)
return thops.sum(likelihood, dim=[1])
def sample(self,z_shape, eps_std=None, device=None):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
#df=100
# import pdb; pdb.set_trace()
x_shape = torch.Size((z_shape[0], 1, z_shape[2]))
x = np.random.chisquare(self.df, x_shape)/self.df
x = np.tile(x, (1,z_shape[1],1))
x = torch.Tensor(x.astype(np.float32))
z = torch.normal(mean=torch.zeros(z_shape),std=torch.ones(z_shape) * eps_std)
# import pdb; pdb.set_trace()
return (z/torch.sqrt(x)).to(device)
class Split2d(nn.Module):
def __init__(self, num_channels):
super().__init__()
print("Split2d num_channels:" + str(num_channels))
self.num_channels = num_channels
self.conv = Conv2dZeros(num_channels // 2, num_channels)
def split2d_prior(self, z):
h = self.conv(z)
return thops.split_feature(h, "cross")
def forward(self, input, cond, logdet=0., reverse=False, eps_std=None):
if not reverse:
#print("forward Split2d input:" + str(input.shape))
z1, z2 = thops.split_feature(input, "split")
#mean, logs = self.split2d_prior(z1)
logdet = GaussianDiag.logp(z2) + logdet
return z1, cond, logdet
else:
z1 = input
#print("reverse Split2d z1.shape:" + str(z1.shape))
#mean, logs = self.split2d_prior(z1)
z2_shape = list(z1.shape)
z2_shape[1] = self.num_channels-z1.shape[1]
z2 = GaussianDiag.sample(z2_shape, eps_std, device=input.device)
z = thops.cat_feature(z1, z2)
return z, cond, logdet
def squeeze2d(input, factor=2):
assert factor >= 1 and isinstance(factor, int)
if factor == 1:
return input
size = input.size()
B = size[0]
C = size[1]
H = size[2]
W = size[3]
assert H % factor == 0 , "{}".format((H, W))
x = input.view(B, C, H // factor, factor, W, 1)
x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
x = x.view(B, C * factor, H // factor, W)
return x
def unsqueeze2d(input, factor=2):
assert factor >= 1 and isinstance(factor, int)
#factor2 = factor ** 2
if factor == 1:
return input
size = input.size()
B = size[0]
C = size[1]
H = size[2]
W = size[3]
assert C % (factor) == 0, "{}".format(C)
x = input.view(B, C // factor, factor, 1, H, W)
x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
x = x.view(B, C // (factor), H * factor, W)
return x
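# Shape sketch: squeeze2d maps (B, C, H, W) -> (B, C * factor, H // factor, W),
# folding the H axis into channels; unsqueeze2d inverts it exactly, e.g.
# (2, 4, 8, 1) <-> (2, 8, 4, 1) for factor=2.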
class SqueezeLayer(nn.Module):
def __init__(self, factor):
super().__init__()
self.factor = factor
def forward(self, input, cond = None, logdet=None, reverse=False):
if not reverse:
output = squeeze2d(input, self.factor)
cond_out = squeeze2d(cond, self.factor)
return output, cond_out, logdet
else:
output = unsqueeze2d(input, self.factor)
cond_output = unsqueeze2d(cond, self.factor)
return output, cond_output, logdet
def squeeze_cond(self, cond):
cond_out = squeeze2d(cond, self.factor)
return cond_out
|
190267
|
try:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
except ImportError: # mpl is optional
pass
import numpy as np
from keanu.vartypes import sample_types, numpy_types
from typing import Any, List, Optional, Tuple, Union
def traceplot(trace: sample_types, labels: Optional[List[Union[str, Tuple[str, str]]]] = None, ax: Any = None,
              x0: int = 0) -> Any:
"""
    Plot sample values.
    :param trace: result of MCMC run
    :param labels: labels of vertices to be plotted. If None, all vertices are plotted.
:param ax: Matplotlib axes
:param x0: index of first data point, used for sample stream plots
"""
if labels is None:
labels = list(trace.keys())
if ax is None:
_, ax = plt.subplots(len(labels), 1, squeeze=False)
for index, label in enumerate(labels):
data = [sample for sample in trace[label]]
ax[index][0].set_title(label)
ax[index][0].plot(__integer_xaxis(ax[index][0], x0, len(data)), data)
__pause_for_crude_animation()
return ax
def __integer_xaxis(ax: Any, x0: int, n: int) -> range:
x = range(x0, x0 + n)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
return x
def __pause_for_crude_animation() -> None:
plt.pause(0.1)
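# A minimal usage sketch (assuming `trace` maps vertex labels to sample
# sequences, as returned by a Keanu MCMC run):
# ax = traceplot(trace, labels=["x"])
# plt.show()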
|
190272
|
from typing import Any, Dict, List, Tuple
from src.db.models.event import Event
from src.db.models.match import Match
from src.db.models.team import Team
from src.db.models.team_event import TeamEvent
from src.db.models.team_match import TeamMatch
from src.db.models.team_year import TeamYear
from src.db.models.year import Year
def create_team_obj(data: Dict[str, Any]) -> Team:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return Team.from_dict(data)
def create_year_obj(data: Dict[str, Any]) -> Year:
return Year.from_dict(data)
def create_team_year_obj(data: Dict[str, Any]) -> TeamYear:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return TeamYear.from_dict(data)
def create_event_obj(data: Dict[str, Any]) -> Event:
return Event.from_dict(data)
def create_team_event_obj(data: Dict[str, Any]) -> TeamEvent:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return TeamEvent.from_dict(data)
def create_match_obj(data: Dict[str, Any]) -> Tuple[Match, List[TeamMatch]]:
data["playoff"] = data["comp_level"] != "qm"
data["red_auto"] = data["red_score_breakdown"]["auto"]
data["red_auto_movement"] = data["red_score_breakdown"]["auto_movement"]
data["red_auto_1"] = data["red_score_breakdown"]["auto_1"]
data["red_auto_2"] = data["red_score_breakdown"]["auto_2"]
data["red_teleop_1"] = data["red_score_breakdown"]["teleop_1"]
data["red_teleop_2"] = data["red_score_breakdown"]["teleop_2"]
data["red_1"] = data["red_score_breakdown"]["1"]
data["red_2"] = data["red_score_breakdown"]["2"]
data["red_teleop"] = data["red_score_breakdown"]["teleop"]
data["red_endgame"] = data["red_score_breakdown"]["endgame"]
data["red_no_fouls"] = data["red_score_breakdown"]["no_fouls"]
data["red_fouls"] = data["red_score_breakdown"]["fouls"]
data["red_rp_1"] = data["red_score_breakdown"]["rp1"]
data["red_rp_2"] = data["red_score_breakdown"]["rp2"]
data["blue_auto"] = data["blue_score_breakdown"]["auto"]
data["blue_auto_movement"] = data["blue_score_breakdown"]["auto_movement"]
data["blue_auto_1"] = data["blue_score_breakdown"]["auto_1"]
data["blue_auto_2"] = data["blue_score_breakdown"]["auto_2"]
data["blue_teleop_1"] = data["blue_score_breakdown"]["teleop_1"]
data["blue_teleop_2"] = data["blue_score_breakdown"]["teleop_2"]
data["blue_1"] = data["blue_score_breakdown"]["1"]
data["blue_2"] = data["blue_score_breakdown"]["2"]
data["blue_teleop"] = data["blue_score_breakdown"]["teleop"]
data["blue_endgame"] = data["blue_score_breakdown"]["endgame"]
data["blue_no_fouls"] = data["blue_score_breakdown"]["no_fouls"]
data["blue_fouls"] = data["blue_score_breakdown"]["fouls"]
data["blue_rp_1"] = data["blue_score_breakdown"]["rp1"]
data["blue_rp_2"] = data["blue_score_breakdown"]["rp2"]
match = Match.from_dict(data)
team_matches: List[TeamMatch] = []
new_data = {"match": data["key"], **data}
for alliance in ["red", "blue"]:
new_data["alliance"] = alliance
for team in data[alliance].split(","):
new_data["team"] = int(team)
team_matches.append(create_team_match_obj(new_data))
return (match, team_matches)
def create_team_match_obj(data: Dict[str, Any]) -> TeamMatch:
return TeamMatch.from_dict(data)
|
190276
|
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_version():
response = requests.get(fixture.plugin_url("tangelo", "version"))
expected = "0.10.0-dev"
    assert response.text == expected
|
190288
|
import numpy as np
def fUi_VortexSegment11_smooth(xa = None,ya = None,za = None,xb = None,yb = None,zb = None,visc_model = None,t = None,bComputeGrad = None):
# !!!!! No intensity!!!
norm_a = np.sqrt(xa * xa + ya * ya + za * za)
norm_b = np.sqrt(xb * xb + yb * yb + zb * zb)
denominator = norm_a * norm_b * (norm_a * norm_b + xa * xb + ya * yb + za * zb)
crossprod = np.array([[ya * zb - za * yb],[za * xb - xa * zb],[xa * yb - ya * xb]])
if bComputeGrad:
Uout = np.zeros((12,1))
else:
        Uout = np.zeros((3, 1))
# check for singularity */
# fprintf('denom norma normb, #f #f #f\n',denominator,norm_a,norm_b)
if denominator < 1e-17 and (visc_model < 4 or visc_model > 5):
print('exit1')
return Uout
else:
if (norm_a < 1e-08 or norm_b < 1e-08) and (visc_model < 4):
print('exit2')
return Uout
else:
            # viscous model */
            Kv = 1.0
            if visc_model == 0:
                Kv = 1.0
            elif visc_model == 1:
                norm_r0 = np.sqrt((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb) + (za - zb) * (za - zb))
                # orthogonal distance |r1 x r2| / |r0| */
                h = np.sqrt(crossprod[0, 0] ** 2 + crossprod[1, 0] ** 2 + crossprod[2, 0] ** 2) / norm_r0
                if h < t:
                    Kv = h * h / t / t
                else:
                    Kv = 1.0
            elif visc_model == 2:
                norm_r0 = np.sqrt((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb) + (za - zb) * (za - zb))
                h = np.sqrt(crossprod[0, 0] ** 2 + crossprod[1, 0] ** 2 + crossprod[2, 0] ** 2) / norm_r0
                Kv = 1.0 - np.exp(- 1.25643 * h * h / t / t)
            elif visc_model == 3:
                norm_r0 = np.sqrt((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb) + (za - zb) * (za - zb))
                h = np.sqrt(crossprod[0, 0] ** 2 + crossprod[1, 0] ** 2 + crossprod[2, 0] ** 2) / norm_r0
                # h = (norm_a+norm_b)/2;
                Kv = h * h / np.sqrt(t ** 4 + h ** 4)
            elif visc_model == 4:
                norm_r0 = np.sqrt((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb) + (za - zb) * (za - zb))
                Kv = 1.0
                # delta*norm(r0)^2 */
                denominator = denominator + t * norm_r0
            elif visc_model == 5:
                Kv = 1.0
                # (delta l_0)^2 */
                denominator = denominator + t
            elif visc_model == 33:
                # norm_r0 = sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb) +(za-zb)*(za-zb));
                # h = |r1 x r2| / |r0|  # orthogonal distance r1xr2/r0 */
                h = (norm_a + norm_b) / 2
                Kv = h * h / np.sqrt(t ** 4 + h ** 4)
            # fprintf('Kv #f\n',Kv)
            Kv = Kv / 4.0 / np.pi * (norm_a + norm_b) / denominator
            # fprintf('Kv #f',Kv)
            # fprintf('crossprod #f\n',crossprod)
            Uout[0:3] = Kv * crossprod
# fprintf('Uout #f\n',Kv*crossprod)
    if bComputeGrad:
        d = (norm_a * norm_b + xa * xb + ya * yb + za * zb)
        if np.abs(d) > 1e-09:
            ra = np.array([[xa], [ya], [za]])
            rb = np.array([[xb], [yb], [zb]])
            D = - (ra / norm_a ** 3 + rb / norm_b ** 3) - (1 / norm_a + 1 / norm_b) * 1 / d * (ra / norm_a * norm_b + rb / norm_b * norm_a + ra + rb)
            GradRaRb = np.zeros((3, 3))
            GradRaRb[0, 1] = za - zb
            GradRaRb[1, 0] = zb - za
            GradRaRb[0, 2] = yb - ya
            GradRaRb[2, 0] = ya - yb
            GradRaRb[1, 2] = xa - xb
            GradRaRb[2, 1] = xb - xa
            GradU = 1 / (4 * np.pi) * 1 / d * (- (1 / norm_a + 1 / norm_b) * GradRaRb + crossprod * np.transpose(D))
            Uout[3:6] = GradU[0, :].reshape(3, 1)
            Uout[6:9] = GradU[1, :].reshape(3, 1)
            Uout[9:12] = GradU[2, :].reshape(3, 1)
    if np.isnan(Uout).any():
print('Error fUi_Vortexline')
#kbd
# printf("#4.3f #4.3f #4.3f #4.3f #4.3f\n",Uout(1),Uout(2),Uout(3),Kv,denominator); */
return Uout
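# A minimal usage sketch (illustrative endpoint coordinates, taken relative to
# the evaluation point as in the MATLAB original; Uout[0:3] then holds the
# induced velocity per unit circulation):
# Ui = fUi_VortexSegment11_smooth(xa=0.5, ya=1.0, za=0.0,
#                                 xb=-0.5, yb=1.0, zb=0.0,
#                                 visc_model=2, t=0.1, bComputeGrad=False)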
|
190299
|
import glob
from abc import ABC, abstractmethod
from contextlib import ExitStack, contextmanager
from dataclasses import dataclass
from functools import reduce
from logging import getLogger
from pathlib import Path
from sys import stderr
from tempfile import TemporaryDirectory
from typing import Iterator, List, Optional, Union
from urllib.parse import urlparse
from uuid import uuid4
import boto3
import pandas as pd
import pyarrow.parquet as pq
from pyarrow import lib as pyarrowlib
from halo import Halo
logger = getLogger(__name__)
class InvalidCommandException(Exception):
    '''Exception for invalid command. The argument parser raises this exception.
    '''
pass
class FileNotFoundException(Exception):
pass
class ParquetFile(ABC):
'''Abstract ParquetFile.
    One object corresponds not to a single parquet file but to one file expression,
    such as ./target.parquet, ./*.parquet, s3://bucket/foo.parquet or s3://bucket/*
'''
def __post_init__(self):
self.validation()
def validation(self) -> None:
'''validate properties
'''
pass
@abstractmethod
def is_wildcard(self) -> bool:
        '''Return whether this expression may match more than one file.
'''
raise NotImplementedError()
@abstractmethod
def resolve_wildcard(self) -> List['ParquetFile']:
        '''Return concrete ParquetFile objects.
'''
raise NotImplementedError()
@contextmanager
@abstractmethod
def get_local_path(self) -> Iterator[str]:
'''Return local file path.
If call this function of S3ParquetFile, return the path of downloaded file.
'''
raise NotImplementedError()
@contextmanager
    def get_dataframe(self) -> Iterator[Optional[pd.DataFrame]]:
with self.get_local_path() as local_path:
try:
yield pq.read_table(local_path).to_pandas()
except pyarrowlib.ArrowInvalid:
print(f"File({local_path}) cannot be read as parquet.", file=stderr)
yield None
@dataclass
class LocalParquetFile(ParquetFile):
'''Parquet file object on local disk
'''
path: str
def is_wildcard(self) -> bool:
return '*' in self.path
def resolve_wildcard(self) -> List[ParquetFile]:
return sorted(
[LocalParquetFile(f) for f in glob.glob(self.path)],
key=lambda x: x.path
)
@contextmanager
def get_local_path(self) -> Iterator[str]:
if self.is_wildcard():
raise Exception('Please resolve first.')
if not Path(self.path).exists():
raise FileNotFoundException(f'File({self.path}) not found')
yield self.path
@dataclass
class S3ParquetFile(ParquetFile):
'''Parquet file object on S3
'''
aws_session: boto3.Session
bucket: str
key: str
def validation(self):
        '''The key may contain '*', but only as its last character.
        '''
        if self.is_wildcard() and self.key.index('*') != len(self.key) - 1:
            raise InvalidCommandException('You can use * only at the end of the path')
def is_wildcard(self) -> bool:
return '*' in self.key
def resolve_wildcard(self) -> List[ParquetFile]:
list_res = self.aws_session.client('s3')\
.list_objects_v2(
Bucket=self.bucket,
Prefix=self.key[:-1] # remove *
)
if list_res['IsTruncated']:
raise Exception(f'Too much file match s3://{self.bucket}/{self.key}')
if list_res['KeyCount'] == 0:
return []
keys = [e['Key'] for e in list_res['Contents']]
return sorted(
[S3ParquetFile(aws_session=self.aws_session, bucket=self.bucket, key=key) for key in keys],
key=lambda x: x.key
)
@contextmanager
def get_local_path(self) -> Iterator[str]:
if self.is_wildcard():
raise Exception('Please resolve first.')
with TemporaryDirectory() as tmp_path:
localfile = f'{tmp_path}/{uuid4()}.parquet'
            logger.info(f'Downloading parquet file s3://{self.bucket}/{self.key} -> {localfile}')
try:
with Halo(text='Downloading from s3', spinner='dots', stream=stderr) as spinner:
self.aws_session.resource('s3')\
.meta.client.download_file(self.bucket, self.key, localfile)
spinner.info(f's3://{self.bucket}/{self.key} => {localfile}')
except Exception:
raise FileNotFoundException(f's3://{self.bucket}/{self.key} not found or cannot access')
else:
yield localfile
def get_aws_session(profile_name: Optional[str]) -> boto3.Session:
return boto3.Session(profile_name=profile_name)
def _is_s3_file(filename: str) -> bool:
return filename[:5] == 's3://'
def to_parquet_file(file_exp: str, awsprofile: Optional[str]) -> ParquetFile:
'''Transform file_exp to ParquetFile object.
'''
if _is_s3_file(file_exp):
parsed_url = urlparse(file_exp)
return S3ParquetFile(
aws_session=get_aws_session(awsprofile),
bucket=parsed_url.netloc,
key=parsed_url.path[1:]
)
else:
return LocalParquetFile(
path=file_exp
)
@contextmanager
def get_datafame_from_objs(objs: List[ParquetFile], head: Optional[Union[int, float]] = None):
'''Get pandas dataframe of ParquetFile object list.
'''
if head is None or head <= 0:
head = float('inf')
cumsum_row: int = 0
dfs: List[pd.DataFrame] = []
with ExitStack() as stack:
for obj in objs:
for pf in _resolve_wildcard(obj):
df: Optional[pd.DataFrame] = stack.enter_context(pf.get_dataframe())
if df is None:
continue
cumsum_row += len(df)
dfs.append(df)
if cumsum_row >= head:
break
if cumsum_row >= head:
break
if dfs:
yield reduce(lambda x, y: pd.concat([x, y]), dfs)
else:
yield None
def _resolve_wildcard(obj: ParquetFile) -> List[ParquetFile]:
if not obj.is_wildcard():
return [obj]
else:
return obj.resolve_wildcard()
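# A minimal usage sketch (bucket and prefix are illustrative):
# pf = to_parquet_file('s3://my-bucket/data/*', awsprofile=None)
# with get_datafame_from_objs([pf], head=100) as df:
#     print(df if df is None else df.head())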
|
190311
|
from user_accounts import exceptions
from intake.tests.base_testcases import IntakeDataTestCase
class TestUserProfile(IntakeDataTestCase):
fixtures = [
'counties', 'groups',
'organizations', 'mock_profiles',
'mock_2_submissions_to_cc_pubdef',
'mock_2_submissions_to_sf_pubdef',
'mock_1_submission_to_multiple_orgs',
'mock_1_bundle_to_cc_pubdef', 'template_options'
]
def test_shows_org_name_when_printed(self):
default_print_display = str(self.sf_pubdef_user.profile)
display_name = self.sf_pubdef_user.profile.get_display_name()
org_name = self.sf_pubdef_user.profile.organization.name
self.assertIn(org_name, default_print_display)
self.assertTrue(self.sf_pubdef_user.profile.should_get_notifications)
self.assertIn("gets notifications", default_print_display)
self.assertIn(org_name, display_name)
def test_user_should_see_pdf(self):
self.assertTrue(self.sf_pubdef_user.profile.should_see_pdf())
self.assertTrue(self.cfa_user.profile.should_see_pdf())
self.assertFalse(self.cc_pubdef_user.profile.should_see_pdf())
def test_should_have_access_to_allows_staff_submission_access(self):
for sub in self.submissions:
self.assertTrue(self.cfa_user.profile.should_have_access_to(sub))
def test_should_have_access_to_limits_submission_access_same_org(self):
for sub in self.sf_pubdef_submissions:
self.assertTrue(
self.sf_pubdef_user.profile.should_have_access_to(sub))
self.assertFalse(
self.cc_pubdef_user.profile.should_have_access_to(sub))
for sub in self.cc_pubdef_submissions:
self.assertFalse(
self.sf_pubdef_user.profile.should_have_access_to(sub))
self.assertTrue(
self.cc_pubdef_user.profile.should_have_access_to(sub))
for sub in self.combo_submissions:
self.assertTrue(
self.cc_pubdef_user.profile.should_have_access_to(sub))
self.assertTrue(
self.sf_pubdef_user.profile.should_have_access_to(sub))
    def test_should_have_access_to_allows_staff_bundle_access(self):
bundle = self.cc_pubdef_bundle
self.assertTrue(self.cfa_user.profile.should_have_access_to(bundle))
self.assertTrue(
self.cc_pubdef_user.profile.should_have_access_to(bundle))
self.assertFalse(
self.sf_pubdef_user.profile.should_have_access_to(bundle))
    def test_should_have_access_to_raises_error_for_other_objects(self):
with self.assertRaises(exceptions.UndefinedResourceAccessError):
self.cfa_user.profile.should_have_access_to({})
|
190389
|
import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ----------------------------------------------------------------------------------------------------------------------
FIELDS = {
'Finance.BusinessComposition': {
'bz_item': '主营业务来源',
'bz_sales': '主营业务收入(元)',
'bz_profit': '主营业务利润(元)',
'bz_cost': '主营业务成本(元)',
'curr_type': '货币代码',
'update_flag': '是否更新',
},
}
# -------------------------------------------------------- Prob --------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_name': 'finance_business_tushare_pro',
'plugin_version': '0.0.0.1',
'tags': ['tusharepro']
}
def plugin_adapt(uri: str) -> bool:
return uri in FIELDS.keys()
def plugin_capacities() -> list:
return list(FIELDS.keys())
# ----------------------------------------------------------------------------------------------------------------------
# fina_mainbz: https://tushare.pro/document/2?doc_id=81
def __fetch_business_data_by_type(pro: ts.pro_api, ts_code: str, classify: str,
                                  since: datetime.datetime, until: datetime.datetime):
limit = 10
result = None
derive_time = until
while limit > 0:
ts_since = since.strftime('%Y%m%d')
ts_until = derive_time.strftime('%Y%m%d')
ts_delay('fina_mainbz')
sub_result = pro.fina_mainbz(ts_code=ts_code, start_date=ts_since, end_date=ts_until, type=classify)
if not isinstance(sub_result, pd.DataFrame) or sub_result.empty:
break
result = pd.concat([result, sub_result])
result = result.reset_index(drop=True)
result_since = min(sub_result['end_date'])
result_since = text_auto_time(result_since)
# End condition
if result_since == derive_time or len(sub_result) < 100:
break
limit -= 1
derive_time = result_since
if isinstance(result, pd.DataFrame):
result = result.drop_duplicates()
return result
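# A hedged usage sketch of the pager above (TS_TOKEN must be valid; the
# ts_code below is an assumption for illustration, not part of this plugin):
#
#     pro = ts.pro_api(TS_TOKEN)
#     df = __fetch_business_data_by_type(
#         pro, '600000.SH', 'P',
#         since=datetime.datetime(2015, 1, 1),
#         until=datetime.datetime(2020, 12, 31))
#
# Each pass requests [since, derive_time]; derive_time then moves back to the
# earliest 'end_date' returned, until fewer than 100 rows come back, the
# window stops shrinking, or the 10-iteration safety limit is exhausted.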
def __fetch_business_data(**kwargs) -> pd.DataFrame:
uri = kwargs.get('uri')
result = check_execute_test_flag(**kwargs)
if result is None:
period = kwargs.get('period')
ts_code = pickup_ts_code(kwargs)
since, until = normalize_time_serial(period, default_since(), today())
# since_limit = years_ago_of(until, 3)
# since = max([since, since_limit])
# Because of the implementation of this interface, we only fetch the annual report
# since_year = since.year
# until_year = until.year
result = None
pro = ts.pro_api(TS_TOKEN)
try:
if is_slice_update(ts_code, since, until):
ts_since = since.strftime('%Y%m%d')
clock = Clock()
result_product = pro.fina_mainbz_vip(ts_since, type='P')
result_area = pro.fina_mainbz_vip(ts_since, type='D')
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
else:
clock = Clock()
                result_product = __fetch_business_data_by_type(pro, ts_code, 'P', since, until)
                result_area = __fetch_business_data_by_type(pro, ts_code, 'D', since, until)
print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
if isinstance(result_product, pd.DataFrame) and not result_product.empty:
result_product['classification'] = 'product'
if isinstance(result_area, pd.DataFrame) and not result_area.empty:
result_area['classification'] = 'area'
result = pd.concat([result_product, result_area])
# for year in range(since_year, until_year):
# ts_date = '%02d1231' % year
# # 抱歉,您每分钟最多访问该接口60次
# ts_delay('fina_mainbz')
# sub_result = pro.fina_mainbz(ts_code=ts_code, start_date=ts_date, end_date=ts_date)
# result = pd.concat([result, sub_result])
# print('%s: [%s] - Network finished, time spending: %sms' % (uri, ts_code, clock.elapsed_ms()))
# result.fillna(0.0)
# del result['ts_code']
# result.reset_index()
# business = result.groupby('end_date').apply(
# lambda x: x.drop('end_date', axis=1).to_dict('records'))
# result = pd.DataFrame.from_dict({'business': business}, orient='index').reset_index()
# result['ts_code'] = ts_code
except Exception as e:
print(e)
print(traceback.format_exc())
finally:
pass
check_execute_dump_flag(result, **kwargs)
if isinstance(result, pd.DataFrame) and not result.empty:
result.reset_index(drop=True, inplace=True)
result.fillna('', inplace=True)
convert_ts_code_field(result)
convert_ts_date_field(result, 'end_date', 'period')
# if result is not None:
# result['period'] = pd.to_datetime(result['end_date'])
# result['stock_identity'] = result['ts_code'].apply(ts_code_to_stock_identity)
return result
# ----------------------------------------------------------------------------------------------------------------------
def query(**kwargs) -> Optional[pd.DataFrame]:
uri = kwargs.get('uri')
if uri in list(FIELDS.keys()):
return __fetch_business_data(**kwargs)
else:
return None
def validate(**kwargs) -> bool:
nop(kwargs)
return True
def fields() -> dict:
return FIELDS
|
190393
|
import os
import time
from collections import deque
import torch
import torch.distributions
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from common.util import scale_ob, Trajectories
def learn(logger,
device,
env, nenv,
number_timesteps,
network, optimizer,
save_path, save_interval, ob_scale,
gamma, grad_norm, timesteps_per_batch, ent_coef, vf_coef):
"""
Paper:
<NAME>, <NAME>, <NAME>, et al. Asynchronous methods for deep
reinforcement learning[C]// International Conference on Machine Learning.
2016: 1928-1937.
Parameters:
----------
    grad_norm (float | None): max gradient norm for clipping (None disables clipping)
timesteps_per_batch (int): number of steps per update
ent_coef (float): policy entropy coefficient in the objective
vf_coef (float): value function loss coefficient in the objective
"""
policy = network.to(device)
number_timesteps = number_timesteps // nenv
generator = _generate(
device, env, policy, ob_scale,
number_timesteps, gamma, timesteps_per_batch
)
max_iter = number_timesteps // timesteps_per_batch
scheduler = LambdaLR(optimizer, lambda i_iter: 1 - i_iter / max_iter)
total_timesteps = 0
infos = {k: deque(maxlen=100)
for k in ['eplenmean', 'eprewmean', 'pgloss', 'v', 'entropy']}
start_ts = time.time()
for n_iter in range(1, max_iter + 1):
        batch = next(generator)
b_o, b_a, b_r, b_v_old, info = batch
for d in info:
infos['eplenmean'].append(d['l'])
infos['eprewmean'].append(d['r'])
total_timesteps += b_o.size(0)
        # calculate advantage
b_logits, b_v = policy(b_o)
b_v = b_v[:, 0]
dist = torch.distributions.Categorical(logits=b_logits)
entropy = dist.entropy().mean()
b_logp = dist.log_prob(b_a)
adv = b_r - b_v_old
# update policy
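        # (loss = pgloss + vf_coef * vloss - ent_coef * entropy: minimising
        # -adv * logp ascends the policy gradient, the value MSE fits the
        # baseline, and the entropy bonus encourages exploration)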
vloss = (b_v - b_r).pow(2).mean()
pgloss = -(adv * b_logp).mean()
loss = pgloss + vf_coef * vloss - ent_coef * entropy
optimizer.zero_grad()
loss.backward()
if grad_norm is not None:
nn.utils.clip_grad_norm_(policy.parameters(), grad_norm)
        optimizer.step()
        scheduler.step()  # step the LR schedule after the optimizer update (PyTorch >= 1.1 order)
# record logs
infos['pgloss'].append(pgloss.item())
infos['v'].append(vloss.item())
infos['entropy'].append(entropy.item())
logger.info('{} Iter {} {}'.format('=' * 10, n_iter, '=' * 10))
fps = int(total_timesteps / (time.time() - start_ts))
logger.info('Total timesteps {} FPS {}'.format(total_timesteps, fps))
for k, v in infos.items():
v = (sum(v) / len(v)) if v else float('nan')
logger.info('{}: {:.6f}'.format(k, v))
if save_interval and n_iter % save_interval == 0:
torch.save([policy.state_dict(), optimizer.state_dict()],
os.path.join(save_path, '{}.checkpoint'.format(n_iter)))
def _generate(device, env, policy, ob_scale,
number_timesteps, gamma, timesteps_per_batch):
""" Generate trajectories """
record = ['o', 'a', 'r', 'done', 'vpred']
export = ['o', 'a', 'r', 'vpred']
trajectories = Trajectories(record, export, device, gamma, ob_scale)
o = env.reset()
infos = []
for n in range(1, number_timesteps + 1):
# sample action
with torch.no_grad():
logits, v = policy(scale_ob(o, device, ob_scale))
dist = torch.distributions.Categorical(logits=logits)
a = dist.sample().cpu().numpy()
v = v.cpu().numpy()[:, 0]
# take action in env
o_, r, done, info = env.step(a)
for d in info:
if d.get('episode'):
infos.append(d['episode'])
# store batch data and update observation
trajectories.append(o, a, r, done, v)
if n % timesteps_per_batch == 0:
with torch.no_grad():
ob = scale_ob(o_, device, ob_scale)
v_ = policy(ob)[1].cpu().numpy()[:, 0] * (1 - done)
yield trajectories.export(v_) + (infos, )
infos.clear()
o = o_
|
190396
|
import os
from hashlib import sha256
import importlib
import logging
import textwrap
from pathlib import Path
from pkg_resources import resource_filename
from tempfile import NamedTemporaryFile
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from sphinx.util.fileutil import copy_asset
import cadquery
from . import __version__
logger = logging.getLogger(__name__)
common_part_names = ['result', 'part']
common_source_header = [
'import cadquery',
'import cadquery as cq',
]
raw_html_template = """
<div class="sphinxcadqueryview" style="width:{width};height:{height}">
<script>
var parent = document.scripts[ document.scripts.length - 1 ].parentNode;
parent.fname = "{parturi}";
parent.color = "{color}";
parent.gridsize = "{gridsize}";
parent.griddivisions = "{griddivisions}";
</script>
</div>
"""
def directive_truefalse(argument):
return directives.choice(argument, ('true', 'false'))
def get_handler(fname):
loader = importlib.machinery.SourceFileLoader('source', str(fname))
return loader.load_module('source')
def find_part(module, name):
"""
Try to find the 3D part to visualize.
If no part name is provided, it will try with a list of default/usual
candidates.
"""
source = module.__dict__
if name:
candidates = [name]
else:
candidates = common_part_names
for candidate in candidates:
if candidate in source.keys():
return source[candidate]
raise KeyError('Could not find `%s` to visualize!' % candidates[0])
class CadQueryDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'select': directives.unchanged,
'include-source': bool,
'color': directives.unchanged,
'background': directives.unchanged,
'rotation': directive_truefalse,
'width': directives.unchanged,
'height': directives.unchanged,
'gridsize': float,
'griddivisions': int,
}
def run(self):
doc_source_name = self.state.document.attributes['source']
self.options.setdefault('include-source',
setup.app.config.sphinxcadquery_include_source)
self.options.setdefault('color', setup.app.config.sphinxcadquery_color)
if len(self.arguments):
fname = Path(setup.app.srcdir) / self.arguments[0]
fname = fname.resolve()
handle = get_handler(fname)
else:
with NamedTemporaryFile() as named:
fname = named.name
with open(fname, 'w') as tmp:
tmp.write(
'\n'.join(common_source_header + self.content.data))
handle = get_handler(fname)
select = self.options.get('select', None)
part = find_part(handle, select)
content = cadquery.exporters.toString(part, 'TJS')
        digest = sha256(content.encode('utf-8')).hexdigest()
fpath = Path('_static') / 'sphinxcadquery'
fname = Path(digest).with_suffix('.tjs')
outputdir = Path(setup.app.builder.outdir) / fpath
outputdir.mkdir(parents=True, exist_ok=True)
outputfname = outputdir / fname
with open(outputfname, 'w') as outputfile:
outputfile.write(content)
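        # Build a relative URI from the current document back to the build
        # root so the generated viewer <div> can locate the exported part.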
source_path = Path(doc_source_name)
depth = \
len(source_path.parent.relative_to(Path(setup.app.srcdir)).parents)
relative_uri = Path('.')
for _ in range(depth):
relative_uri /= '../'
raw_html = raw_html_template.format(
parturi=relative_uri / fpath / fname,
color=self.options['color'],
width=self.options.get('width', '100%'),
height=self.options.get('height', '400px'),
gridsize=self.options.get('gridsize', 100.),
griddivisions=self.options.get('griddivisions', 20),
)
lines = []
if self.options['include-source']:
data = textwrap.indent('\n'.join(self.content.data), ' ')
lines = ['.. code-block:: python', '', *data.splitlines()]
            lines.extend(['', ''])
raw_html = textwrap.indent(raw_html, ' ')
lines.extend(['.. raw:: html', '', *raw_html.splitlines()])
lines.extend(['', ''])
self.state_machine.insert_input(lines, source=doc_source_name)
return []
def copy_asset_files(app, exc):
if exc is not None: # build failed
return
source = resource_filename(__name__, 'sphinxcadquerystatic')
copy_asset(source, os.path.join(app.outdir, '_static/sphinxcadquerystatic'))
def setup(app):
setup.app = app
app.connect('build-finished', copy_asset_files)
app.add_js_file('sphinxcadquerystatic/three.js')
app.add_js_file('sphinxcadquerystatic/AMFLoader.js')
app.add_js_file('sphinxcadquerystatic/STLLoader.js')
app.add_js_file('sphinxcadquerystatic/LegacyJSONLoader.js')
app.add_js_file('sphinxcadquerystatic/jszip.min.js')
app.add_js_file('sphinxcadquerystatic/OrbitControls.js')
app.add_js_file('sphinxcadquerystatic/WebGL.js')
app.add_js_file('sphinxcadquerystatic/main.js')
app.add_css_file('sphinxcadquerystatic/main.css')
app.add_directive('cadquery', CadQueryDirective)
app.add_config_value('sphinxcadquery_color', '#99bbdd', 'env')
app.add_config_value('sphinxcadquery_include_source', False, 'env')
return {'version': __version__, 'parallel_read_safe': True}
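# A hedged usage sketch (reST in a project that enables this extension; the
# part definition below is an assumption for illustration):
#
#     .. cadquery::
#        :select: result
#
#        result = cadquery.Workplane('XY').box(1, 2, 3)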
|
190401
|
from itertools import count
from typing import Iterable, Dict, List
class IndexMapper:
"""
This class maps between the different atoms objects in a MAZE-sim project.
    It is essential for keeping track of the various atoms objects and their indices.
"""
id = 0
@staticmethod
def get_id():
"""
Get a unique id
:return:
"""
IndexMapper.id += 1
return str(IndexMapper.id)
@staticmethod
def get_unique_name(name: str):
"""
Get a unique name
:param name: name
:return: name + _ + unique id number
"""
return name + '_' + IndexMapper.get_id()
def __init__(self, atom_indices: Iterable) -> None:
"""
:param atom_indices: A list of atom indices from a Zeotype
"""
self.main_index = {}
self.i_max = 0
self.names = ['parent']
for main_index, atom_index in zip(count(), atom_indices):
self.main_index[main_index] = {'parent': atom_index}
self.i_max = main_index
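        # main_index maps a global integer key to a row dict of
        # {object_name: local_index_or_None}, e.g. {0: {'parent': 0}, ...};
        # every registered object gets a column in each row.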
def get_reverse_main_index(self, name: str) -> Dict[int, int]:
"""
Reverses an index so that the name indices are used as a key for the main index
:param name: name of the item to make the key
:return: a reverse index map where the indices of name are the key
"""
return self._reverse_main_index(name)
def _reverse_main_index(self, name: str) -> Dict[int, int]:
"""
Internal function for making a reverse index map
:param name: name of the item to make the key
:return:a reverse index map where the indices of name are the key
"""
name_to_main_dict = {}
for main_index, value in self.main_index.items():
try:
name_index = value[name]
except KeyError:
print(f'name not found at index {main_index}')
continue
if name_index is None: # ignore none indices
continue
name_to_main_dict[name_index] = main_index
return name_to_main_dict
def get_name1_to_name2_map(self, name1: str, name2: str) -> Dict[int, int]:
"""
Gets a map between the indices of name1 and the indices of name2
:param name1: name whose indices are the key in the map
:param name2: name whose indices are the value in the map
:return: name1.index -> name2.index map
"""
name1_to_name2_map = {}
for row in self.main_index.values():
if row[name1] is not None:
name1_to_name2_map[row[name1]] = row[name2]
return name1_to_name2_map
def register_with_main(self, new_name: str, main_to_new_map: Dict[int, int]) -> None:
"""
Register a new object by using the main index in mapping.
:param new_name: name of the new object being registered
:param main_to_new_map:
:return:
"""
self.names.append(new_name)
for main_i in self.main_index.keys():
self.main_index[main_i][new_name] = main_to_new_map.get(main_i, None)
def register(self, old_name: str, new_name: str, old_to_new_map: Dict[int, int]) -> None:
"""
Register a new object with the indexer
:param old_name: name of object known by the indexer
:param new_name: name of new object being registered
:param old_to_new_map: a index mapping between the old map and the new map
note that additional atoms in new will be added to the index
:return:
"""
already_registered = f'Error: {new_name} has already been registered'
not_registered = f'Error: {old_name} has not been registered'
if new_name in self.names:
print(f'old name is {old_name}, new name is {new_name}')
raise ValueError(already_registered)
assert old_name in self.names, not_registered
self.names.append(new_name)
old_name_to_main_dict = self._reverse_main_index(old_name)
main_to_new_name_dict = {}
for old_ind, main_ind in old_name_to_main_dict.items():
main_to_new_name_dict[main_ind] = old_to_new_map.get(old_ind, None)
for i in self.main_index.keys():
self.main_index[i][new_name] = main_to_new_name_dict.get(i, None)
def _make_none_dict(self) -> Dict[str, None]:
"""
Get a dictionary full of None for each name
:return: A dictionary full of None for each known name
"""
none_dict = {}
for name in self.names:
none_dict[name] = None
return none_dict
def add_atoms(self, name: str, new_atom_indices: Iterable[int]) -> None:
"""
Add new atoms to the dictionary
:param name: name of object with new atoms
:param new_atom_indices: indices of the new atoms
:return:
"""
assert name not in self.names, 'name already exists'
for index, value in self.main_index.items():
value[name] = None
self.names.append(name)
for index in new_atom_indices:
none_dict = self._make_none_dict() # could be slow
none_dict[name] = index
self.i_max += 1
self.main_index[self.i_max] = none_dict
def pop(self, name: str, atom_index_to_delete: int) -> int:
"""
Deletes a single atom index
:param name: name of Zeotype to delete atom
:type name: str
:param atom_index_to_delete: index to delete (using own mapping)
:type atom_index_to_delete: int
:return: index deleted
:rtype: int
"""
name_to_main_dict = self._reverse_main_index(name)
name_indices = list(name_to_main_dict.keys())
name_indices.sort()
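        # Walk indices in ascending order: null out the deleted entry and
        # shift every larger index of `name` down by one so they stay contiguous.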
for i in name_indices:
if i == atom_index_to_delete:
self.main_index[name_to_main_dict[i]][name] = None
elif i > atom_index_to_delete:
self.main_index[name_to_main_dict[i]][name] -= 1
return atom_index_to_delete
def extend(self, name: str, new_atom_indices: Iterable[int]) -> None:
"""
This adds additional atom indices to the zeolite
:param name: name to add indices to
:type name: str
:param new_atom_indices: list of indices
:type new_atom_indices: Iterable[int]
:return: None
:rtype: None
"""
assert name in self.names, 'name not in index mapper'
for index in new_atom_indices:
none_dict = self._make_none_dict() # could be slow
none_dict[name] = index
self.i_max += 1
self.main_index[self.i_max] = none_dict
def delete_atoms(self, name: str, atom_indices_to_delete: Iterable[int]) -> None:
"""
:param name: name of with indices to delete
:param atom_indices_to_delete: indices to delete
:return: None
"""
name_to_main_dict = self._reverse_main_index(name)
for i in atom_indices_to_delete:
self.main_index[name_to_main_dict[i]][name] = None
def get_index(self, sender_name: str, receiver_name: str, sender_index: int) -> int:
"""
get the index of another object
:param sender_name: name of the sender zeolite
:param receiver_name: the name of the receiving zeolite
:param sender_index: the index of the sender
:return: the receiving index
"""
        for name_dict in self.main_index.values():
            if name_dict[sender_name] == sender_index:
                return name_dict[receiver_name]
def delete_name(self, name: str) -> None:
"""
Delete zeolite from the index
:param name: The name of the zeolite to delete from the index
:return: None
"""
        try:
            self.names.remove(name)
        except ValueError:
            print(f'Warning: cannot delete unknown name {name!r} from the index')
            return None
for index, old_row in self.main_index.items():
new_row = self._make_none_dict()
for key in new_row.keys():
new_row[key] = old_row[key]
self.main_index[index] = new_row
def get_overlap(self, name1: str, name2: str) -> List[int]:
"""
        Get the indices that overlap between two registered objects
        :param name1: name of object 1 (whose indices are returned)
        :param name2: name of object 2
        :return: indices of name1 that are also present in name2
"""
overlap_indices_name1 = []
for name_dict in self.main_index.values():
if name_dict[name1] is not None and name_dict[name2] is not None:
overlap_indices_name1.append(name_dict[name1])
return overlap_indices_name1
def reregister_parent(self, main_to_parent_map) -> None:
"""
Register a Zeolite as the parent using a main_to_parent_map
This is useful when building a Perfect Zeolite from a file.
:param main_to_parent_map: dict that maps between parent and the main indices
:type main_to_parent_map: Dict[int, int]
:return: None
:rtype: None
"""
for key, value in self.main_index.items():
if key in main_to_parent_map:
value['parent'] = main_to_parent_map[key]
else:
value['parent'] = None
|
190411
|
from threading import Thread
import queue
from . import packet_util
from .message_consumer import MessageConsumer
import logging
class MessageWorker(Thread):
def __init__(self, socket, room_id):
Thread.__init__(self)
self.need_stop = False
self.socket = socket
self.room_id = room_id
self.msg_queue = queue.Queue()
self.message_consumer = MessageConsumer(self.msg_queue)
def add_handler(self, msg_type, handler):
self.message_consumer.add_handler(msg_type, handler)
def set_stop(self, need_stop=True):
self.need_stop = need_stop
self.message_consumer.set_stop(need_stop)
def prepare(self):
self.message_consumer.start()
self.enter_room()
def enter_room(self):
ori_str = packet_util.assemble_login_str(self.room_id)
data = packet_util.assemble_transfer_data(ori_str)
self.socket.send(data)
ori_str = packet_util.assemble_join_group_str(self.room_id)
data = packet_util.assemble_transfer_data(ori_str)
self.socket.send(data)
def run(self):
self.prepare()
while not self.need_stop:
packet_size = self.socket.receive(4)
if packet_size is None:
logging.warning("Socket closed")
self.socket.connect()
self.enter_room()
continue
packet_size = int.from_bytes(packet_size, byteorder='little')
data = self.socket.receive(packet_size)
if data is None:
logging.warning("Socket closed")
self.socket.connect()
self.enter_room()
continue
self.msg_queue.put(data)
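# A minimal, hedged usage sketch (the socket argument is whatever wrapper
# exposes the connect/send/receive methods used above; names are assumptions):
#
#     worker = MessageWorker(socket, room_id=12345)
#     worker.add_handler(msg_type, handler)
#     worker.start()      # Thread.start() -> run(): read packets into msg_queue
#     ...
#     worker.set_stop()   # ask both the worker and the consumer to stop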
|
190424
|
import unittest
from unittest.mock import patch
from simple_dwd_weatherforecast import dwdforecast
from tests.dummy_data import parsed_data
from datetime import datetime, timezone
import time
class WeatherUpdate(unittest.TestCase):
def setUp(self):
self.dwd_weather = dwdforecast.Weather("H889")
self.dwd_weather.forecast_data = parsed_data
self.dwd_weather.station_name = "BAD HOMBURG"
@patch(
"simple_dwd_weatherforecast.dwdforecast.download_latest_kml", return_value=None
)
@patch(
"simple_dwd_weatherforecast.dwdforecast.Weather.parse_kml", return_value=None
)
def test_issue_time_none(self, mock_function, _):
self.dwd_weather.update()
mock_function.assert_called()
@patch(
"simple_dwd_weatherforecast.dwdforecast.download_latest_kml", return_value=None
)
@patch(
"simple_dwd_weatherforecast.dwdforecast.Weather.parse_kml", return_value=None
)
def test_issue_time_old(self, mock_function, _):
self.dwd_weather.issue_time = datetime(
*(time.strptime("2020-11-06T03:00:00.000Z", "%Y-%m-%dT%H:%M:%S.%fZ")[0:6]),
0,
timezone.utc
)
self.dwd_weather.update()
mock_function.assert_called()
@patch(
"simple_dwd_weatherforecast.dwdforecast.download_latest_kml", return_value=None
)
@patch(
"simple_dwd_weatherforecast.dwdforecast.Weather.parse_kml", return_value=None
)
def test_issue_time_actual(self, mock_function, _):
self.dwd_weather.issue_time = datetime.now(timezone.utc)
self.dwd_weather.update()
mock_function.assert_not_called()
|
190439
|
import json
import numpy as np
def wind_turbine_model(x):
    x = np.asarray(x, dtype=float)
    # standard operability: fitted power curve polynomial
    power = (376.936 - 195.8161 * x + 33.75734 * x ** 2 - 2.212492 * x ** 3
             + 0.06309095 * x ** 4 - 0.0006533647 * x ** 5)
    # below cut-in speed or above cut-out speed the turbine produces no power;
    # np.where keeps this working for the array input passed in by run()
    return np.where((x < 4.5) | (x > 21.5), 0.0, power)
def init():
global model
# not model for wind turbine
model = wind_turbine_model
def run(raw_data):
data = np.array(json.loads(raw_data)['data'])
# make evaluation
y = model(data)
    return json.dumps(y.tolist())  # an ndarray is not directly JSON serializable
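# A hedged local smoke test outside the AzureML runtime (values assumed):
#
#     init()
#     print(run(json.dumps({'data': [3.0, 10.0, 25.0]})))
#     # -> "[0.0, <power at 10 m/s>, 0.0]" once the cut-in/cut-out mask applies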
|
190460
|
import os
from cfdata.tabular import *
from cfml import *
# datasets
boston = TabularDataset.boston()
prices_file = os.path.join("datasets", "prices.txt")
prices = TabularData(task_type=TaskTypes.REGRESSION).read(prices_file).to_dataset()
breast_cancer = TabularDataset.breast_cancer()
digits = TabularDataset.digits()
column_indices = list(range(digits.num_features))
digits_onehot = TabularData.from_dataset(digits, categorical_columns=column_indices).to_dataset()
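# The calls below all follow cfml's fluent interface as exercised here:
# Base.make(<model name>).fit(x, y) returns the fitted model, and the
# visualize / plot helpers chain off it.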
# numpy poly fit
Base.make("poly").fit(*prices.xy).visualize1d(*prices.xy)
# linear regression
Base.make("linear_regression").fit(*prices.xy).visualize1d(*prices.xy).plot_loss_curve()
# logistic regression
Base.make("logistic_regression").fit(*breast_cancer.xy).plot_loss_curve()
# multinomial naive bayes
Base.make("multinomial_nb").fit(*digits_onehot.xy)
# gaussian naive bayes
Base.make("gaussian_nb").fit(*breast_cancer.xy)
# linear support vector machine (classification)
Base.make("linear_svc").fit(breast_cancer.x, breast_cancer.y).plot_loss_curve()
# linear support vector machine (regression)
Base.make("linear_svr").fit(boston.x, boston.y).plot_loss_curve()
# support vector machine (classification)
Base.make("svc").fit(breast_cancer.x, breast_cancer.y).plot_loss_curve()
# support vector machine (regression)
Base.make("svr").fit(boston.x, boston.y).plot_loss_curve()
# fully connected neural network (classification)
Base.make("fcnn_clf").fit(*breast_cancer.xy).plot_loss_curve()
# fully connected neural network (regression)
Base.make("fcnn_reg").fit(*boston.xy).plot_loss_curve()
|
190473
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.1.0"
__author__ = "<NAME>"
from flask import flash
from flask import Flask
from flask import render_template
from flask import request
from keras.models import load_model
import os
import numpy as np
app = Flask(__name__)
@app.route("/")
def home():
filename = "http://0.0.0.0:5000/static/" + "class.png"
return render_template("index.html", class_finding=filename)
@app.route("/", methods=["POST"])
def classify():
text = request.form["text"]
assert "," in text, "FormatError: Please enter comma-separated features"
text = text.strip(",")
text = text.split(",")
    data = np.array(text, dtype=int)  # np.int was removed in NumPy >= 1.24
model = load_model("dnn.h5")
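    # NOTE: reloading the model on every request keeps the example simple but
    # is slow; in practice it could be loaded once at import time.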
prediction = np.argmax(model.predict(np.reshape(data, (-1, data.shape[0]))))
print("Prediction : {}".format(prediction))
class_finding = "benign.png" if prediction == 0 else "malignant.png"
filename = "http://0.0.0.0:5000/static/" + class_finding
return render_template("index.html", class_finding=filename)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
190506
|
import json
import pytchat
from pytchat.parser.live import Parser
from pytchat.processors.speed.calculator import SpeedCalculator
parser = Parser(is_replay=False)
def test_speed_1():
    stream = pytchat.create("Hj-wnLIYKjw", seektime=6000, processor=SpeedCalculator())
while stream.is_alive():
speed = stream.get()
assert speed > 100
break
test_speed_1()
|
190522
|
import torch
import torch.nn as nn
from modelzoo.normalisers import NormalisedSigmoid
from modelzoo.redistributions import Gate
from modelzoo.redistributions import get_redistribution
class MCModel(nn.Module):
def __init__(self, cfg: dict):
super().__init__()
self.mclstm = MCLSTMv2(cfg['mass_input_size'],
cfg['aux_input_size'],
cfg['hidden_size'],
redistribution_type=cfg['redistribution_type'],
normaliser=cfg.get('normaliser'))
self.fc = nn.Linear(cfg['hidden_size'], cfg['out_size'])
self.reset_parameters()
def reset_parameters(self):
self.mclstm.reset_parameters()
nn.init.kaiming_uniform_(self.fc.weight, nonlinearity='linear')
nn.init.zeros_(self.fc.bias)
def forward(self, x_m, x_a) -> tuple:
m_out, c = self.mclstm(x_m, x_a)
return self.fc(m_out[:, -1]), c
class MCSum(nn.Module):
def __init__(self, cfg: dict):
super().__init__()
self.mclstm = MCLSTMv2(cfg['mass_input_size'],
cfg['aux_input_size'],
cfg['hidden_size'],
redistribution_type=cfg['redistribution_type'],
normaliser=cfg.get('normaliser'))
self.reset_parameters()
def reset_parameters(self):
self.mclstm.reset_parameters()
def forward(self, x_m, x_a) -> tuple:
m_out, c = self.mclstm(x_m, x_a)
        return m_out[:, -1].sum(dim=-1, keepdim=True), c
class MCProd(MCModel):
def forward(self, x_m, x_a) -> tuple:
m_out, c = self.mclstm(torch.log(x_m), x_a)
return torch.exp(self.fc(m_out[:, -1])), c
class MCWrappedModel(nn.Module):
def __init__(self, cfg: dict):
super().__init__()
self.mclstm = MCLSTMv2(cfg['mass_input_size'],
cfg['aux_input_size'],
cfg['hidden_size'],
redistribution_type=cfg['redistribution_type'],
normaliser=cfg.get('normaliser'))
self.pre_mlp = nn.Sequential(nn.Linear(cfg['mass_input_size'], cfg['inter_size']), nn.ReLU(),
nn.Linear(cfg['inter_size'], cfg['mass_input_size']))
self.post_mlp = nn.Sequential(nn.Linear(cfg['hidden_size'], cfg['inter_size']), nn.ReLU(),
nn.Linear(cfg['inter_size'], cfg['out_size']))
self.reset_parameters()
def reset_parameters(self):
self.mclstm.reset_parameters()
nn.init.kaiming_uniform_(self.pre_mlp[0].weight)
nn.init.zeros_(self.pre_mlp[0].bias)
nn.init.kaiming_uniform_(self.pre_mlp[2].weight)
nn.init.zeros_(self.pre_mlp[2].bias)
nn.init.kaiming_uniform_(self.post_mlp[0].weight)
nn.init.zeros_(self.post_mlp[0].bias)
nn.init.kaiming_uniform_(self.post_mlp[2].weight)
nn.init.zeros_(self.post_mlp[2].bias)
def forward(self, x_m, x_a) -> tuple:
x_m = self.pre_mlp(x_m)
m_out, c = self.mclstm(x_m, x_a)
m_out = self.post_mlp(m_out[:, -1])
return m_out, c
class MassConservingTemplate(nn.Module):
""" Base class for different flavours of Mass Conserving LSTMs. """
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
redistribution_type: str = "gate",
normaliser: str = "softmax",
batch_first: bool = True):
"""
Parameters
----------
mass_input_size : int
Number of mass input features at each time step.
aux_input_size : int
Number of auxiliary input features at each time step.
hidden_size : int
Number of output features at each time step.
redistribution_type : str, optional
Specifies how the redistribution matrix should be computed.
batch_first : bool, optional
Whether or not the first dimension is the batch dimension.
"""
super(MassConservingTemplate, self).__init__()
self.mass_input_size = mass_input_size
self.aux_input_size = aux_input_size
self.hidden_size = hidden_size
self.redistribution_type = redistribution_type
self.batch_first = batch_first
if normaliser == 'sigmoid':
self.normaliser = NormalisedSigmoid(dim=-1)
elif normaliser == 'id':
self.normaliser = lambda x: x
elif normaliser == 'nonorm':
self.normaliser = nn.Sigmoid()
else:
self.normaliser = nn.Softmax(dim=-1)
@torch.no_grad()
def get_initial_state(self, x0: torch.Tensor):
return x0.new_zeros((len(x0), self.hidden_size))
def forward(self, x_m: torch.Tensor, x_a: torch.Tensor, init_state: torch.Tensor = None):
if self.batch_first:
x_m = x_m.transpose(0, 1)
x_a = x_a.transpose(0, 1)
ct = self.get_initial_state(x_m[0])
if init_state is not None:
ct = ct + init_state
m_out, c = [], []
for xt_m, xt_a in zip(x_m, x_a):
mt_out, ct = self._step(xt_m, xt_a, ct)
m_out.append(mt_out)
c.append(ct)
m_out, c = torch.stack(m_out), torch.stack(c)
if self.batch_first:
m_out = m_out.transpose(0, 1)
c = c.transpose(0, 1)
return m_out, c
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
raise NotImplementedError("subclass must implement this method")
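# Every concrete _step below follows the same mass-conserving update:
#     m_in  = x_m^T @ J    (junction spreads new mass over the cells)
#     m_sys = c^T   @ R    (redistribution moves mass between cells)
#     m_new = m_in + m_sys
#     output = o * m_new,  next cell state = (1 - o) * m_new
# so mass only leaves the system through the output gate.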
class MCLSTM(MassConservingTemplate):
""" Mass conserving LSTM.
Using all inputs in all gates.
"""
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
redistribution_type: str = "gate",
normaliser: str = "sigmoid",
batch_first: bool = True):
"""
Parameters
----------
mass_input_size : int
Number of mass input features at each time step.
aux_input_size : int
Number of auxiliary input features at each time step.
hidden_size : int
Number of output features at each time step.
redistribution_type : str, optional
Specifies how the redistribution matrix should be computed.
batch_first : bool, optional
Whether or not the first dimension is the batch dimension.
"""
super(MCLSTM, self).__init__(mass_input_size, aux_input_size, hidden_size, redistribution_type, normaliser,
batch_first)
in_shape = self.mass_input_size + self.aux_input_size + self.hidden_size
self.out_gate = Gate(self.hidden_size, in_shape)
# NOTE: without normalised sigmoid here, there seem to be troubles!
self.junction = get_redistribution("gate",
num_states=self.mass_input_size,
num_features=in_shape,
num_out=self.hidden_size,
normaliser=self.normaliser)
self.redistribution = get_redistribution(self.redistribution_type,
num_states=self.hidden_size,
num_features=in_shape,
normaliser=self.normaliser)
self.reset_parameters()
def reset_parameters(self):
self.out_gate.reset_parameters()
self.junction.reset_parameters()
self.redistribution.reset_parameters()
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c], dim=-1)
j = self.junction(features)
r = self.redistribution(features)
o = self.out_gate(features)
m_in = torch.matmul(xt_m.unsqueeze(-2), j).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
class MCLSTMv2(MassConservingTemplate):
""" Mass conserving LSTM.
Using only auxiliary inputs in all gates.
"""
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
redistribution_type: str = "gate",
normaliser: str = "softmax",
batch_first: bool = True):
super(MCLSTMv2, self).__init__(mass_input_size, aux_input_size, hidden_size, redistribution_type, normaliser,
batch_first)
self.out_gate = Gate(self.hidden_size, self.aux_input_size)
# NOTE: without normalised sigmoid here, there seem to be troubles!
self.junction = get_redistribution("gate",
num_states=self.mass_input_size,
num_features=self.aux_input_size,
num_out=self.hidden_size,
normaliser=nn.Softmax(dim=-1))
self.redistribution = get_redistribution(self.redistribution_type,
num_states=self.hidden_size,
num_features=self.aux_input_size,
normaliser=self.normaliser)
self.reset_parameters()
def reset_parameters(self):
self.out_gate.reset_parameters()
nn.init.constant_(self.out_gate.fc.bias, -3.)
self.junction.reset_parameters()
self.redistribution.reset_parameters()
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
j = self.junction(xt_a)
r = self.redistribution(xt_a)
o = self.out_gate(xt_a)
m_in = torch.matmul(xt_m.unsqueeze(-2), j).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
class MCLSTMv3(MassConservingTemplate):
""" Mass conserving LSTM.
Using auxiliary inputs and normalised cell states (no mass inputs) in all gates.
"""
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
redistribution_type: str = "gate",
normaliser: str = "softmax",
batch_first: bool = True):
super().__init__(mass_input_size, aux_input_size, hidden_size, redistribution_type, normaliser, batch_first)
input_size = self.aux_input_size + hidden_size
self.out_gate = Gate(self.hidden_size, input_size)
self.junction = get_redistribution("gate",
num_states=self.mass_input_size,
num_features=input_size,
num_out=self.hidden_size,
normaliser=nn.Softmax(dim=-1))
self.redistribution = get_redistribution(self.redistribution_type,
num_states=self.hidden_size,
num_features=input_size,
normaliser=self.normaliser)
self.reset_parameters()
def reset_parameters(self):
self.out_gate.reset_parameters()
nn.init.constant_(self.out_gate.fc.bias, -3.)
self.junction.reset_parameters()
self.redistribution.reset_parameters()
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
c_sum = torch.sum(c, dim=-1, keepdim=True)
normaliser = torch.where(c_sum == 0, c_sum.new_ones(c_sum.shape), c_sum)
aux = torch.cat([xt_a, c / normaliser], dim=-1)
j = self.junction(aux)
r = self.redistribution(aux)
o = self.out_gate(aux)
m_in = torch.matmul(xt_m.unsqueeze(-2), j).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
class MCLSTMMultiOutBad(MassConservingTemplate):
""" Mass conserving LSTM.
NOTE: FAULTY MODEL FOR MASS-CONSERVATION!
(multiple output gates can make cell states negative)
Using auxiliary inputs and normalised cell states (no mass inputs) in all gates
with multiple output gates (one for each output).
"""
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
output_size: int,
redistribution_type: str = "gate",
normaliser: str = "softmax",
batch_first: bool = True):
super().__init__(mass_input_size, aux_input_size, hidden_size, redistribution_type, normaliser, batch_first)
input_size = self.aux_input_size + hidden_size
self.output_size = output_size
self.out_gate = Gate(self.hidden_size * output_size, input_size)
self.junction = get_redistribution("gate",
num_states=self.mass_input_size,
num_features=input_size,
num_out=self.hidden_size,
normaliser=nn.Softmax(dim=-1))
self.redistribution = get_redistribution(self.redistribution_type,
num_states=self.hidden_size,
num_features=input_size,
normaliser=self.normaliser)
self.reset_parameters()
def reset_parameters(self):
self.out_gate.reset_parameters()
nn.init.constant_(self.out_gate.fc.bias, -3.)
self.junction.reset_parameters()
self.redistribution.reset_parameters()
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
c_sum = torch.sum(c, dim=-1, keepdim=True)
normaliser = torch.where(c_sum == 0, c_sum.new_ones(c_sum.shape), c_sum)
aux = torch.cat([xt_a, c / normaliser], dim=-1)
j = self.junction(aux)
r = self.redistribution(aux)
o = self.out_gate(aux).reshape(-1, self.hidden_size, self.output_size)
m_in = torch.matmul(xt_m.unsqueeze(-2), j).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
m_out = o * m_new.unsqueeze(-1)
return m_out.sum(1), m_new - m_out.sum(-1)
class MCLSTMMultiOut(MCLSTMv3):
""" Mass conserving LSTM.
Using auxiliary inputs and normalised cell states (no mass inputs) in all gates
with multiple output gates (one for each output).
"""
def __init__(self,
mass_input_size: int,
aux_input_size: int,
hidden_size: int,
output_size: int,
redistribution_type: str = "gate",
normaliser: str = "softmax",
batch_first: bool = True):
super().__init__(mass_input_size, aux_input_size, hidden_size, redistribution_type, normaliser, batch_first)
self.output_size = output_size
self.final = get_redistribution("linear",
num_states=self.hidden_size,
num_features=0,
num_out=self.output_size,
normaliser=nn.Softmax(dim=-1))
self.final.reset_parameters()
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
tmp, c = super()._step(xt_m, xt_a, c)
out_redist = self.final(None)
h = torch.matmul(tmp.unsqueeze(-2), out_redist).squeeze(-2)
return h, c
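# A hedged shape sketch for MCModel (the cfg values below are assumptions):
#
#     cfg = dict(mass_input_size=1, aux_input_size=4, hidden_size=8,
#                out_size=1, redistribution_type='gate', normaliser='softmax')
#     model = MCModel(cfg)
#     x_m = torch.rand(2, 10, 1)   # (batch, time, mass inputs)
#     x_a = torch.rand(2, 10, 4)   # (batch, time, aux inputs)
#     y, c = model(x_m, x_a)       # y: (2, 1), c: (2, 10, 8)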
|
190539
|
import numpy as np
from tree.DecisionTreeClassifier import DecisionTreeClassifier
from scipy import stats  # used to compute the mode for majority voting
class RandomForestClassifier:
def __init__(self, n_estimators: int = 5, min_samples_split: int = 5,
min_samples_leaf: int = 5, min_impurity_decrease: float = 0.0):
        '''
        :param n_estimators: number of sub-trees in the forest
        :param min_samples_split: minimum samples required to split a node, passed to the CART sub-trees
        :param min_samples_leaf: minimum samples per leaf node, passed to the CART sub-trees
        :param min_impurity_decrease: minimum impurity decrease for a split, passed to the CART sub-trees
        '''
self.n_estimators = n_estimators
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_impurity_decrease = min_impurity_decrease
self.estimators_ = list()
def __RandomPatches(self, data):
        '''
        Implements RandomPatches: bootstrap-sample the rows and randomly mask features
        :param data: data to sample from
        :return: the randomly sampled sub-dataset
        '''
n_samples, n_features = data.shape
n_features -= 1
sub_data = np.copy(data)
random_f_idx = np.random.choice(
n_features, size=int(np.sqrt(n_features)), replace=False)
        mask_f_idx = [i for i in range(n_features) if i not in random_f_idx]  # indices of features that were not sampled
random_data_idx = np.random.choice(n_samples, size=n_samples, replace=True)
sub_data = sub_data[random_data_idx]
        sub_data[:, mask_f_idx] = 0  # zero out all columns of unsampled features
return sub_data
def __RF_Clf(self, data):
        '''
        Grow the random forest sequentially
        '''
for _ in range(self.n_estimators):
tree = DecisionTreeClassifier(min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_impurity_decrease=self.min_impurity_decrease)
sub_data = self.__RandomPatches(data)
tree.fit(sub_data[:, :-1], sub_data[:, -1])
self.estimators_.append(tree)
def fit(self, X_train, Y_train):
data = np.c_[X_train, Y_train]
self.__RF_Clf(data)
del data
def predict(self, X_test):
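        # Majority vote across the ensemble; the stats.mode indexing below
        # assumes the legacy scipy API (scipy >= 1.11 changed its return shape).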
raw_pred = np.array([tree.predict(X_test) for tree in self.estimators_]).T
return np.array([stats.mode(y_pred)[0][0] for y_pred in raw_pred])
if __name__ == '__main__':
from datasets.dataset import load_breast_cancer
data = load_breast_cancer()
X, Y = data.data, data.target
del data
from model_selection.train_test_split import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
rf_clf = RandomForestClassifier()
rf_clf.fit(X_train, Y_train)
Y_pred = rf_clf.predict(X_test)
print('rf acc:{}'.format(np.sum(Y_pred == Y_test) / len(Y_test)))
|