text string | size int64 | token_count int64 |
|---|---|---|
__all__ = ['Token', 'Scanner', 'getscanner']
import types
class Token:
    """One disassembled instruction (or a synthetic marker such as COME_FROM).

    Comparable both against other Tokens and against bare opcode-name
    strings, which is what grammar matching relies on.
    NOTE: Python 2 only (`intern`, `cmp`, `__cmp__`).
    """
    def __init__(self, type, attr=None, pattr=None, offset=-1):
        # Interned so type comparisons reduce to fast identity checks.
        self.type = intern(type)
        self.attr = attr      # raw operand (may be a nested code object)
        self.pattr = pattr    # printable / parsed operand
        self.offset = offset  # bytecode offset (or "off_j" for COME_FROM)
    def __cmp__(self, o):
        if isinstance(o, Token):
            # Tokens match when opcode name and printable operand both match.
            return cmp(self.type, o.type) or cmp(self.pattr, o.pattr)
        else:
            # Allow direct comparison against a plain opcode-name string.
            return cmp(self.type, o)
    def __repr__(self): return str(self.type)
    def __str__(self):
        pattr = self.pattr or ''
        return '%s\t%-17s %r' % (self.offset, self.type, pattr)
    def __hash__(self): return hash(self.type)
    def __getitem__(self, i): raise IndexError  # tokens are leaves, not sequences
class Code:
    """Stand-in for a code object: copies every co_* attribute from *co* and
    attaches the token stream / customizations produced by *scanner*."""
    def __init__(self, co, scanner):
        # Mirror all co_* attributes so this object can be used like `co`.
        for i in dir(co):
            if i.startswith('co_'):
                setattr(self, i, getattr(co, i))
        self._tokens, self._customize = scanner.disassemble(co)
class Scanner:
    """Disassembles code objects of a given Python version into Token lists.

    NOTE: Python 2 only (`has_key`, `print >>`, `65536L`, str bytecode).
    """
    def __init__(self, version):
        # dis_files (project module) maps interpreter versions to opcode tables.
        self.__version = version
        import dis_files
        self.dis = dis_files.by_version[version]
        self.resetTokenClass()
        dis = self.dis
        # Names of every jump opcode (relative + absolute) for this version.
        self.JUMP_OPs = map(lambda op: dis.opname[op],
                            dis.hasjrel + dis.hasjabs)
    def setShowAsm(self, showasm, out=None):
        # When truthy, disassemble() echoes the token stream to `out`.
        self.showasm = showasm
        self.out = out
    def setTokenClass(self, tokenClass):
        # Python 2 old-style class check.
        assert type(tokenClass) == types.ClassType
        self.Token = tokenClass
    def resetTokenClass(self):
        self.setTokenClass(Token)
    def disassemble(self, co):
        """Disassemble code object *co*; return (token_list, customize_dict)."""
        rv = []
        customize = {}
        dis = self.dis
        Token = self.Token
        code = co.co_code
        # {offset: count} of jump targets, so COME_FROM markers can be emitted.
        cf = self.find_jump_targets(code)
        n = len(code)
        i = 0
        extended_arg = 0
        free = None
        while i < n:
            offset = i
            if cf.has_key(offset):
                # One synthetic COME_FROM token per incoming jump.
                for j in range(cf[offset]):
                    rv.append(Token('COME_FROM',
                                    offset="%s_%d" % (offset, j)))
            c = code[i]
            op = ord(c)  # co_code is a str in Python 2
            opname = dis.opname[op]
            i += 1
            oparg = None; pattr = None
            if op >= dis.HAVE_ARGUMENT:
                # Arguments are little-endian 16-bit, widened by EXTENDED_ARG.
                oparg = ord(code[i]) + ord(code[i+1]) * 256 + extended_arg
                extended_arg = 0
                i += 2
                if op == dis.EXTENDED_ARG:
                    extended_arg = oparg * 65536L
                if op in dis.hasconst:
                    const = co.co_consts[oparg]
                    if type(const) == types.CodeType:
                        oparg = const
                        if const.co_name == '<lambda>':
                            assert opname == 'LOAD_CONST'
                            opname = 'LOAD_LAMBDA'
                        pattr = 'code_object ' + const.co_name
                    else:
                        pattr = const
                elif op in dis.hasname:
                    pattr = co.co_names[oparg]
                elif op in dis.hasjrel:
                    # Store the absolute destination of the relative jump.
                    pattr = repr(i + oparg)
                elif op in dis.hasjabs:
                    pattr = repr(oparg)
                elif op in dis.haslocal:
                    pattr = co.co_varnames[oparg]
                elif op in dis.hascompare:
                    pattr = dis.cmp_op[oparg]
                elif op in dis.hasfree:
                    if free is None:
                        free = co.co_cellvars + co.co_freevars
                    pattr = free[oparg]
            if opname == 'SET_LINENO':
                continue  # line-number pseudo-ops carry no semantics here
            elif opname in ('BUILD_LIST', 'BUILD_TUPLE', 'BUILD_SLICE',
                            'UNPACK_LIST', 'UNPACK_TUPLE', 'UNPACK_SEQUENCE',
                            'MAKE_FUNCTION', 'CALL_FUNCTION', 'MAKE_CLOSURE',
                            'CALL_FUNCTION_VAR', 'CALL_FUNCTION_KW',
                            'CALL_FUNCTION_VAR_KW', 'DUP_TOPX',
                            ):
                # Fold the operand count into the token name (e.g. BUILD_LIST_3)
                # and remember it so a parser can specialize its grammar.
                opname = '%s_%d' % (opname, oparg)
                customize[opname] = oparg
            rv.append(Token(opname, oparg, pattr, offset))
        if self.showasm:
            out = self.out
            for t in rv:
                print >>out, t  # Python 2 print-to-file syntax
            print >>out
        return rv, customize
    def find_jump_targets(self, code):
        """Return {offset: count} of how many relative jumps land on each offset."""
        HAVE_ARGUMENT = self.dis.HAVE_ARGUMENT
        hasjrel = self.dis.hasjrel
        targets = {}
        n = len(code)
        i = 0
        while i < n:
            c = code[i]
            op = ord(c)
            i += 1
            if op >= HAVE_ARGUMENT:
                oparg = ord(code[i]) + ord(code[i+1]) * 256
                i += 2
                label = -1
                if op in hasjrel:
                    label = i + oparg
                if label >= 0:
                    targets[label] = targets.get(label, 0) + 1
        return targets
# Cache of Scanner instances, one per Python version.
__scanners = {}
def getscanner(version):
    """Return the (cached) Scanner for *version*."""
    if not __scanners.has_key(version):  # Python 2 dict API
        __scanners[version] = Scanner(version)
    return __scanners[version]
| 5,057 | 1,583 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import unittest
class FcntlModuleTest(unittest.TestCase):
    """Smoke test: the platform `fcntl` extension module is importable."""

    def test_it_imports(self):
        import fcntl as fcntl_module
        self.assertEqual("fcntl", fcntl_module.__name__)
if __name__ == "__main__":
unittest.main()
| 312 | 113 |
#!/usr/bin/env python
#title :main.py
#description :Tensorflow implementation of CapsNet.
#author :Jose Chavez
#date :2019/04/30
#version :1.0
#usage :python3 main.py
#python_version :3.6.7
#==============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from capsnet import CapsNet
from tensorflow.examples.tutorials.mnist import input_data
import functools
mnist = input_data.read_data_sets('MNIST_data/')
batch_size = 10
tf.reset_default_graph()
tf.random.set_random_seed(0)
np.random.seed(0)
checkpoint_file = './tmp/model.ckpt'
def train(model, restore=False, n_epochs=50):
    """Train *model* on MNIST, validating each epoch and checkpointing on improvement.

    Args:
        model: CapsNet instance exposing train_op / loss / accuracy tensors.
        restore: resume from `checkpoint_file` when a checkpoint exists.
        n_epochs: number of passes over the training set.
    """
    init = tf.global_variables_initializer()
    n_iter_train_per_epoch = mnist.train.num_examples // batch_size
    n_iter_valid_per_epoch = mnist.validation.num_examples // batch_size
    best_loss_val = np.infty
    saver = tf.train.Saver()
    with tf.Session() as sess:
        writer = tf.summary.FileWriter("output", sess.graph)
        # Bug fix: the original tested the literal string 'checkpoint_file'
        # instead of the checkpoint_file variable, so restore never happened.
        if restore and tf.train.checkpoint_exists(checkpoint_file):
            saver.restore(sess, checkpoint_file)
        else:
            init.run()
        print('\n\nRunning CapsNet ...\n')
        count_params()
        for epoch in range(n_epochs):
            margin_loss_train_ep = []
            recnst_loss_train_ep = []
            loss_train_ep = []
            acc_train_ep = []
            for it in range(1, n_iter_train_per_epoch + 1):
                X_batch, y_batch = mnist.train.next_batch(batch_size)
                # Bug fix: fetch order now matches the unpacked names — the
                # original fetched [train_op, margn_loss, recnst_loss_scale,
                # batch_loss, accuracy], so loss_batch_train actually received
                # the margin loss, margin the reconstruction loss, etc.
                _, loss_batch_train, margin_loss_train, recnst_loss_train, acc_batch_train = sess.run(
                    [model.train_op,
                     model.batch_loss,
                     model.margn_loss,
                     model.recnst_loss_scale,
                     model.accuracy],
                    feed_dict={model.X: X_batch.reshape([-1, 28, 28, 1]),
                               model.y: y_batch,
                               model.reconstruction: True})
                # (Removed a leftover debug sess.run of model.X_cropped whose
                # result was unused — it cost an extra forward pass per batch.)
                print("\rIter: {}/{} [{:.1f}%] loss : {:.5f}".format(
                    it, n_iter_train_per_epoch, 100.0 * it / n_iter_train_per_epoch, loss_batch_train), end="")
                loss_train_ep.append(loss_batch_train)
                acc_train_ep.append(acc_batch_train)
                margin_loss_train_ep.append(margin_loss_train)
                recnst_loss_train_ep.append(recnst_loss_train)
            loss_train = np.mean(loss_train_ep)
            margin_loss_train = np.mean(margin_loss_train_ep)
            recnst_loss_train = np.mean(recnst_loss_train_ep)
            acc_train = np.mean(acc_train_ep)
            loss_val_ep = []
            acc_val_ep = []
            for it in range(1, n_iter_valid_per_epoch + 1):
                X_batch, y_batch = mnist.validation.next_batch(batch_size)
                # NOTE(review): validation feeds X_cropped directly while
                # training feeds X — confirm this matches the graph's crop path.
                loss_batch_val, acc_batch_val = sess.run(
                    [model.batch_loss, model.accuracy],
                    feed_dict={model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
                               model.y: y_batch})
                loss_val_ep.append(loss_batch_val)
                acc_val_ep.append(acc_batch_val)
                print("\rValidation {}/{} {:.1f}%".format(it,
                                                          n_iter_valid_per_epoch,
                                                          100.0 * it / n_iter_valid_per_epoch),
                      end=" " * 30)
            loss_val = np.mean(loss_val_ep)
            acc_val = np.mean(acc_val_ep)
            # Bug fix: argument order now matches the format string — the
            # original printed margin loss under "loss_val", etc.
            print("\repoch: {} loss_train: {:.5f}, loss_val: {:.5f}, margin_loss: {:.5f}, recnst_loss: {:.5f}, train_acc: {:.4f}%, valid_acc: {:.4f}% {}".format(
                epoch + 1,
                loss_train,
                loss_val,
                margin_loss_train,
                recnst_loss_train,
                acc_train * 100.0,
                acc_val * 100.0,
                "(improved)" if loss_val < best_loss_val else ""))
            if loss_val < best_loss_val:
                saver.save(sess, checkpoint_file)
                best_loss_val = loss_val
        writer.close()
def test(model):
    """Evaluate *model* on the MNIST test set using the latest checkpoint in tmp/."""
    n_iter_test_per_epoch = mnist.test.num_examples // batch_size
    loss_test_ep = []
    acc_test_ep = []
    #init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        #init.run()
        #saver = tf.train.import_meta_graph(checkpoint_file +'.meta')
        saver.restore(sess, tf.train.latest_checkpoint('tmp/'))
        #init.run()
        print('\n\nTest\n')
        for it in range(1, n_iter_test_per_epoch + 1):
            X_batch, y_batch = mnist.test.next_batch(batch_size)
            # NOTE(review): feeds X_cropped directly (bypassing the crop op) —
            # confirm this matches how the model was trained/validated.
            loss_batch_test, acc_batch_test = sess.run(
                [model.batch_loss, model.accuracy],
                feed_dict = { model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
                              model.y: y_batch,
                              model.reconstruction: False})
            loss_test_ep.append(loss_batch_test)
            acc_test_ep.append(acc_batch_test)
            print("\rTesting {}/{} {:.1f}%".format(it,
                                                   n_iter_test_per_epoch,
                                                   100.0 * it / n_iter_test_per_epoch),
                  end=" "*30)
        loss_test = np.mean(loss_test_ep)
        acc_test = np.mean(acc_test_ep)
        print("\r(Testing) accuracy: {:.3f}%, loss: {:.4f}".format(acc_test*100.0, loss_test))
def reconstruction(model, num_samples):
    """Plot *num_samples* MNIST test digits next to their decoder reconstructions."""
    samples_imgs = mnist.test.images[:num_samples].reshape([-1, 28, 28, 1])
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(checkpoint_file +'.meta')
        saver.restore(sess, tf.train.latest_checkpoint('tmp/'))
        decoder_output, y_pred_value = sess.run(
            [model.decoder_output, model.y_pred],
            # Empty label array: labels are unused when reconstruction=False.
            feed_dict = {model.X_cropped: samples_imgs,
                         model.y: np.array([], dtype = np.int64),
                         model.reconstruction: False})
    samples_imgs = samples_imgs.reshape([-1, 28, 28])
    reconstructions_imgs = decoder_output.reshape([-1, 28, 28])
    plt.figure(figsize = (num_samples * 2, 4))
    # Top row: original inputs with their true labels.
    for img_idx in range(num_samples):
        plt.subplot(2, num_samples, img_idx + 1)
        plt.imshow(samples_imgs[img_idx], cmap='gray')
        plt.title("Input: " + str(mnist.test.labels[img_idx]))
        plt.axis("off")
    #plt.show()
    # Bottom row: decoder reconstructions with the predicted labels.
    for img_idx in range(num_samples):
        plt.subplot(2, num_samples, num_samples + img_idx + 1)
        plt.imshow(reconstructions_imgs[img_idx], cmap='gray')
        plt.title("Output: " + str(y_pred_value[img_idx]))
        plt.axis("off")
    plt.show()
def count_params():
    """Print the number of trainable model parameters, in millions."""
    num_elements = lambda v: functools.reduce(lambda acc, dim: acc * dim,
                                              v.get_shape().as_list())
    #n_total = sum(size(v) for v in tf.all_variables())
    trainable_total = sum(num_elements(v) for v in tf.trainable_variables())
    print("Model size (Trainable): {:.1f}M\n".format(trainable_total / 1000000.0))
    #print("Model size (Total): {}".format(n_total))
if __name__ == '__main__':
tf.reset_default_graph()
model = CapsNet(rounds = 3)
#train(model, False, 50)
test(model)
#reconstruction(model, 5)
| 7,576 | 2,794 |
import os
import sys
import time
import decimal
import sqlite3
import multiprocessing
from secret import rpc_user, rpc_password
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import cluster_db_query as cdq
import db_query as dq
rpc_ip = '127.0.0.1'
rpc_port = '8332'
timeout = 300
def get_rpc():
return AuthServiceProxy(f'http://{rpc_user}:{rpc_password}@{rpc_ip}:{rpc_port}', timeout=timeout)
def get_min_cluster_num(addr, flag=0):
    '''DON'T USE
    Return the minimum cluster number for *addr*.

    flag 0: overall minimum
    flag 1: minimum value that is not -1 (None when every value is -1)
    '''
    # Bug fix: the original immediately clobbered this list with an empty one
    # and then iterated an undefined name `addr_set`, raising NameError.
    cluster_num_list = cdq.get_min_cluster(addr)
    sort_cls_num_list = sorted(cluster_num_list)
    if flag == 0:
        return sort_cls_num_list[0]
    elif flag == 1:
        for num in sort_cls_num_list:
            if num > -1:
                return num
def get_cluster_num(addrs, max_cluster_num):
    """Choose a cluster number for *addrs*; return (cls_num, max_cluster_num)."""
    cls_num = -1
    cls_num_set = set(cdq.get_cluster_number(addrs))
    #all same cluster
    if len(cls_num_set) == 1:
        cls_num = cls_num_set.pop()
        if cls_num == -1:
            # None of the addresses is clustered yet: allocate a fresh number.
            cls_num = max_cluster_num + 1
            max_cluster_num = cls_num
    else:
        # Mixed clusters: reuse the smallest already-assigned number.
        cls_num = cdq.get_min_clustered(addrs)
    return cls_num, max_cluster_num
def update_cluster(addrs, cluster_num):
    """Assign *cluster_num* to every address in *addrs*; True on success."""
    try:
        pairs = [(address, cluster_num) for address in addrs]
        cdq.insert_cluster_many(pairs)
        return True
    except Exception as exc:
        print(exc)
        return False
def is_utxo(address, tx):
    '''
    Return (True, utxo_list) when *tx* still has unspent outputs,
    otherwise (False, None).
    '''
    # NOTE(review): get_utxo is not defined in this module — confirm it
    # returns a list of unspent outputs.
    utxo_list = get_utxo(tx)
    # Bug fix: the original compared the list itself to 0 (`utxo_list > 0`),
    # a TypeError in Python 3; test for non-emptiness instead.
    if utxo_list:
        return True, utxo_list
    return False, None
def is_first(address, tx):
    '''
    True when *tx* is the first transaction in which *address* appeared.
    '''
    return cdq.find_tx_first_appeared_address(address) == tx
def is_power_of_ten(address, tx):
    '''
    Change-address heuristic on the amount paid to *address* in *tx*:
    True when the value has at least 4 decimal places.
    '''
    value = cdq.find_addr_value(address, tx)
    # Bug fix: the original read an undefined name `a` instead of `value`,
    # and called `exponent()` — exponent is an attribute of DecimalTuple,
    # not a method.
    num_of_decimal = abs(decimal.Decimal(str(value)).as_tuple().exponent)
    return num_of_decimal >= 4
def is_otc_cond(in_addrs, out_addrs, tx):
    """One-time-change heuristic: return the single change-address candidate
    among *out_addrs*, or None when there is no unique candidate."""
    if in_addrs is None or out_addrs is None:
        return None
    balance_address = None
    num_of_balance = 0
    # NOTE(review): `len(in_addrs) != 2` is preserved from the original but
    # looks suspicious next to `len(out_addrs) == 2` — confirm the intended
    # input-count rule.
    if len(in_addrs) != 2 and len(out_addrs) == 2:
        for out in out_addrs:
            if out in in_addrs:
                continue
            # Bug fix: is_utxo returns a (bool, list) tuple; the original
            # truth-tested the tuple itself, which is always truthy, so the
            # UTXO filter never rejected anything.
            utxo_found, _ = is_utxo(out, tx)
            if not utxo_found:
                continue
            if not is_power_of_ten(out, tx):
                continue
            balance_address = out
            num_of_balance += 1
        if balance_address is None:
            return None
        elif num_of_balance >= 2:
            # Ambiguous: more than one candidate change address.
            return None
        else:
            return balance_address
    return None
def add_db(c_dict):
    '''
    Persist address clusters:
    1. If every address maps to the same cluster value in the DB
       - if that value is -1 (unclustered), assign max_cluster + 1
       - otherwise the numbering is already consistent
    2. If the addresses map to different values
       - merge everything onto the smallest value that is not -1
    3. Addresses holding another value are re-numbered accordingly
    '''
    for _, addrs in c_dict.items():
        cluster_num_list = sorted(list(cdq.get_cluster_number(addrs)))
        if len(cluster_num_list) == 1 and cluster_num_list[0] == -1:
            # Nothing clustered yet: allocate a brand-new cluster number.
            cluster_num = cdq.get_max_clustered() + 1
            execute_list = list(zip([cluster_num]*len(addrs), addrs))
            cdq.update_cluster_many(execute_list)
        else:
            # Reuse the smallest existing (non -1) cluster number.
            cluster_num = -1
            for num in cluster_num_list:
                if num != -1:
                    cluster_num = num
                    break
            for num in cluster_num_list:
                if num != cluster_num:
                    # Re-point every address of the absorbed cluster.
                    addr = cdq.find_addr_from_cluster_num(num)
                else:
                    addr = addrs
                execute_list = list(zip([cluster_num]*len(addr), addr))
                cdq.update_cluster_many(execute_list)
def rpc_command(height):
    """Fetch the txids of the block at *height*, retrying on OSError."""
    while True:
        try:
            rpc_connection = get_rpc()
            block_hash = rpc_connection.getblockhash(height)
            txes = rpc_connection.getblock(block_hash)['tx']
            break
        except OSError as e:
            # Local ephemeral ports can run out under heavy polling;
            # back off briefly and retry.
            print("Cannot assign requested address!")
            time.sleep(3)
    return txes
def one_time_change(height):
    """Apply the one-time-change heuristic to every tx of block *height*.

    Returns {local_cluster_number: set_of_input_addresses} grouping the
    inputs of transactions that exhibit the change-address pattern.
    """
    cluster_dict = dict()
    txes = rpc_command(height)
    for tx in txes:
        tx_indexes = dq.get_txid(tx)
        in_addrs = dq.get_addr_txin(tx_indexes)
        out_addrs = dq.get_addr_txout(tx_indexes)
        # Bug fix: the original called an undefined `change_heuristics_cond`
        # and had a stray trailing colon (SyntaxError). is_otc_cond is the
        # heuristic defined in this module — confirm it is the intended one.
        balance_addr = is_otc_cond(in_addrs, out_addrs, tx)
        if balance_addr is not None:
            # Merge this tx's inputs into an overlapping local cluster,
            # or open a new cluster when nothing overlaps.
            need_new_cls_num = True
            for key, addr_set in cluster_dict.items():
                if len(addr_set & in_addrs) != 0:
                    # Bug fix: set.union() returns a new set; the original
                    # discarded the result, so clusters never grew.
                    cluster_dict[key] = addr_set | in_addrs
                    need_new_cls_num = False
                    break
            if need_new_cls_num:
                # Bug fix: next id is max+1; the original popped an arbitrary
                # element from a set, which is not guaranteed to be the max.
                cls_num = max(cluster_dict.keys()) + 1 if cluster_dict else 0
                cluster_dict[cls_num] = in_addrs
    return cluster_dict
def main():
    """Cluster addresses in block-range batches using a process pool."""
    term = 10000                 # blocks per DB transaction / pool batch
    start_height = 0
    end_height = dq.get_max() - 1
    pool_num = multiprocessing.cpu_count()//2
    print("CLSUTER TABLE MADE")
    time.sleep(5)
    stime = time.time()
    try:
        for sheight, eheight in zip(range(start_height, end_height, term), \
                                    range(start_height+term, end_height+term, term)):
            addr_dict = dict()
            max_cluster_num = 0
            cdq.begin_transactions()
            if eheight >= end_height:
                eheight = end_height + 1
            with multiprocessing.Pool(pool_num) as p:
                # One per-block cluster_dict at a time, produced in parallel.
                result = p.imap(one_time_change, range(sheight, eheight))
                for cluster_dict in result:
                    cluster_set = set(cluster_dict.keys())
                    # Merge overlapping per-block clusters into the batch-wide dict.
                    for i in cluster_dict.keys():
                        for j in addr_dict.keys():
                            if len(addr_dict[j] & cluster_dict[i]) > 0:
                                addr_dict[j] = addr_dict[j].union(cluster_dict[i])
                                cluster_set = cluster_set - {i}
                    # Remaining (non-overlapping) clusters get fresh keys.
                    for i in list(cluster_set):
                        addr_dict[max_cluster_num] = \
                            addr_dict.get(max_cluster_num, set()).union(cluster_dict[i])
                        max_cluster_num += 1
            add_db(addr_dict)
            cdq.commit_transactions()
            etime = time.time()
            print('height: {}, time:{}'.format(eheight, etime-stime))
    except KeyboardInterrupt:
        print('Keyboard Interrupt Detected! Commit transactions...')
        cdq.commit_transactions()
    finally:
        cdq.commit_transactions()
        cdq.db_close()
if __name__=="__main__":
main() | 7,707 | 2,848 |
from typing import Optional, List
from pydantic import Field
from pydantic.main import BaseModel
from inoft_vocal_framework.utils.formatters import normalize_intent_name
class Intent(BaseModel):
    """Matched Dialogflow intent: machine `name` plus human-readable `displayName`."""
    name: str
    displayName: str
class User(BaseModel):
    """Google Assistant `user` block of the webhook payload."""
    _VERIFICATION_NAME_GUEST = "GUEST"
    _VERIFICATION_NAME_VERIFIED = "VERIFIED"
    _PERMISSION_UPDATE_TYPE = "UPDATE"
    permissions: Optional[list] = None
    locale: Optional[str] = None
    # NOTE(review): presumably an ISO timestamp string — confirm format.
    lastSeen: Optional[str] = None
    userStorage: Optional[str] = None
    userVerificationStatus: Optional[str] = None
class Payload(BaseModel):
    """Google Actions payload of an originalDetectIntentRequest."""
    _INPUT_TYPE_OPTION = "OPTION"
    user: User = Field(default_factory=User)
    class Conversation(BaseModel):
        conversationId: str
        type: str
    conversation: Optional[Conversation] = None
    isInSandbox: bool
    requestType: str
    class InputsCustomList(list):
        # todo: make the check that the current device has the capabilities to use an interactive list
        class InputItem(BaseModel):
            intent: str
            rawInputs: list
            class ArgumentItemsCustomList(list):
                class ArgumentItem(BaseModel):
                    name: str
                    textValue: str
                    rawText: str
                def append(self, item: dict) -> None:
                    # Coerce raw dicts into typed ArgumentItem objects;
                    # silently ignores non-dict items.
                    if isinstance(item, dict):
                        argument_item_object = self.ArgumentItem(**item)
                        super().append(argument_item_object)
                def custom_set_from(self, list_object: list) -> None:
                    for item in list_object:
                        self.append(item=item)
            arguments: Optional[ArgumentItemsCustomList] = Field(default_factory=ArgumentItemsCustomList)
        def append(self, item: dict) -> None:
            # Coerce raw dicts into typed InputItem objects; ignores non-dicts.
            if isinstance(item, dict):
                input_item_object = self.InputItem(**item)
                super().append(input_item_object)
        def custom_set_from(self, list_object: list) -> None:
            for item in list_object:
                self.append(item=item)
    inputs: InputsCustomList = Field(default_factory=InputsCustomList)
    class Surface(BaseModel):
        capabilities: list = Field(default_factory=list)
    surface: Surface = Field(default_factory=Surface)
    class AvailableSurfaceItem(BaseModel):
        capabilities: list = Field(default_factory=list)
    availableSurfaces: List[AvailableSurfaceItem] = Field(default_factory=list)
    def get_first_input_of_type(self, type_name: str) -> Optional[dict]:
        """Return the first input argument whose name equals *type_name*, else None."""
        for input_item in self.inputs:
            for argument_item in input_item.arguments:
                if argument_item.name == type_name:
                    return argument_item
        return None
class OriginalDetectIntentRequest(BaseModel):
    """Platform-specific request wrapper (source, version, raw payload)."""
    source: str
    version: str
    payload: Payload
class QueryResult(BaseModel):
    """Dialogflow query result for the current webhook request."""
    queryText: str
    action: str
    parameters: dict
    allRequiredParamsPresent: bool
    fulfillmentText: Optional[str] = None
    fulfillmentMessages: Optional[List[str]] = None
    outputContexts: List[dict]
    intent: Intent
    # NOTE(review): Dialogflow documents this as a float score — confirm `int`.
    intentDetectionConfidence: Optional[int] = None
    diagnosticInfo: Optional[dict] = None
    # NOTE(review): unusual capitalized field name — confirm the source key.
    LanguageModel: str
class Request(BaseModel):
    """Top-level Dialogflow webhook request with Google Actions helpers."""
    # General for LaunchRequest, IntentRequest and SessionEndedRequest
    responseId: str
    queryResult: QueryResult
    originalDetectIntentRequest: OriginalDetectIntentRequest
    session: str

    def is_option_select_request(self) -> bool:
        """True when the request came from an interactive-list option pick."""
        return self.queryResult.queryText == "actions_intent_OPTION"

    def get_updates_user_id_if_present(self) -> Optional[str]:
        """Return the UPDATES_USER_ID granted by a permission context, if any."""
        for output_context in self.queryResult.outputContexts:
            context_parameters: Optional[dict] = output_context.get('parameters', None)
            if context_parameters is not None:
                context_parameters_permission: Optional[bool] = context_parameters.get('PERMISSION')
                if context_parameters_permission is True:
                    context_parameters_updates_user_id: Optional[str] = context_parameters.get('UPDATES_USER_ID', None)
                    if context_parameters_updates_user_id is not None:
                        return context_parameters_updates_user_id
        return None

    def selected_option_identifier(self) -> Optional[str]:
        """Return the textValue of the OPTION argument, or None when absent."""
        payload = self.originalDetectIntentRequest.payload
        argument_item = payload.get_first_input_of_type(payload._INPUT_TYPE_OPTION)
        if isinstance(argument_item, payload.InputsCustomList.InputItem.ArgumentItemsCustomList.ArgumentItem):
            return argument_item.textValue
        # Explicit None instead of silently falling off the end.
        return None

    def is_launch_request(self) -> bool:
        """True for the Google Assistant welcome (launch) request."""
        return self.queryResult.queryText == "GOOGLE_ASSISTANT_WELCOME"

    def active_intent_name(self) -> str:
        """Normalized name of the matched intent."""
        return normalize_intent_name(intent_name=self.queryResult.intent.displayName)

    def is_in_intent_names(self, intent_names_list: List[str] or str) -> bool:
        """True when the active intent matches *intent_names_list* (str or list of str)."""
        intent_name: str = self.active_intent_name()
        if isinstance(intent_names_list, list):
            return intent_name in [normalize_intent_name(intent_name=name) for name in intent_names_list]
        elif isinstance(intent_names_list, str):
            return intent_name == normalize_intent_name(intent_name=intent_names_list)
        else:
            raise Exception(f"intent_names_list type not supported : {type(intent_names_list)}")

    def get_intent_parameter_value(self, parameter_key: str, default=None):
        """Return queryResult.parameters[parameter_key], or *default* when missing.

        Bug fix: `parameters` is a plain dict — the original called
        dict.get(dict_key=...) (an invalid keyword argument) and then
        `.to_any()` on the result, raising TypeError on every call.
        """
        return self.queryResult.parameters.get(parameter_key, default)

    def is_not_usable(self) -> bool:
        # Always usable. The unreachable legacy checks that followed the
        # original `return False` referenced attributes (self.type,
        # self._type, self._requestId, ...) this model never defines; removed.
        return False

    def to_dict(self) -> dict:
        """Plain-dict representation of the model."""
        return self.dict()
| 6,240 | 1,725 |
# -*- encoding: utf-8 -*-
from django.contrib import admin
from emailtemplates.models import EmailTemplate
from emailtemplates.models import MailServerFailure
class EmailTemplateAdmin(admin.ModelAdmin):
    """Admin for EmailTemplate: list by description/subject; uid is immutable."""
    # Removed a redundant trailing `pass` — the class body was already non-empty.
    list_display = ['desc', 'subject']
    readonly_fields = ['uid']
admin.site.register(EmailTemplate, EmailTemplateAdmin)
class MailServerFailureAdmin(admin.ModelAdmin):
    """Read-only admin view of mail-server failures."""
    # Removed a redundant trailing `pass` — the class body was already non-empty.
    list_display = ['when', 'client_ip', 'reason']
    readonly_fields = ['when', 'client_ip', 'reason']
admin.site.register(MailServerFailure, MailServerFailureAdmin)
| 589 | 180 |
class Package(object):
    """A named package; equality compares the other operand's string form to the name."""

    def __init__(self, name):
        self._name = name

    @classmethod
    def root(cls):  # type: () -> Package
        """Return the sentinel root package."""
        return Package("_root_")

    def __str__(self):
        # Bug fix: __str__ was decorated with @property, which made
        # str(instance) fail (type lookup finds a property, not a callable).
        return self._name

    def __eq__(self, other):
        # Bug fix: the original returned str(other == self._name) — the
        # string "True"/"False" of an identity-ish comparison. Compare the
        # string form of *other* against the name instead, so both
        # Package("x") == "x" and Package("x") == Package("x") hold.
        return str(other) == self._name

    def __hash__(self):
        # Defining __eq__ disables inherited hashing; keep Package usable
        # as a dict key / set member.
        return hash(self._name)

    def __repr__(self):
        return "<Package {}>".format(self._name)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#--------------------------------#
"""
File name: TPEX_STOCKBOT/main.py
Author: WEI-TA KUAN
Date created: 12/9/2021
Date last modified: 9/10/2021
Version: 1.0
Python Version: 3.8.8
Status: Developing
"""
#--------------------------------#
from scraping_data import stock_daily_scraping, tpex_holiday
import pickle
import datetime
year = datetime.datetime.today().strftime("%Y")
today = datetime.datetime.today().strftime("%Y/%m/%d")
with open("assets/tpex_holiday.pkl", 'rb') as f:
    holiday = pickle.load(f)

# Refresh the market-holiday table when the stored year is stale.
# Bug fix: the original `while True:` loop only broke after updating, so it
# spun forever whenever the stored year was already current.
if year != holiday["ไผๅธๆฅๆ"][0].split("/")[0]:
    print("Update Holiday")
    tpex_holiday.get_holiday()
    with open("assets/tpex_holiday.pkl", 'rb') as f:
        holiday = pickle.load(f)

# Dont run the code if the market is close (holiday or weekend).
if (today != holiday["ไผๅธๆฅๆ"]).any() and datetime.datetime.today().weekday() not in [5, 6]:
    print("Run 360 TPEX Stockbot...")
    # run the daily scraping method to store today stock data
    stock_daily_scraping.daily_scraping()
"""
list twikis:
List all L1 Trigger Offline Twikis
Usage:
list twikis [check=1]
Parameters:
check: force a check of the twiki URL before printing.
Useful when adding new entries. Default: 0
"""
import logging
import urllib
import hepshell
LOG = logging.getLogger(__name__)
URL_PREFIX = 'https://twiki.cern.ch/twiki/bin/view/'
TWIKIS = {
'L1T offline DEV': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TOfflineDev',
'description': 'Instructions for L1 offline software development',
},
'L1T Calo Upgrade Offline Analysis': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1CaloUpgradeOfflineAnalysis',
'description': 'Some CaloL2 analysis workflows are detailed here',
},
'L1T phase 2': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2',
'description': 'In preparation ! ',
},
'L1T phase 2 interface specs': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2InterfaceSpecifications',
'description': 'Working definitions of Trigger Primitive inputs',
},
'CSC trigger emulator timing': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/CSCDigitizationTiming',
'description': 'Simulation of signal times for CSC',
},
'L1 Trigger Emulator Stage 2 Upgrade Instructions': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TStage2Instructions',
'description': 'L1 Trigger Emulator Stage 2 Upgrade Instructions',
},
'Offline DQM': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/DQMOffline',
'description': 'Twiki meant to give you a basic understanding of Offline DQM',
},
'L1T DQM DEV': {
'url': 'https://twiki.cern.ch/twiki/bin/view/Sandbox/L1TDQMModuleDev',
'description': 'L1T DQM Module Development Guide',
}
}
def does_url_exist(url):
    """Return True when an HTTP GET on *url* answers with status 200."""
    # Bug fix / py3 compat: urllib.urlopen was removed in Python 3; fall back
    # to the old location only on Python 2.
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib import urlopen  # Python 2
    exists = False
    try:
        qry = urlopen(url)
        if qry.getcode() == 200:
            exists = True
    except Exception as e:
        print(e)
    return exists
def get_text_lenghts(twikis):
    """Return the longest name, URL and description lengths across *twikis*."""
    longest_name = max(len(name) for name in twikis)
    longest_url = max(len(entry['url']) for entry in twikis.values())
    longest_description = max(len(entry['description']) for entry in twikis.values())
    return longest_name, longest_url, longest_description
class Command(hepshell.Command):
    """`list twikis` command: print an aligned table of the L1T offline twikis."""
    DEFAULTS = {
        'check': False  # when truthy, HTTP-check each URL before printing
    }
    def __init__(self, path=__file__, doc=__doc__):
        super(Command, self).__init__(path, doc)
    def run(self, args, variables):
        # parse arguments and parameters
        # NOTE(review): __prepare is not defined in this class; name mangling
        # (_Command__prepare) means a base-class method of another name will
        # not match — confirm hepshell.Command provides it.
        self.__prepare(args, variables)
        self.__create_table(TWIKIS)
        return True
    def __create_table(self, twikis):
        """Build the name/URL/description table into self.__text."""
        headers = ['Name', 'URL', 'Description']
        # get maximum lenghts of our columns
        max_len_n, max_len_u, max_len_d = get_text_lenghts(twikis)
        # add some space
        max_len_n = max([max_len_n, len(headers[0])])
        row_format = "{:<" + str(max_len_n) + "}\t"
        row_format += "{:<" + str(max_len_u) + "}\t"
        row_format += "{:<" + str(max_len_d) + "}\n"
        self.__text = row_format.format(*headers)
        self.__text += '-' * (max_len_n + max_len_u + max_len_d)
        self.__text += '\n'
        for name, twiki in sorted(twikis.items()):
            # url = twiki['url'].replace(URL_PREFIX, '')
            url = twiki['url']
            desc = twiki['description']
            # Only print rows whose URL answers, when check=1 was requested.
            if not self.__variables['check'] or does_url_exist(url):
                self.__text += row_format.format(*[name, url, desc])
            else:
                LOG.warn('Twiki "{0}" does not exist!'.format(url))
        self.__text += '\n'
| 3,959 | 1,368 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""IAAFT surrogates for correlated noise.
The properties of linearly correlated noise can be captured quite
accurately by IAAFT surrogates. Thus, they cannot easily fool
a dimension estimator (here we use Takens's maximum likelihood estimator
for the correlation dimension) if surrogate analysis is performed
additionally.
"""
import matplotlib.pyplot as plt
import numpy as np
from nolitsa import surrogates, d2, noise, delay
# Correlated noise: moving-average-smoothed Gaussian noise, 2**12 samples.
x = noise.sma(np.random.normal(size=(2 ** 12)), hwin=100)
# Trim to the segment with the best end-point match (cleaner surrogates).
ends = surrogates.mismatch(x)[0]
x = x[ends[0]:ends[1]]
# Autocorrelation time: first lag where the ACF drops below 1/e.
act = np.argmax(delay.acorr(x) < 1 / np.e)
mle = np.empty(19)
# Compute 19 IAAFT surrogates and compute the correlation sum.
for k in range(19):
    y = surrogates.iaaft(x)[0]
    r, c = d2.c2_embed(y, dim=[7], tau=act, window=act)[0]
    # Compute the Takens MLE.
    r_mle, mle_surr = d2.ttmle(r, c)
    i = np.argmax(r_mle > 0.5 * np.std(y))
    mle[k] = mle_surr[i]
    plt.loglog(r, c, color='#BC8F8F')
# Same estimate for the original series, for comparison against surrogates.
r, c = d2.c2_embed(x, dim=[7], tau=act, window=act)[0]
# Compute the Takens MLE.
r_mle, true_mle = d2.ttmle(r, c)
i = np.argmax(r_mle > 0.5 * np.std(x))
true_mle = true_mle[i]
plt.title('IAAFT surrogates for correlated noise')
plt.xlabel('Distance $r$')
plt.ylabel('Correlation sum $C(r)$')
plt.loglog(r, c, color='#000000')
plt.figure(2)
plt.title('Takens\'s MLE for correlated noise')
plt.xlabel(r'$D_\mathrm{MLE}$')
plt.vlines(mle, 0.0, 0.5)
plt.vlines(true_mle, 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)
plt.show()
| 1,528 | 690 |
import time
from unittest import TestCase
from app.pubmed.source_entrez import *
class TestEntrez(TestCase):
    """Integration tests for the Entrez source module (hit the live API)."""
    def test_do_rate_limit(self):
        """Four serial calls must take between 3 and 4 rate-limit periods (~0.37s each)."""
        # Serial Test
        start = time.time()
        do_rate_limit()
        do_rate_limit()
        do_rate_limit()
        do_rate_limit()
        elapsed = time.time() - start
        self.assertTrue(0.37 * 3 < elapsed < 0.37 * 4, "Incorrect elapsed time for serial test, " + str(elapsed))
        time.sleep(0.37)  # let the limiter window reset between sub-tests
        # Parallel Test 1: 4 threads still serialize through the limiter.
        start = time.time()
        run_over_threads(do_rate_limit, [[], [], [], []])
        elapsed = time.time() - start
        self.assertTrue(0.37 * 3 < elapsed < 0.37 * 4, "Incorrect elapsed time for parallel test, " + str(elapsed))
        time.sleep(0.37)
        # Parallel Test 2: 7 threads -> 6..7 periods.
        start = time.time()
        run_over_threads(do_rate_limit, [[], [], [], [], [], [], []])
        elapsed = time.time() - start
        self.assertTrue(0.37 * 6 < elapsed < 0.37 * 7, "Incorrect elapsed time for parallel test, " + str(elapsed))
    def test_request_entrez_einfo(self):
        """einfo must report the PubMed databases in its DbList."""
        response = request_entrez_einfo()
        self.assertIsInstance(response, dict)
        self.assertTrue("DbList" in response)
        databases = response["DbList"]
        self.assertIsInstance(databases, list)
        self.assertTrue(PUBMED_DB_NAME in databases)
        self.assertTrue(PUBMED_CENTRAL_DB_NAME in databases)
    def test_request_entrez_database_list(self):
        """The convenience wrapper must surface the same database names."""
        databases = request_entrez_database_list()
        self.assertIsInstance(databases, list)
        self.assertTrue(PUBMED_DB_NAME in databases)
        self.assertTrue(PUBMED_CENTRAL_DB_NAME in databases)
    def test_request_entrez_by_date(self):
        # This test is really slow...
        # print(len(download_all_modified_since(PUBMED_DB_NAME, "2022/03/08")))
        pass
| 1,853 | 625 |
import nltk
import numpy as np
from nltk.stem.porter import PorterStemmer
nltk.download('punkt')
stemmer = PorterStemmer()
# splitting a string into words, punctuation and numbers
# splitting a string into words, punctuation and numbers
def tokenize(sentence):
    """Split *sentence* into NLTK word tokens (requires the 'punkt' data)."""
    return nltk.word_tokenize(sentence)
# generating the root form the words ex: universe - univers, university - univers
# generating the root form the words ex: universe - univers, university - univers
def stem(word):
    """Lower-case *word* and reduce it to its Porter stem."""
    return stemmer.stem(word.lower())
# put all these words in a bag to be used later
# put all these words in a bag to be used later
def bag_of_words(tokenized_sentence, all_words):
    """Return a float32 indicator vector over *all_words*.

    bag[i] is 1.0 when the stem of any sentence token equals all_words[i]
    (assumes *all_words* entries are already stemmed), else 0.0.
    """
    # Stem every word in the sentence; a set gives O(1) membership tests
    # instead of the original O(len(sentence)) list scan per vocabulary word.
    stemmed_words = {stem(w) for w in tokenized_sentence}
    bag = np.zeros(len(all_words), dtype=np.float32)
    for index, word in enumerate(all_words):
        if word in stemmed_words:
            bag[index] = 1.0
    return bag
| 776 | 257 |
#!/usr/bin/env python3
import os
import click
from bank_api import create_app, db, models, utils
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
@app.shell_context_processor
def make_shell_context():
    """Expose db and the core models in `flask shell` without manual imports."""
    return {
        'db': db,
        'Account': models.Account,
        'Customer': models.Customer,
        'Transaction': models.Transaction}
@app.cli.command('createdb')
@click.option('--test-data', type=bool, default=True, help="Initializes database with pre-loaded data")
def createdb(test_data):
    """Recreate the schema from scratch (DESTROYS existing data); optionally seed fixtures."""
    db.drop_all()
    db.create_all()
    if test_data:
        customer_data = [
            {'name': "Robin", 'surname': "Staunton-Collins"},
            {'name': "Matin", 'surname': "Abbasi"},
            {'name': "Rodrigo", 'surname': "Hammerly"},
            {'name': "Monty", 'surname': "Python"}
        ]
        # customer_id values refer to the insertion order of customer_data above.
        account_data = [
            {'customer_id': 1, 'balance': 50, 'account_number': utils.generate_random_account_number()},
            {'customer_id': 1, 'balance': 40, 'account_number': utils.generate_random_account_number()},
            {'customer_id': 2, 'balance': 450, 'account_number': utils.generate_random_account_number()},
        ]
        # One opening transaction per account, matching the starting balances.
        transaction_data = [
            {'account_id': 1, 'amount': 50},
            {'account_id': 2, 'amount': 40},
            {'account_id': 3, 'amount': 450},
        ]
        customers = [models.Customer().import_data(c) for c in customer_data]
        db.session.add_all(customers)
        accounts = [models.Account().import_data(a) for a in account_data]
        db.session.add_all(accounts)
        transactions = [models.Transaction().import_data(t) for t in transaction_data]
        db.session.add_all(transactions)
        db.session.commit()
if __name__ == '__main__':
app.run(debug=True)
| 1,799 | 592 |
import sys
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import PackageNotFoundError, version # pragma: no cover
else:
from importlib_metadata import PackageNotFoundError, version # pragma: no cover
try:
# Change here if project is renamed and does not equal the package name
dist_name = "coveo-push-api-client.py"
__version__ = version(dist_name)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
finally:
del version, PackageNotFoundError
from .document import *
from .documentbuilder import *
from .source import *
from .platformclient import *
from .securityidentitybuilder import *
| 742 | 212 |
import os
import sys

rootpath = str("D:/_1work/pycharmcode/pycharmproject/resrep")
syspath = sys.path
sys.path = []
# Put the project root first on the Python module search path.
# (The original multibyte comment here was split across two physical lines by
# an encoding mangle, which made the file a syntax error as stored.)
sys.path.append(rootpath)
# Add every non-hidden first-level subdirectory of the project to the path.
# Bug fix: the original concatenated `rootpath + i` without a separator,
# producing paths like ".../resrepfoo" that never exist.
sys.path.extend([os.path.join(rootpath, i) for i in os.listdir(rootpath) if i[0] != "."])
sys.path.extend(syspath)
print(sys.path)
import argparse
import os
import shutil
import time
import sys
import sklearn
import sklearn.metrics
import torch
# Initialize CUDA eagerly at import time (fails fast when no GPU is present).
torch.cuda.init()
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# Project-local modules (star imports provide localizer_alexnet*, VOCDataset,
# AverageMeter helpers, np, etc.).
from AlexNet import *
from voc_dataset import *
from utils import *
import wandb

# Single switch for all wandb logging in this script.
USE_WANDB = True  # use flags, wandb is not convenient for debugging
# All lowercase, callable torchvision model constructors (kept for reference;
# --arch here actually selects the local localizer_alexnet* models).
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', default='localizer_alexnet')
parser.add_argument(
    '-j',
    '--workers',
    default=4,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 4)')
parser.add_argument(
    '--epochs',
    default=30,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start-epoch',
    default=0,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch-size',
    default=256,
    type=int,
    metavar='N',
    help='mini-batch size (default: 256)')
parser.add_argument(
    '--lr',
    '--learning-rate',
    default=0.1,
    type=float,
    metavar='LR',
    help='initial learning rate')
# NOTE(review): momentum/weight-decay are parsed but unused — main() builds
# an Adam optimizer with only lr.
parser.add_argument(
    '--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument(
    '--weight-decay',
    '--wd',
    default=1e-4,
    type=float,
    metavar='W',
    help='weight decay (default: 1e-4)')
parser.add_argument(
    '--print-freq',
    '-p',
    default=10,
    type=int,
    metavar='N',
    help='print frequency (default: 10)')
# NOTE(review): help text copy-pasted from --print-freq; this actually
# controls how often validate() runs (in epochs).
parser.add_argument(
    '--eval-freq',
    default=2,
    type=int,
    metavar='N',
    help='print frequency (default: 10)')
parser.add_argument(
    '--resume',
    default='',
    type=str,
    metavar='PATH',
    help='path to latest checkpoint (default: none)')
parser.add_argument(
    '-e',
    '--evaluate',
    dest='evaluate',
    action='store_true',
    help='evaluate model on validation set')
parser.add_argument(
    '--pretrained',
    dest='pretrained',
    action='store_true',
    help='use pre-trained model')
parser.add_argument(
    '--world-size',
    default=1,
    type=int,
    help='number of distributed processes')
parser.add_argument(
    '--dist-url',
    default='tcp://224.66.41.62:23456',
    type=str,
    help='url used to set up distributed training')
parser.add_argument(
    '--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--vis', action='store_true')

# Best validation score so far and wandb step counters, shared across
# main()/train()/validate() via `global`.
best_prec1 = 0
cntr_train = 0
cntr_val = 0
def main():
    """Entry point: parse CLI args, build the model/criterion/optimizer,
    create the VOC dataloaders, then run the train/validate loop and
    checkpoint the best model."""
    global args, best_prec1, cntr_train, cntr_val
    args = parser.parse_args()
    args.distributed = args.world_size > 1

    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.arch == 'localizer_alexnet':
        model = localizer_alexnet(pretrained=args.pretrained)
    elif args.arch == 'localizer_alexnet_robust':
        model = localizer_alexnet_robust(pretrained=args.pretrained)
    # NOTE(review): any other --arch value leaves `model` unbound and the
    # next line raises UnboundLocalError.
    print(model)

    model = torch.nn.DataParallel(model)
    model.cuda()

    # TODO:
    # define loss function (criterion) and optimizer
    # also use an LR scheduler to decay LR by 10 every 30 epochs
    # you can also use PlateauLR scheduler, which usually works well
    # Multi-label classification loss over raw logits; Adam is used with lr
    # only (the parsed --momentum/--weight-decay are not applied here).
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Created but never stepped — the step call in the loop is commented out.
    training_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    #TODO: Create Datasets and Dataloaders using VOCDataset - Ensure that the sizes are as required
    # Also ensure that data directories are correct - the ones use for testing by TAs might be different
    # Resize the images to 512x512
    train_dataset = VOCDataset(image_size=512)
    val_dataset = VOCDataset(split='test', image_size=512)

    def collate_fn(batch):
        # Transpose a list of (image, label, weight) samples into three tuples.
        return tuple(zip(*batch))

    train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        # shuffle=(train_sampler is None),
        # NOTE(review): shuffling disabled for training — confirm intentional.
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        sampler=train_sampler,
        drop_last=True, collate_fn=collate_fn)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True, collate_fn=collate_fn)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    # TODO: Create loggers for wandb - ideally, use flags since wandb makes it harder to debug code.
    if USE_WANDB:
        wandb.init(project="vlr2", reinit=True)

    for epoch in range(args.start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        loss = train(train_loader, model, criterion, optimizer, epoch)
        # training_scheduler.step(loss)

        # evaluate on validation set (every --eval-freq epochs and at the end)
        if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
            m1, m2 = validate(val_loader, model, criterion, epoch)
            # Combined score drives checkpoint selection.
            score = m1 * m2
            # remember best prec@1 and save checkpoint
            is_best = score > best_prec1
            best_prec1 = max(score, best_prec1)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
#TODO: You can add input arguments if you wish
def train(train_loader, model, criterion, optimizer, epoch):
global cntr_train
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
avg_m1 = AverageMeter()
avg_m2 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (data) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# TODO: Get inputs from the data dict
# TODO: Get output from model
# TODO: Perform any necessary functions on the output such as clamping
# TODO: Compute loss using ``criterion``
img_input = torch.stack(data[0], dim=0).cuda()
target = torch.stack(data[1], dim=0).cuda()
wgt = torch.stack(data[2], dim=0).cuda()
# TODO: Get output from model
# TODO: Perform any necessary functions on the output such as clamping
# TODO: Compute loss using ``criterion``
optimizer.zero_grad()
output_heatmap = model(img_input)
if args.arch == 'localizer_alexnet':
max_pool_k = output_heatmap.shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap)
elif args.arch == 'localizer_alexnet_robust':
max_pool_k = output_heatmap[0].shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap[0])
max_pool_k1 = output_heatmap[1].shape[2]
maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
output_1 = maxPool1(output_heatmap[1])
max_pool_k2 = output_heatmap[2].shape[2]
maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
output_2 = maxPool2(output_heatmap[2])
output = output*0.333 + output_1*0.333 + output_2*0.333
output = output.view(output.shape[0], output.shape[1])
loss = criterion(output*wgt, target*wgt)
# measure metrics and record loss
sigmoid = nn.Sigmoid()
m1 = metric1(sigmoid(output), target, wgt)
m2 = metric2(sigmoid(output), target, wgt)
losses.update(loss.item(), img_input.size(0))
avg_m1.update(m1)
avg_m2.update(m2)
# TODO:
# compute gradient and do SGD step
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
epoch,
i,
len(train_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
avg_m1=avg_m1,
avg_m2=avg_m2))
#TODO: Visualize/log things as mentioned in handout
#TODO: Visualize at appropriate intervals
if USE_WANDB and i % args.print_freq == 0:
wandb.log({"train/loss": loss, "train/cntr":cntr_train})
wandb.log({"train/m1": m1, "train/cntr":cntr_train})
wandb.log({"train/m2": m2, "train/cntr":cntr_train})
cntr_train+=1
# End of train()
return loss.detach()
def validate(val_loader, model, criterion, epoch = 0):
    """Evaluate on the validation set; returns (avg metric1, avg metric2).

    Every 14th epoch, also logs the first-image ground truth and per-class
    heatmaps of the first five batches to wandb.
    """
    global cntr_val
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_m1 = AverageMeter()
    avg_m2 = AverageMeter()

    # switch to evaluate mode
    # NOTE(review): no torch.no_grad() here, so the graph is still built
    # during validation — confirm whether that is intentional.
    model.eval()

    end = time.time()
    for i, (data) in enumerate(val_loader):
        # TODO: Get inputs from the data dict
        # collate_fn tuples: data[0]=images, data[1]=labels, data[2]=weights.
        img_input = torch.stack(data[0], dim=0).cuda()
        target = torch.stack(data[1], dim=0).cuda()
        wgt = torch.stack(data[2], dim=0).cuda()

        # TODO: Get output from model
        # TODO: Perform any necessary functions on the output
        # TODO: Compute loss using ``criterion``
        output_heatmap = model(img_input)
        if args.arch == 'localizer_alexnet':
            # Global max-pool the heatmap to one logit per class.
            max_pool_k = output_heatmap.shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap)
        elif args.arch == 'localizer_alexnet_robust':
            # Combine the three heatmap scales with equal 0.333 weights.
            max_pool_k = output_heatmap[0].shape[2]
            maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
            output = maxPool(output_heatmap[0])
            max_pool_k1 = output_heatmap[1].shape[2]
            maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
            output_1 = maxPool1(output_heatmap[1])
            max_pool_k2 = output_heatmap[2].shape[2]
            maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
            output_2 = maxPool2(output_heatmap[2])
            output = output*0.333 + output_1*0.333 + output_2*0.333
        output = output.view(output.shape[0], output.shape[1])
        loss = criterion(output*wgt, target*wgt)
        sigmoid = nn.Sigmoid()

        # measure metrics and record loss
        m1 = metric1(sigmoid(output), target, wgt)
        m2 = metric2(sigmoid(output), target, wgt)
        losses.update(loss.item(), img_input.size(0))
        avg_m1.update(m1)
        avg_m2.update(m2)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
                  'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
                      i,
                      len(val_loader),
                      batch_time=batch_time,
                      loss=losses,
                      avg_m1=avg_m1,
                      avg_m2=avg_m2))

        #TODO: Visualize things as mentioned in handout
        #TODO: Visualize at appropriate intervals
        if USE_WANDB:
            if i % args.print_freq == 0:
                wandb.log({"val/loss": loss, "val/cntr": cntr_val})
                wandb.log({"val/m1": m1, "val/cntr": cntr_val})
                wandb.log({"val/m2": m2, "val/cntr": cntr_val})
                cntr_val += 1
            # Heatmap visualization: first 5 batches, every 14th epoch only.
            if i < 5 and epoch % 14 == 0:
                # Grey-scale version of the first image (channel mean).
                gt_np_img = img_input[0].detach().cpu().numpy().mean(axis=0)
                wandb.log({'heatmaps/epoch_{}_gt_img_{}'.format(epoch, i): wandb.Image(gt_np_img)})
                weighted_target = (target[0] * wgt[0]).detach().cpu().numpy()
                heat_i = 0
                resize512 = transforms.Resize((512, 512))
                for class_i in range(20):
                    # Debug print of each class's weighted target value.
                    print(weighted_target[class_i])
                    if weighted_target[class_i] == 1:
                        target_gt = class_i
                    else:
                        continue
                    if args.arch == 'localizer_alexnet':
                        print("output heatmap shape ", output_heatmap.shape)
                        print(torch.sum(torch.isnan(output_heatmap[0, target_gt]).type(torch.uint8)))
                        out_heat = resize512(output_heatmap[0, target_gt][None, :, :])
                        selected_heatmap = out_heat.detach().cpu()
                        # selected_heatmap = selected_heatmap[None,:,:]
                    elif args.arch == 'localizer_alexnet_robust':
                        print("output heatmap shape ", output_heatmap[0].shape, output_heatmap[1].shape, output_heatmap[2].shape)
                        # print(torch.sum(torch.isnan(output_heatmap[0][0,target_gt]).type(torch.uint8)))
                        out_heat = resize512(output_heatmap[0][0, target_gt][None, :, :]) * 0.333
                        out_heat1 = resize512(output_heatmap[1][0, target_gt][None, :, :]) * 0.333
                        out_heat2 = resize512(output_heatmap[2][0, target_gt][None, :, :]) * 0.333
                        selected_heatmap = out_heat + out_heat1 + out_heat2
                        selected_heatmap = selected_heatmap.detach().cpu()
                    print("target gt", target_gt)
                    selected_heatmap = resize512(selected_heatmap)
                    # CHW -> HWC for wandb.Image.
                    selected_heatmap = torch.permute(selected_heatmap, (1, 2, 0)).numpy()
                    print(selected_heatmap.min())
                    print(selected_heatmap.max())
                    wandb.log({'heatmaps/epoch_{}_img_{}_heatmap_{}'.format(epoch, i, target_gt): wandb.Image(selected_heatmap)})

    print(' * Metric1 {avg_m1.avg:.3f} Metric2 {avg_m2.avg:.3f}'.format(
        avg_m1=avg_m1, avg_m2=avg_m2))

    return avg_m1.avg, avg_m2.avg
# TODO: You can make changes to this function if you wish (not necessary)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value plus a running, count-weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def metric1(pred, gt, valid):
    """
    Mean average precision (mAP) over classes.

    Args:
        pred: (batch, nclasses) tensor of class probabilities — both call
            sites pass sigmoid(output), so values are already in [0, 1].
        gt: (batch, nclasses) binary ground-truth tensor.
        valid: (batch, nclasses) validity mask; only entries > 0 are scored.

    Returns:
        float: average precision averaged over all classes.
    """
    # BUG FIX: the original applied torch.sigmoid() here even though callers
    # already pass sigmoid(output); the double sigmoid squashed probabilities
    # into [0.5, 0.73] and made the `pred_cls < 0.5` check below always fail.
    pred = pred.cpu().detach().numpy()
    gt = gt.cpu().detach().numpy()
    valid = valid.cpu().detach().numpy()
    nclasses = gt.shape[1]
    AP = []
    for cid in range(nclasses):
        gt_cls = gt[:, cid][valid[:, cid] > 0].astype('float32')
        pred_cls = pred[:, cid][valid[:, cid] > 0].astype('float32')
        if np.all(gt_cls == 0):
            # No positives for this class: AP is undefined, so score 1 when
            # nothing is predicted positive, otherwise 0.
            if np.all(pred_cls < 0.5):
                ap = 1.
            else:
                ap = 0.
        else:
            # As per PhilK. code:
            # https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
            # Tiny tie-break so positives don't win ties artificially.
            pred_cls -= 1e-5 * gt_cls
            ap = sklearn.metrics.average_precision_score(gt_cls, pred_cls)
        AP.append(ap)
    return np.mean(AP)
def metric2(pred, gt, valid):
    #TODO: Ignore for now - proceed till instructed
    """
    Mean recall (at threshold 0.5) over classes.

    Args:
        pred: (batch, nclasses) tensor of class probabilities — callers pass
            sigmoid(output), i.e. values already in [0, 1].
        gt: (batch, nclasses) binary ground-truth tensor.
        valid: (batch, nclasses) validity mask; only entries > 0 are scored.

    Returns:
        float: recall averaged over all classes.
    """
    # BUG FIX: the original re-applied torch.sigmoid() although callers
    # already pass probabilities; the double sigmoid squashed values into
    # [0.5, 0.73] and broke the `pred_cls < 0.5` / `> 0.5` thresholding.
    pred = pred.cpu().detach().numpy()
    gt = gt.cpu().detach().numpy()
    valid = valid.cpu().detach().numpy()
    nclasses = gt.shape[1]
    M2 = []
    for cid in range(nclasses):
        gt_cls = gt[:, cid][valid[:, cid] > 0].astype('float32')
        pred_cls = pred[:, cid][valid[:, cid] > 0].astype('float32')
        if np.all(gt_cls == 0):
            # No positives: score 1 when nothing is predicted positive, else 0.
            if np.all(pred_cls < 0.5):
                rec = 1.
            else:
                rec = 0.
        else:
            # As per PhilK. code:
            # https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
            pred_cls -= 1e-5 * gt_cls
            # print(gt_cls)
            # print(pred_cls)
            rec = sklearn.metrics.recall_score(gt_cls, pred_cls > 0.5, average='binary')
        M2.append(rec)
    return np.mean(M2)
if __name__ == '__main__':
    # Script entry point: run the full training/evaluation pipeline.
    main()
| 18,554 | 6,397 |
import sys
from .. import api
import pysam
# Help/description text shown for this subcommand; consumed by create_parser()
# below.
description = """
Index a VCF file.
This command will create an index file (.tbi) for the input VCF.
"""

# NOTE: this f-string runs at import time, so api.common._script_name() is
# evaluated when the module is first loaded.
epilog = f"""
[Example] Index a compressed VCF file:
$ fuc {api.common._script_name()} in.vcf.gz
[Example] Index an uncompressed VCF file (will create a compressed VCF first):
$ fuc {api.common._script_name()} in.vcf
"""
def create_parser(subparsers):
    """Register the VCF-indexing subcommand and its arguments."""
    vcf_help = (
        'Input VCF file to be indexed. When an uncompressed file is \n'
        'given, the command will automatically create a BGZF \n'
        'compressed copy of the file (.gz) before indexing.'
    )
    parser = api.common._add_parser(
        subparsers,
        api.common._script_name(),
        description=description,
        epilog=epilog,
        help='Index a VCF file.',
    )
    parser.add_argument('vcf', help=vcf_help)
    parser.add_argument(
        '--force',
        action='store_true',
        help='Force to overwrite the index file if it is already present.',
    )
def main(args):
    """Create a tabix index (.tbi) for args.vcf via pysam; --force overwrites
    an existing index."""
    pysam.tabix_index(args.vcf, preset='vcf', force=args.force)
| 1,093 | 351 |
import logging
import os
import sys

from peewee import SqliteDatabase, PostgresqlDatabase

# Root logger configured at import time: DEBUG level, echoed to stdout.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

# Database backend is chosen from the environment: local SQLite file when
# POSTGRES_DB_NAME is unset, PostgreSQL otherwise.
if not os.getenv('POSTGRES_DB_NAME'):
    logger.warning('[DB] using sqlite')
    db = SqliteDatabase('quiz.db')
else:
    logger.info('[DB] Connected to postgresql')
    db_name = os.getenv('POSTGRES_DB_NAME')
    db_user = os.getenv('POSTGRES_DB_USER')
    db_pass = os.getenv('POSTGRES_DB_PASS')
    db_host = os.getenv('POSTGRES_DB_HOST')
    db_port = int(os.getenv('POSTGRES_DB_PORT', 5432))
    db = PostgresqlDatabase(db_name, user=db_user, password=db_pass, host=db_host, port=db_port)

# Length of generated auth tokens.
token_length = 64
site_host = os.getenv('APP_SITE_HOST')

# ---- SOCIAL NETWORKS CREDENTIALS ---- #
vk_client_id = os.getenv('VK_CLIENT_ID')
vk_client_secret = os.getenv('VK_CLIENT_SECRET')
fb_client_id = os.getenv('FB_CLIENT_ID')
fb_client_secret = os.getenv('FB_CLIENT_SECRET')
google_client_id = os.getenv('GOOGLE_CLIENT_ID')
google_client_secret = os.getenv('GOOGLE_CLIENT_SECRET')
# ---- END OF CREDENTIALS ---- #
| 1,174 | 462 |
from scuttlecrab.classes.bot import CustomBot

# Module-level bot instance; importing this module constructs the bot.
bot = CustomBot()
| 65 | 24 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ReaderBase(object):
    """Interface for DB-value readers; subclasses must implement parse()."""

    def __init__(self):
        pass

    def parse(self, db_value):
        """Convert a raw DB value into feed-ready data; must be overridden."""
        raise NotImplementedError("Failed to parse db_value")


class TmpReader(ReaderBase):
    """Reader that produces a random placeholder host/customer sample."""

    def __init__(self, place):
        # BUG FIX: the original called super(FakeReader, self).__init__(),
        # which raises NameError — no class named FakeReader exists here.
        super(TmpReader, self).__init__()
        self.place = place  # device/place passed to fluid.create_lod_tensor

    def parse(self, db_value):
        """Build one random sample; `db_value` is ignored.

        NOTE(review): relies on module-level `np` (numpy) and `fluid`
        (paddle.fluid) that are not imported in this chunk — confirm the
        full file provides them.
        """
        data_dict = {}
        data = {}
        data_dict["Host|input"] = np.random.randint(2, size=(1, 1)).astype('int64')
        shapes = [[len(c) for c in data_dict["Host|input"]]]
        data["Host|input"] = fluid.create_lod_tensor(
            data_dict["Host|input"].reshape(-1, 1), shapes, self.place)
        data_dict["Customer|label"] = [1]  # np.array([1]).astype('int64')
        data["Customer|label"] = data_dict["Customer|label"]
        return data
| 1,384 | 437 |
from captcha.image import ImageCaptcha
import random
def create_captcha():
    """Generate a random hex captcha, render it to 'cImg.png', and return it."""
    token = hex(random.randint(3000, 5999) * random.randint(100, 199))
    captcha_text = str(token)
    renderer = ImageCaptcha(width=280, height=90)
    renderer.generate(captcha_text)
    renderer.write(captcha_text, 'cImg.png')
    return captcha_text
def check_captcha():
    """Prompt the user up to three times to type the generated captcha text.

    Returns True as soon as the input matches, False after three misses.
    """
    expected = create_captcha()
    res = False
    for _ in range(3):
        if input("enter captcha: ") == expected:
            res = True
            break
    return res

check_captcha()
| 640 | 235 |
from openstates.utils import LXMLMixin
import datetime as dt
import re
from billy.scrape.events import Event, EventScraper
import lxml.html
import pytz
mi_events = "http://legislature.mi.gov/doc.aspx?CommitteeMeetings"
class MIEventScraper(EventScraper, LXMLMixin):
    """Scrapes Michigan legislature committee meetings into billy Events."""
    jurisdiction = 'mi'
    _tz = pytz.timezone('US/Eastern')

    def scrape_event_page(self, url, chamber, session):
        """Parse one committee-meeting detail page and save it as an Event."""
        page = self.lxmlize(url)
        trs = page.xpath("//table[@id='frg_committeemeeting_MeetingTable']/tr")
        # Collect the key/value rows (Date, Time, Location, Committee, ...).
        metainf = {}
        for tr in trs:
            tds = tr.xpath(".//td")
            if len(tds) <= 1:
                continue
            key = tds[0].text_content().strip()
            val = tds[1]
            metainf[key] = {
                "txt": val.text_content().strip(),
                "obj": val
            }

        if metainf == {}:
            return

        # Wednesday, 5/16/2012 3:00 pm
        datetime = "%s %s" % (
            metainf['Date']['txt'],
            metainf['Time']['txt'].replace(".", "")
        )
        if "Cancelled" in datetime:
            return

        # Normalize the site's inconsistent meridiem spellings.
        translate = {
            "noon": " PM",
            "a.m.": " AM",
            "am": " AM",  # This is due to a nasty line they had.
            "a.m": "AM"  # another weird one
        }

        for t in translate:
            if t in datetime:
                datetime = datetime.replace(t, translate[t])

        # Collapse whitespace runs; raw string fixes the invalid escape
        # sequence the original "\s+" literal produced.
        datetime = re.sub(r"\s+", " ", datetime)

        # Strip free-text suffixes that would break strptime below.
        for text_to_remove in [
            "or after committees are given leave",
            "or later immediately after committees are given leave",
            "or later after committees are given leave by the House to meet",
            "**Please note time**"
        ]:
            datetime = datetime.split(text_to_remove)[0].strip()

        datetime = datetime.replace('p.m.', 'pm')
        datetime = datetime.replace('Noon', "pm")
        datetime = dt.datetime.strptime(datetime, "%A, %m/%d/%Y %I:%M %p")
        where = metainf['Location']['txt']
        title = metainf['Committee']['txt']  # XXX: Find a better title

        if chamber == 'other':
            chamber = 'joint'

        event = Event(session, datetime, 'committee:meeting',
                      title, location=where)
        event.add_source(url)
        event.add_source(mi_events)

        chair_name = metainf['Chair']['txt'].strip()
        if chair_name:
            event.add_participant('chair', chair_name, 'legislator', chamber=chamber)
        else:
            self.warning("No chair found for event '{}'".format(title))

        event.add_participant('host', metainf['Committee']['txt'],
                              'committee',
                              chamber=chamber)

        agenda = metainf['Agenda']['obj']
        agendas = agenda.text_content().split("\r")

        # NOTE(review): "//a[...]" searches the whole document, not just the
        # agenda cell; ".//a[...]" may be what was intended — confirm before
        # changing.
        related_bills = agenda.xpath("//a[contains(@href, 'getObject')]")
        for bill in related_bills:
            description = agenda
            for a in agendas:
                if bill.text_content() in a:
                    description = a

            event.add_related_bill(
                bill.text_content(),
                description=description,
                type='consideration'
            )
        self.save_event(event)

    def scrape(self, chamber, session):
        """Scrape every committee-meeting link for the given chamber."""
        page = self.lxmlize(mi_events)
        xpaths = {
            "lower": "//span[@id='frg_committeemeetings_HouseMeetingsList']",
            "upper": "//span[@id='frg_committeemeetings_SenateMeetingsList']",
            # BUG FIX: the original used "@is=..." (a nonexistent attribute),
            # so joint-meeting pages never matched and were silently skipped.
            "other": "//span[@id='frg_committeemeetings_JointMeetingsList']"
        }
        span = page.xpath(xpaths[chamber])
        if len(span) > 0:
            span = span[0]
        else:
            return
        events = span.xpath(".//a[contains(@href, 'committeemeeting')]")
        for event in events:
            url = event.attrib['href']
            if 'doPostBack' in url:
                continue
            self.scrape_event_page(url, chamber, session)
| 4,045 | 1,230 |
class Solution:
    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Reverse the bits of a 32-bit unsigned integer."""
        result = 0
        for shift in range(31, -1, -1):
            if n == 0:
                # Remaining high bits of the reversal are all zero.
                break
            result |= (n & 1) << shift  # move the current lowest bit into place
            n >>= 1
        return result
if "__main__" == __name__:
    # Smoke test: 43261596 reversed bitwise (32-bit) prints 964176192.
    solution = Solution()
    res = solution.reverseBits(43261596)
    print(res)
| 418 | 172 |
"""Some preloads of database content."""
# Role rows seeded at startup (ids are stable and referenced below).
roles = [
    {"id": 1, "name": "administrator"},
    {"id": 2, "name": "contributor"},
    {"id": 3, "name": "staff"},
    {"id": 4, "name": "parent"},
    {"id": 5, "name": "caretaker"},
    {"id": 6, "name": "student"},
]
tables = [{"model": "Role", "data": roles}]

# Responsibility rows.
responsibilities = [
    {"id": 1, "name": "manager"},
    {"id": 2, "name": "user"},
    {"id": 3, "name": "journalist"},
]
tables.append({"model": "Responsibility", "data": responsibilities})

# (responsibility_id, role_id) pairs for the many-to-many relation.
responsibility_role = [
    (1, 1),
    (1, 3),
    (2, 1),
    (2, 2),
    (2, 3),
    (2, 4),
    (2, 5),
    (2, 6),
    (3, 2),
    (3, 3),
    (3, 6),
]
sets = [
    {
        "parent": "Responsibility",
        "rel": "roles",
        "child": "Role",
        "data": responsibility_role,
    }
]
| 1,018 | 442 |
#
# MIT License
#
# Copyright (c) 2020 Andrew Robinson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections.abc
import json
import os
import tkinter as tk
class SettingsWrapper(object):
    '''
    Wraps settings sub-sections with standard interface

    Lookups work via attribute access, indexing, or get(); nested mappings
    are returned wrapped so the chain keeps working at every depth.
    '''

    def __init__(self, root=None, settings={}):
        '''Remember the top-level wrapper and the backing mapping.'''
        self._root = self if root is None else root
        self.settings = settings

    def settings_root(self):
        '''Return the top level settings object.'''
        return self._root

    def __getattr__(self, key):
        '''Attribute mapper (i.e. Object.key) — delegates to get().'''
        return self.get(key)

    def __getitem__(self, key):
        '''Index mapper (i.e. Object['key']) — delegates to get().'''
        return self.get(key)

    def get(self, key, default=None):
        '''
        Get the specified key

        With no default the lookup raises KeyError on a missing key; mapping
        values are wrapped so nested access keeps the same root.
        '''
        value = self.settings[key] if default is None else self.settings.get(key, default)
        if isinstance(value, collections.abc.Mapping):
            return SettingsWrapper(self._root, value)
        return value
## end class SettingsWrapper() ##
class Settings(SettingsWrapper):
    '''
    Object for storing settings including writing-to/reading-from file

    Every setting is held in a tkinter variable so UI widgets can bind to it
    directly; file contents are lazily loaded on the first get().
    '''
    def __init__(self, filename=None):
        '''
        Constructor
        @param filename: string, name of file to read-from/write-to

        NOTE(review): tk.StringVar/IntVar need a tkinter root to exist —
        constructing Settings before tk.Tk() will fail; confirm call order.
        '''
        SettingsWrapper.__init__(self, self, {
            "colour": {
                "background": tk.StringVar(value="black"),
                "finished": tk.StringVar(value="red"),
                "primary": tk.StringVar(value="green"),
                "warning": tk.StringVar(value="orange"),
            },
            "display": {
                "background": tk.StringVar(value="black"),
                "foreground": tk.StringVar(value="green"),
                "title": tk.StringVar(value=""),
                "time": tk.StringVar(value=""),
                "speaker": tk.StringVar(value=""),
            },
            "initial": {
                "duration": tk.IntVar(value=540),
                "title": tk.StringVar(value="My Webinar"),
                "time": tk.StringVar(value=""),
                "speaker": tk.StringVar(value="Welcome"),
                "warning": tk.IntVar(value=60),
                "width": tk.IntVar(value=1280),
                "height": tk.IntVar(value=720),
            },
            "next": {
                "duration": tk.IntVar(value=540),
                "speaker": tk.StringVar(value="John Smith"),
                "title": tk.StringVar(value="My Webinar"),
                "warning": tk.IntVar(value=60),
            },
            "finished_text": tk.StringVar(value="STOP")
        })
        self._filename = filename
        # Deferred-load flag: the file is read on first get(), not here.
        self._settings_loaded = False

    def get(self, key, default=None):
        '''
        Get a specific setting

        Lazily loads the settings file on first access.
        '''
        if not self._settings_loaded and self._filename is not None:
            self._settings_loaded = True
            self.read()
        return SettingsWrapper.get(self, key, default=default)

    def read(self, from_filename=None):
        '''
        Read settings from file

        @param from_filename: optional replacement filename to read from
        @raise FileNotFoundError: when no filename was given or it is missing
        '''
        # change filename if required
        if from_filename is not None:
            self._filename = from_filename

        # check filename is ok
        if self._filename is None:
            raise FileNotFoundError(f"Settings filename not provided")
        if not os.path.exists(self._filename):
            raise FileNotFoundError(f"Settings file does not exist ({self._filename})")

        # open file to read (empty files are silently skipped)
        if os.path.getsize(self._filename) > 0:
            with open(self._filename, 'r') as f:
                content = json.load(f)
                # 'display' holds transient runtime values and is not restored.
                self._read_setting_values(self.settings, content, ('display',))

    def write(self, as_filename=None):
        '''
        Writes settings to file, optionally as an alternate filename
        '''
        # change filename if required
        if as_filename is not None:
            self._filename = as_filename

        # check filename is ok
        if self._filename is None:
            raise FileNotFoundError(f"Settings filename not provided")

        # open file to write
        with open(self._filename, 'w+') as f:
            # convert to basic python types ('display' is excluded again)
            content = self._dump_setting_values(self.settings, ('display',))
            # write to file in json format
            json.dump(content, f, indent=2, sort_keys=True)

    def _read_setting_values(self, settings, values, ignore_keys=()):
        '''
        Tree-recursively load setting values

        NOTE(review): when `values` lacks a nested section, values.get(key)
        yields None and the recursive call will fail on .get — this assumes
        the file contains every section; confirm.
        '''
        for key,var in settings.items():
            if key not in ignore_keys:
                if isinstance(var, collections.abc.Mapping):
                    self._read_setting_values(var, values.get(key))
                elif values is not None:
                    var.set(values.get(key))

    def _dump_setting_values(self, settings, ignore_keys=()):
        '''
        Tree-recursively dump settings into regular python types
        '''
        result = {}
        for key,var in settings.items():
            if key not in ignore_keys:
                if isinstance(var, collections.abc.Mapping):
                    result[key] = self._dump_setting_values(var)
                else:
                    result[key] = var.get()
        return result
## end class Settings() ##
| 6,689 | 1,840 |
from os import getcwd
def rdfile():
    """Read PM2.5 readings from 'pm25.txt' (one per line) and print the max,
    min, average, and the count of readings above 70."""
    data = list()
    # Show which working directory the data file is read from.
    print(getcwd())
    with open("pm25.txt", 'r') as fd:
        for line in fd:
            try:
                data.append(float(line.replace('\n', '')))
            except ValueError:
                # Skip non-numeric lines. BUG FIX: the original bare `except`
                # also swallowed unrelated errors (KeyboardInterrupt etc.).
                pass
    if not data:
        # Guard against ZeroDivisionError / ValueError on an empty file.
        print('No valid data found in pm25.txt')
        return
    print('Max =', max(data))
    print('Min =', min(data))
    print('Avg =', (sum(data)/len(data)))
    # Count readings above the 70 threshold (was a manual index loop).
    data_bigger_than_70 = sum(1 for value in data if value > 70)
    print('The amount of data which is bigger than 70 :', data_bigger_than_70)
def main():
    """Script entry point: print PM2.5 statistics."""
    rdfile()

if __name__ == '__main__':
    main()
| 651 | 267 |
####################################################################################################
# File: plotter.py
# Purpose: Plotting module.
#
# Author: Luke Poeppel
#
# Location: Kent, 2021
####################################################################################################
import logging
import os
import json
import sys
import subprocess
import shutil
import tempfile
from .style import (
write_index_html,
write_treant_css,
write_node_css
)
# Absolute directory of this module; the TreantJS template assets live in the
# ./templates directory next to it.
here = os.path.abspath(os.path.dirname(__file__))
treant_templates = here + "/templates"
def get_logger(name, print_to_console=True, write_to_file=None):
    """
    A simple helper for logging.

    Handlers are attached only on the first call for a given name, so
    repeated calls return the same logger without duplicating output.
    """
    logger = logging.getLogger(name)
    if logger.handlers:
        return logger
    logger.setLevel(logging.INFO)
    if write_to_file is not None:
        logger.addHandler(logging.FileHandler(write_to_file))
    if print_to_console:
        logger.addHandler(logging.StreamHandler(sys.stdout))
    return logger
def prepare_arrow(dict_in):
    """
    Raphaël's arrow formatting is a bit more involved. This parsing is done here.

    Joins the arrow end/width/length settings into the "end-width-length"
    string Raphaël expects for its arrow-end attribute.
    """
    arrow_end = dict_in["arrow_end"]
    arrow_width = dict_in["arrow_width"]
    arrow_length = dict_in["arrow_length"]
    return "-".join([arrow_end, arrow_width, arrow_length])


def _prepare_chart_config(tree):
    """Write the TreantJS chart configuration for `tree` to chart_config.json
    in the current working directory."""
    chart_config = dict()
    chart_config["container"] = "#treeplotter"
    connector_style_pre = tree.connector_style.style()
    connector_style = dict()
    for key, val in connector_style_pre.items():
        if "_" in key:
            # Python snake_case keys become CSS-style hyphenated keys.
            new_key = "-".join(key.split("_"))
            if key == "arrow_end":
                connector_style[new_key] = prepare_arrow(dict_in=connector_style_pre)
            elif key in {"arrow_length", "arrow_width"}:
                # Already folded into the "arrow-end" string above.
                # BUG FIX: the original wrote {"arrow_length" or "arrow_width"},
                # which is the single-element set {"arrow_length"}, so
                # "arrow-width" leaked into the style dict as its own key.
                continue
            else:
                connector_style[new_key] = val
        else:
            connector_style[key] = val
    connector_type_dict = {
        "type": tree.connector_type,
        "style": connector_style
    }
    chart_config["connectors"] = connector_type_dict
    chart_config["rootOrientation"] = tree.orientation.upper()
    HTML_dict_obj = {
        "HTMLclass": "treeNode"
    }
    chart_config["node"] = HTML_dict_obj
    # NOTE(review): json.dump of an already-json.dumps'ed string double-encodes
    # the config (the file holds a JSON *string*, not an object). Kept as-is
    # because the JS side may rely on it — confirm against parse_data.js.
    dumped = json.dumps(chart_config)
    with open("chart_config.json", "w") as chart_config_file:
        json.dump(dumped, chart_config_file)
    return
def _prepare_docs_and_screenshot(
    path,
    tree,
    serialized_tree,
    background_color,
    webshot,
    logger
):
    """Write the serialized tree plus HTML/CSS/JS assets into `path`, bundle
    the JS with browserify, and optionally screenshot the page via R webshot.

    NOTE(review): tree.json is written to the *current* working directory,
    not `path` — callers chdir into the target first; confirm that invariant.
    """
    with open("tree.json", "w") as json_file:
        json.dump(serialized_tree, json_file)

    logger.info("-> Copying templates...")
    for this_file in os.listdir(treant_templates):
        shutil.copyfile(treant_templates + "/" + this_file, path + "/" + this_file)

    logger.info("-> Writing index.html...")
    write_index_html(
        background_color=background_color,
        path=path + "/" + "index.html"
    )

    logger.info("-> Writing Treant CSS file...")
    write_treant_css(path=path + "/" + "Treant.css")

    logger.info("-> Writing Node CSS file...")
    # Node styling is pulled from the tree's node_style settings.
    write_node_css(
        background_color=tree.node_style.background_color,
        font_family=tree.node_style.font_family,
        font_size=tree.node_style.font_size,
        text_align=tree.node_style.text_align,
        width=tree.node_style.width,
        border=tree.node_style.border,
        padding=tree.node_style.padding,
        border_radius=tree.node_style.border_radius,
        path=path + "/" + "treeplotter.css"
    )

    logger.info("-> Running browserify...")
    parse_data_file = "/".join([path, "parse_data.js"])
    browserified_file = "/".join([path, "bundle.js"])
    # Requires the browserify CLI on PATH; the exit status is not checked.
    os.system(f"browserify {parse_data_file} -o {browserified_file}")

    if webshot:
        logger.info("-> Creating webshot with R...")
        # Requires R with the 'webshot' package; screenshots the .Treant node.
        webshot_string = "webshot::webshot(url={0}, file={1}, zoom=3, selector={2})".format(
            "'" + path + "/index.html" + "'",
            "'" + path + "/shot.png" + "'",
            "'" + ".Treant" + "'"
        )
        subprocess.call(
            [
                f"""Rscript -e "{webshot_string}" """
            ],
            shell=True
        )
def create_tree_diagram(
    tree,
    background_color="#868DEE",
    save_path=None,
    webshot=False,
    verbose=False
):
    """
    This function creates a visualization of a given `tree.Tree` by wrapping the TreantJS library.
    Parameters
    ----------
    tree : tree.Tree
        A `tree.Tree` object.
    background_color : str
        Color (given in Hex) of the desired background color of the visualization.
    save_path : str
        Optional path to the directory in which all the relevant files will be saved. Default is `None`.
    webshot : bool
        Whether or not to invoke Rs webshot library to create a high-res screenshot of the tree.
        Default is `False`.
    verbose : bool
        Whether to print logging messages in the plotting process. Useful for debugging.
    Returns
    -------
    str
        `save_path` when given, otherwise the path of a temporary file holding
        the screenshot copied out of the temporary build directory.
    """
    # Same logger either way; verbosity only toggles console output.
    logger = get_logger(name=__name__, print_to_console=verbose)
    serialized = tree.serialize(for_treant=True)
    logger.info("-> Creating directory and writing tree to JSON...")
    if save_path:
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        os.chdir(save_path)
        _prepare_chart_config(tree=tree)
        _prepare_docs_and_screenshot(
            path=save_path,
            tree=tree,
            serialized_tree=serialized,
            background_color=background_color,
            webshot=webshot,
            logger=logger
        )
        logger.info("Done โ")
        return save_path
    else:
        with tempfile.TemporaryDirectory() as tmpdir:
            os.chdir(tmpdir)
            # BUG FIX: chart_config.json was never written in the temp-dir
            # branch, unlike the save_path branch above.
            _prepare_chart_config(tree=tree)
            # BUG FIX: this call previously passed only (tmpdir,
            # serialized_tree=..., logger=...) and raised TypeError because
            # tree, background_color and webshot are required parameters.
            _prepare_docs_and_screenshot(
                path=tmpdir,
                tree=tree,
                serialized_tree=serialized,
                background_color=background_color,
                webshot=webshot,
                logger=logger
            )
            logger.info("Done โ")
            with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
                # NOTE(review): shot.png only exists when webshot actually ran;
                # with webshot=False this copy raises FileNotFoundError
                # (behavior preserved from the original).
                shutil.copyfile(tmpdir + "/shot.png", tmpfile.name)
                return tmpfile.name
from flask import current_app, Flask, redirect, url_for
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
import config
from flask_login import LoginManager
# Single module-level Flask application (no app factory).
app = Flask(__name__)
app.config.from_object(config) # load config.py
# NOTE(review): hard-coded secret key; should come from config/environment in production.
app.secret_key = 'super duper mega secret key'
login_manager = LoginManager() # Login manager for the application
login_manager.init_app(app) # apply login manager
login_manager.login_view = 'home' # set the default redirect page
db = SQLAlchemy(app)
# These imports stay at the bottom, presumably because views/models import
# `app`/`db` from here and moving them up would create a circular import — confirm.
from app import views
from app import models
from app.views import *
| 638 | 181 |
#!/usr/bin/env python
import logging
from threading import Thread, Event
from Queue import Queue, Empty as QueueEmpty
import codecs
class FileWriter(Thread):
    """
    This thread reads log lines from a queue and writes these to a file passed as log_file_path.
    The log line queue is filled with new log lines by calling put().
    Thread quits if stop() is called. If an exception is raised when writing to file, this thread
    will callback to its owner to stop operation.
    Setting the read_queue_timer for reading the queue determine the responsiveness to stop call
    and is optional.

    NOTE(review): this module targets Python 2 (`Queue` module, setDaemon()).
    """
    # Default queue-poll timeout in seconds; bounds how long stop() may wait.
    READ_NEW_LOGLINE_TMO = 0.5
    def __init__(self,
                 log_file_path,
                 callback,
                 read_queue_timeout=READ_NEW_LOGLINE_TMO,
                 encoding='utf8'):
        """
        :param log_file_path: The file path to write log lines to.
        :param callback: A callback method for calling back to application when error occurs.
        :param read_queue_timeout: The read timeout to avoid blocking.
        :param encoding: The encoding format when writing to file.
        """
        super(FileWriter, self).__init__(name = self.__class__.__name__)
        self._read_queue_timeout = read_queue_timeout
        self._log_file_path = log_file_path
        self._encoding = encoding
        # Daemon thread: does not keep the interpreter alive on exit.
        self.setDaemon(True)
        self._log_line_queue = Queue()
        self._stop = Event()
        self.logger = logging.getLogger(self.__class__.__name__)
        self._callback = callback
        # Replace unencodable characters with \xNN escapes instead of failing.
        codecs.register_error('backslashreplace', self.backslash_replace)
    def __repr__(self):
        return '{}({!r}, {!r}, {!r}, {!r})'.format(self.__class__.__name__,
                                                   self.getName(),
                                                   self._read_queue_timeout,
                                                   self._log_file_path,
                                                   self._encoding)
    def put(self, text_line):
        """
        Puts a text line to the text queue to be written to the specified file for logging.
        :param text_line: A text line to be written to file.
        """
        self._log_line_queue.put(text_line) # Queue calls are thread-safe
    def stop(self):
        """
        Stop writing to a log file from the internal queue and commit suicide.
        """
        # Set the event first so run() can observe it on its next poll.
        self._stop.set()
        self.logger.debug('writer stopped')
        if self.is_alive():
            self.join()
        self.logger.debug('writer has terminated')
    @staticmethod
    def backslash_replace(error):
        """
        An error handler to be called if escape characters are read from the log line queue input.
        """
        return u"".join([u"\\x{:x}".format(ord(error.object[i]))
                         for i in range(error.start, error.end)]), error.end
    def run(self):
        """Main loop: drain the queue into the file until stop() is called."""
        try:
            with codecs.open(self._log_file_path, 'wb', self._encoding) as log_file:
                self.logger.info('start writing to file.')
                while not self._stop.is_set():
                    try: # timeout avoids blocking in order to be responsive to stop calls
                        log_line = self._log_line_queue.get(timeout=self._read_queue_timeout)
                    except QueueEmpty:
                        continue
                    else:
                        # NOTE(review): task_done() is called before the write;
                        # a failing write loses this line.
                        self._log_line_queue.task_done()
                        log_file.write(log_line + '\n')
        except Exception as e: # this may occur if codecs fails somehow
            self.logger.error('Error: {}'.format(e))
            # Report the failure to the owning application so it can stop.
            self._callback('{} has stopped running. error: {}'.format(self.getName(), str(e))) # call back error
        self.logger.info('stopped writing to file.')
| 3,814 | 1,006 |
# Dados dos nรบmeros, mostrar la suma, resta, divisiรณn y multiplicaciรณn de ambos.
# (Given two numbers, print their sum, difference, product and quotient.)
a = int(input("Dime el primer nรบmero: "))
b = int(input("Dime el segundo nรบmero: "))
print("La suma de los dos nรบmeros es: ", a + b)
print("La resta de los dos nรบmeros es: ", a - b)
print("La multiplicaciรณn de los dos nรบmeros es: ", a * b)
# Robustness fix: the original raised ZeroDivisionError when b == 0.
if b != 0:
    print("La divisiรณn de los dos nรบmeros es: ", a / b)
else:
    print("La divisiรณn de los dos nรบmeros es: indefinida (division entre cero)")
| 361 | 129 |
from core.advbase import *
def module():
    # Entry point used by the simulator to discover the adventurer class.
    return Pipple
class Pipple(Adv):
    # Simulator configuration assembled on the class body.
    conf = {}
    conf['slots.a'] = ['Proper_Maintenance', 'Brothers_in_Arms']
    conf['slots.frostbite.a'] = conf['slots.a']
    conf['slots.d'] = 'Gaibhne_and_Creidhne'
    # Action priority list interpreted by the simulator's ACL parser.
    conf['acl'] = """
        `dragon(c3-s-end),x=5
        `s2, (x=5 or s) and self.energy()<5
        `s4
        `s3, cancel
        `s1, x>2
    """
    conf['coabs'] = ['Tiki', 'Renee', 'Tobias']
    conf['share'] = ['Summer_Luca','Patia']
if __name__ == '__main__':
    from core.simulate import test_with_argv
    # NOTE(review): `sys` is not imported here; presumably re-exported by the
    # star import from core.advbase — confirm.
    test_with_argv(None, *sys.argv)
class AutoRepresentation:
    """Value holder for an auto-representation configuration threshold."""

    def __init__(self, adopt_configuration_threshold=None):
        """Store *adopt_configuration_threshold* (may be None) on the instance."""
        self.adoptConfigurationThreshold = adopt_configuration_threshold
| 159 | 40 |
import unittest
import logging
import nzmath.factor.methods as mthd
# Pick a logger name matching however the suite was invoked.
# NOTE(review): logging.getLogger never raises for a string argument, so the
# fallbacks below are effectively dead code and the bare excepts would hide
# any real error; kept as-is to preserve behavior.
try:
    _log = logging.getLogger('test.testFactorMethod')
except:
    try:
        _log = logging.getLogger('nzmath.test.testFactorMethod')
    except:
        _log = logging.getLogger('testFactorMethod')
_log.setLevel(logging.INFO)
class FactorTest (unittest.TestCase):
    """Exercise each top-level factoring entry point of nzmath.factor.methods."""
    def testTrialDivision(self):
        self.assertEqual([(2,2),(3,1),(5,1)], mthd.trialDivision(60))
        self.assertEqual([(2,7)], mthd.trialDivision(128))
        self.assertEqual([(409,1),(491,1)], mthd.trialDivision(200819))
        self.assertEqual([(701,1),(1487,1)], mthd.trialDivision(1042387))
    def testRho(self):
        self.assertEqual([(2,2),(3,1),(5,1)], mthd.rhomethod(60))
        self.assertEqual([(2,7)], mthd.rhomethod(128))
        self.assertEqual([(409,1),(491,1)], mthd.rhomethod(200819))
        self.assertEqual([(701,1),(1487,1)], mthd.rhomethod(1042387))
        self.assertEqual([(17,2), (19,1)], mthd.rhomethod(17**2 * 19))
    def testPMinusOneMethod(self):
        self.assertEqual([(19,1), (101,1)], mthd.pmom(1919))
        # 6133 = prime.prime(800) > sqrt(B) & 800 == 0 mod 20
        p = 4 * 6133 + 1
        self.assertEqual([(p,1), (154858631,1)], mthd.pmom(p*154858631))
    def testMPQS(self):
        p = 4 * 6133 + 1
        result = mthd.mpqs(p*154858631)
        self.assertEqual([(p,1), (154858631,1)], result)
    def testEllipticCurveMethod(self):
        #self.assertEqual([(19,1), (101,1)], mthd.ecm(1919))
        # 6133 = prime.prime(800) > sqrt(B) & 800 == 0 mod 20
        p = 4 * 6133 + 1
        self.assertEqual([(p,1), (154858631,1)], mthd.ecm(p*154858631))
    def testFactor(self):
        # default method
        p = 4 * 6133 + 1
        result = mthd.factor(p*154858631)
        self.assertEqual([(p,1), (154858631,1)], result)
    def testFactorSpecifyMethod(self):
        # single-letter and full-name method aliases select the same algorithm
        self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='t'))
        self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='trial'))
        self.assertEqual([(19,1), (101,1)], mthd.factor(1919, method='p'))
        self.assertEqual([(19,1), (101,1)], mthd.factor(1919, method='pmom'))
        p = 4 * 6133 + 1
        self.assertEqual([(p,1), (154858631,1)], mthd.factor(p*154858631, 'm'))
        self.assertEqual([(p,1), (154858631,1)], mthd.factor(p*154858631, 'e'))
        self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='r'))
    def testVerbosity(self):
        # default method; only checks that both verbosity modes run to completion
        p = 4 * 6133 + 1
        _log.info("silent:")
        result = mthd.mpqs(p*154858631, verbose=False)
        _log.info("verbose:")
        result = mthd.mpqs(p*154858631, verbose=True)
class TrialDivisionTest (unittest.TestCase):
    """Tests for TrialDivision's 'tracker' return type."""
    def testTrialDivisionTracker(self):
        tdm = mthd.TrialDivision()
        factorization_of_49 = tdm.factor(49, return_type='tracker')
        self.assertTrue(isinstance(factorization_of_49, mthd.util.FactoringInteger))
        self.assertTrue(7 in factorization_of_49.primality)
        # fails to factor completely when the supplied iterator is too short
        factorization_of_10201 = tdm.factor(10201,
                                            return_type='tracker',
                                            iterator=iter(list(range(3, 100, 2))))
        self.assertTrue(10201 in factorization_of_10201.primality) # not factored
        self.assertFalse(factorization_of_10201.primality[10201]) # not a prime
def suite(suffix="Test"):
    """Collect all test classes in this module whose names end with *suffix*.

    :param suffix: name suffix identifying TestCase classes (default "Test").
    :return: a unittest.TestSuite with the matching classes' test methods.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    all_names = globals()
    for name in all_names:
        if name.endswith(suffix):
            # unittest.makeSuite was deprecated since Python 3.2 and removed
            # in 3.13; TestLoader.loadTestsFromTestCase is the supported
            # equivalent (it picks up methods prefixed "test" by default).
            suite.addTest(loader.loadTestsFromTestCase(all_names[name]))
    return suite
if __name__ == '__main__':
    # Install a default handler so the module-level logger output is visible.
    logging.basicConfig()
    runner = unittest.TextTestRunner()
    runner.run(suite())
| 3,799 | 1,653 |
import collections
class Solution01:
    """Reverse word order using built-in string/list APIs."""

    def reverseWords(self, s: str) -> str:
        """Return the words of *s* in reverse order, separated by single spaces."""
        words = s.split()
        words.reverse()
        return ' '.join(words)
class Solution02:
    """
    Hand-rolled implementation: strip/collapse spaces, reverse the whole
    character list, then reverse each word back (two-pointer technique).
    """
    def trim_space(self, s: str) -> list:
        """Return s as a char list with outer spaces removed and inner runs collapsed."""
        left, right = 0, len(s) - 1
        # skip leading and trailing spaces
        # NOTE(review): assumes s contains at least one non-space character,
        # otherwise these scans run past the ends of the string.
        while s[left] == ' ':
            left += 1
        while s[right] == ' ':
            right -= 1
        # collapse interior runs of spaces to a single space
        output = []
        while left <= right:
            if s[left] != ' ':
                output.append(s[left])
            elif output[-1] != ' ':
                output.append(s[left])
            left += 1
        return output
    def reverse(self, l: list, left: int, right: int) -> None:
        """Reverse l[left:right+1] in place."""
        while left < right:
            l[left], l[right] = l[right], l[left]
            left, right = left + 1, right - 1
    def reverse_each_word(self, l: list) -> None:
        """Reverse every space-delimited word of l in place."""
        n = len(l)
        start = end = 0
        while start < n:
            # find the end of the current word
            while (end < n) and (l[end] != ' '):
                end += 1
            # reverse this word in place
            self.reverse(l, start, end - 1)
            # advance start and end past the separating space
            start = end + 1
            end += 1
    def reverseWords(self, s: str) -> str:
        # remove redundant spaces
        l = self.trim_space(s)
        # reverse the whole character list
        self.reverse(l, 0, len(l) - 1)
        # reverse each word back to normal orientation
        self.reverse_each_word(l)
        return ''.join(l)
class Solution03:
    """
    Deque-based approach: each completed word is pushed onto the left end,
    so joining the deque yields the words in reverse order.
    """
    def reverseWords(self, s: str) -> str:
        # skip the spaces at both ends of the string
        # NOTE(review): assumes s contains at least one non-space character.
        left, right = 0, len(s) - 1
        while s[left] == ' ':
            left += 1
        while s[right] == ' ':
            right -= 1
        # push each completed word onto the front of the deque
        dq, word = collections.deque(), []
        while left <= right:
            if s[left] != ' ':
                word.append(s[left])
            elif s[left] == ' ' and word:
                dq.appendleft(''.join(word))
                word = []
            left += 1
        dq.appendleft(''.join(word)) # flush the final word
        return ' '.join(dq)
| 2,057 | 780 |
# Copyright (c) 2021, Serum Studio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from setuptools import setup, find_packages
from hype import __license__, __author__, __version__, __desc__
BASE_URL = "https://github.com/serumstudio/hype"
def get_long_description():
    """Read and return the project README for use as the package long description."""
    with open("README.md", encoding="utf-8") as readme_file:
        return readme_file.read()
# Optional dependency groups, installable as e.g. `pip install hypecli[color]`.
extras_require = {
    'color': ['colorama==0.4.4'], #: Color support
    'standard': ['colorama==0.4.4'], #: Standard installation with color support
    'progress': ['alive-progress==1.6.2'], #: With progressbar support
    'table': ['tabulate==0.8.9'] #: With Table support
}
setup(
    name = "hypecli",
    author = __author__,
    description =__desc__,
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    project_urls={
        'Documentation': 'https://hype.serum.studio',
        'Source': BASE_URL,
        'Tracker': "%s/issues" % (BASE_URL)
    },
    version = __version__,
    license = __license__,
    url=BASE_URL,
    keywords='cli,commandline-toolkit,command line toolkit,python cli,python 3'.split(','),
    # keep test packages out of the distribution
    packages = [p for p in find_packages() if 'test' not in p],
    extras_require = extras_require,
    classifiers = [
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development",
        "Typing :: Typed",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License"
    ],
)
import sklearn
from sklearn import datasets


def generate(n_samples, features, centers=10, cluster_std=1.0, random_state=None):
    """Generate isotropic Gaussian blobs for clustering experiments.

    Generalized from the original, which hard-coded 10 centers, unit
    standard deviation and a nondeterministic seed; the defaults preserve
    that behavior exactly.

    Parameters
    ----------
    n_samples : int
        Total number of points to generate.
    features : sequence
        Feature names; ``len(features)`` determines the number of features.
    centers : int, optional
        Number of blob centers (default 10).
    cluster_std : float, optional
        Standard deviation of each cluster (default 1.0).
    random_state : int or None, optional
        Seed for reproducible output; None (default) is nondeterministic.

    Returns
    -------
    tuple
        ``((X, y), features)`` where ``(X, y)`` is the result of
        ``sklearn.datasets.make_blobs``.
    """
    n_features = len(features)
    data = sklearn.datasets.make_blobs(
        n_samples=n_samples,
        n_features=n_features,
        centers=centers,
        cluster_std=cluster_std,
        center_box=(-10.0, 10.0),
        shuffle=True,
        random_state=random_state,
    )
    return (data, features)
if __name__ == '__main__':
    # Importing this module configures GPU memory growth as a side effect.
    import Recommender_System.utility.gpu_memory_growth
    from Recommender_System.algorithm.KGCN.tool import construct_undirected_kg, get_adj_list
    from Recommender_System.algorithm.KGCN.model import KGCN_model
    from Recommender_System.algorithm.KGCN.train import train
    from Recommender_System.data import kg_loader, data_process
    import tensorflow as tf
    # Load MovieLens-1M plus its knowledge graph; negative_sample_threshold=4
    # presumably sets the rating cutoff for positive samples — confirm in data_process.
    n_user, n_item, n_entity, n_relation, train_data, test_data, kg, topk_data = data_process.pack_kg(kg_loader.ml1m_kg1m, negative_sample_threshold=4)
    # Sample a fixed-size neighborhood per entity from the undirected KG.
    neighbor_size = 16
    adj_entity, adj_relation = get_adj_list(construct_undirected_kg(kg), n_entity, neighbor_size)
    model = KGCN_model(n_user, n_entity, n_relation, adj_entity, adj_relation, neighbor_size, iter_size=1, dim=16, l2=1e-7, aggregator='sum')
    train(model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.Adam(0.01), epochs=10, batch=512)
| 947 | 356 |
import os
import setuptools
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
# Load the version string from __version__.py without importing the package.
about = {}
with open(os.path.join(CUR_DIR, "data_spec_validator", "__version__.py"), "r") as f:
    exec(f.read(), about)
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="data-spec-validator",
    version=about['__version__'],
    author="CJHwong, falldog, HardCoreLewis, kilikkuo, xeonchen",
    author_email="pypi@hardcoretech.co",
    description="Simple validation tool for API",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/hardcoretech/data-spec-validator",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"data_spec_validator": "data_spec_validator"},
    packages=setuptools.find_packages(),
    install_requires=[
        "python-dateutil",
    ],
    # Django/DRF are only needed for the decorator integration.
    extras_require={
        'decorator': ['Django', 'djangorestframework'],
    },
    python_requires=">=3.6",
)
| 1,133 | 390 |
"""
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
from datetime import timedelta
import functools
import logging
import itertools
import json
from persistent_queue import PersistentQueue
import requests
import voluptuous as vol
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_UNAVAILABLE,
STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "prisms_influxdb"
DEPENDENCIES = []
# Connection and batching defaults.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_BATCH_TIME = 10
DEFAULT_CHUNK_SIZE = 500
REQUIREMENTS = ['influxdb==3.0.0', 'python-persistent-queue==1.3.0']
# Configuration keys accepted under the component's domain.
CONF_HOST = 'host'
CONF_DEPLOYMENT_ID = 'home_id'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_BLACKLIST = 'blacklist'
CONF_WHITELIST = 'whitelist'
CONF_TAGS = 'tags'
CONF_BATCH_TIME = 'batch_time'
CONF_CHUNK_SIZE = 'chunk_size'
# Voluptuous schema validating configuration.yaml input for this component.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_DEPLOYMENT_ID): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
        vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
        vol.Optional(CONF_USERNAME, default=None): vol.Any(cv.string, None),
        vol.Optional(CONF_PASSWORD, default=None): vol.Any(cv.string, None),
        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
        vol.Optional(CONF_VERIFY_SSL,
                     default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Optional(CONF_BLACKLIST, default=[]): cv.ensure_list,
        vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
        vol.Optional(CONF_TAGS, default={}): dict,
        vol.Optional(CONF_BATCH_TIME,
                     default=DEFAULT_BATCH_TIME): cv.positive_int,
        vol.Optional(CONF_CHUNK_SIZE,
                     default=DEFAULT_CHUNK_SIZE): cv.positive_int,
    })
}, extra=vol.ALLOW_EXTRA)
# Module-level flag cleared on Home Assistant shutdown to stop the batch loop.
RUNNING = True
# pylint: disable=too-many-locals
def setup(hass, config):
    """Setup the InfluxDB component."""
    from influxdb import InfluxDBClient
    conf = config[DOMAIN]
    blacklist = conf[CONF_BLACKLIST]
    whitelist = conf[CONF_WHITELIST]
    tags = conf[CONF_TAGS]
    batch_time = conf[CONF_BATCH_TIME]
    chunk_size = conf[CONF_CHUNK_SIZE]
    # Every point is tagged with the deployment/home id.
    tags[CONF_DEPLOYMENT_ID] = conf[CONF_DEPLOYMENT_ID]
    influx = InfluxDBClient(host=conf[CONF_HOST],
                            port=conf[CONF_PORT],
                            username=conf[CONF_USERNAME],
                            password=conf[CONF_PASSWORD],
                            database=conf[CONF_DB_NAME],
                            ssl=conf[CONF_SSL],
                            verify_ssl=conf[CONF_VERIFY_SSL])
    # Queue of not-yet-uploaded events, stored under the config directory —
    # presumably so pending events survive a restart; confirm PersistentQueue semantics.
    events = PersistentQueue('prisms_influxdb.queue',
                             path=hass.config.config_dir)
    # Pre-bind hass and tags so callers only need to pass the event.
    render = functools.partial(get_json_body, hass=hass, tags=tags)
    def influx_event_listener(event):
        """Listen for new messages on the bus and sends them to Influx."""
        state = event.data.get('new_state')
        if state is None or state.state in (
                STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
                state.entity_id in blacklist:
            # The state is unknown or it is on the black list
            return
        if len(whitelist) > 0 and state.entity_id not in whitelist:
            # It is not on the white list
            return
        if batch_time == 0:
            # Since batch time hasn't been set, just upload as soon as an event
            # occurs
            try:
                _LOGGER.debug("Since batch_time == 0, writing data")
                json_body = render(event)
                write_data(influx, json_body)
            except ValueError as e:
                _LOGGER.error("Something is wrong with the provided template: %s", e)
                return
        else:
            # Convert object to pickle-able. Since State.attributes uses
            # MappingProxyType, it is not pickle-able
            if event.data['new_state']:
                event.data['new_state'].attributes = dict(event.data['new_state'].attributes)
            if event.data['old_state']:
                event.data['old_state'].attributes = dict(event.data['old_state'].attributes)
            # Store event to be uploaded later
            events.push(event)
            _LOGGER.debug("Saving event for later (%s)", len(events))
    hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
    if batch_time != 0:
        # Set up task to upload batch data
        _LOGGER.debug("Starting task to upload batch data")
        write_batch_data(hass, events, influx, render, batch_time, chunk_size)
    def stop(event):
        # Clear the module-level flag so the batch loop stops rescheduling.
        global RUNNING
        _LOGGER.info("Shutting down PRISMS InfluxDB component")
        RUNNING = False
    # Register to know when home assistant is stopping
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
    return True
def write_data(influx, json_body):
    """Write *json_body* points to InfluxDB.

    Returns True on success, False on any failure; every failure is logged
    and swallowed so the caller can keep the events queued and retry later.
    """
    from influxdb import exceptions
    try:
        influx.write_points(json_body)
    except requests.exceptions.RequestException as e:
        _LOGGER.exception('Unable to connect to database: %s', e)
        return False
    except exceptions.InfluxDBClientError as e:
        # Client errors carry a JSON body with an 'error' message.
        error = json.loads(e.content)['error']
        _LOGGER.exception('Error saving event "%s": %s', str(json_body)[:1000], error)
        return False
    except exceptions.InfluxDBServerError as e:
        _LOGGER.exception('Error saving event "%s" to InfluxDB: %s', str(json_body)[:1000], e)
        return False
    except Exception: # Catch anything else
        _LOGGER.exception("An unknown exception happened while uploading data")
        return False
    return True
def write_batch_data(hass, events, influx, render, batch_time, chunk_size):
    """Upload queued events to InfluxDB in chunks, rescheduling itself every *batch_time* seconds."""
    def next_time():
        return dt_util.now() + timedelta(seconds=batch_time)
    def action(now):
        # Drain the queue in chunk_size pieces; peek-then-delete so events
        # are only removed after a confirmed successful write.
        while RUNNING:
            _LOGGER.info("Trying to upload data")
            if len(events) == 0:
                # No more events to upload
                _LOGGER.info("Nothing to upload")
                break
            events_chunk = events.peek(chunk_size)
            size = len(events_chunk)
            _LOGGER.info("Uploading chunk of size %s (%s)", size, len(events))
            try:
                # Render and write events
                data = itertools.chain(*[render(event) for event in events_chunk])
                result = write_data(influx, list(data))
            except ValueError as e:
                _LOGGER.error("Something is wrong with the provided template: %s", e)
                return
            if result:
                # Chunk got saved so remove events
                _LOGGER.info("Data was uploaded successfully so deleting data")
                events.delete(size)
                if size < chunk_size:
                    _LOGGER.debug("Finished uploading data because size <"
                                  " chunk_size: %s < %s (%s)", size,
                                  chunk_size, len(events))
                    break
            else:
                # Unable to write data so give up for now
                _LOGGER.error("Error while trying to upload data. Trying again later")
                break
        if RUNNING:
            _LOGGER.debug("Flushing all events that were deleted")
            events.flush()
            # Schedule again
            # NOTE(review): `next` shadows the builtin within this function.
            next = next_time()
            _LOGGER.info("Scheduling to upload data at %s", next)
            track_point_in_time(hass, action, next)
    # Start the action
    next = next_time()
    _LOGGER.info("Scheduling to upload data at %s", next)
    track_point_in_time(hass, action, next)
def get_json_body(event, hass, tags):
    """Build the InfluxDB point payload for a state-changed *event*.

    States convertible to a number go under the "value" field, anything
    else under "state". The measurement name is the unit of measurement
    when present, otherwise the entity id. The extra *tags* are merged in
    last, so they win on key collisions. (*hass* is accepted for interface
    compatibility but unused.)
    """
    state = event.data.get('new_state')
    try:
        field_value = float(state_helper.state_as_number(state))
        field_name = "value"
    except ValueError:
        field_value = state.state
        field_name = "state"

    measurement = state.attributes.get('unit_of_measurement')
    if measurement in (None, ''):
        measurement = state.entity_id

    point_tags = {
        'domain': state.domain,
        'entity_id': state.object_id,
    }
    point_tags.update(tags)

    return [
        {
            'measurement': measurement,
            'tags': point_tags,
            'time': state.attributes.get('sample_time', event.time_fired),
            'fields': {field_name: field_value},
        }
    ]
| 9,204 | 2,804 |
import numpy as np
import os
from ..helpers import save_json
def callback_weather_fn(sensor_data, custom_args):
    """Return the current weather serialized to a JSON-able dict.

    ``sensor_data`` is ignored: the sensor only triggers this callback at the
    correct timestamp; the actual data comes from ``custom_args['weather']``.

    Cleanup: the original also read ``custom_args['world']`` into an unused
    local, which made 'world' a hidden required key; that lookup is removed.
    """
    weather = custom_args['weather']
    return weather.to_json()
def save_weather_data_fn(outdir, data_dict, frame_id):
    """Persist *data_dict* as ``<outdir>/<frame_id>.json``."""
    filename = "{}.json".format(frame_id)
    save_json(os.path.join(outdir, filename), data_dict)
| 587 | 186 |
# PlatformIO/SCons extra script: `Import` is injected by the build system at
# execution time (not a normal Python import), exposing the `env` object.
Import("env")
# original Makefile builds into dapboot.bin/elf, let's do the same
env.Replace(PROGNAME="dapboot")
| 114 | 41 |
#drawing a line using DDA
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
def init():
    """Set the GL clear color and a 2D orthographic projection of [-100, 100]^2."""
    # NOTE(review): the green component 2.0 is outside [0, 1]; OpenGL clamps
    # color components, so the effective clear color is white — confirm intent.
    glClearColor(1.0,2.0,1.0,1.0)
    gluOrtho2D(-100.0,100.0,-100.0,100.0)
# Line endpoint coordinates; filled in by main() from user input.
x1 = 0
x2 = 0
y1 = 0
y2 = 0
def plotpoints():
    """Rasterize the line from (x1, y1) to (x2, y2) with the DDA algorithm."""
    global x1, y1, x2, y2
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glPointSize(5.0)
    dx = x2 - x1
    dy = y2 - y1
    # Step along the dominant axis so each per-step increment is <= 1 unit.
    steps = max(abs(dx), abs(dy))
    glBegin(GL_POINTS)
    if steps == 0:
        # BUG FIX: coincident endpoints made the original raise
        # ZeroDivisionError computing dx/steps; just plot the single point.
        glVertex2f(x1, y1)
    else:
        ix = dx / steps
        iy = dy / steps
        x = float(x1)
        y = float(y1)
        glVertex2f(x, y)
        # BUG FIX: the original looped steps + 1 times *after* plotting the
        # start point, overshooting the endpoint by one increment; looping
        # exactly `steps` times ends precisely at (x2, y2).
        for _ in range(steps):
            x += ix
            y += iy
            glVertex2f(x, y)
    glEnd()
    glFlush()
def main():
    """Create the GLUT window, read endpoints from stdin, and start the loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
    glutInitWindowSize(500,500)
    glutInitWindowPosition(50,50)
    glutCreateWindow("DDA")
    # Endpoints are stored in module-level globals read by plotpoints().
    global x1, x2, y1, y2
    print("Enter coordinates of end-points")
    x1 = int(input("X-coordinate of 1st point : "))
    y1 = int(input("Y-coordinate of 1st point : "))
    x2 = int(input("X-coordinate of 2nd point : "))
    y2 = int(input("Y-coordinate of 2nd point : "))
    glutDisplayFunc(plotpoints)
    init()
    # Enters the GLUT event loop; does not return.
    glutMainLoop()
if __name__ == "__main__":
    main()
import pathlib
import numpy as np
import xarray as xr
def to_netcdf(
    grid, path, include="*", exclude=None, time=None, format="NETCDF4", mode="w"
):
    """Write a landlab grid to a netcdf file.

    Write the data and grid information for *grid* to *path* as NetCDF.
    If *mode* is ``"a"``, append the data to an existing file, if it exists.
    Otherwise, clobber an existing file.

    Parameters
    ----------
    grid : ModelGrid
        Landlab grid object that holds a grid and field values.
    path : str
        Path to which to save this grid.
    include : str or iterable of str, optional
        A list of unix-style glob patterns of field names to include. Fully
        qualified field names that match any of these patterns will be
        written to the output file. A fully qualified field name is one that
        that has a prefix that indicates what grid element is defined on
        (e.g. "at_node:topographic__elevation"). The default is to include
        all fields.
    exclude : str or iterable of str, optional
        Like the *include* keyword but, instead, fields matching these
        patterns will be excluded from the output file.
    time : float, optional
        Time value recorded with this snapshot. When appending without a
        time, NaN is used as a placeholder and replaced by the previous
        time plus one.
    format : {'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', 'NETCDF4'}
        Format of output netcdf file.
    mode : {"w", "a"}, optional
        Write ("w") or append ("a") mode. If mode="w", any existing file at
        this location will be overwritten. If mode="a", existing variables
        will be overwritten.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.io.netcdf import to_netcdf
    Create a uniform rectilinear grid with four rows and 3 columns, and add
    some data fields to it.
    >>> rmg = RasterModelGrid((4, 3))
    >>> rmg.at_node["topographic__elevation"] = np.arange(12.0)
    >>> rmg.at_node["uplift_rate"] = 2.0 * np.arange(12.0)
    Create a temporary directory to write the netcdf file into.
    >>> import tempfile, os
    >>> temp_dir = tempfile.mkdtemp()
    >>> os.chdir(temp_dir)
    Write the grid to a netcdf3 file but only include the *uplift_rate*
    data in the file.
    >>> to_netcdf(
    ...     rmg, "test.nc", format="NETCDF3_64BIT", include="at_node:uplift_rate"
    ... )
    Read the file back in and check its contents.
    >>> from scipy.io import netcdf
    >>> fp = netcdf.netcdf_file('test.nc', 'r')
    >>> 'at_node:uplift_rate' in fp.variables
    True
    >>> 'at_node:topographic__elevation' in fp.variables
    False
    >>> fp.variables['at_node:uplift_rate'][:].flatten()
    array([  0.,   2.,   4.,   6.,   8.,  10.,  12.,  14.,  16.,  18.,  20.,
            22.])
    >>> rmg.at_cell["air__temperature"] = np.arange(2.0)
    >>> to_netcdf(
    ...     rmg,
    ...     "test-cell.nc",
    ...     format="NETCDF3_64BIT",
    ...     include="at_cell:*",
    ...     # names="air__temperature", at="cell",
    ... )
    """
    path = pathlib.Path(path)
    # Appending to a file that does not exist degrades to a plain write.
    if not path.is_file():
        mode = "w"
    if time is None and mode == "a":
        # NaN placeholder; patched below once the previous time is known.
        time = np.nan
    this_dataset = grid.as_dataset(include=include, exclude=exclude, time=time)
    if format != "NETCDF4":
        # Cast status codes to plain int for the non-NETCDF4 formats —
        # presumably because they cannot store the original dtype; confirm.
        this_dataset["status_at_node"] = (
            ("node",),
            this_dataset["status_at_node"].values.astype(dtype=int),
        )
    if mode == "a":
        with xr.open_dataset(path) as that_dataset:
            if "time" not in that_dataset.dims:
                _add_time_dimension_to_dataset(that_dataset, time=np.nan)
            # Back-fill variables that are new in this snapshot with NaN.
            new_vars = set(this_dataset.variables) - set(that_dataset.variables)
            for var in new_vars:
                that_dataset[var] = (
                    this_dataset[var].dims,
                    np.full_like(this_dataset[var].values, np.nan),
                )
            # at_layer variables are not carried across snapshots.
            for var in list(that_dataset.variables):
                if var.startswith("at_layer"):
                    del that_dataset[var]
            this_dataset = xr.concat(
                [that_dataset, this_dataset], dim="time", data_vars="minimal"
            )
            if np.isnan(this_dataset["time"][-1]):
                # Replace the NaN placeholder with previous time + 1.
                this_dataset["time"].values[-1] = this_dataset["time"][-2] + 1.0
    this_dataset.to_netcdf(path, format=format, mode="w", unlimited_dims=("time",))
def _add_time_dimension_to_dataset(dataset, time=0.0):
    """Prepend a "time" dimension to every at_* variable except at_layer*."""
    for name in list(dataset.variables):
        if name.startswith("at_") and not name.startswith("at_layer"):
            dataset[name] = (
                ("time",) + dataset[name].dims,
                dataset[name].values[None],
            )
    dataset["time"] = (("time",), [time])
| 4,884 | 1,570 |
from flask import request, url_for, g
from flask_api import FlaskAPI, status, exceptions
from flask_sqlalchemy import SQLAlchemy
import arrow
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_cors import CORS
# Module-level application and database handles.
app = FlaskAPI(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}}) # allow any origin on every route
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tickets.db'
db = SQLAlchemy(app)
class Ticket(db.Model):
    # NOTE(review): `primary_key=False` on id leaves `ticketcode` as the sole
    # primary key; confirm the id column is intentional at all.
    id = db.Column(db.Integer, primary_key=False)
    ticketcode = db.Column(db.String(80), unique=True, primary_key=True)
    arrived = db.Column(db.Boolean, unique=False)
    arrived_at = db.Column(db.String(80), unique=False)
    def __init__(self, ticketcode):
        # New tickets start as not-arrived with an empty timestamp.
        self.ticketcode = ticketcode
        self.arrived = False
        self.arrived_at = ""
    def __repr__(self):
        return '<User %r>' % self.ticketcode
def arrived():
    """Return the number of tickets that have checked in.

    Uses Query.count() so the database counts rows, instead of materializing
    every matching Ticket object just to take len() of the list.
    """
    return Ticket.query.filter_by(arrived=True).count()
def not_in_list_repr(ticketcode, arrived):
    """Response payload for a ticket code that is not in the database."""
    payload = {
        'ticketcode': str(ticketcode),
        'status': 'nil',
        'count_arrived': arrived,
    }
    return payload
def arrived_repr(ticketcode, ticket, arrived):
    """Response payload for a ticket that just checked in ('arr')."""
    return {
        'ticketcode' : str(ticketcode),
        'status' : 'arr',
        # arrived_at holds a unix timestamp parsed by arrow.
        'timestamp' : arrow.get(ticket.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ'),
        'human_timestamp' : arrow.get(ticket.arrived_at).humanize(),
        'count_arrived' : arrived
    }
def not_arrived_repr(ticketcode, ticket, arrived):
    """Response payload for a known ticket that has not checked in ('n_arr').

    *ticket* is accepted for signature parity with the other repr helpers
    but is not used.
    """
    payload = {
        'ticketcode': str(ticketcode),
        'status': 'n_arr',
        'count_arrived': arrived,
    }
    return payload
def already_arrived_repr(ticketcode, ticket, arrived):
    """Response payload for a ticket that was already checked in ('a_arr')."""
    return {
        'ticketcode' : str(ticketcode),
        'status' : 'a_arr',
        'timestamp' : arrow.get(ticket.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ'),
        'human_timestamp' : arrow.get(ticket.arrived_at).humanize(),
        'count_arrived' : arrived
    }
def all_tickets_repr(arrived, not_arrived):
    """Summary payload: counts plus per-ticket arrival timestamps."""
    count_arrived = len(arrived)
    count_not_arrived = len(not_arrived)
    # NOTE(review): computed but unused while the 'not_arrived' field below
    # remains commented out.
    not_arrived_ticketcodes = []
    for i in not_arrived:
        not_arrived_ticketcodes.append(i.ticketcode)
    arrived_ticketcodes = {}
    for i in arrived:
        arrived_ticketcodes[i.ticketcode] = arrow.get(i.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ')
    return {
        'count_arrived' : count_arrived,
        'count_not_arrived' : count_not_arrived,
        'arrived' : arrived_ticketcodes,
        #'not_arrived' : not_arrived_ticketcodes
    }
@app.route("/", methods=['GET'])
def all_tickets():
    """Return the summary of all tickets (counts and arrival timestamps)."""
    if request.method == 'GET':
        arrived = Ticket.query.filter_by(arrived=True).all()
        not_arrived = Ticket.query.filter_by(arrived=False).all()
        return all_tickets_repr(arrived, not_arrived)
@app.route("/<int:ticketcode>/", methods=['GET', 'POST']) #, 'PUT', 'DELETE']
def arrive(ticketcode):
    """GET: report a ticket's status. POST: mark it as arrived.

    NOTE(review): the <int:> converter drops leading zeros before the str()
    conversion below — confirm ticket codes are never zero-padded.
    """
    if request.method =='GET':
        ticketcode = str(ticketcode)
        ticket = Ticket.query.get(ticketcode)
        if ticket != None:
            if ticket.arrived == True:
                return already_arrived_repr(ticketcode, ticket, arrived())
            else:
                return not_arrived_repr(ticketcode, ticket, arrived())
        else:
            return not_in_list_repr(ticketcode, arrived())
    if request.method =='POST':
        ticketcode = str(ticketcode)
        ticket = Ticket.query.get(ticketcode)
        if ticket != None:
            if ticket.arrived == True:
                return already_arrived_repr(ticketcode, ticket, arrived())
            else:
                ticket.arrived = True
                # NOTE(review): in arrow >= 1.0 `.timestamp` is a method, so this
                # would store a bound method; older arrow exposed it as a
                # property — confirm the pinned arrow version.
                ticket.arrived_at = arrow.utcnow().timestamp
                db.session.commit()
                return arrived_repr(ticketcode, ticket, arrived())
        else:
            return not_in_list_repr(ticketcode, arrived())
#if request.method == 'PUT':
# Script entry point: mount the Flask-Admin UI and run the dev server.
if __name__ == "__main__":
    admin = Admin(app)
    admin.add_view(ModelView(Ticket, db.session))
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the network -- development use only.
    app.run(debug=True, host="0.0.0.0")
| 4,053 | 1,418 |
"""
Your object will be instantiated and called as such:
ty = ToyFactory()
toy = ty.getToy(type)
toy.talk()
"""
class Toy:
    """Abstract base class for factory-produced toys."""

    def talk(self):
        """Emit the toy's sound; concrete subclasses must override."""
        # Grammar fix in the error message ("should have implemented").
        raise NotImplementedError('This method should have been implemented.')
class Dog(Toy):
    """Toy dog."""
    def talk(self):
        # Prints the dog sound to stdout.
        print('Wow')
class Cat(Toy):
    """Toy cat."""
    def talk(self):
        # Prints the cat sound to stdout.
        print('Meow')
class ToyFactory:
    """Factory producing Toy subclasses by type name."""

    # @param {string} shapeType a string
    # @return {Toy} Get object of the type
    def getToy(self, type):
        """Return a new toy for the given type name, or None if unknown."""
        # Table lookup instead of an if/elif chain; built inside the method
        # so the class can be defined before Dog/Cat exist.
        known = {'Dog': Dog, 'Cat': Cat}
        toy_cls = known.get(type)
        return toy_cls() if toy_cls is not None else None
from enum import IntEnum, unique
@unique
class Dimension(IntEnum):
    """Named axis indices for a 5-axis tensor layout (values are positions)."""
    InChan = 0   # input channels
    OutChan = 1  # output channels
    Height = 2
    Width = 3
    Batch = 4
| 143 | 57 |
from autohandshake.src.Pages.Page import Page
from autohandshake.src.HandshakeBrowser import HandshakeBrowser
from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, \
InvalidEmailError, InvalidPasswordError
import re
class LoginPage(Page):
    """
    The old Handshake login page
    """

    def __init__(self, url: str, browser: HandshakeBrowser):
        """
        :param url: the url of the school's Handshake login page
        :type url: str
        :param browser: a HandshakeBrowser that has not logged in yet
        :type browser: HandshakeBrowser
        """
        super().__init__(url, browser)
        self.validate_url_school()

    def _wait_until_page_is_loaded(self):
        """Wait until the page has finished loading.

        Return immediately since there are no complex load conditions
        """
        return

    def _validate_url(self, url):
        """
        Ensure that the given URL is a valid login URL

        :param url: the url to validate
        :type url: str
        :raises InvalidURLError: if the url is not a Handshake login url
        """
        # Explicit None check instead of calling .group(0) on the match and
        # catching the AttributeError raised when the pattern fails.
        if re.match(r'^https://[a-zA-Z]+\.joinhandshake\.com(/login)?$', url) is None:
            raise InvalidURLError()

    def validate_url_school(self):
        """Ensure that the current URL leads to a valid school's login page"""
        if self._browser.element_exists_by_xpath('//span[text()=\'Please '
                                                 'select your school to '
                                                 'sign in.\']'):
            raise InvalidURLError("The school specified in the URL is not valid")

    @Page.require_user_type(None)  # requires the user to be logged out, i.e. no user type
    def login(self, email, password):
        """
        Log into Handshake using the given credentials

        :param email: the username with which to log in
        :type email: str
        :param password: the password with which to log in
        :type password: str
        """
        self._enter_email_address(email)
        self._enter_password(password)
        # Wait for an element that only appears once the login succeeded.
        self._browser.wait_until_element_exists_by_xpath('//div[@class="Select-placeholder"]')

    def _enter_email_address(self, email):
        """Enter email address into input field.

        :raises InvalidEmailError: if Handshake reports no account for *email*
        """
        EMAIL_INPUT_XPATH = "//input[@name='identifier']"
        try:  # if you get the old login page
            EMAIL_LINK_XPATH = "//div[@class='sign-with-email-address']//a"
            self._browser.click_element_by_xpath(EMAIL_LINK_XPATH)
            self._browser.send_text_to_element_by_xpath(EMAIL_INPUT_XPATH, email)
            EMAIL_BTN_XPATH = "//div[@class='login-main__email-box']/button"
            self._browser.click_element_by_xpath(EMAIL_BTN_XPATH)
            if self._browser.element_exists_by_xpath("//div[text()='Please enter a valid email address']"):
                raise InvalidEmailError(f"No account found for email {email}")
        except NoSuchElementError:  # if you get the new login page
            EMAIL_LINK_XPATH = "//div[@class='sign-in-with-email-address']//a"
            self._browser.click_element_by_xpath(EMAIL_LINK_XPATH)
            self._browser.send_text_to_element_by_xpath(EMAIL_INPUT_XPATH, email)
            EMAIL_BTN_XPATH = "//div[@class='actions']/button"
            self._browser.click_element_by_xpath(EMAIL_BTN_XPATH)
            if 'known_error_message_present=true' in self._browser.current_url:
                raise InvalidEmailError(f"No account found for email {email}")

    def _enter_password(self, password):
        """Enter password into input field after having successfully entered email.

        :raises InvalidPasswordError: if Handshake rejects the password
        """
        try:  # if you get the old login page
            self._browser.click_element_by_xpath("//a[@class='no-underline']")
            self._browser.send_text_to_element_by_xpath("//input[@name='password']", password)
            self._browser.click_element_by_xpath("//input[@name='commit']")
            if self._browser.element_exists_by_xpath("//div[contains(text(), "
                                                     "'You entered an invalid password.')]"):
                raise InvalidPasswordError("Invalid password")
        except NoSuchElementError:  # if you get the new login page
            self._browser.click_element_by_xpath("//a[@class='alternate-login-link']")
            self._browser.send_text_to_element_by_xpath("//input[@name='password']", password)
            self._browser.click_element_by_xpath("//button")
            if self._browser.element_exists_by_xpath("//div[contains(text(), "
                                                     "'You entered an invalid password.')]"):
                raise InvalidPasswordError("Invalid password")
| 4,746 | 1,329 |
from omegaconf import DictConfig
import pytorch_lightning as pl
import numpy as np
import torch
import wandb
from simsiam.models import get_resnet
from simsiam.metrics import get_accuracy
from simsiam.optimizer import get_optimizer, get_scheduler
class SupervisedEngine(pl.LightningModule):
    """Supervised-classification LightningModule wrapping a ResNet backbone."""

    def __init__(self, config: DictConfig):
        super().__init__()
        self.config = config
        self.resnet = get_resnet(num_classes=config.dataset.n_classes)
        self.loss_func = torch.nn.CrossEntropyLoss()
        # Predict/test reuse the validation logic (returns features + labels).
        self.predict_step = self.validation_step
        self.test_step = self.validation_step

    @property
    def lr(self):
        # Current learning rate from the first param group of the optimizer.
        result = self.optimizers().param_groups[0]['lr']
        return result

    def forward(self, x):
        x = self.resnet(x)
        return x

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self.resnet(x)
        # y[:, 0]: labels appear to carry a trailing dimension -- TODO confirm
        # the dataset's label shape.
        loss = self.loss_func(y_hat, y[:, 0])
        self.log('lr', self.lr, prog_bar=True, on_step=True, logger=False) # For progress bar
        return {'loss': loss}

    def training_epoch_end(self, outputs: list):
        # Mean training loss over all steps of the epoch.
        loss = torch.stack([x['loss'] for x in outputs]).mean()
        metrics = {'train/loss': loss}
        metrics.update({f'train/lr': self.lr})
        self.logger.experiment.log(metrics, step=self.current_epoch) # For wandb
        self.log_dict(metrics, prog_bar=False, on_epoch=True, on_step=False, logger=False, sync_dist=True) # For callbacks

    def validation_step(self, batch, batch_idx):
        x, y = batch
        f = self.resnet(x)
        # Move to CPU so epoch_end can concatenate without holding GPU memory.
        return f.detach().cpu(), y.detach().cpu()

    def validation_epoch_end(self, outputs: list):
        self.calc_acc(outputs, 'valid')

    def calc_acc(self, outputs, data_split):
        """Compute top-1/3/5 accuracy from the (logits, labels) pairs collected
        by validation_step and log them under '{data_split}/supervised_*'."""
        y_hat, y = map(torch.cat, zip(*outputs))
        # argsort descending: column 0 holds the top prediction per sample.
        y_hat, y = np.argsort(y_hat.numpy(), axis=1)[:, ::-1], y.numpy()
        acc = dict()
        _acc = get_accuracy(y_hat, y, (1, 3, 5))
        for k, v in _acc.items():
            acc[f'{data_split}/supervised_{k}'] = v
        self.logger.experiment.log(acc, step=self.current_epoch) # For wandb
        self.log_dict(acc, prog_bar=False, on_epoch=True, on_step=False, logger=False,
                      sync_dist=True) # For callbacks

    def configure_optimizers(self):
        training_config = self.config.training
        optimizer = get_optimizer(training_config.optimizer, self.resnet.parameters())
        if training_config.scheduler is not None:
            scheduler = get_scheduler(training_config.scheduler, optimizer)
            return [optimizer], [scheduler]
        else:
            return optimizer
| 2,649 | 882 |
from flask import render_template, redirect, url_for, flash
from flask_login import current_user, login_user, logout_user
from sqlalchemy import func
import stripe
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, ResetPasswordRequestForm, ResetPasswordForm
from app.models.user import User
from app.auth.email import send_password_reset_email, send_verification_email
# Login route
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session."""
    # Already signed in -> straight to the profile page.
    if current_user.is_authenticated:
        return redirect(url_for('profile.profile'))

    form = LoginForm()
    if not form.validate_on_submit():
        # Plain GET (or failed validation): render the login form.
        return render_template('auth/login.html', title='Log in', form=form)

    # Case-insensitive email lookup.
    account = User.query.filter(func.lower(User.email) == func.lower(form.email.data)).first()
    if account is None or not account.check_password(form.password.data):
        # Unknown account or wrong password: same message for both.
        flash('Invalid email or password', 'warning')
        return redirect(url_for('auth.login'))

    login_user(account, remember=form.remember_me.data)
    return redirect(url_for('profile.profile'))
# Logout route
@bp.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('main.index'))
# Register
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Create a local user plus a linked Stripe customer and email a
    verification link."""
    # If the user is logged in, skip the register page and go to the profile page
    if current_user.is_authenticated:
        return redirect(url_for('profile.profile'))
    form = RegistrationForm()
    # If the form was submitted and is validated
    if form.validate_on_submit():
        # Create user (username mirrors the email address)
        u = User()
        u.username = form.email.data
        u.email = form.email.data
        u.firstname = form.firstname.data
        u.lastname = form.lastname.data
        u.address = form.address.data
        u.city = form.city.data
        u.state = form.state.data
        u.zip_code = form.zip_code.data
        u.phone_number = form.phone_number.data
        u.set_password(form.password.data)
        u.verified = False
        u.active_sub = False
        u.subscription = ""
        # Create stripe user. NOTE(review): if this call raises, no user row
        # is written (db.session.add happens afterwards) -- confirm intended.
        customer = stripe.Customer.create(email=u.email)
        u.stripe_id = customer.id
        # Save user to DB
        db.session.add(u)
        db.session.commit()
        # Send verification email to user
        send_verification_email(u)
        # Send user a success message
        flash('Success! Check your email for a verification link', 'success')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', title='Register', form=form)
@bp.route('/resend-verify', methods=['GET'])
def resend_verify():
    """Resend the account-verification email to the current user."""
    # NOTE(review): there is no login-required guard here; for an anonymous
    # visitor current_user may lack a 'verified' attribute -- confirm this
    # route is only reachable from authenticated pages.
    if not current_user.verified:
        send_verification_email(current_user)
    # Flashed even when the account is already verified and nothing was sent.
    flash("Check your email for the verification link", "info")
    return redirect(url_for('profile.profile'))
# Verify Email
@bp.route('/verify-email/<token>', methods=['GET', 'POST'])
def verify_email(token):
    """Mark the account referenced by *token* as verified.

    :param token: signed token from the verification email
    """
    # Already-verified, logged-in users skip straight to their profile.
    if current_user.is_authenticated and current_user.verified:
        return redirect(url_for('profile.profile'))
    u = User.verify_email(token)
    # Invalid or expired token: silently redirect home
    if not u:
        return redirect(url_for('main.index'))
    # Verify the user
    u.verified = True
    db.session.commit()
    flash('Success! Your account is now verified', 'success')
    if current_user.is_authenticated:
        return redirect(url_for('profile.profile'))
    return redirect(url_for('main.index'))
# Reset Password Request
@bp.route('/reset-password', methods=['GET', 'POST'])
def reset_password_request():
    """Ask for an email address and send a password-reset link to it."""
    # Logged-in users skip the reset-password flow entirely.
    if current_user.is_authenticated:
        return redirect(url_for('profile.profile'))

    form = ResetPasswordRequestForm()
    if not form.validate_on_submit():
        # Initial GET or invalid submission: (re)render the form.
        return render_template('auth/reset-password-request.html', title='Reset Password', form=form)

    account = User.query.filter(func.lower(User.email) == func.lower(form.email.data)).first()
    if account:
        send_password_reset_email(account)
    # Same flash whether or not the account exists.
    flash('Success! Check your email for instructions on the next steps for resetting your password', 'success')
    return redirect(url_for('auth.login'))
# Reset Password with token
@bp.route('/reset-password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Set a new password for the account referenced by *token*.

    :param token: signed token from the password-reset email
    """
    # If the user is logged in, skip the reset password page
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    u = User.verify_reset_password(token)
    # Invalid or expired token: redirect home
    if not u:
        return redirect(url_for('main.index'))
    form = ResetPasswordForm()
    # If the form was submitted and is validated, set the new password
    if form.validate_on_submit():
        u.set_password(form.password.data)
        db.session.commit()
        flash('Success! Your password has been reset.', 'success')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset-password.html', title='Reset Password', form=form)
| 5,568 | 1,601 |
from nilearn import plotting
from IPython import display
def display_input(nifti, i, fig, ax, cut_coords=None):
    """Plot *nifti* on *ax* as an axial ("z") slice and refresh the notebook cell.

    :param nifti: image accepted by nilearn.plotting.plot_img
    :param i: index shown in the plot title ("In {i}")
    :param fig: matplotlib figure re-displayed in the notebook output
    :param ax: axes to draw into
    :param cut_coords: z coordinates of the cuts; defaults to [-9]
    """
    if cut_coords is None:
        cut_coords = [-9]
    plotting.plot_img(nifti, title="In {}".format(i), axes=ax,
                      display_mode="z", cut_coords=cut_coords)
    # Replace the cell's previous output with the updated figure.
    display.clear_output(wait=True)
    display.display(fig)
def display_output(nifti, i, fig, ax, cut_coords=None):
    """Plot *nifti* on *ax* with an "Out {i}" title.

    Unlike display_input this clears the axes first and does NOT re-display
    the figure -- presumably the caller pairs it with display_input on the
    same figure; confirm with callers.
    """
    if cut_coords is None:
        cut_coords = [-9]
    ax.clear()
    plotting.plot_img(nifti, title="Out {}".format(i), axes=ax,
                      display_mode="z", cut_coords=cut_coords)
| 607 | 221 |
#!/usr/bin/env python3
import config
import rsys_api
import secrets
import json
import logging
import sys
def main():
    """Demo: authenticate against the Responsys API and dump several fetch
    results to JSON files in the working directory; progress goes to demo.log."""
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S",
        filename="demo.log"
    )
    logging.info("BEGIN: {script_name}".format(script_name=sys.argv[0]))
    # Start new session.
    session = rsys_api.Session(
        config.LOGIN_BASE_URL, config.BASE_RESOURCE_PATH
    )
    # Authenticate. Credentials come from the project-local `secrets` module
    # (which shadows the stdlib `secrets` module of the same name).
    session.password_login(
        secrets.USER_NAME, secrets.PASSWORD
    )
    # Output throttle limits into a json file.
    with open("throttle_limits.json", 'w') as output_file:
        json.dump(
            obj=session.get_throttle_limits(),
            indent=4,
            fp=output_file
        )
    # Output information on the next batch of campaigns into a json file.
    with open("next_fetched_campaign_batch.json", 'w') as output_file:
        json.dump(
            obj=session.fetch_next_campaign_batch(),
            indent=4,
            fp=output_file
        )
    # Output information on a batch of campaigns into a json file.
    with open("fetched_campaign_batch.json", 'w') as output_file:
        json.dump(
            obj=session.fetch_a_campaign_batch(
                limit=200,
                offset=0,
                campaign_type="email"
            ),
            indent=4,
            fp=output_file
        )
    # Output information on the next batch of campaigns into a json file.
    # NOTE(review): re-uses next_fetched_campaign_batch.json and overwrites
    # the dump written above -- confirm losing the first batch is intended.
    with open("next_fetched_campaign_batch.json", 'w') as output_file:
        json.dump(
            obj=session.fetch_next_campaign_batch(),
            indent=4,
            fp=output_file
        )
    # Output information on all running programs into a json file.
    with open("all_fetched_programs.json", 'w') as output_file:
        json.dump(
            obj=session.fetch_all_programs(status="RUNNING"),
            indent=4,
            fp=output_file
        )
    # Output information on all running campaigns into a json file.
    with open("all_fetched_campaigns.json", 'w') as output_file:
        json.dump(
            obj=session.fetch_all_campaigns(campaign_type="email"),
            indent=4,
            fp=output_file
        )
    logging.info("END: {script_name}\n".format(script_name=sys.argv[0]))
# Script entry point.
if __name__ == '__main__':
    main()
| 2,420 | 755 |
from utils import save_params, load_params
from importlib import import_module
from environments.env import Env
def run(algorithm_name, exp_name, env_name, agent_params, train_params, use_ray, use_gpu, is_train,
        num_runs=None, test_run_id=None, test_model_id=None):
    """
    Runner for training or testing DRL algorithms

    :param algorithm_name: one of 'ddpg', 'ddpg_pds', 'td3', 'td3_pds',
        'qprop', 'qprop_pds', 'preplan', 'perfect', 'non_predictive'
    :param exp_name: experiment directory name under experiments/
    :param env_name: environment module name under environments/
    :param agent_params: agent hyper-parameters
    :param train_params: training settings (includes 'num_cpus' used by ray)
    :param use_ray: truthy -> run training/testing in parallel via ray;
        silently falls back to serial mode when ray is not installed
    :param use_gpu: forwarded to the train/test functions
    :param is_train: True for training mode, False for testing mode
    :param num_runs: number of training runs (training mode only)
    :param test_run_id: iterable of run ids to test (testing mode only)
    :param test_model_id: iterable of model ids to test (testing mode only)
    """
    exp_dir = 'experiments/' + exp_name
    if use_ray:
        try:
            import ray
            ray.init(num_cpus=train_params['num_cpus'], num_gpus=1)
        except ImportError:
            ray = None
            use_ray = 0
            print('Ray is not installed. I will run in serial training/testing mode.')
    """
    Import DRL agent and training function according to algorithm_name
    """
    if algorithm_name in ['ddpg', 'ddpg_pds', 'td3', 'td3_pds']:
        # All DDPG/TD3 variants share the ddpg training loop.
        train = import_module('algorithms.ddpg.train').train
        if algorithm_name == 'ddpg':
            Agent = import_module('algorithms.ddpg.agent').DDPGAgent
        elif algorithm_name == 'ddpg_pds':
            Agent = import_module('algorithms.ddpg_pds.agent').PDSDDPGAgent
        elif algorithm_name == 'td3':
            Agent = import_module('algorithms.td3.agent').TD3Agent
        else:
            Agent = import_module('algorithms.td3_pds.agent').PDSTD3Agent
    elif algorithm_name in ['qprop', 'qprop_pds']:
        train = import_module('algorithms.qprop.train').train
        if algorithm_name == 'qprop':
            Agent = import_module('algorithms.qprop.agent').QPropAgent
        else:
            Agent = import_module('algorithms.qprop_pds.agent').PDSQPropAgent
    elif algorithm_name in ['preplan', 'perfect']:
        # NOTE(review): train is None for these algorithms, so training mode
        # would fail -- presumably test-only baselines; confirm.
        train = None
        Agent = import_module('algorithms.preplan.agent').PrePlanAgent
    elif algorithm_name == 'non_predictive':
        train = None
        Agent = import_module('algorithms.non_predictive.agent').NonPredictiveAgent
    else:
        print('Unsupported algorithm')
        return
    if is_train:
        """
        Training
        """
        env_params = import_module('environments.' + env_name).env_params
        # Save all the experiment settings to a json file
        save_params([agent_params, train_params, env_params], exp_dir, 'exp_config')
        # Create environment
        env = Env(env_params)
        if use_ray:
            # Parallel training
            train = ray.remote(train)
            train_op = [train.remote(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
                        for run_id in range(num_runs)]
            ray.get(train_op)
        else:
            # Serial training
            [train(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
             for run_id in range(num_runs)]
    else:
        """
        Testing
        """
        # Get test set path
        test_set_dir = 'data/' + env_name
        # Load agent and env parameters from exp_dir
        env_params = load_params('data/' + env_name, 'env_config')
        if algorithm_name != 'perfect':
            if algorithm_name == 'preplan':
                env_params_train = load_params(exp_dir, 'env_config')
            elif algorithm_name == 'non_predictive':
                env_params_train = env_params
            else:
                agent_params, _, env_params_train = load_params(exp_dir, 'exp_config')
            # Warn (but continue) when train/test environment configs differ.
            if env_params_train != env_params:
                print('Warning: Testing and training env settings do not match!')
        # Create environment
        env = Env(env_params)
        # Import testing function
        test = import_module('algorithms.common.test').test
        if use_ray:
            # Parallel testing over every (run_id, model_id) pair
            test = ray.remote(test)
            test_op = [test.remote(env, Agent, agent_params, exp_dir, run_id, model_id,
                                   test_set_dir=test_set_dir, use_gpu=use_gpu)
                       for run_id in test_run_id for model_id in test_model_id]
            ray.get(test_op)
        else:
            # Serial testing
            [test(env, Agent, agent_params, exp_dir, run_id, model_id,
                  test_set_dir=test_set_dir, use_gpu=use_gpu)
             for run_id in test_run_id for model_id in test_model_id]
| 4,377 | 1,370 |
from decouple import config
from peewee import SqliteDatabase
from playhouse.pool import PooledSqliteExtDatabase, PooledPostgresqlExtDatabase
# db = SqliteDatabase(config('DATABASE_PATH', default='sentiment_analysis.db'))
# Pooled SQLite connection in WAL journal mode; path comes from the
# DATABASE_PATH environment setting (python-decouple).
db = PooledSqliteExtDatabase(
    config('DATABASE_PATH', default='sentiment_analysis.db'),
    pragmas=[('journal_mode', 'wal')],
    max_connections=50,
    stale_timeout=3600,
    check_same_thread=False)
# If PostgreSQL is used as the database instead:
# db = PooledPostgresqlExtDatabase(
#     'database',
#     max_connections=32,
#     stale_timeout=300,  # 5 minutes.
#     host='localhost',
#     user='username',
#     password='password')
| 679 | 229 |
#!/usr/bin/env python
import webapp2
from pkg.controllers.transactionctrl import TransactionCtrl
from pkg.controllers.appctrl import AppCtrl
from pkg.controllers.debug import Debug
# URL routing table for the WSGI application.
app = webapp2.WSGIApplication([
    ('/transaction', TransactionCtrl),           # transaction collection
    ('/transaction/([0-9]+)', TransactionCtrl),  # single transaction by numeric id
    ('/', AppCtrl),
    ('/debug', Debug)
], debug=True)
| 347 | 111 |
import re
from django.conf import settings
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
# URL patterns that may be visited without logging in; compiled from
# settings.LOGIN_EXEMPT_URLS when that setting exists.
EXEMPT_URLS=[]
if hasattr(settings,'LOGIN_EXEMPT_URLS'):
    EXEMPT_URLS+=[re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]
class AuthenticationMiddleware(object):
    """Django middleware redirecting not-logged-in sessions to /login/.

    A request passes through when its path matches one of EXEMPT_URLS or
    when the session's 'logged' flag is the boolean True.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        path = request.path_info.lstrip('/')
        url_is_exempt = any(url.match(path) for url in EXEMPT_URLS)
        # Was `url_is_exempt != True`; any() always yields a bool, so plain
        # negation is equivalent and idiomatic.
        if not url_is_exempt:
            print('checking....')
            # NOTE(review): `is not True` means truthy non-True values (e.g.
            # 1) still redirect -- confirm that strictness is intended.
            if request.session.get('logged', False) is not True:
                print('Redirecting .....')
                return HttpResponseRedirect('/login/')
        response = self.get_response(request)
        return response
| 875 | 304 |
from datetime import datetime, timedelta, timezone
import freezegun
from autifycli.domain.entities.metadata import Metadata
# Japan Standard Time (UTC+9).
JST = timezone(timedelta(hours=+9), "JST")
@freezegun.freeze_time("2021-08-12")
def test_metadata():
    """Metadata keeps its constructor args and its counts default to zero."""
    site = "https://example.com"
    num_links = 0
    num_images = 0
    # Frozen clock makes the fetched timestamp deterministic.
    last_fetch = datetime.now(JST)
    meta = Metadata(site=site, last_fetch=last_fetch)
    assert site == meta.site
    assert num_links == meta.num_links
    assert num_images == meta.num_images
    assert str(last_fetch) == str(meta.last_fetch)
    # The string form should embed the site URL.
    assert site in str(meta)
| 579 | 204 |
# -*- coding: utf-8 -*-
"""
demeter database
name:__load__.py
"""
from demeter.model import *
from demeter.core import * | 128 | 48 |
import abc
class Visitor(metaclass=abc.ABCMeta):
    """Abstract visitor interface for node traversals."""

    @abc.abstractmethod
    def visit_node(self, node, udf, orientation, last_iter):
        """Handle one node; concrete visitors must provide this."""
# TIE Methods
import utils
from dxltieclient import TieClient
from dxltieclient.constants import HashType, ReputationProp, FileProvider, FileEnterpriseAttrib, \
CertProvider, CertEnterpriseAttrib, TrustLevel
# TIE Reputation Average Map
# Maps TIE trust-level values (0-100) to display strings.
tiescoreMap = {0: 'Not Set', 1: 'Known Malicious', 15: 'Most Likely Malicious', 30: 'Might Be Malicious', 50: 'Unknown',
               70: "Might Be Trusted", 85: "Most Likely Trusted", 99: "Known Trusted", 100: "Known Trusted Installer"}
# TIE Provider Map
# Maps numeric provider ids to provider display names.
providerMap = {1: 'GTI', 3: 'Enterprise Reputation', 5: 'ATD', 7: "MWG"}
#TODO: rename this to TieSample
class TieSample(object):
    """Wraps a McAfee TIE file-reputation lookup for a single file hash.

    Reputations are fetched once in __init__ and exposed through the
    reputations_dict property; calcRep() collapses them into a single trust
    level using the cascade Enterprise -> ATD -> MWG -> GTI.

    (Now a new-style class so the property below actually works; the fetched
    data is stored in self._reputations_dict instead of shadowing the
    property with an instance attribute.)
    """

    def __init__(self, options, dxlclient, reputation_lookup_dict=None):
        # Create the McAfee Threat Intelligence Exchange (TIE) client
        self.tie_client = TieClient(dxlclient)
        # TODO: Refactor this
        self.reputation_lookup_dict = reputation_lookup_dict
        if self.reputation_lookup_dict:
            # Prefer md5, then sha1; "unknown" when neither key is present
            # (replaces the previous nested bare-except blocks).
            if 'md5' in reputation_lookup_dict:
                self.filehash = reputation_lookup_dict['md5']
            elif 'sha1' in reputation_lookup_dict:
                self.filehash = reputation_lookup_dict['sha1']
            else:
                self.filehash = "unknown"
        else:
            self.filehash = options.filehash
            if self.filehash is None:
                # Was `return "no file hash"`, which makes __init__ raise an
                # uninformative TypeError; fail explicitly instead.
                raise ValueError("no file hash")
        self._reputations_dict = self._getFileRep()
        self.content = self._getFileProps()

    @property
    def reputations_dict(self):
        """Raw provider -> reputation mapping returned by TIE."""
        return self._reputations_dict

    def _getFileRep(self):
        """Query TIE for the file's reputations, keyed by hash type."""
        if self.reputation_lookup_dict:
            return self.tie_client.get_file_reputation(self.reputation_lookup_dict)
        if utils.is_sha1(self.filehash):
            return self.tie_client.get_file_reputation({HashType.SHA1: self.filehash})
        elif utils.is_sha256(self.filehash):
            return self.tie_client.get_file_reputation({HashType.SHA256: self.filehash})
        elif utils.is_md5(self.filehash):
            return self.tie_client.get_file_reputation({HashType.MD5: self.filehash})
        return "not a valid file hash"

    def _getFileProps(self):
        """Map each known provider's reputation into display-ready dicts."""
        propList = []
        fileProps = self.reputations_dict
        # Identical handling for every provider; fixed order keeps output stable.
        for provider in (FileProvider.GTI, FileProvider.ENTERPRISE,
                         FileProvider.ATD, FileProvider.MWG):
            if provider in fileProps:
                rep = fileProps[provider]
                propList.append({
                    'provider': providerMap[rep['providerId']],
                    'reputation': tiescoreMap[rep['trustLevel']],
                    'createDate': rep['createDate'],
                })
        return propList

    #TODO: stupid name.. rename it combined_reputation
    #INFO: this returns an array: [0] numeric level, [1] label, [2] provider
    def calcRep(self):
        """Collapse provider reputations into [level, label, provider].

        Cascade order: Enterprise -> ATD -> MWG -> GTI; the next provider is
        consulted only while the current level is 0 ("Not Set").
        """
        reputations_dict = self.reputations_dict
        # Defaults avoid UnboundLocalError when no provider responded at all.
        rep = 0
        warning_provider = None
        if FileProvider.ENTERPRISE in reputations_dict:
            ent_rep = reputations_dict[FileProvider.ENTERPRISE]
            rep = ent_rep[ReputationProp.TRUST_LEVEL]
            warning_provider = FileProvider.ENTERPRISE
            if rep == 0:
                if FileProvider.ATD in reputations_dict:
                    atd_rep = reputations_dict[FileProvider.ATD]
                    rep = atd_rep[ReputationProp.TRUST_LEVEL]
                    warning_provider = FileProvider.ATD
                if rep == 0:
                    if FileProvider.MWG in reputations_dict:
                        mwg_rep = reputations_dict[FileProvider.MWG]
                        # BUG FIX: previously read atd_rep here, so the MWG
                        # trust level was never actually used.
                        rep = mwg_rep[ReputationProp.TRUST_LEVEL]
                        warning_provider = FileProvider.MWG
                    if rep == 0:
                        if FileProvider.GTI in reputations_dict:
                            gti_rep = reputations_dict[FileProvider.GTI]
                            rep = gti_rep[ReputationProp.TRUST_LEVEL]
                            warning_provider = FileProvider.GTI
        else:
            if FileProvider.GTI in reputations_dict:
                gti_rep = reputations_dict[FileProvider.GTI]
                rep = gti_rep[ReputationProp.TRUST_LEVEL]
                warning_provider = FileProvider.GTI
        if rep <= TrustLevel.MOST_LIKELY_TRUSTED:
            if rep <= TrustLevel.MOST_LIKELY_MALICIOUS:
                rep_str = "bad"
            else:
                # Mid-band values count as "medium" only when an ATD
                # (sandbox) verdict exists; otherwise "unknown".
                rep_str = "medium" if FileProvider.ATD in reputations_dict else "unknown"
        else:
            rep_str = "good"
        return [rep, rep_str, warning_provider]

    def tieResponse(self):
        """Build a human-readable multi-line summary of all provider reputations."""
        rtv_string = "File Hash " + self.filehash + " Reputation\n\n"
        # Format a String Response (dropped an unused loop counter).
        for key in self.content:
            rtv_string = rtv_string + "Provider: " + key['provider'] + "\n"
            rtv_string = rtv_string + "Creation Date: " + utils.time_to_str(key['createDate']) + "\n"
            rtv_string = rtv_string + "Reputation: " + key['reputation'] + "\n"
            rtv_string += "\n"
        return rtv_string
## Debug functions
def __printTIE(reputations_dict):
    """Debug dump of each provider's trust level (Python 2 print statements)."""
    # Display the Global Threat Intelligence (GTI) trust level for the file
    if FileProvider.GTI in reputations_dict:
        gti_rep = reputations_dict[FileProvider.GTI]
        print "Global Threat Intelligence (GTI) trust level: " + \
            str(gti_rep[ReputationProp.TRUST_LEVEL])
    # Display the Enterprise reputation information
    if FileProvider.ENTERPRISE in reputations_dict:
        ent_rep = reputations_dict[FileProvider.ENTERPRISE]
        print "Threat Intelligence Exchange (Local) trust level: " + \
            str(ent_rep[ReputationProp.TRUST_LEVEL])
        # Retrieve the enterprise reputation attributes
        ent_rep_attribs = ent_rep[ReputationProp.ATTRIBUTES]
        # Display prevalence (if it exists)
        if FileEnterpriseAttrib.PREVALENCE in ent_rep_attribs:
            print "Enterprise prevalence: " + \
                ent_rep_attribs[FileEnterpriseAttrib.PREVALENCE]
        # Display first contact date (if it exists)
        if FileEnterpriseAttrib.FIRST_CONTACT in ent_rep_attribs:
            print "First contact: " + \
                FileEnterpriseAttrib.to_localtime_string(
                    ent_rep_attribs[FileEnterpriseAttrib.FIRST_CONTACT])
    if FileProvider.ATD in reputations_dict:
        atd_rep = reputations_dict[FileProvider.ATD]
        print "ATD (sandbox) trust level: " + \
            str(atd_rep[ReputationProp.TRUST_LEVEL])
    if FileProvider.MWG in reputations_dict:
        mwg_rep = reputations_dict[FileProvider.MWG]
        # NOTE(review): "WebGatewayy" typo lives in the runtime output string;
        # left untouched here since this edit only adds comments.
        print "MWG (WebGatewayy) trust level: " + \
            str(mwg_rep[ReputationProp.TRUST_LEVEL])
| 8,217 | 2,551 |
import hashlib
import json
import numpy as np
import pandas as pd
from pymemcache import serde
from pymemcache.client import base
from keepthis.MemcachedConnection import MemcachedConnection
from keepthis.exceptions import KeepThisValueError
class KeepThis:
    """Memoizes function results in memcached, keyed by a SHA-224 hash of
    the function name and its (normalized) arguments.
    """

    def __init__(
        self,
        memcached_host,
        memcached_port,
    ):
        self.memcached_host = memcached_host
        self.memcached_port = memcached_port
        # Argument types _hash_object can normalize. pandas types are now
        # included: previously they failed the isinstance gate and raised,
        # making the pandas branch in _hash_object unreachable.
        self.__supported_entity_types__ = (
            np.ndarray,
            pd.DataFrame,
            pd.Series,
            pd.Index,
            str,
            int,
            float,
        )

    @staticmethod
    def _hash_string(input_string):
        """Return the SHA-224 hex digest of *input_string*."""
        sha224 = hashlib.sha224
        return sha224(input_string.encode()).hexdigest()

    @staticmethod
    def _hash_ndarray(input_array):
        """Hash a numpy array via the hex dump of its raw buffer."""
        if not isinstance(input_array, np.ndarray):
            raise KeepThisValueError(
                "numpy.ndarray instance was expected but got {}".format(
                    type(input_array)
                )
            )
        string = input_array.data.hex()
        return KeepThis._hash_string(string)

    @staticmethod
    def _hash_pandas(input_dataframe):
        """Hash a pandas DataFrame/Series/Index deterministically."""
        if not isinstance(input_dataframe, (pd.DataFrame, pd.Series, pd.Index)):
            # Fixed message that was copy-pasted from _hash_ndarray.
            raise KeepThisValueError(
                "pandas.DataFrame, pandas.Series or pandas.Index instance "
                "was expected but got {}".format(type(input_dataframe))
            )
        string = pd.util.hash_pandas_object(input_dataframe).values.data.hex()
        return KeepThis._hash_string(string)

    def _hash_object(self, entity):
        """Convert non-JSON-serializable objects to stable hash strings.

        numpy/pandas objects become their content hash; plain str/int/float
        pass through unchanged.

        :param entity: any supported item
        :return: the entity itself or a hash string
        :raises KeepThisValueError: for unsupported types
        """
        if not isinstance(entity, self.__supported_entity_types__):
            raise KeepThisValueError(
                "Entity has type {}, while only {} are supported".format(
                    type(entity),
                    self.__supported_entity_types__,
                )
            )
        if isinstance(entity, np.ndarray):
            # getting hash from numpy.ndarray
            return self._hash_ndarray(entity)
        elif isinstance(entity, (pd.DataFrame, pd.Series, pd.Index)):
            # getting hash from pandas objects
            return self._hash_pandas(entity)
        else:
            return entity

    def drop(self):
        """Flush every key in the configured memcached instance."""
        memcached = self._get_connection()
        memcached.flush_all()
        memcached.close()

    def _get_connection(self):
        """Build a memcached client with pickle-based (de)serialization."""
        return base.Client(
            (self.memcached_host, self.memcached_port),
            serializer=serde.python_memcache_serializer,
            deserializer=serde.python_memcache_deserializer,
        )

    def _get_unique_key(self, func, *args, **kwargs):
        """Derive a deterministic cache key from the function name + args.

        NOTE(review): kwargs are JSON-dumped as-is, so array-valued kwargs
        would make json.dumps fail -- confirm callers only pass plain kwargs.
        """
        func_name = func.__name__
        args_dict = [self._hash_object(x) for x in args]
        args_dict.append(kwargs)
        args_str = json.dumps(args_dict)
        string_to_hash = "{}|{}".format(func_name, args_str)
        resulting_hash = self._hash_string(string_to_hash)
        return resulting_hash

    def this(self, func, *args, **kwargs):
        """Wrap *func* so its results are cached in memcached."""
        def func_wrapper(*args, **kwargs):
            unique_hash = self._get_unique_key(func, *args, **kwargs)
            with MemcachedConnection(self.memcached_host, self.memcached_port) as memcached:
                cached_value = memcached.get(unique_hash)
                if cached_value is not None:
                    memcached.close()
                    return cached_value
                value_to_cache = func(*args, **kwargs)
                memcached.set(unique_hash, value_to_cache)
                return value_to_cache
        return func_wrapper
| 3,720 | 1,046 |
import json
from pathlib import Path
from sbx_bgsvc_starterpack.sbx_json_default import json_default
def load(default_cfg: dict = {}, file: str = './cfg/cfg.json', encoding: str = 'utf-8-sig') -> dict:
# 1. ์ค์ ํ์ผ์ด ์๋๊ฒฝ์ฐ ์์ฑ
cfg_file1 = Path(file)
if not cfg_file1.is_file():
save(default_cfg, file, encoding)
# 2. ์ค์ ํ์ผ load
with open(file, mode='rt', encoding=encoding) as f:
cfg = json.loads(f.read())
# ์๋ก์ด ์ค์ ์ด ์๋ ๊ฒฝ์ฐ๋ฅผ ์ํด ๋ณํฉ, ์ ์ฅ
default_cfg.update(cfg) # ๋ณํฉ์ ๊ธฐ์กด ์ค์ ์ ๋ฎ์ด์ฐ๋ฏ๋ก ๊ธฐ๋ณธ ์ค์ ์ ํ์ผ์ ์ ์ฅ๋ ์ค์ ์ ๋ฎ์ด์ฐ๊ณ ์ ์ฅ, ๋ฐํ.
save(default_cfg, file, encoding)
return default_cfg
def save(cfg: dict = {}, filename: str = './cfg/cfg.json', encoding: str = 'utf-8-sig'):
    """Write *cfg* as pretty-printed JSON, creating parent directories as needed."""
    cfg_file1 = Path(filename)
    cfg_file1.parents[0].mkdir(parents=True, exist_ok=True)
    with open(filename, mode='wt', encoding=encoding) as f:  # wt : Write Text
        # ensure_ascii=False keeps non-ASCII (e.g. Korean) text readable
        # instead of emitting \u escapes.
        f.write(json.dumps(cfg, indent=4, ensure_ascii=False, default=json_default))
| 1,016 | 520 |
#!/usr/bin/python
#
# ============================================================================
# Copyright (c) 2011 Marvell International, Ltd. All Rights Reserved
#
# Marvell Confidential
# ============================================================================
#
# Run a random scan. Random color/mono, random DPI, random area (subject to
# constraints).
# Written to do overnight testing.
# davep 6-Mar-2007
import sys
import random
import time
import getopt
import scan
# Supported DPI settings.  NOTE: dpi_range is currently unused -- random_dpi()
# draws from dpi_choices instead.
dpi_range = ( 75, 1200 )
#dpi_choices= ( 75, 100, 150, 200, 300 )
dpi_choices= ( 300, 600, 1200 )
#valid_scan_types = ( "color", "mono" )
valid_scan_types = ( "rgbx", "xrgb", "rgb", "color", "mono" )
# Scannable page area bounds, in 1/100 of an inch.
x_area_range = ( 0, 850 )
y_area_range = ( 0, 1169 )
#y_area_range = ( 0, 1100 )
# Minimum width/height of a randomly chosen scan area (1/100").
area_min = 100
# fraction: scale = [0]/[1]
min_scale = ( 1, 16 )
max_scale = ( 8, 1 )
# davep 02-Apr-2009 ; allow option to disable random scaling for platforms that
# don't support scaler (e.g., ICE Lite color scaling broken)
use_random_scale = True
def random_dpi() :
    """Pick a random DPI from the supported dpi_choices tuple."""
    # davep 10-Jun-2011 ; a fixed 300 DPI was used temporarily for testing:
    # return 300
    index = random.randrange( len(dpi_choices) )
    return dpi_choices[index]
    # return random.randint( dpi_range[0], dpi_range[1] )
def random_scan_type() :
    """Pick a random scan type from valid_scan_types."""
    index = random.randrange( len(valid_scan_types) )
    return valid_scan_types[index]
def random_area() :
    """Return a random (x0, y0, x1, y1) scan area in 1/100" units.

    The upper-left corner leaves at least area_min of room before the page
    edge and the lower-right corner is clamped to the page, so both
    dimensions are guaranteed to span at least area_min.
    """
    upper_left_x = random.randint( x_area_range[0], x_area_range[1]-area_min )
    upper_left_y = random.randint( y_area_range[0], y_area_range[1]-area_min )
    width_x = random.randint( area_min, x_area_range[1] )
    width_y = random.randint( area_min, y_area_range[1] )
    lower_right_x = min( upper_left_x+width_x, x_area_range[1] )
    lower_right_y = min( upper_left_y+width_y, y_area_range[1] )
    # Bug fix: the first assert's diagnostic tuple reported upper_left_y
    # instead of upper_left_x, making failures misleading.
    assert lower_right_x-upper_left_x >= area_min, (lower_right_x,upper_left_x)
    assert lower_right_y-upper_left_y >= area_min, (lower_right_y,upper_left_y)
    return (upper_left_x,upper_left_y,lower_right_x,lower_right_y)
def random_scale() :
    """Return a random (numerator, denominator) scale fraction within
    [min_scale, max_scale]."""
    low = float(min_scale[0]) / float(min_scale[1])
    high = float(max_scale[0]) / float(max_scale[1])
    while True :
        # numer = random.randint( 1, 65536 )
        # denom = random.randint( 1, 65536 )
        # limit to signed 16-bit so we don't overflow the firmware (need to fix
        # the firmware!)
        numerator = random.randint( 1, 2**15-1 )
        denominator = random.randint( 1, 2**15-1 )
        ratio = float(numerator)/float(denominator)
        if low <= ratio <= high :
            return (numerator,denominator)
def maybe() :
    """Coin-flip helper -- currently forced to always return true (1).

    The random behaviour is disabled; the original dead line after the
    return even misspelled the call as random.randomint (AttributeError if
    ever reached).  Kept below as a comment until it is re-enabled.
    """
    return 1
    # return random.randint( 0, 1 )
def random_scan( scan_args_hash, my_args ) :
    # Build a randomized aspscan command line from the fixed options in
    # scan_args_hash plus random DPI/type/area/scale, log it, then run the
    # scan (or only print it when "debug" is in my_args).
    scan_args = []
    # first the required arguments
    dpi = random_dpi()
    scan_type = random_scan_type()
    scan_args.extend( ("-o", scan_args_hash["-o"]) )
    scan_args.extend( ("-r", "%d"%dpi) )
    scan_args.extend( ("-t", scan_type) )
    # now put together optional arguments
    for optional_key in ( "-p", "-i", "-e", "-v" ) :
        # has_key() is Python 2 only.
        if scan_args_hash.has_key( optional_key ) :
            scan_args.extend( ( optional_key, scan_args_hash[optional_key] ) )
    # NOTE(review): "area", "scale_str_x" and "scale_str_y" below are only
    # assigned when maybe() returns true; maybe() is currently hard-coded to
    # return 1, otherwise the later references would raise NameError --
    # confirm before re-enabling the random coin flip.
    if maybe() :
        area = random_area()
        area_str = ",".join( [ "%d"%num for num in area ] )
        scan_args.extend( ( "-a", area_str ) )
    # avoid trying to capture too much data ; the tests will take forever
    bad_scale = 1
    while use_random_scale and bad_scale :
        # NOTE(review): the duplicated "scale = 1" looks like it was meant to
        # default scale_x / scale_y separately -- confirm original intent.
        scale = 1
        scale = 1
        # X scale
        if maybe() :
            scale = random_scale()
            scale_str_x = "/".join( [ "%d"%num for num in scale ] )
            scale_x = float(scale[0]) / float(scale[1])
        # Y scale
        if maybe() :
            scale = random_scale()
            scale_str_y = "/".join( [ "%d"%num for num in scale ] )
            scale_y = float(scale[0]) / float(scale[1])
        # Now calculate the scan area ; if too big, go back and get a different
        # scale.
        # area[] is x0,y0,x1,y1 in 1/100"
        out_x = ((area[2] - area[0])/100.0) * dpi * scale_x
        out_y = ((area[3] - area[1])/100.0) * dpi * scale_y
        print "scale_x=%s scale_y=%s" % (str(scale_x),str(scale_y))
        print "out_x=%s out_y=%s" % (str(out_x),str(out_y))
        # 8.5" at 1200 DPI is 10200 pixels
        # we run into memory problems if we scale X up beyond that ; the
        # firmware doesn't catch the problems yet
        max_x = 11000
        if out_x > max_x:
            print "out_x=%d max_x=%d ; scan too big" % (out_x,max_x)
        else :
            bad_scale = 0
    if use_random_scale :
        scan_args.extend( ( "-x", scale_str_x ) )
        scan_args.extend( ( "-y", scale_str_y ) )
    scan_args_str = " ".join(scan_args)
    print "scan_args={0}".format( scan_args_str )
    # write it to a logfile in case the scan crashes
    logfile = file( "random_scan.log", "a" )
    logfile.write( scan_args_str + "\n" )
    logfile.close()
    # davep 11-Oct-2012 ; add "debug" flag so we will just print the random
    # results without doing the scan
    if "debug" in my_args :
        print scan_args_str
        # pretend we succeeded
        return 0
    aspscan_exit_code = scan.run_scan( scan_args )
    # aspscan_exit_code = 0
    return aspscan_exit_code
def usage() :
print "run a random scan"
print "usage: %s \"hdv:p:i:e:o:\" [--infinite] output_filename" % sys.argv[1]
print " -h # show this help"
print " -d # debug ; only print the scan args, don't run the scan"
print " -o outputfile"
print " -v num"
print " USB Vendor ID"
print " -p num"
print " USB Product ID"
print " -i num"
print " USB Interface"
print " -e out[,in]"
print " USB Out,In Endpoints (default is 0x%x,0x%x)"
print " the out endpoint must always be provided, the in endpoint is optional."
print " note direction is with respect to the host: out is host to device (usb write)"
print " --infinite # loop, running scans forever"
def bad_cmdline( msg ) :
    # Report an invalid command line and terminate the process with status 1.
    print "Invalid command line : %s" % msg
    print "Use -h for help."
    sys.exit(1)
def parse_args( args_list ) :
scan_args = {}
my_args = {}
# default set of arguments
scan_args["-o"] = "foo"
longopts = ["infinite","help"]
try:
opt_list, remain_list = getopt.getopt(sys.argv[1:], "hdi:e:v:p:o:", longopts )
except getopt.GetoptError,e:
# print help information and exit:
bad_cmdline( e )
# print opt_list
# print remain_list
for opt, arg in opt_list:
# print opt, arg
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt == "--infinite" :
# loop around random scan forever (or until we crash)
my_args["infinite"] = 1
elif opt in ("-i", "-e", "-v", "-p", "-o" ) :
scan_args[opt] = arg
elif opt == "-d" :
my_args["debug"] = 1
else:
assert False, "unhandled option"
return my_args,scan_args
def main() :
    # Parse the command line, then run one random scan -- or loop forever
    # when --infinite was given.
    (my_args,scan_args) = parse_args( sys.argv[1:] )
    # print scan_args
    # return
    while 1 :
        try :
            random_scan( scan_args, my_args )
        except Exception,e:
            # Python 2 except syntax; log the failure and bail out with
            # non-zero exit status.
            print >>sys.stderr, e
            logfile = file( "random_scan.log", "a" )
            logfile.write( "scan failed\n" )
            logfile.close()
            sys.exit(1)
        if not my_args.has_key("infinite") :
            # only run once
            break
        # this time is not foolproof!
        time.sleep(8)
if __name__ == '__main__' :
    main()
| 7,804 | 2,865 |
# --------------------------------------------------------------------------
# Copyright 2014 Digital Sapphire Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
import logging
import os
try:
import bsdiff4
except ImportError:
bsdiff4 = None
from pyi_updater.client.downloader import FileDownloader
from pyi_updater.exceptions import PatcherError
from pyi_updater import settings
from pyi_updater.utils import (get_package_hashes,
EasyAccessDict,
lazy_import,
Version)
if bsdiff4 is None:
from pyi_updater.utils import bsdiff4_py as bsdiff4
log = logging.getLogger(__name__)
@lazy_import
def jms_utils():
    """Lazily import jms_utils (plus its paths/system submodules) on first use."""
    import jms_utils
    import jms_utils.paths
    import jms_utils.system
    return jms_utils
# Platform string for the current system (resolved at import time).
platform_ = jms_utils.system.get_system()
class Patcher(object):
    """Downloads, verifies, and patches binaries

    Kwargs:

        name (str): Name of binary to patch

        json_data (dict): Info dict with all package meta data

        current_version (str): Version number of currently installed binary

        highest_version (str): Newest version available

        update_folder (str): Path to update folder to place updated binary in

        update_urls (list): List of urls to use for file download

        verify (bool) Meaning:

            True: Verify https connection

            False: Don't verify https connection
    """

    def __init__(self, **kwargs):
        self.name = kwargs.get(u'name')
        self.json_data = kwargs.get(u'json_data')
        # Star-delimited key access over the raw update dict.
        self.star_access_update_data = EasyAccessDict(self.json_data)
        self.current_version = Version(kwargs.get(u'current_version'))
        self.highest_version = kwargs.get(u'highest_version')
        self.update_folder = kwargs.get(u'update_folder')
        self.update_urls = kwargs.get(u'update_urls', [])
        self.verify = kwargs.get(u'verify', True)
        self.progress_hooks = kwargs.get(u'progress_hooks', [])
        self.patch_data = []
        self.patch_binary_data = []
        self.og_binary = None
        # ToDo: Update tests with linux archives.
        # Used for testing.
        self.plat = kwargs.get(u'platform', platform_)
        self.current_filename = kwargs.get(u'current_filename')
        self.current_file_hash = kwargs.get(u'current_file_hash')
        file_info = self._current_file_info(self.name,
                                            self.current_version)
        if self.current_filename is None:
            self.current_filename = file_info['filename']
        if self.current_file_hash is None:
            self.current_file_hash = file_info['file_hash']

    def start(self):
        "Starts patching process"
        log.debug(u'Starting patch updater...')
        # Check hash on installed binary to begin patching
        binary_check = self._verify_installed_binary()
        if not binary_check:
            log.debug(u'Binary check failed...')
            return False
        # Getting all required patch meta-data
        all_patches = self._get_patch_info(self.name)
        if all_patches is False:
            log.debug(u'Cannot find all patches...')
            return False
        # Download and verify patches in 1 go
        download_check = self._download_verify_patches()
        if download_check is False:
            log.debug(u'Patch check failed...')
            return False
        try:
            self._apply_patches_in_memory()
        except PatcherError:
            return False
        else:
            try:
                self._write_update_to_disk()
            except PatcherError:
                return False
        return True

    def _verify_installed_binary(self):
        # Verifies currently installed binary against known hash; on success
        # reads the binary into memory (self.og_binary) and deletes the file.
        log.debug(u'Checking for current installed binary to patch')
        # I just really like using this ChDir context
        # manager. Even sent the developer a cup of coffee
        with jms_utils.paths.ChDir(self.update_folder):
            if not os.path.exists(self.current_filename):
                log.debug(u'Cannot find binary to patch')
                return False
            installed_file_hash = get_package_hashes(self.current_filename)
            if self.current_file_hash != installed_file_hash:
                log.debug(u'Binary hash mismatch')
                return False
            with open(self.current_filename, u'rb') as f:
                self.og_binary = f.read()
            os.remove(self.current_filename)
        log.debug(u'Binary found and verified')
        return True

    # We will take all versions. Then append any version
    # thats greater then the current version to the list
    # of needed patches.
    def _get_patch_info(self, name):
        # Taking the list of needed patches and extracting the
        # patch data from it. If any loop fails, will return False
        # and start full binary update.
        log.debug(u'Getting patch meta-data')
        required_patches = self._get_required_patches(name)
        for p in required_patches:
            info = {}
            plat_key = '{}*{}*{}*{}'.format(settings.UPDATES_KEY, name,
                                            str(p), self.plat)
            plat_info = self.star_access_update_data.get(plat_key)
            try:
                info[u'patch_name'] = plat_info[u'patch_name']
                info[u'patch_urls'] = self.update_urls
                info[u'patch_hash'] = plat_info[u'patch_hash']
                self.patch_data.append(info)
            except KeyError:
                log.error(u'Missing required patch meta-data')
                return False
        return True

    def _get_required_patches(self, name):
        # Returns every available version newer than the installed one,
        # de-duplicated and in ascending version order.
        needed_patches = []
        try:
            versions = map(Version,
                           self.json_data[settings.UPDATES_KEY][name].keys())
        except KeyError:
            log.debug(u'No updates found in updates dict')
            # Bug fix: 'versions' was left unbound here, so the sorted()
            # call below raised NameError instead of yielding no patches.
            versions = []
        versions = sorted(versions)
        log.debug(u'getting required patches')
        for i in versions:
            if i > self.current_version:
                needed_patches.append(i)
        # Used to guarantee patches are only added once.
        # Bug fix: list(set(...)) discarded the ascending order that
        # _apply_patches_in_memory() depends on; sorted() restores it.
        return sorted(set(needed_patches))

    def _download_verify_patches(self):
        # Downloads & verifies all patches
        log.debug('Downloading patches')
        downloaded = 0
        total = len(self.patch_data)
        for p in self.patch_data:
            fd = FileDownloader(p[u'patch_name'], p[u'patch_urls'],
                                p[u'patch_hash'], self.verify)
            data = fd.download_verify_return()
            if data is not None:
                self.patch_binary_data.append(data)
                downloaded += 1
                # NOTE(review): the status key 'downloaed' is misspelled, but
                # progress hooks may already depend on it -- do not rename
                # without coordinating with hook consumers.
                status = {u'total': total,
                          u'downloaed': downloaded,
                          u'status': u'downloading'}
                self._call_progress_hooks(status)
            else:
                return False
        status = {u'total': total,
                  u'downloaed': downloaded,
                  u'status': u'finished'}
        self._call_progress_hooks(status)
        return True

    def _call_progress_hooks(self, data):
        # Fan the status dict out to every registered progress hook.
        for ph in self.progress_hooks:
            ph(data)

    def _apply_patches_in_memory(self):
        # Applies a sequence of patches in memory
        log.debug(u'Applying patches')
        # Beginning the patch process
        self.new_binary = self.og_binary
        for i in self.patch_binary_data:
            try:
                self.new_binary = bsdiff4.patch(self.new_binary, i)
            except Exception as err:
                log.debug(err, exc_info=True)
                log.error(err)
                raise PatcherError(u'Patch failed to apply')

    def _write_update_to_disk(self):
        # Writes updated binary to disk, then verifies its hash against the
        # version-file entry for the highest version.
        log.debug('Writing update to disk')
        filename_key = '{}*{}*{}*{}*{}'.format(settings.UPDATES_KEY, self.name,
                                               self.highest_version,
                                               self.plat,
                                               u'filename')
        filename = self.star_access_update_data.get(filename_key)
        if filename is None:
            raise PatcherError('Filename missing in version file')
        with jms_utils.paths.ChDir(self.update_folder):
            try:
                with open(filename, u'wb') as f:
                    f.write(self.new_binary)
            except IOError:
                # Removes file is it somehow got created
                if os.path.exists(filename):
                    os.remove(filename)
                log.error(u'Failed to open file for writing')
                raise PatcherError(u'Failed to open file for writing')
            else:
                file_info = self._current_file_info(self.name,
                                                    self.highest_version)
                new_file_hash = file_info['file_hash']
                log.debug(u'checking file hash match')
                if new_file_hash != get_package_hashes(filename):
                    log.error(u'File hash does not match')
                    os.remove(filename)
                    raise PatcherError(u'Patched file hash bad checksum')
        log.debug('Wrote update file')

    def _current_file_info(self, name, version):
        # Returns filename and hash for given name and version
        info = {}
        plat_key = '{}*{}*{}*{}'.format(settings.UPDATES_KEY, name,
                                        version, self.plat)
        plat_info = self.star_access_update_data.get(plat_key)
        try:
            filename = plat_info[u'filename']
        except Exception as err:
            log.debug(str(err))
            filename = ''
        log.debug(u'Current filename: {}'.format(filename))
        info[u'filename'] = filename
        try:
            file_hash = plat_info[u'file_hash']
        except Exception as err:
            log.debug(str(err))
            file_hash = ''
        info[u'file_hash'] = file_hash
        log.debug('Current file_hash {}'.format(file_hash))
        return info
| 10,880 | 2,989 |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, Length
def range_to_select_choices(items):
    """Build (value, label) choices for a star-rating select.

    A '0 star' entry is always prepended; every item in *items* becomes a
    "(item, '<item> stars')" pair.
    """
    star_labels = [(item, str(item) + ' stars') for item in items]
    return [(0, '0 star')] + star_labels
class AddRecommendationForm(FlaskForm):
    """Form for adding a recommendation: a 0-5 star note plus a comment."""
    # validate_choice=False: server-side membership validation of the
    # submitted choice is skipped.
    note = SelectField('Note', choices=range_to_select_choices(range(1, 6)), validate_choice=False)
    comment = StringField('Comment', validators=[DataRequired(), Length(min=0, max=1000)],
                          render_kw={'placeholder': 'Comment'})
    submit = SubmitField('Add recommendation')
| 649 | 197 |
from __future__ import annotations
import ast
import os
import re
import sys
from itertools import product
from pathlib import Path
import attr
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from absort.__main__ import (
CommentStrategy,
FormatOption,
NameRedefinition,
SortOrder,
absort_str,
)
from absort.ast_utils import ast_deep_equal
from absort.utils import constantfunc, contains
from .strategies import products
# Use third-party library hypothesmith to generate random valid Python source code, to
# conduct property-based testing on the absort*() interface.
# The guy who use such tool to test on black library and CPython stdlib and report issues is Zac-HD (https://github.com/Zac-HD).
# Directory of the CPython standard library, used as a corpus of real-world
# Python files for property-based testing (layout: <python>/Lib next to the
# interpreter executable).
STDLIB_DIR = Path(sys.executable).with_name("Lib")
# Reference: https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
if os.getenv("CI") and os.getenv("TRAVIS"):
    py_version = os.getenv("TRAVIS_PYTHON_VERSION")
    assert py_version
    # Reference: https://docs.travis-ci.com/user/languages/python/#python-versions
    # Reference: https://docs.travis-ci.com/user/languages/python/#development-releases-support
    # NOTE(review): re.fullmatch() returns None when the version string does
    # not match, which would raise AttributeError on .group() -- assumes the
    # Travis version is always "<num>" or "<num>-dev"; confirm.
    py_version_num = re.fullmatch(r"(?P<num>[0-9.]+)(?:-dev)?", py_version).group("num")
    STDLIB_DIR = Path(f"/opt/python/{py_version}/lib/python{py_version_num}/")
TEST_FILES = list(STDLIB_DIR.rglob("*.py"))
@attr.s(auto_attribs=True)
class Option:
    """Bundle of keyword options passed to absort_str() during tests."""
    comment_strategy: CommentStrategy
    format_option: FormatOption
    sort_order: SortOrder
    @classmethod
    def from_tuple(cls: type, tup: tuple) -> Option:
        """Build an Option from a (comment_strategy, format_option, sort_order) tuple."""
        return cls(*tup)
# Every CommentStrategy variant.
all_comment_strategies = list(CommentStrategy)
# Every combination of FormatOption's boolean fields (cartesian product).
all_format_options = [
    FormatOption(*p)  # type: ignore
    for p in product(*([(True, False)] * len(attr.fields(FormatOption))))
]
# Every SortOrder variant.
all_sort_orders = list(SortOrder)
# Zero-argument factory returning a hypothesis strategy over all Options.
arg_options = constantfunc(
    products(all_comment_strategies, all_format_options, all_sort_orders).map(
        Option.from_tuple
    )
)
@given(sampled_from(TEST_FILES), arg_options())
@settings(deadline=None)
def test_absort_str(test_sample: Path, option: Option) -> None:
    """Property test: absort_str() is deterministic and preserves every
    top-level statement of the input module."""
    try:
        source = test_sample.read_text(encoding="utf-8")
        new_source = absort_str(source, **attr.asdict(option, recurse=False))
        second_run_new_source = absort_str(source, **attr.asdict(option, recurse=False))
        # Check that absort is deterministic and stable
        assert new_source == second_run_new_source
        old_ast = ast.parse(source)
        new_ast = ast.parse(new_source)
        # Same number of top-level statements, and each original statement
        # still present (deep AST equality), regardless of order.
        assert len(old_ast.body) == len(new_ast.body)
        for stmt in old_ast.body:
            assert contains(new_ast.body, stmt, equal=ast_deep_equal)
    except (SyntaxError, NameRedefinition, UnicodeDecodeError):
        # Expected failures on files absort legitimately cannot process.
        pass
    except Exception as exc:
        exc_cls_name = getattr(exc.__class__, "__name__", "some exception")
        print(f"Encountered {exc_cls_name} when sorting {test_sample}")
        raise
# TODO add unit test for absort_file()
# TODO add unit test for absort_files()
| 3,090 | 1,003 |
import pygame
from BrickBreaker import *
from BrickBreaker.Scenes import *
from BrickBreaker.Shared import *
class BrickBreaker:
    """Top-level game object: owns the pygame window, scenes, sounds and all
    mutable game state (lives, score, bonus, level, pad, balls)."""

    def __init__(self):
        self._lives = 5
        self._score = 0
        self._bonus = 1
        self._level = Level(self)
        self._level.load_random()
        self._pad = self._create_default_pad()
        self._balls = [
            Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self)
        ]
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.mixer.init()
        pygame.init()
        pygame.display.set_caption("Brick Breaker")
        self._clock = pygame.time.Clock()
        self.screen = pygame.display.set_mode(GameConstants.SCREEN_SIZE)
        pygame.mouse.set_visible(False)
        self._scenes = (
            PlayingGameScene(self),
            HighscoreScene(self),
            MainMenuScene(self),
            GameOverScene(self),
            WinScene(self),
            ControlsScene(self),
            GameRulesScene(self),
        )
        # Index into self._scenes; 2 is the main menu.
        self._current_scene = 2
        self._sounds = (
            pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_A_STANDARD_BRICK),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_SPEED_UP_BRICK),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_LIFE_BRICK),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_BALL_HITTING_A_WALL_OR_A_PAD),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_GAME_OVER),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_BALL_BRICK),
            pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_BONUS_SIZE_BRICK),
        )

    @staticmethod
    def _create_default_pad():
        # Standard-size pad centred horizontally, resting on the bottom edge.
        # (Extracted: this construction was duplicated in __init__, reset_pad
        # and reset.)
        return Pad((GameConstants.SCREEN_SIZE[0] / 2,
                    GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
                   pygame.image.load(GameConstants.PAD_IMAGE))

    def start(self):
        """Run the main loop: tick at 60 FPS, delegate to the active scene."""
        while True:
            self._clock.tick(60)
            self.screen.fill((0, 0, 0))
            _current_scene = self._scenes[self._current_scene]
            _current_scene.handle_events(pygame.event.get())
            _current_scene.render()
            pygame.display.update()

    def change_scene(self, scene):
        """Switch the active scene by its index in self._scenes."""
        self._current_scene = scene

    def get_level(self):
        return self._level

    def get_bonus(self):
        return self._bonus

    def increment_bonus(self):
        self._bonus += 1

    def reset_bonus(self):
        self._bonus = 1

    def double_pad(self):
        """Swap in a double-width pad, preserving the current input mode."""
        keyboard = self._pad.get_keyboard_status()
        mouse = self._pad.get_mouse_status()
        self._pad = DoublePad((GameConstants.SCREEN_SIZE[0] / 2,
                               GameConstants.SCREEN_SIZE[1] - GameConstants.DOUBLE_PAD_SIZE[1]),
                              pygame.image.load(GameConstants.DOUBLE_PAD_IMAGE))
        if keyboard:
            self._pad.activate_keyboard()
        if mouse:
            self._pad.activate_mouse()

    def reset_pad(self):
        """Swap back to the standard pad, preserving the current input mode."""
        keyboard = self._pad.get_keyboard_status()
        mouse = self._pad.get_mouse_status()
        self._pad = self._create_default_pad()
        if keyboard:
            self._pad.activate_keyboard()
        if mouse:
            self._pad.activate_mouse()

    def get_pad(self):
        return self._pad

    def get_score(self):
        return self._score

    def increase_score(self, score):
        # Score awards are multiplied by the current bonus factor.
        self._score += score * self._bonus

    def increase_score_by_1k(self, score=1000):
        # Same bonus-multiplied award as increase_score, defaulting to 1000.
        self.increase_score(score)

    def get_lives(self):
        return self._lives

    def get_balls(self):
        return self._balls

    def add_one_ball(self):
        self._balls.append(Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self))

    def play_sound(self, sound_clip):
        """Restart and play the sound at index *sound_clip*."""
        sound = self._sounds[sound_clip]
        sound.stop()
        sound.play()

    def reduce_life_by_one(self):
        self._lives -= 1

    def add_one_life(self):
        self._lives += 1

    def reset(self):
        """Restore lives/score/bonus, load a fresh random level and pad."""
        self._lives = 5
        self._score = 0
        self._bonus = 1
        self._level = Level(self)
        self._level.load_random()
        self._pad = self._create_default_pad()
def main():
    """Entry point: construct the game and run its main loop."""
    BrickBreaker().start()
if __name__ == '__main__':
    # Bug fix: delegate to main() instead of duplicating its body -- main()
    # was defined but never called.
    main()
| 4,593 | 1,520 |
from .visualization import plot_mean, plot_mean_interval
| 57 | 16 |
'''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.metrics import accuracy_score
# Load the label <-> class-id mappings produced during training.
with open('map.pkl', 'rb') as f:
    map_dict = pickle.load(f)
with open('map_reverse.pkl', 'rb') as f:
    map_reverse = pickle.load(f)
# Map training labels to integer class ids.
Y_train = pd.read_csv('/tmp2/b03902110/phase2/data/train_label.csv')
Y_dict = Y_train['label'].map(map_dict)
Y_dict = np.array(Y_dict)
print(Y_dict.shape)
print(Y_dict)
Y_fname_train = Y_train['fname'].tolist()
Y_test = pd.read_csv('./sample_submission.csv')
Y_fname_test = Y_test['fname'].tolist()
# One-hot encode the 41 classes.
Y_all = []
for i in Y_dict:
    Y_all.append(to_categorical(i, num_classes=41))
Y_all = np.array(Y_all)
print(Y_all)
print(Y_all.shape)
# Standardize features using the training-set mean/std.
X_train = np.load('/tmp2/b03902110/phase2/data/X_train.npy')
X_test = np.load('/tmp2/b03902110/phase2/data/X_test.npy')
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# Command line: model filename and 0-based fold number.
base = '/tmp2/b03902110/newphase2'
modelbase = os.path.join(base, '10_fold_model')
name = sys.argv[1]
fold_num = int(sys.argv[2])
filename = os.path.join(modelbase, name)
# Validation data for the selected fold (fold files are 1-based on disk).
X_val = np.load('/tmp2/b03902110/newphase1/data/X/X{}.npy'.format(fold_num+1))
X_val = (X_val - mean) / std
Y_val = np.load('/tmp2/b03902110/newphase1/data/y/y{}.npy'.format(fold_num+1))
# Output directories for .npy and .csv predictions.
npy_predict = os.path.join(base, 'npy_predict')
if not os.path.exists(npy_predict):
    os.makedirs(npy_predict)
csv_predict = os.path.join(base, 'csv_predict')
if not os.path.exists(csv_predict):
    os.makedirs(csv_predict)
# Evaluate the model on the validation fold, then predict the test set and
# save results both as raw probabilities (.npy) and a submission CSV.
model = load_model(filename)
print('Evaluating {}'.format(name))
score = model.evaluate(X_val, Y_val)
print(score)
print('Predicting X_test...')
result = model.predict(X_test)
np.save(os.path.join(npy_predict, 'mow_cnn2d_semi_test_{}.npy'.format(fold_num+1)), result)
df = pd.DataFrame(result)
df.insert(0, 'fname', Y_fname_test)
df.to_csv(os.path.join(csv_predict, 'mow_cnn2d_semi_test_{}.csv'.format(fold_num+1)), index=False, header=True)
| 2,188 | 945 |
"""Tests for date utils"""
from datetime import time, datetime
import pytz
from jetblack_fixengine.utils.date_utils import (
is_dow_in_range,
is_time_in_range,
delay_for_time_period
)
# datetime.weekday() numbering: Monday == 0 ... Sunday == 6.
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
def test_dow_range():
    """Day-of-week ranges, including ranges that wrap past the weekend."""
    inside = [
        (MONDAY, FRIDAY, MONDAY),
        (MONDAY, FRIDAY, WEDNESDAY),
        (MONDAY, FRIDAY, FRIDAY),
        (WEDNESDAY, WEDNESDAY, WEDNESDAY),
        (FRIDAY, TUESDAY, FRIDAY),
        (FRIDAY, TUESDAY, SUNDAY),
        (FRIDAY, TUESDAY, TUESDAY),
    ]
    outside = [
        (MONDAY, FRIDAY, SATURDAY),
        (TUESDAY, THURSDAY, MONDAY),
        (TUESDAY, THURSDAY, FRIDAY),
        (WEDNESDAY, WEDNESDAY, TUESDAY),
        (WEDNESDAY, WEDNESDAY, THURSDAY),
        (FRIDAY, TUESDAY, THURSDAY),
        (SATURDAY, SUNDAY, MONDAY),
    ]
    for start_dow, end_dow, dow in inside:
        assert is_dow_in_range(start_dow, end_dow, dow)
    for start_dow, end_dow, dow in outside:
        assert not is_dow_in_range(start_dow, end_dow, dow)
def test_time_range():
    """Times inside and outside a same-day range (bounds are inclusive)."""
    range_start = time(0, 0, 0)
    range_end = time(17, 30, 0)
    assert is_time_in_range(range_start, range_end, time(0, 0, 0))
    assert is_time_in_range(range_start, range_end, time(12, 0, 0))
    assert is_time_in_range(range_start, range_end, time(17, 30, 0))
    assert not is_time_in_range(range_start, range_end, time(20, 0, 0))
    assert not is_time_in_range(time(9, 30, 0), range_end, time(0, 0, 0))
def test_seconds_for_period():
    """Test seconds in a period"""
    # now=6am, start=8am, end=4pm -> wait 2h until the period opens.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 6, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 1, 1, 16, 0, 0)
    # now=10am, start=8am, end=4pm -> already inside, no wait.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 10, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 1, 1, 16, 0, 0)
    # now=6pm, start=8am, end=4pm -> wait 14h for tomorrow's period.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 18, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 1, 2, 16, 0, 0)
    # now=6pm, start=8pm, end=4am -> period spans midnight.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 18, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    # now=10pm, start=8pm, end=4am -> inside the overnight period.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 22, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    # now=6am, start=8pm, end=4am -> wait 14h for tonight's period.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 6, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    london = pytz.timezone('Europe/London')
    # NOTE(review): passing a pytz timezone via tzinfo= in the datetime
    # constructor is the documented pytz pitfall (it attaches the zone's
    # first historic offset, not the DST-correct one) -- pytz recommends
    # london.localize(...); confirm delay_for_time_period expects this.
    # now=6pm, start=8pm, end=4am, London clocks forward.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 18, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
    # now=10pm, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 22, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
    # now=6am, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 6, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
| 4,408 | 2,142 |
'''
xScratch exceptions
'''
class XSSyntaxError(Exception):
    """Raised when the script contains a syntax error."""
class XSSemanticError(Exception):
    """Raised when the script contains a semantic error."""
class XSArduinoError(Exception):
    """Raised when communication with the Arduino fails."""
| 400 | 113 |
import os
import pybullet_data
from environments.locomotion.scene_abstract import Scene
import pybullet as p
class StadiumScene(Scene):
    """Scene backed by a flat plane SDF, loaded once per process."""
    # If False, center of coordinates (0,0,0) will be at the middle of the
    # stadium.
    zero_at_running_strip_start_line = True
    stadium_halflen = 105 * 0.25  # FOOBALL_FIELD_HALFLEN
    stadium_halfwidth = 50 * 0.25  # FOOBALL_FIELD_HALFWID
    # Class-level flag so the SDF is only loaded on the first episode restart.
    stadiumLoaded = 0

    def episode_restart(self, bullet_client):
        # Reset the world, then (first time only) load the ground plane and
        # set its friction/restitution/visual properties.
        self._p = bullet_client
        Scene.episode_restart(self, bullet_client)  # contains cpp_world.clean_everything()
        if (self.stadiumLoaded == 0):
            self.stadiumLoaded = 1
            # stadium_pose = cpp_household.Pose()
            # if self.zero_at_running_strip_start_line:
            #    stadium_pose.set_xyz(27, 21, 0)  # see RUN_STARTLINE, RUN_RAD constants
            # Grid-textured plane from pybullet_data, or the local asset copy.
            if self.enable_grid:
                filename = os.path.join(pybullet_data.getDataPath(), "plane_stadium.sdf")
            else:
                filename = os.path.join("environments/locomotion/assets", "plane_stadium.sdf")
            self.ground_plane_mjcf = self._p.loadSDF(filename)
            # filename = os.path.join(pybullet_data.getDataPath(),"stadium_no_collision.sdf")
            # self.ground_plane_mjcf = self._p.loadSDF(filename)
            #
            for i in self.ground_plane_mjcf:
                self._p.changeDynamics(i, -1, lateralFriction=0.8, restitution=0.5)
                self._p.changeVisualShape(i, -1, rgbaColor=[1, 1, 1, 0.8])
                self._p.configureDebugVisualizer(p.COV_ENABLE_PLANAR_REFLECTION, i)
            # for j in range(p.getNumJoints(i)):
            #    self._p.changeDynamics(i,j,lateralFriction=0)
            # despite the name (stadium_no_collision), it DID have collision, so don't add duplicate ground
class SinglePlayerStadiumScene(StadiumScene):
    """Scene created by the environment so no scene concept is visible to the
    user (single player)."""
    multiplayer = False
class MultiplayerStadiumScene(StadiumScene):
    """Stadium shared by several robots, spread out along the y axis."""
    multiplayer = True
    players_count = 3

    def actor_introduce(self, robot):
        # Offset each newly introduced robot by its (1-based) player number,
        # so players line up at y = -1, 0, +1.
        StadiumScene.actor_introduce(self, robot)
        i = robot.player_n - 1  # 0 1 2 => -1 0 +1
        robot.move_robot(0, i, 0)
| 2,267 | 779 |
from defs import *
from utilities import warnings
def move_to(creep: Creep, target: RoomPosition) -> int:
    """Move *creep* toward *target*, replanning around creeps on ERR_NO_PATH.

    Unexpected result codes (anything other than OK / ERR_TIRED) are reported
    via warnings.warn.  Returns the final moveTo result code.
    """
    result = creep.moveTo(target, {
        'ignoreCreeps': True,
    })
    if result == ERR_NO_PATH:
        # The creep-blind path failed: replan, this time treating other
        # creeps as obstacles.
        result = creep.moveTo(target, {
            'ignoreCreeps': False,
        })
    if result == OK or result == ERR_TIRED:
        return result
    warnings.warn("unknown result from (creep {}).moveTo({}): {}"
                  .format(creep.name, target, warnings.transform_error_code(result)))
    return result
| 522 | 166 |
from typing import Dict, Sequence, Union
from .typing_annotations import DataFrameOrSeries
import pandas as pd
import numpy as np
def create_constant_time_series(value:Union[int,float], start:pd.Timestamp) -> pd.Series:
    """Return a single-point time series holding *value* at timestamp *start*."""
    return pd.Series(data=[value], index=[start])
def remove_consecutive_duplicates(df:DataFrameOrSeries) -> DataFrameOrSeries:
    """
    Compress timeseries by only keeping the first row of consecutive duplicates.

    Implemented by comparing the DataFrame/Series with a copy shifted by one
    row and keeping only rows where at least one column differs from the
    previous row. The first row is always kept (shift() yields NaN there).
    """
    if isinstance(df, pd.DataFrame):
        # Bug fix / compatibility: pass axis as a keyword -- the positional
        # form `.any(1)` was deprecated and removed in pandas 2.0.
        df = df.loc[(df.shift() != df).any(axis=1)]
    else:
        df = df.loc[df.shift() != df]
    return df
def get_timestamp_indexed_series(starttime:pd.Timestamp, time_unit:str, t:Sequence[Union[int,float]], y:Sequence[float], column_name:str='data') -> DataFrameOrSeries:
    """Build a timestamp-indexed Series (or DataFrame for 2-D *y*) from time
    offsets *t* (in units of *time_unit* after *starttime*) and values *y*.

    time_unit may be 'minute' or 'second'; anything else is treated as hours.
    Values >= 1.0e40 are replaced with NaN (sentinel for missing data).
    """
    if not isinstance(t, np.ndarray):
        t = np.fromiter(t, int)
    if not isinstance(y, np.ndarray):
        y = np.array(y, dtype=float)
    if time_unit == 'minute':
        delta = pd.Timedelta(minutes=1)
    elif time_unit == 'second':
        delta = pd.Timedelta(seconds=1)
    else:
        delta = pd.Timedelta(hours=1)
    # Remove time zone info before calling to_datetime64 which automatically converts timestamps to utc time
    tz_name = starttime.tzname()
    if tz_name is not None:
        starttime = starttime.tz_localize(tz=None)
    # Absolute timestamps: starttime + t[i] * delta for every offset.
    t = np.repeat(starttime.to_datetime64(), t.size) + t * delta
    if y.size > t.size:  # Stochastic
        # More values than timestamps: y is 2-D (one column per scenario).
        value = pd.DataFrame(data=y, index=t)
        if tz_name is not None:
            value.index = value.index.tz_localize(tz=tz_name)  # Add the original time zone info back
    else:
        value = pd.Series(data=y.flatten(), index=t, name=column_name)
        if tz_name is not None:
            value = value.tz_localize(tz=tz_name)  # Add the original time zone info back
    # Sentinel for missing data used upstream.
    value[value >= 1.0e40] = np.nan
    return value
def resample_resolution(time:Dict, df:DataFrameOrSeries, delta:float, time_resolution:pd.Series) -> DataFrameOrSeries:
    """
    Resample a timeseries whose time resolution is non-constant.

    Args:
        time: dict with 'starttime' and 'endtime' pd.Timestamp entries.
        df: timestamp-indexed DataFrame or Series to resample (index is
            mutated in place to integer time units).
        delta: conversion factor from seconds to the target time unit.
        time_resolution: Series giving the resolution (in time units) in
            effect from each point in time onward.

    Returns:
        The resampled DataFrame/Series, indexed by integer time unit, where
        each output row is the mean of one resolution-sized window.
    """
    # Convert timeseries index to integers based on the time unit
    df.index = ((df.index - time['starttime']).total_seconds() * delta).astype(int)
    # Compress the time resolution returned from shop, by only keeping the first of consecutive duplicate resolutions
    resolution_format = time_resolution.astype(int)
    compressed_resolution_format = remove_consecutive_duplicates(resolution_format)
    # Extract the different time resolutions and their respective time of enactment.
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0.
    resolution_tuples = list(compressed_resolution_format.items())
    # Add a dummy time at the optimization end time to serve as a well defined bound
    resolution = resolution_tuples[-1][1]
    end_unit_index = int((time['endtime'] - time['starttime']).total_seconds() * delta)
    resolution_tuples.append((end_unit_index, resolution))
    # Build the resampled output
    output_parts = []
    index = 0
    for i, res_tuple in enumerate(resolution_tuples[:-1]):
        unit_index, resolution = res_tuple
        next_unit_index = resolution_tuples[i+1][0]
        selection = df.iloc[unit_index:next_unit_index]
        # Normalize index
        # line below is commented out since it gives wrong result after concating output parts
        # selection.index = selection.index - unit_index
        # Resample by taking the mean of all datapoints in "resolution" sized windows
        selection = selection.rolling(window=resolution).mean().shift(-(resolution-1))
        # Extract the correct means from the rolling means
        selection = selection.iloc[::resolution]
        # Handle any remaining intervals that are less than "resolution" sized
        if (next_unit_index - unit_index) % resolution != 0:
            reduced_res = (next_unit_index - unit_index) % resolution
            last_selection_index = next_unit_index - reduced_res
            last_row = df.iloc[last_selection_index:next_unit_index].mean()
            if isinstance(df, pd.Series):
                last_row = pd.Series(index=[last_selection_index], data=[last_row])
            else:
                last_row = last_row.to_frame().T
                last_row.index = [last_selection_index]
            # Replace the last row, as this has been set to "nan" by the rolling mean
            selection = pd.concat([selection[:-1], last_row])
        output_parts.append(selection)
        index = index + (next_unit_index-unit_index)//resolution
    output_df = pd.concat(output_parts)
    return output_df
| 4,817 | 1,415 |
# -*- coding: utf-8 -*-
""" Class definitions for the HLSCLT Command Line Tool.
Copyright (c) 2017 Ben Marshall
"""
# Generic error class
class Error(Exception):
    """Base class for exceptions in this module."""
    pass


# Specific error class for local config file errors
class ConfigError(Error):
    """Exception raised for options not defined in config.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        # Forward to Exception so str(exc) and exc.args carry the message
        # (previously the base initializer was never called, leaving str(exc)
        # empty).
        super().__init__(message)
        self.message = message
# Class to hold application specific info within the Click context.
class hlsclt_internal_object(object):
    """Per-invocation state shared via the Click context.

    Attributes:
        config: configuration mapping (a fresh empty dict by default).
        solution_num: active solution number.
        file: associated file object, if any — TODO confirm intended use.
        syn_command_present: whether a synthesis command was requested.
    """

    def __init__(self, config=None, solution_num=1, file=None, syn_command_present=False):
        # Use a None sentinel instead of a mutable `{}` default, which would
        # be shared across every instance created with the default.
        self.config = {} if config is None else config
        self.solution_num = solution_num
        self.file = file
        self.syn_command_present = syn_command_present
| 841 | 236 |
๏ปฟ#!/usr/bin/env python
# -*- coding: utf-8 -*-
class BasePage(object):
    """
    Base page object.

    Holds the state every page object shares: the driver handle, the page
    URL and the expected page title.
    """

    def __init__(self, selenium_driver, base_url, pagetitle):
        # Keep references to the driver and the expected URL/title.
        self.driver = selenium_driver
        self.base_url = base_url
        self.pagetitle = pagetitle

    def _iopen(self, url, pagetitle):
        # Navigate to the given address and maximise the browser window.
        self.driver.open(url)
        self.driver.max_window()
        # Verify via on_page() that the opened page matches the expected title.
        assert self.on_page(pagetitle), u"ๆๅผๅผ้กต้ขๅคฑ่ดฅ %s" % url

    def iopen(self):
        # Public entry point: open the configured base URL.
        self._iopen(self.base_url, self.pagetitle)

    def on_page(self, pagetitle):
        # True when the expected title occurs in the current page title.
        return pagetitle in self.driver.get_title()
| 718 | 388 |
import logging
from collections import deque
from ..tokens import BuiltToken
from ..utils import LogicError
from ..router import NoSuchControlSequence
from ..constants.instructions import Instructions
logger = logging.getLogger(__name__)
# Stuff specific to *my parsing*.
def _build_letter_to_non_active_uncased_type_map():
    """Build the character -> production-type mapping programmatically.

    Insertion order and values match the previous literal dict exactly:
    'A'-'F', then 'a'-'z', then 'G'-'Z'.
    """
    mapping = {}
    # For hex characters, need to look for the composite production, not the
    # terminal production, because could be, for example, 'A' or
    # 'NON_ACTIVE_UNCASED_A', so we should look for the composite production,
    # 'non_active_uncased_a'.
    for c in 'ABCDEF':
        mapping[c] = 'non_active_uncased_' + c.lower()
    # Lower-case letters map straight to their instruction's value.
    for o in range(ord('a'), ord('z') + 1):
        c = chr(o)
        mapping[c] = getattr(Instructions, 'non_active_uncased_' + c).value
    # Remaining upper-case letters share the lower-case instruction value.
    for o in range(ord('G'), ord('Z') + 1):
        c = chr(o)
        mapping[c] = getattr(Instructions, 'non_active_uncased_' + c.lower()).value
    return mapping


letter_to_non_active_uncased_type_map = _build_letter_to_non_active_uncased_type_map()
def make_literal_token(p):
    """Combine the character tokens in `p` into one 'literal' BuiltToken."""
    chars = [tok.value['char'] for tok in p]
    return BuiltToken(type_='literal', value=''.join(chars), parents=p)
def str_to_char_types(s):
    """Return an iterator of the non-active-uncased production name for each
    character of `s`."""
    return map(letter_to_non_active_uncased_type_map.__getitem__, s)
def get_literal_production_rule(word, target=None):
    """Return a production-rule string '<target> : <char productions>' for `word`.

    When `target` is omitted, the word itself names the rule.
    """
    head = word if target is None else target
    body = ' '.join(str_to_char_types(word))
    return '{} : {}'.format(head, body)
class DigitCollection:
    """Collects digits belonging to a number in a given base."""

    def __init__(self, base):
        # The numeric base the digits are interpreted in.
        self.base = base
        # Digits gathered so far, in append order.
        self.digits = []

    def __repr__(self):
        name = type(self).__name__
        return f'{name}(base {self.base}: {self.digits})'
# More generic utilities.
def wrap(pg, func, rule):
    """Register `func` as the handler for grammar `rule` on the
    parser-generator `pg`, returning whatever the decorator returns."""
    return pg.production(rule)(func)
class ParsingSyntaxError(Exception):
    """Raised when the collected tokens cannot belong to the current chunk
    (an actual syntax error, as opposed to simply running out of tokens)."""
    pass
class ExhaustedTokensError(Exception):
    """Raised when the parser runs out of tokens before completing a chunk,
    signalling that more input should be collected."""
    pass
end_tag = '$end'


def is_end_token(t):
    """Return True iff `t` is the parser's end-of-input sentinel token."""
    if t.type != end_tag:
        return False
    return t.value == end_tag
class GetBuffer:
    """
    An iterator over items pulled lazily from a `getter` callable.

    `getter` must return an iterable of items each time it is called; an
    optional `initial` iterable seeds the queue. Unconsumed items remain
    visible on `self.queue` so callers can inspect or reclaim them.
    """

    def __init__(self, getter, initial=None):
        self.getter = getter
        self.queue = deque(initial if initial is not None else ())

    def __iter__(self):
        return self

    def __next__(self):
        # Keep asking the getter until at least one item is buffered.
        while not self.queue:
            self.queue.extend(self.getter())
        return self.queue.popleft()
def chunk_iter(banisher, parser):
    """
    Return an iterator over sequential chunks, each satisfying the objective of
    a `parser`, by collecting input tokens from `banisher`.

    The generator is unbounded; iteration ends when get_chunk propagates an
    exception (e.g. EOFError from the underlying token source).
    """
    while True:
        yield get_chunk(banisher, parser)
def get_chunk(banisher, parser, initial=None):
    """
    Return a chunk satisfying the objective of a `parser`, by collecting input
    tokens from `banisher`.

    `initial` optionally seeds the token buffer before anything is pulled from
    the banisher; tokens read but not consumed are pushed back onto the
    banisher's input.
    """
    # Processing input tokens might return many tokens, so store them in a
    # buffer.
    input_buffer = GetBuffer(getter=banisher.get_next_output_list,
                             initial=initial)
    # Get the actual chunk.
    chunk, parse_queue = _get_chunk(input_buffer, parser)
    # We might want to reverse the composition of terminal tokens we just
    # did in the parser, so save the bits in a special place.
    chunk._terminal_tokens = list(parse_queue)
    # Replace any tokens left in the buffer onto the banisher's queue.
    if input_buffer.queue:
        logger.info(f"Cleaning up tokens on chunk grabber's buffer: {input_buffer.queue}")
        banisher.replace_tokens_on_input(input_buffer.queue)
    return chunk
def _get_chunk(input_queue, parser):
    """
    Return `(chunk, parse_queue)`: a chunk satisfying the objective of
    `parser`, plus the deque of tokens it was parsed from, by collecting
    input tokens from `input_queue`.
    """
    # Get enough tokens to grab a parse-chunk. We know to stop adding tokens
    # when we see a switch from failing because we run out of tokens
    # (ExhaustedTokensError) to an actual syntax error (ParsingSyntaxError).
    # Want to extend the queue-to-be-parsed one token at a time,
    # so we can break as soon as we have all we need.
    parse_queue = deque()
    # We keep track of if we have parsed, just for checking for weird
    # situations.
    have_parsed = False
    while True:
        try:
            chunk = parser.parse(iter(parse_queue))
        # If we got a syntax error, this should mean we have spilled over
        # into parsing the next chunk.
        except ParsingSyntaxError as exc:
            # If we have already parsed a chunk, then we use this as our
            # result.
            if have_parsed:
                # We got one token of fluff due to extra read, to make the
                # parse queue not-parse. So put it back on the buffer.
                fluff_tok = parse_queue.pop()
                logger.debug(f'Replacing fluff token {fluff_tok} on to-parse queue.')
                input_queue.queue.appendleft(fluff_tok)
                logger.info(f'Got chunk "{chunk}", through failed parsing')
                return chunk, parse_queue
            # If we have not yet parsed, then something is wrong.
            else:
                exc.bad_token = parse_queue[-1]
                exc.bad_chunk = parse_queue
                # Attach the token dump once (this line was previously
                # duplicated, producing the same message twice in exc.args).
                exc.args += (f'Tokens: {list(parse_queue)}',)
                raise
        except ExhaustedTokensError:
            # Carry on getting more tokens, because it seems we can.
            pass
        else:
            # In our modified version of rply, we annotate the
            # output token to indicate whether the only action from the
            # current parse state could be to end. In this case, we do not
            # bother adding another token, and just return the chunk.
            # This reduces the number of cases where we expand too far, and
            # must handle bad handling of the post-chunk tokens caused by
            # not acting on this chunk.
            if chunk._could_only_end:
                logger.info(f'Got chunk "{chunk}", through inevitability')
                return chunk, parse_queue
            have_parsed = True
        try:
            t = next(input_queue)
        except EOFError:
            # If we get an EOFError, and we have just started trying to
            # get a parse-chunk, we are done, so just propagate the
            # exception to wrap things up.
            if not parse_queue:
                raise
            # If we get an EOFError and we have already parsed, we need to
            # return this parse-chunk, then next time round we will be
            # done.
            elif have_parsed:
                logger.info(f'Got chunk "{chunk}", through end-of-file')
                return chunk, parse_queue
            # If we get to the end of the file and we have a chunk queue
            # that can't be parsed, something is wrong.
            else:
                raise ValueError(f'Got to end-of-file but still have '
                                 f'unparsed tokens: {parse_queue}')
        # If we get an expansion error, it might be because we need to
        # act on the chunk we have so far first.
        except NoSuchControlSequence:
            # This is only possible if we have already parsed the chunk-so-
            # far.
            if have_parsed:
                # This might always be fine, but log it anyway.
                logger.warning('Ignoring failed expansion in chunk grabber')
                logger.info(f'Got chunk "{chunk}", through failed expansion')
                return chunk, parse_queue
            # Otherwise, indeed something is wrong.
            else:
                raise
        parse_queue.append(t)
    raise LogicError('Broke from command parsing loop unexpectedly')
| 9,740 | 3,010 |
# Generated by Django 2.1.4 on 2019-10-17 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give the register model's prime_one/prime_two/prime_three boolean
    fields an explicit default of False."""

    dependencies = [
        ('carnival', '0010_auto_20191013_1246'),
    ]

    operations = [
        migrations.AlterField(
            model_name='register',
            name='prime_one',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='register',
            name='prime_three',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='register',
            name='prime_two',
            field=models.BooleanField(default=False),
        ),
    ]
| 719 | 225 |
#!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Unit tests for VENDOR definitions.
#
import unittest
from .testutils import TLVSchemaTestCase
class Test_VENDOR(TLVSchemaTestCase):
    """Unit tests for VENDOR definitions in TLV schemas."""

    def test_VENDOR(self):
        """Well-formed VENDOR definitions with in-range ids validate cleanly."""
        schemaText = '''
        test-vendor-1 => VENDOR [ id 0 ]
        test-vendor-2 => VENDOR [ id 1 ]
        test-vendor-65535 => VENDOR [ id 65535 ]
        '''
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertNoErrors(errs)

    def test_VENDOR_NoId(self):
        """A VENDOR without an id qualifier (absent or empty) is reported."""
        schemaText = 'test-vendor-1 => VENDOR'
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'id qualifier missing')
        schemaText = 'test-vendor-1 => VENDOR [ ]'
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'id qualifier missing')

    def test_VENDOR_BadId(self):
        """Out-of-range (> 65535, negative) or malformed id values are reported."""
        schemaText = 'test-vendor-1 => VENDOR [ id 65536 ]'
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'invalid id value')
        schemaText = 'test-vendor-1 => VENDOR [ id -1 ]'
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'invalid id value')
        schemaText = 'test-vendor-1 => VENDOR [ id 42:1 ]'
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'invalid id value')

    def test_VENDOR_InconsistentId(self):
        """Re-declaring the same vendor name with a different id is reported;
        an identical re-declaration is not."""
        schemaText = '''
        test-vendor-1 => VENDOR [ id 1 ]
        test-vendor-2 => VENDOR [ id 2 ]
        test-vendor-1 => VENDOR [ id 42 ] // Inconsistent
        test-vendor-2 => VENDOR [ id 2 ]
        '''
        (tlvSchema, errs) = self.loadValidate(schemaText)
        self.assertErrorCount(errs, 1)
        self.assertError(errs, 'inconsistent vendor id: 0x002A (42)')
if __name__ == '__main__':
unittest.main()
| 2,787 | 935 |
# -*- coding:utf-8 -*-
"""
events.py
~~~~~~~~
่ชๅฎไนไฟกๅท, ไบไปถ
:author: Fufu, 2019/12/20
"""
from blinker import signal
# ็จๆท็ปๅฝๆๅ
event_user_logined = signal('event_user_logined')
# ็ณป็ป็ฎก็ๆไฝ(็จๆทๆๆ/ๆ้็ป็ฎก็็ญ)
event_sys_admin = signal('event_sys_admin')
# app ไธไธๆ็ฏๅข็คบไพ
event_async_with_app_demo = signal('event_async_with_app_demo')
| 336 | 180 |
import datetime
import click
from oura import OuraClient
def download(db, token):
    """Download Oura summary data into the given database.

    Args:
        db: sqlite-utils style database whose tables support insert_all.
        token: Oura personal access token.

    Each summary type is fetched for the full date range and upserted
    (replace=True) into its own table, keyed by its date column.
    """
    client = OuraClient(personal_access_token=token)
    click.echo("User: ")
    click.echo(client.user_info())
    # FIXME get start_date from database (last day downloaded - 1day)
    start_date = "2015-01-01"
    end_date = str(datetime.date.today())
    # (label to echo, fetch callable, key in the response payload,
    #  destination table, primary-key column)
    summaries = [
        ("sleep data", client.sleep_summary, "sleep", "sleep", "summary_date"),
        ("activity data", client.activity_summary, "activity", "activity", "summary_date"),
        ("readiness data", client.readiness_summary, "readiness", "readiness", "summary_date"),
        ("ideal bedtime data", client.bedtime_summary, "ideal_bedtimes", "ideal_bedtimes", "date"),
    ]
    for label, fetch, key, table, pk in summaries:
        click.echo(label)
        data = fetch(start_date, end_date).get(key, [])
        db[table].insert_all(
            data,
            replace=True,
            pk=pk,
        )
| 1,181 | 396 |
import cv2
import os
import time
import advancedcv.hand_tracking as htm
import numpy as np
import itertools
# All 32 possible up/down combinations for the five fingers.
patterns = np.array(list(itertools.product([0, 1], repeat=5)))
p_time = 0
cap = cv2.VideoCapture(0)
# w_cam, h_cam = 648, 480
# cap.set(3, w_cam)
# cap.set(4, h_cam)
folder_path = "finger_images"
my_list = os.listdir(folder_path)
my_list.sort()
overlay_list = []
detector = htm.HandDetector()
# Load every overlay image, in sorted filename order.
for im_path in my_list:
    image = cv2.imread(f'{folder_path}/{im_path}')
    print(f'{folder_path}/{im_path}')
    overlay_list.append(image)
# Landmark ids of the five fingertips (thumb, index, middle, ring, pinky).
key_ids = [4, 8, 12, 16, 20]
while True:
    success, img = cap.read()
    img = detector.find_hands(img, draw=False)
    lm_list = detector.get_position(img, hand_number=0, draw=False)
    if len(lm_list) != 0:
        fingers = []
        # Thumb: compared on the x coordinate (index 1) against the joint below it.
        if lm_list[key_ids[0]][1] > lm_list[key_ids[0]-1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Other fingers: tip y (index 2) above the pip joint means "up".
        for idx in range(1, len(key_ids)):
            if lm_list[key_ids[idx]][2] < lm_list[key_ids[idx]-2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        # Nearest pattern by squared distance gives the finger-count index.
        dist = (patterns - fingers)**2
        dist = np.sum(dist, axis=1)
        min_index = np.argmin(dist)
        print(min_index)
        # NOTE(review): min_index+1 assumes overlay_list holds one more image
        # than the largest pattern index used — confirm folder contents.
        h, w, c = overlay_list[min_index+1].shape
        img[0:h, 0:w] = overlay_list[min_index+1]
    # FPS from the time between successive frames.
    c_time = time.time()
    fps = 1/(c_time-p_time)
    p_time = c_time
    cv2.putText(img, f'FPS: {str(round(fps))}', (50, 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
| 1,635 | 690 |
import paddle
import paddlefsl
from paddlefsl.model_zoo import maml
# Set computing device
paddle.set_device('gpu:0')
# """ ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5  # classes per task
SHOTS = 1  # support examples per class
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005  # outer-loop (meta) learning rate
INNER_LR = 0.5  # inner-loop adaptation learning rate
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1  # adaptation steps during meta-training
TEST_INNER_ADAPT_STEPS = 3  # adaptation steps during meta-testing
APPROXIMATE = True  # presumably first-order MAML approximation — confirm in model_zoo.maml
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000  # checkpoint interval (must divide ITERATIONS for TEST_PARAM_FILE to exist)
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
# ----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.1
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.001
INNER_LR = 0.03
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.0015
INNER_LR = 0.15
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.002
INNER_LR = 0.05
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.003
INNER_LR = 0.08
ITERATIONS = 5000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration5000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.003
INNER_LR = 0.1
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 15000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration15000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.01
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
def main():
    """Meta-train MAML with the active config above, then evaluate.

    Trains on TRAIN_DATASET/VALID_DATASET, reloads the parameters saved as
    TEST_PARAM_FILE from the returned training directory, and runs
    meta-testing on TEST_DATASET.
    """
    train_dir = maml.meta_training(train_dataset=TRAIN_DATASET,
                                   valid_dataset=VALID_DATASET,
                                   ways=WAYS,
                                   shots=SHOTS,
                                   model=MODEL,
                                   meta_lr=META_LR,
                                   inner_lr=INNER_LR,
                                   iterations=ITERATIONS,
                                   meta_batch_size=META_BATCH_SIZE,
                                   inner_adapt_steps=TRAIN_INNER_ADAPT_STEPS,
                                   approximate=APPROXIMATE,
                                   report_iter=REPORT_ITER,
                                   save_model_iter=SAVE_MODEL_ITER,
                                   save_model_root=SAVE_MODEL_ROOT)
    print(train_dir)
    # Restore the checkpoint chosen for evaluation before meta-testing.
    state_dict = paddle.load(train_dir + '/' + TEST_PARAM_FILE)
    MODEL.load_dict(state_dict)
    maml.meta_testing(model=MODEL,
                      test_dataset=TEST_DATASET,
                      test_epoch=TEST_EPOCH,
                      test_batch_size=META_BATCH_SIZE,
                      ways=WAYS,
                      shots=SHOTS,
                      inner_lr=INNER_LR,
                      inner_adapt_steps=TEST_INNER_ADAPT_STEPS,
                      approximate=APPROXIMATE)
if __name__ == '__main__':
main()
| 12,852 | 5,329 |
import numpy as np
import collections, numpy
import glob
from PIL import Image
from matplotlib.pyplot import cm
nrImages = 1
imageSize = 449
finalImageSize = 449
ImageNumber = 0
sourceFolder = 'images'
# sourceFolder = "testInput"
destinationFolder = 'final_text_files_2'
# destinationFolder = "testOutput"
def modifica(a):
    """Quantize a grayscale image array in place to three levels.

    Values > 170 become 255 (white), values > 120 become 128 (grey), and
    everything else becomes 0 (black). Returns `a` for convenience.

    Vectorized with boolean masks instead of the previous per-pixel Python
    loop, and bounded by the array's own shape rather than the module-level
    `imageSize` constant (which assumed square 449x449 inputs). Requires a
    writable array.
    """
    # Compute all three masks before mutating `a`, so each pixel is
    # classified by its original value.
    high = a > 170
    grey = (a > 120) & ~high
    black = ~(high | grey)
    a[high] = 255
    a[grey] = 128
    a[black] = 0
    return a
def veciniNegrii(a, x, y):
s = 0
ValoareNegru = 0
try:
if a[x - 1][y - 1] == ValoareNegru:
s += 1
except:
None
try:
if a[x - 1][y] == ValoareNegru:
s += 1
except:
None
try:
if a[x - 1][y + 1] == ValoareNegru:
s += 1
except:
None
try:
if a[x][y + 1] == ValoareNegru:
s += 1
except:
None
try:
if a[x][y - 1] == ValoareNegru:
s += 1
except:
None
try:
if a[x + 1][y + 1] == ValoareNegru:
s += 1
except:
None
try:
if a[x + 1][y] == ValoareNegru:
s += 1
except:
None
try:
if a[x + 1][y - 1] == ValoareNegru:
s += 1
except:
None
return s
def eliminaExtraCladiri(a):
    """Turn grey pixels (128) that touch >= 2 black pixels into white (255),
    in place.

    Bounds are taken from the array itself rather than the module-level
    `imageSize` constant, so images of any size work. Returns `a` for
    convenience. (Recoloring 128 -> 255 cannot affect later neighbor counts,
    which only look for value 0, so in-place scanning is safe.)
    """
    rows = len(a)
    cols = len(a[0]) if rows else 0
    for i in range(rows):
        for j in range(cols):
            if a[i][j] == 128 and veciniNegrii(a, i, j) >= 2:
                a[i][j] = 255
    return a
# image = Image.open("1570.png").convert("L")
# print(np.asarray(image))
index = 0
# Convert every PNG in sourceFolder to a text "map" file: two header lines
# with the dimensions, then the quantized pixel values row by row.
for filename in glob.glob(sourceFolder + '/*.png'):
    # "L" = 8-bit grayscale.
    image = Image.open(filename).convert("L")
    # NOTE(review): np.asarray may return a read-only view of the PIL image,
    # while modifica() assigns into it — confirm this runs without an
    # "assignment destination is read-only" error on the PIL version in use.
    imageArray = np.asarray(image)
    imageArray = modifica(imageArray)
    eliminaExtraCladiri(imageArray)
    # Truncate the destination file, then re-open it for appending.
    g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "w")
    g.write("")
    g.close()
    g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "a")
    # Header: the image dimension written twice (square images assumed).
    g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
    for x in imageArray:
        for y in x:
            g.write(str(y) + " ")
        g.write("\n")
    index += 1
    # Progress indicator every 100 images.
    if index % 100 == 0:
        print(index)
print(index)
# for i in range(nrImages):
# image = Image.open("./final_images/_2O7gRvMPVdPfW9Ql60S-w.png").convert("L")
# # image = image.resize((imageSize, imageSize), Image.ANTIALIAS)
#
# imageArray = np.asarray(image)
# print(imageArray.shape)
# imageArray = modifica(imageArray)
# eliminaExtraCladiri(imageArray)
# print(imageArray)
# g = open("map2.txt", "w")
# g.write("")
# g.close()
# g = open("map2.txt", "a")
# g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
# for x in imageArray:
# for y in x:
# g.write(str(y) + " ")
# g.write("\n") | 3,085 | 1,249 |
from ravager.database.tasks import Tasks
import logging
from ravager.database.helpers import setup_db
from ravager.config import DATABASE_URL, LOGS_DIR
from ravager.helpers.check_process import Process
from subprocess import check_call
logger = logging.getLogger(__file__)

# Initialise the database schema at import time and clear any stale task
# entries left over from a previous run.
setup_db.create_tables()
logger.info("Database setup at {}".format(DATABASE_URL))
logger.info(Tasks().clear())
def start_aria():
    """Start an aria2c RPC daemon if one is not already running.

    Builds the aria2c command line, checks whether an ``aria2c`` process
    already exists, and launches the daemon via ``check_call`` when it
    does not.

    Returns:
        True when a new daemon was started; None (implicit) when aria2c
        was already running.

    Raises:
        subprocess.CalledProcessError: if aria2c exits non-zero.
    """
    # "input-file": "{}/session.txt".format(LOGS_DIR)
    # Idiom fix: a single list literal instead of 20+ .append() calls.
    aria2_command = [
        "aria2c",
        "--enable-rpc=true",
        "--daemon=true",
        "--max-tries=5",
        "--retry-wait=30",
        "--rpc-listen-all=true",
        "--rpc-listen-port=6801",
        "--rpc-secret=qwerty",
        "--save-session={}/session.txt".format(LOGS_DIR),
        "--log={}/aria2.log".format(LOGS_DIR),
        "--save-session-interval=20",
        "--timeout=600",
        "--bt-force-encryption=false",
        "--seed-time=0.01",
        "--log-level=notice",
        "--bt-stop-timeout=21600",
        "--auto-file-renaming=false",
        "--force-save=true",
        # "--allow-overwrite=true",
        "--continue=true",
        "--bt-enable-hook-after-hash-check=false",
        "--auto-save-interval=60",
        "--rpc-save-upload-metadata=false",
    ]
    aria2_status = Process(process_name="aria2c").check_process()
    if not aria2_status:
        check_call(aria2_command)
        return True
# Launch (or detect) the aria2c daemon at import time and log the result.
logger.info(start_aria())
logger.info("aria2c started")
| 1,857 | 652 |
from PyDAQmx import *
from ctypes import byref, c_ulong,c_int32
import numpy as np
class DAQSimpleDOTask(Task):
    '''
    A simple task that drives one digital output line high or low.
    '''

    def __init__(self, chan='Dev2/port0/line5'):
        '''
        chan: name of the channel, in the format of Dev2/port0/line0
        '''
        Task.__init__(self)
        self.chan = chan
        self.CreateDOChan(self.chan, '', DAQmx_Val_ChanPerLine)

    def write(self, value, timeout=0.0001):
        '''
        Write a single sample to the digital line.

        value: 1-D np.uint8 array sized to the number of channels
        timeout: timeout in seconds
        '''
        samples_written = c_int32(0)
        self.WriteDigitalLines(
            1, True, timeout, DAQmx_Val_GroupByScanNumber,
            value, byref(samples_written), None)

    def write_bool(self, value):
        '''Write a single boolean sample to the line.'''
        sample = np.array([value], dtype=np.uint8)
        self.write(sample)

    def high(self, timeout=0.0001):
        '''
        Drive the digital line high.
        timeout: timeout in seconds
        '''
        self.write(np.array([1], dtype=np.uint8), timeout)

    def low(self, timeout=0.0001):
        '''
        Drive the digital line low.
        timeout: timeout in seconds
        '''
        self.write(np.array([0], dtype=np.uint8), timeout)

    def close(self):
        '''Release the task's hardware resources.'''
        self.ClearTask()
class DAQContDOTask(Task):
    '''
    A task that holds one digital output high/low, clocked continuously at
    a configurable sample rate.
    '''

    def __init__(self, chan='Dev2/port0/line5', rate=1000):
        '''
        chan: name of the channel, in the format of Dev2/port0/line0
        rate: sampling rate in Hz
        '''
        Task.__init__(self)
        self.chan = chan
        self.rate = rate
        self.CreateDOChan(self.chan, '', DAQmx_Val_ChanPerLine)
        self.CfgSampClkTiming('', self.rate, DAQmx_Val_Rising,
                              DAQmx_Val_ContSamps, 1000)

    def write(self, value):
        '''
        Write a single sample (replicated into a 10-element buffer) to the
        digital line.

        value: scalar 0/1 broadcast into the output buffer
        '''
        val_array = np.zeros((10,), dtype=np.uint8)
        val_array[:] = value
        # BUG FIX: `written` was previously used without ever being
        # defined, so every call to write() raised NameError.
        written = c_int32(0)
        self.WriteDigitalLines(1, False, 0, DAQmx_Val_GroupByScanNumber,
                               val_array, byref(written), None)

    def high(self):
        '''
        Change the digital line to high.
        '''
        self.write(1)

    def start(self):
        '''
        Start the task.
        '''
        self.StartTask()

    def done(self):
        '''
        Return True if a started task is done outputting.
        '''
        value = c_ulong(False)
        self.IsTaskDone(byref(value))
        return bool(value)

    def stop(self):
        '''
        Stop the task.
        '''
        self.StopTask()

    def low(self):
        '''
        Change the digital line to low.
        '''
        self.write(0)

    def close(self):
        '''
        Release the task's hardware resources.
        '''
        self.ClearTask()
class DAQCOTask(Task):
    '''
    This is a task where you can generate precisely time pulses with a certain frequency
    '''
    def __init__(self,counter = 'Dev2/ctr0',term = '/Dev2/PFI12',freq = 4000,dc = 0.5):
        '''
        Initialize the task
        counter: name of the counter used, e.g. Dev2/ctr0
        term: name of terminal used, e.g. /Dev2/PFI10, note that the first slash has to be there
        freq: frequency in Hz
        dc: pulse duty cycle of the high period, from 0.0 - 1.0
        '''
        Task.__init__(self)
        self.counter = counter
        self.term = term
        # idle state low, initial delay 0
        self.CreateCOPulseChanFreq(self.counter,'',DAQmx_Val_Hz,DAQmx_Val_Low,0,freq,dc)
        self.SetCOPulseTerm(self.counter, self.term)
    def set_pulses(self,num_pulses = 800):
        '''
        Set the number of pulses to be generated for the next sample.
        Call this before start().
        num_pulses: number of pulses to output
        '''
        self.CfgImplicitTiming(DAQmx_Val_FiniteSamps,num_pulses)
    def start(self):
        '''
        start task
        '''
        self.StartTask()
    def done(self):
        '''
        returns if a started task is done outputting
        if done, returns True
        '''
        value = c_ulong(False)
        self.IsTaskDone(byref(value))
        return bool(value)
    def stop(self):
        '''
        stop task
        '''
        self.StopTask()
    def close(self):
        '''
        close task and release its hardware resources
        '''
        self.ClearTask()
#//
#// -------------------------------------------------------------
#// Copyright 2011 Synopsys, Inc.
#// Copyright 2010-2011 Mentor Graphics Corporation
#// Copyright 2019-2020 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
#
#class hw_reset_test(test):
#
# `uvm_component_utils(hw_reset_test)
#
# def __init__(self, name, parent=None)
# super().__init__(name, parent)
# endfunction
#
# local bit once = 1
#async
# def main_phase(self, phase):
# if (once):
# once = 0
# phase.raise_objection(self)
# repeat (100 * 8) @(posedge env.vif.sclk)
# // This will clear the objection
# uvm_info("TEST", "Jumping back to reset phase", UVM_NONE)
# phase.jump(uvm_reset_phase::get())
# end
# endtask
#
from uvm.macros import *
#endclass
| 1,603 | 528 |
from math import inf
# Space-separated integers to combine into candidate expressions.
nums = list(map(int, input().split()))
# Weight of each operator character, used to rank candidate expressions.
signs = { '+': 1, '-': 2, '*': 3, '/': 4, '%': 5, '=': 0 }
anss = []  # every generated expression that evaluates to a true equality
def comb(nums, exp, wasEq):
    """Recursively build arithmetic expressions over the remaining numbers.

    nums:  numbers still to be appended to the expression
    exp:   expression string built so far
    wasEq: True once '==' has been used (at most one equality allowed)

    When every number is consumed the candidate is evaluated; expressions
    that evaluate to a true comparison are recorded in the global ``anss``.

    Fixes: the expression was previously eval'ed twice per leaf (wasteful),
    a bare ``except:`` swallowed everything, and the five operator branches
    were duplicated verbatim for both values of ``wasEq``.
    """
    if not nums:
        try:
            res = eval(exp)  # may raise (e.g. division/modulo by zero)
        except Exception:
            res = 0
        # only expressions containing '==' evaluate to a bool; keep the
        # ones that are actually true
        if res is True:
            anss.append(exp)
        return
    cur, nrest = str(nums[0]), nums[1:]
    for op in ('+', '-', '*', '/', '%'):
        comb(nrest, exp + op + cur, wasEq)
    if not wasEq:
        # '==' may appear at most once in the whole expression
        comb(nrest, exp + '==' + cur, True)
comb(nums[1:], str(nums[0]), False)
print(anss)
# Pick the valid expression with the smallest total operator weight.
min_v = inf
for ans in anss:
    ves = 0
    for char in ans:
        ves += signs.get(char, 0)
    if ves < min_v:
        min_v = ves
        real_ans = ans
# NOTE(review): if ``anss`` is empty, ``real_ans`` is never bound and the
# next line raises NameError — confirm inputs always admit a solution.
real_ans = real_ans.replace('==', '=')
print(real_ans)
# Print only the operator characters of the chosen expression.
for char in real_ans:
    if 48 <= ord(char) <= 57:  # skip ASCII digits '0'-'9'
        continue
    print(char, end='')
print()
| 1,110 | 539 |
#
# Copyright 2021 by Tatsuya Hasebe, Hitachi, Ltd.
# All rights reserved.
#
# This file is part of the KEMPNN package,
# and is released under the "BSD 3-Clause License". Please see the LICENSE
# file that should have been included as part of this package.
#
import datetime
import json
import os
import pickle
import time
import numpy as np
import torch
import torch.utils.data
from .loader import MoleculeCollater, loadDataset
from .utils import peason_r2_score, rmse_score
# Default hyper-parameters / bookkeeping options for MoleculeTrainer.fit().
defaultMoleculeTrainConfig = {
    "name": "",                        # run name, prefixes the save directory
    "device": "cuda",                  # torch device string
    "optimizer": torch.optim.Adam,     # optimizer *class*, not an instance
    "optimizer_args": {"lr": 0.001},
    "optimize_schedule": None,         # optional LR-scheduler class
    "optimize_schedule_args": {},
    "loss": torch.nn.MSELoss(),
    "save": True,                      # write weights/config/logs to disk
    "save_path": "weights",
    "batch_size": 16,
    "epochs": 50,
    "drop_last": True,                 # drop the last partial training batch
}
class ConfigEncoder(json.JSONEncoder):
    """JSON encoder for training-config dicts.

    Classes (e.g. optimizer types) are serialised by name; any other
    non-JSON object falls back to its ``str()`` representation.
    """

    def default(self, obj):
        # JSON-native values are delegated to the base class, which raises
        # TypeError — identical to the original contract when called directly.
        if isinstance(obj, (float, int, str, dict, list, tuple)):
            return json.JSONEncoder.default(self, obj)
        if hasattr(obj, "__class__"):
            klass = obj.__class__
            # a class object (its own class is `type`) -> serialise its name
            if klass.__name__ == "type":
                return obj.__name__
            # anything else -> its string representation
            return str(obj)
        return json.JSONEncoder.default(self, obj)

    @classmethod
    def dumps(cls, obj):
        """Serialise *obj* with 4-space indentation using this encoder."""
        return json.dumps(obj, indent=4, cls=cls)
class MoleculeTrainer:
    """Train a model on a molecule dataset.

    Supports plain supervised training plus optional "knowledge" training
    (node-level attention supervision with pre-training), checkpointing of
    the best model by validation RMSE, and per-epoch metric logging.
    """

    def __init__(self):
        self.default_cfg = defaultMoleculeTrainConfig
        self.trained = False
        self.dataset = None       # (train, test, valid) dataset tuple
        self.att_dataset = None   # node-labelled dataset for knowledge training
        pass

    def setDataset(self, train_dataset, test_dataset, valid_dataset):
        """Set training, test and validation datasets."""
        self.dataset = (train_dataset, test_dataset, valid_dataset)

    def setKnowledgeDataset(self, data):
        """Set dataset for knowledge learning (molecule dataset with node_label)."""
        self.att_dataset = data

    def prepareData(self, cfg):
        """Load the (train, test, valid) dataset tuple described by *cfg*."""
        self.dataset = loadDataset(cfg)

    def fit(self, model, cfg=None, verbose=True, debug=False):
        """Execute model training.

        Args:
            model: torch.nn.Module to train; moved onto cfg["device"].
            cfg: training configuration dict (see defaultMoleculeTrainConfig);
                defaults to ``self.default_cfg`` when None.
            verbose: print per-epoch progress when True.
            debug: additionally return internal training objects.

        Returns:
            dict with final/best RMSE metrics and the model directory
            (plus a debug dict when ``debug`` is True).
        """
        if cfg is None:
            cfg = self.default_cfg

        assert self.dataset is not None

        # dataset
        train_dataset, test_dataset, valid_dataset = self.dataset

        # send model to device
        device = cfg["device"]
        model.to(device)

        # configure save path and save the configurations
        model_dir = ""

        if "dataset" in cfg and "name" in cfg["dataset"]:
            name = cfg["name"] + "_" + cfg["dataset"]["name"]
        else:
            name = cfg["name"]

        root_save_path = cfg["save_path"]
        save = cfg["save"]
        print_text = None

        if save:
            # one timestamped directory per run
            model_dir = os.path.join(
                root_save_path,
                name
                + "_"
                + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"),
            )
            os.makedirs(model_dir, exist_ok=True)
            cfg["model_path"] = model_dir
            cfg["model_str"] = str(model)
            with open(model_dir + "/config.json", "w") as fp:
                fp.write(ConfigEncoder.dumps(cfg))
            with open(model_dir + "/transform.pkl", "wb") as fp:
                pickle.dump(train_dataset.transform, fp)
            print_text = open(model_dir + "/output.log", "w")

        # define SGD optimizer and its schedule
        optimizer = cfg["optimizer"](
            model.parameters(), **cfg["optimizer_args"]
        )
        if cfg["optimize_schedule"] is not None:
            scheduler = cfg["optimize_schedule"](
                optimizer, **cfg["optimize_schedule_args"]
            )
        else:
            scheduler = None

        # number of epoches
        n_epoch = cfg["epochs"]

        # define dataloader using batch_size
        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=cfg["batch_size"],
            shuffle=True,
            collate_fn=MoleculeCollater(label=True),
            pin_memory=True,
            drop_last=cfg["drop_last"],
        )
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=cfg["batch_size"],
            shuffle=False,
            collate_fn=MoleculeCollater(label=True),
            pin_memory=True,
            drop_last=False,
        )
        valid_dataloader = torch.utils.data.DataLoader(
            valid_dataset,
            batch_size=cfg["batch_size"],
            shuffle=False,
            collate_fn=MoleculeCollater(label=True),
            pin_memory=True,
            drop_last=False,
        )

        # define dataloader for knowledge data
        use_knowledge = False
        if "knowledge" in cfg and cfg["knowledge"] is not None:
            assert self.att_dataset is not None
            use_knowledge = True
            att_dataloader = torch.utils.data.DataLoader(
                self.att_dataset,
                batch_size=cfg["knowledge"]["batch_size"],
                shuffle=True,
                collate_fn=MoleculeCollater(label=False, node_label=True),
                pin_memory=True,
                drop_last=True,
            )
        else:
            att_dataloader = None

        # define loss
        loss_func = cfg["loss"]

        # define variables used in training
        train_loss_log = torch.zeros(n_epoch).to(device)
        test_loss_log = torch.zeros(n_epoch).to(device)
        val_loss_log = torch.zeros(n_epoch).to(device)
        n_train = len(train_dataset)
        n_test = len(test_dataset)
        n_val = len(valid_dataset)
        n_batch = n_train // cfg["batch_size"]
        n_batch_test = n_test // cfg["batch_size"]
        n_batch_val = n_val // cfg["batch_size"]
        best_valid_rmse = 1e20
        best_test_rmse = 1e20
        best_epoch = -1
        test_rmse = None
        val_rmse = None

        # define optimizer and loss for knowledge training
        if use_knowledge:
            k_cfg = cfg["knowledge"]
            k_optimizer = k_cfg["optimizer"](
                model.parameters(), **k_cfg["optimizer_args_pretrain"]
            )
            k_loss_func = k_cfg["loss"]
            if "optimize_schedule" in k_cfg:
                k_scheduler = k_cfg["optimize_schedule"](
                    k_optimizer, **k_cfg["optimize_schedule_args"]
                )
            else:
                k_scheduler = None
        else:
            k_cfg = None
            k_optimizer = None
            k_loss_func = None
            k_scheduler = None

        # execute knowledge pre-training if configured.
        if use_knowledge and cfg["knowledge"]["pretrain_epoch"] > 0:
            assert self.att_dataset is not None
            k_pre_loss_log = torch.zeros(
                cfg["knowledge"]["pretrain_epoch"]
            ).to(device)
            k_n_batch = len(self.att_dataset) // cfg["knowledge"]["batch_size"]
            k_n_epoch = cfg["knowledge"]["pretrain_epoch"]
            for epoch in range(k_n_epoch):
                start_time = time.time()
                model.train()
                # batch learning
                for batch in att_dataloader:
                    x, y = batch
                    # send to gpu
                    x = [_x.to(device) for _x in x]
                    y = y.to(device)
                    k_optimizer.zero_grad()
                    y_pred = model(*x, attention_loss=True)
                    loss = k_loss_func(y_pred.view(-1, 1), y.view(-1, 1))
                    loss.backward()
                    with torch.no_grad():
                        k_pre_loss_log[epoch] += loss / k_n_batch
                    k_optimizer.step()
                if k_scheduler:
                    k_scheduler.step()
                # batch evaluation
                print(
                    f"knowledge_pretrain epoch:{epoch + 1}/{k_n_epoch}"
                    f" rmse:{torch.sqrt(k_pre_loss_log[epoch]):.4f}"
                )

        use_knowledge_train = (
            use_knowledge and cfg["knowledge"]["train_factor"] > 0
        )

        # batch learning for training dataset
        for epoch in range(n_epoch):
            start_time = time.time()
            model.train()
            if device == "cuda":
                torch.cuda.empty_cache()
            # iterate batch
            for batch in train_dataloader:
                optimizer.zero_grad()
                # calculate knowledge loss (\gamma L_k)
                knowledge_loss = 0
                if use_knowledge_train:
                    k_batch = next(iter(att_dataloader))
                    k_x, k_y = k_batch
                    # send to gpu
                    k_x = [_x.to(device) for _x in k_x]
                    k_y = k_y.to(device)
                    k_y_pred = model(*k_x, attention_loss=True)
                    knowledge_loss = (
                        k_loss_func(k_y_pred.view(-1, 1), k_y.view(-1, 1))
                        * cfg["knowledge"]["train_factor"]
                    )
                # calculate loss (L_p + \gamma_kp L_kp)
                x, y = batch
                # send to gpu
                x = [_x.to(device) for _x in x]
                y = y.to(device)
                y_pred = model(*x)
                loss = loss_func(y_pred, y.view(-1, 1))
                # add knowledge loss
                if use_knowledge_train:
                    loss += knowledge_loss
                with torch.no_grad():
                    train_loss_log[epoch] += loss / n_batch
                loss.backward()
                optimizer.step()
            if scheduler:
                scheduler.step()
            # batch evaluation
            model.eval()
            y_test_all = []
            y_pred_test_all = []
            y_val_all = []
            y_pred_val_all = []
            # evaluate on test set
            with torch.no_grad():
                for batch in test_dataloader:
                    x, y_val = batch
                    # send to gpu
                    x = [_x.to(device) for _x in x]
                    y_val = y_val.to(device)
                    y_pred_val = model(*x)
                    test_loss_log[epoch] += (
                        loss_func(y_pred_val, y_val.view(-1, 1)) / n_batch_test
                    )
                    # record label for r2 calculation
                    y_test_all.append(y_val.cpu().numpy())
                    if type(y_pred_val) == tuple:
                        y_pred_test_all.append(
                            y_pred_val[0][:, 0].cpu().numpy()
                        )
                    else:
                        y_pred_test_all.append(y_pred_val[:, 0].cpu().numpy())
            # evaluate on validation set
            with torch.no_grad():
                for batch in valid_dataloader:
                    x, y_val = batch
                    # send to gpu
                    x = [_x.to(device) for _x in x]
                    y_val = y_val.to(device)
                    y_pred_val = model(*x)
                    val_loss_log[epoch] += (
                        loss_func(y_pred_val, y_val.view(-1, 1)) / n_batch_val
                    )
                    # record label for r2 calculation
                    y_val_all.append(y_val.cpu().numpy())
                    if type(y_pred_val) == tuple:
                        y_pred_val_all.append(
                            y_pred_val[0][:, 0].cpu().numpy()
                        )
                    else:
                        y_pred_val_all.append(y_pred_val[:, 0].cpu().numpy())
            # calculate metrics
            # inverse-transform the properties to
            # evaluate metrics in the original scale.
            y_test_all_inv = test_dataset.inverse_transform(
                np.concatenate(y_test_all)
            )[:, 0]
            y_pred_test_all_inv = test_dataset.inverse_transform(
                np.concatenate(y_pred_test_all)
            )[:, 0]
            y_val_all_inv = valid_dataset.inverse_transform(
                np.concatenate(y_val_all)
            )[:, 0]
            y_pred_val_all_inv = valid_dataset.inverse_transform(
                np.concatenate(y_pred_val_all)
            )[:, 0]
            test_rmse = rmse_score(y_test_all_inv, y_pred_test_all_inv)
            val_rmse = rmse_score(y_val_all_inv, y_pred_val_all_inv)
            try:
                test_r2 = peason_r2_score(y_test_all_inv, y_pred_test_all_inv)
            except ValueError:
                test_r2 = np.nan
            try:
                val_r2 = peason_r2_score(y_val_all_inv, y_pred_val_all_inv)
            except ValueError:
                val_r2 = np.nan
            train_loss_ = train_loss_log.cpu().numpy()[epoch]
            test_loss_ = test_loss_log.cpu().numpy()[epoch]
            val_loss_ = val_loss_log.cpu().numpy()[epoch]
            # save and print result
            if best_valid_rmse > val_rmse:
                # new best by validation RMSE -> checkpoint the weights
                if save:
                    torch.save(
                        model.state_dict(), model_dir + "/best_model.pth"
                    )
                best_valid_rmse = val_rmse
                best_test_rmse = test_rmse
                best_epoch = epoch + 1
            text = (
                f"epoch {epoch+1:d}/{n_epoch:d} "
                f"train_loss: {train_loss_:.4f} test_loss: {test_loss_:.4f} "
                f"test_r2: {test_r2:.4f} test_rmse: {test_rmse:.4f} "
                f"val_loss: {val_loss_:.4f} val_r2: {val_r2:.4f} "
                f"val_rmse: {val_rmse:.4f} "
                f"time: {time.time() - start_time:.2f}sec"
            )
            if verbose:
                print(text)
            if save:
                print_text.write(text + "\n")
        if save:
            # persist loss curves and the final (not necessarily best) model
            torch.save(
                torch.stack((train_loss_log, test_loss_log, val_loss_log)),
                model_dir + "/losses.pth",
            )
            torch.save(model, model_dir + "/last_model.pth")
            print_text.close()
        self.trained = True
        ret = {
            "test_rmse": test_rmse,
            "val_rmse": val_rmse,
            "best_test_rmse": best_test_rmse,
            "best_val_rmse": best_valid_rmse,
            "epoch": best_epoch,
            "model_dir": model_dir,
        }
        print(
            f"Training result: "
            f"test_rmse:{test_rmse:.5f} val_rmse:{val_rmse:.5f}\n"
            f"best_epoch:{best_epoch} best_test_rmse:{best_test_rmse:.5f} "
            f"best_val_rmse:{best_valid_rmse:.5f}"
        )
        if debug:
            return (
                ret,
                {
                    "k_loss_func": k_loss_func,
                    "k_optimizer": k_optimizer,
                    "loss_func": loss_func,
                    "optimizer": optimizer,
                    "scheduler": scheduler,
                    "epochs": n_epoch,
                    "batch_size": cfg["batch_size"],
                },
            )
        return ret
| 15,291 | 4,723 |
num = int(input('Digite um nรบmero inteiro: '))
# BUG FIX: the old test only checked divisibility by 2, 3, 5 and 7, so it
# labelled composites such as 121 (11*11) as prime and the primes
# 2, 3, 5 and 7 themselves as not prime.  Do real trial division up to
# the square root instead.
eh_primo = num >= 2 and all(num % d != 0 for d in range(2, int(num ** 0.5) + 1))
if eh_primo:
    print('{} รฉ um nรบmero primo'.format(num))
else:
    print('{} nรฃo รฉ um nรบmero primo.'.format(num))
| 222 | 84 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# invenio-records-lom is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Provider for LOM PID-fields."""
from invenio_drafts_resources.records.api import DraftRecordIdProviderV2
from invenio_pidstore.providers.recordid_v2 import RecordIdProviderV2
class LOMDraftRecordIdProvider(DraftRecordIdProviderV2):
    """PIDField provider for LOM drafts."""

    # PID type for LOM identifiers; shared with LOMRecordIdProvider so
    # drafts and published records live in the same identifier namespace.
    pid_type = "lomid"
class LOMRecordIdProvider(RecordIdProviderV2):
    """PIDField provider for LOM records."""

    # Same PID type as the draft provider, so a draft's identifier stays
    # valid once the record is published.
    pid_type = "lomid"
| 659 | 231 |
# -*- coding: utf-8 -*-
# Part of Ygen. See LICENSE file for full copyright and licensing details.
{
    'name': 'Discord - Base module for discord',
    # FIX: corrected the user-facing typo "foudation" -> "foundation" in
    # both the summary and the description shown in the Odoo Apps UI.
    'summary': """
        This module is a base module to provide foundation for building discord modules for Odoo.""",
    'description': """
        This module is a base module to provide foundation for building discord modules for Odoo.""",
    'version': '12.0.1.0.0',
    'license': 'OPL-1',
    'author': 'Bishal Pun, '
              'Ygen Software Pvt Ltd',
    'website': 'https://ygen.io',
    'price': 50.00,
    'currency': 'EUR',
    # modules that must be installed before this one
    'depends': [
        'mail',
    ],
    # data files loaded at install/update time; security files load first
    'data': [
        'security/discord_security.xml',
        'security/ir.model.access.csv',
        'data/ir_sequence.xml',
        'data/ir_config_parameter.xml',
        'data/ir_cron_data.xml',
        'views/discord_guild_views.xml',
        'views/discord_channel_views.xml',
        'views/discord_member_views.xml',
        'views/discord_message_views.xml',
        'views/discord_menu_views.xml',
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
import os
# Resolve the templates directory relative to this file so the app works
# regardless of the current working directory.
basePath = os.path.abspath(os.path.dirname(__file__))
template_dir = os.path.join(basePath, 'templates')

app = Flask(__name__, template_folder=template_dir)
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret'
# BUG FIX: the previous keys 'SQL_TRACK_MODIFICATION' and
# 'SQL_COMMIT_ON_TEARDOWN' are not recognised by Flask-SQLAlchemy and were
# silently ignored; the canonical option names are used below.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# NOTE(review): the database URI is empty — it must be filled in before
# the app can connect to a database.
app.config['SQLALCHEMY_DATABASE_URI'] = ''

db = SQLAlchemy(app)
Bootstrap(app)

login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
| 696 | 247 |
# Read an integer and print every odd value among x, x+1, ..., x+11.
x = int(input())
for value in range(x, x + 12):
    if value % 2 == 1:
        print(value)
| 78 | 39 |
from ..constants import JSON_PROP_TYPE
from .base_blueprint import BaseProcedureBlueprint
from ..steps import placeholders
from ..reagents import Reagent
DEFAULT_VESSEL: str = 'reactor'
DEFAULT_SEPARATION_VESSEL: str = 'separator'
DEFAULT_EVAPORATION_VESSEL: str = 'rotavap'
class Chasm2(BaseProcedureBlueprint):
    """Blueprint that converts a CHASM-2 JSON description into steps.

    The ``chasm2`` prop is a nested dict with 'reaction', 'quench',
    'workup', 'evaporation' and 'purification' sections; every item is
    dispatched to its converter through the module-level ``converters``
    map keyed by the item name (e.g. 'addition1', 'separation2').
    """

    PROP_TYPES = {
        'chasm2': JSON_PROP_TYPE,
    }

    def __init__(self, chasm2):
        super().__init__(locals())

    def build_reaction(self):
        """Build the reaction steps (plus the final quench) and reagents."""
        steps, reagents = [], []
        current_temp = None
        for reaction_id, reaction_chasm2 in self.chasm2['reaction'].items():
            # items are processed in sorted key order (addition1, ..., reaction)
            for i, item in enumerate(sorted(reaction_chasm2)):
                item_chasm2 = reaction_chasm2[item]
                if 'temp' in item_chasm2 and item_chasm2['temp'] is not None:
                    heating_step = placeholders.HeatChillToTemp(
                        temp=item_chasm2['temp'],
                        vessel=DEFAULT_VESSEL,
                        continue_heatchill=True,
                        active=True
                    )
                    # only emit a heating step when the target temp changes
                    if heating_step.temp != current_temp:
                        current_temp = item_chasm2['temp']
                        steps.append(heating_step)
                item_steps, item_reagents = converters[item](
                    item_chasm2, position=i)
                steps.extend(item_steps)
                reagents.extend(item_reagents)
        quench_steps, quench_reagents = chasm2_quench(self.chasm2['quench'])
        steps.extend(quench_steps)
        reagents.extend(quench_reagents)
        return steps, reagents

    def build_workup(self):
        """Build the workup (separations + optional evaporation) steps."""
        steps, reagents = [], []
        i = 0
        for separation_id, separation_chasm2 in self.chasm2['workup'].items():
            # converters receive the steps built so far so later separations
            # can chain from the previous step's destination vessel
            item_steps, item_reagents = converters[separation_id](
                separation_chasm2, position=i, workup_steps=steps)
            steps.extend(item_steps)
            reagents.extend(item_reagents)
            i += 1
        evaporation_steps, _ =\
            chasm2_evaporation(self.chasm2['evaporation'])
        if evaporation_steps:
            # reroute product streams into the rotavap before evaporating
            for step in steps:
                if step.to_vessel == 'product':
                    step.to_vessel = DEFAULT_EVAPORATION_VESSEL
            steps.extend(evaporation_steps)
        return steps, reagents

    def build_purification(self):
        """Build purification steps (currently a no-op placeholder)."""
        return chasm2_purification(self.chasm2['purification'])
def chasm2_quench(chasm2):
    """Translate the quench section into an Add step plus its reagent.

    Returns empty lists when no quench reagent is specified.
    """
    if not chasm2['reagent']:
        return [], []
    quench_step = placeholders.Add(
        vessel=DEFAULT_VESSEL,
        reagent=chasm2['reagent'],
        volume=chasm2['volume'],
        temp=chasm2['temp'],
    )
    return [quench_step], [Reagent(chasm2['reagent'])]
def chasm2_reaction(chasm2, position):
    """Translate the reaction section into a HeatChill step (no reagents).

    ``position`` is accepted for converter-signature uniformity but unused.
    """
    if chasm2['time'] is None and chasm2['temp'] is None:
        return [], []
    heatchill = placeholders.HeatChill(
        vessel=DEFAULT_VESSEL,
        temp=chasm2['temp'],
        time=chasm2['time'],
    )
    return [heatchill], []
def chasm2_addition(chasm2, position):
    """Translate an addition section into an AddSolid/Add step + reagent.

    Solids are added by mass (with an optional addition time derived from
    'speed'); everything else is added by volume.  Stirring is enabled for
    every addition after the first.
    """
    reagent_name = chasm2['reagent']
    if not reagent_name:
        return [], []
    reagents = [Reagent(reagent_name)]
    stir = position > 0
    if chasm2['reagent_type'] == 'solid':
        add_step = placeholders.AddSolid(
            vessel=DEFAULT_VESSEL,
            reagent=reagent_name,
            mass=chasm2['amount'],
            stir=stir
        )
        if chasm2['speed']:
            # addition time = mass / speed, expressed in minutes
            add_step.time = f'{add_step.mass / float(chasm2["speed"])} min'
        steps = [add_step]
    else:
        steps = [
            placeholders.Add(
                vessel=DEFAULT_VESSEL,
                reagent=reagent_name,
                volume=chasm2['amount'],
                speed=chasm2['speed'],
                stir=stir
            )
        ]
    return steps, reagents
def chasm2_separation(chasm2, position, workup_steps):
    """Translate a separation section into a Separate step + solvent reagent.

    The first separation draws from the reactor; later ones chain from
    wherever the previous workup step delivered its product.
    """
    if not chasm2['solvent']:
        return [], []
    if position == 0:
        from_vessel = DEFAULT_VESSEL
    else:
        from_vessel = workup_steps[-1].to_vessel
    # only route the waste phase somewhere when it is not discarded
    waste_phase_to_vessel = None
    if chasm2['waste_phase_dest'] != 'waste':
        waste_phase_to_vessel = chasm2['waste_phase_dest']
    separate = placeholders.Separate(
        solvent=chasm2['solvent'],
        solvent_volume=chasm2['solvent_volume'],
        product_phase=chasm2['product_phase'],
        from_vessel=from_vessel,
        separation_vessel=DEFAULT_SEPARATION_VESSEL,
        to_vessel=chasm2['product_phase_dest'],
        waste_phase_to_vessel=waste_phase_to_vessel,
        purpose=chasm2['purpose'],
    )
    return [separate], [Reagent(chasm2['solvent'])]
def chasm2_evaporation(chasm2):
    """Translate the evaporation section into Evaporate (+ optional Dry).

    Evaporation introduces no new reagents, so the second element of the
    returned tuple is always an empty list.
    """
    steps = [
        placeholders.Evaporate(
            vessel=DEFAULT_EVAPORATION_VESSEL,
            pressure=chasm2['pressure'],
            temp=chasm2['temp'],
            time=chasm2['time'],
        )
    ]
    if chasm2['dry']:
        steps.append(
            placeholders.Dry(
                vessel=DEFAULT_EVAPORATION_VESSEL,
            )
        )
    return steps, []
def chasm2_purification(chasm2):
    """Placeholder: purification is not implemented; returns no steps/reagents."""
    return [], []
# Dispatch table mapping CHASM-2 item keys to their converter functions.
# additions 1-10 and separations 1-5 all share one converter each.
converters = {
    **{f'addition{n}': chasm2_addition for n in range(1, 11)},
    'reaction': chasm2_reaction,
    **{f'separation{n}': chasm2_separation for n in range(1, 6)},
    'evaporation': chasm2_evaporation,
    'purification': chasm2_purification,
}
| 6,167 | 2,022 |
class Admin(object):
    """Holds the RPC endpoint URLs for a Tendermint-style node.

    All URL attributes start as None; calling ``set_rpc_host`` fills in
    the endpoints that are currently supported.
    """

    # suffixes of every private *_url attribute initialised to None
    _URL_NAMES = (
        "abci_info", "abci_query", "block", "block_result",
        "block_chain", "broadcast_tx_async", "broadcast_tx_commit",
        "broadcast_tx_sync", "commit", "consensus_params",
        "dump_consensus", "genesis", "health", "net_info",
        "num_unconfirmed_txs", "status", "subscrible", "tx",
        "tx_search", "unconfirmed_txs", "unsubscribe",
        "unsubscribe_all", "validatos",
    )

    def __init__(self):
        self.rpc_host = None
        for url_name in self._URL_NAMES:
            setattr(self, "_{}_url".format(url_name), None)

    @property
    def abci_info_url(self):
        return self._abci_info_url

    @property
    def abci_query_url(self):
        return self._abci_query_url

    @property
    def broadcast_tx_commit_url(self):
        return self._broadcast_tx_commit_url

    def set_rpc_host(self, host_name):
        """Record the RPC host and derive the endpoint URLs from it."""
        self.rpc_host = host_name
        base = self.rpc_host
        self._abci_info_url = base + "/abci_info"
        self._abci_query_url = base + "/abci_query"
        self._broadcast_tx_commit_url = base + "/broadcast_tx_commit"
        print(host_name)
| 1,487 | 501 |
from future.utils import iteritems
from pandaharvester.harvestercore import core_utils
class PluginBase(object):
    """Base class for harvester plugins: keyword args become attributes."""

    def __init__(self, **kwarg):
        # copy every keyword argument straight onto the instance
        for attr_name, attr_value in iteritems(kwarg):
            setattr(self, attr_name, attr_value)

    # make logger
    def make_logger(self, base_log, token=None, method_name=None, send_dialog=True):
        """Create a logger; attach the DB interface as a dialog hook when available."""
        hook = self.dbInterface if (send_dialog and hasattr(self, 'dbInterface')) else None
        return core_utils.make_logger(base_log, token=token, method_name=method_name, hook=hook)
| 570 | 175 |
"""
Filtering of MEG data
Created on 13.9.2017
@author: Anja Thiede <anja.thiede@helsinki.fi>
"""
import os
from os import walk
import datetime
import numpy as np
import mne
now = datetime.datetime.now()  # run timestamp, used to date the log file name
def processedcount(file_list):
    """Return how many names in *file_list* carry the 'filt' marker just
    before their 4-character extension (i.e. already-filtered files)."""
    return sum(1 for name in file_list if name[-8:-4] == 'filt')
# set up data paths
root_path = ('/media/cbru/SMEDY_SOURCES/DATA/MEG_prepro/')
f = []
# walk only the top level: collect filenames, keep dirnames for the loop below
for (dirpath, dirnames, filenames) in walk(root_path):
    f.extend(filenames)
    break
log_path = root_path+'logs/logs_filt_'+now.strftime("%Y-%m-%d")
log = open(log_path, 'w')
#sub = ['sme_028'] # for testing or filtering single files
i = 0
for subject in dirnames: #sub: #
    subject_folder = root_path+subject+'/'
    subject_files = os.listdir(subject_folder)
#    filt_file_count = processedcount(subject_files)
#    if filt_file_count == 2:
#        continue
    for pieces in subject_files:
        # only process the preprocessed SSP-referenced recordings
        if pieces[-11:] == 'ref_ssp.fif':
            final_path = subject_folder+pieces
            print(final_path)
            i = i+1
            raw = mne.io.read_raw_fif(final_path, preload=True) # read preprocessed data
#            raw.set_eeg_reference()
            # NOTE(review): 'order' is computed but never used afterwards
            order = np.arange(raw.info['nchan'])
            # filter the data
            raw.load_data()
            hp = 0.5   # high-pass cutoff in Hz
            lp = 25.0  # low-pass cutoff in Hz
            raw.filter(hp, None, n_jobs=8, method='fir')
            # high-pass filter, default hamming window is used
            raw.filter(None, lp, n_jobs=8, method='fir') # low-pass filter
            fsave = subject_folder+pieces[:-4]+'_filt.fif'
            print(fsave)
            raw.save(fsave, overwrite=True) # save filtered file to disk
    # one log line per processed subject
    log.write(subject+' processed\n')
log.close()
| 1,762 | 618 |
# Report whether x falls inside any of the accepted intervals.
x = int(input())
in_range = (-15 < x <= 12) or (14 < x < 17) or x >= 19
print("True" if in_range else "False")
| 108 | 53 |
#
# Copyright 2020โ21, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any commercial
# use must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
"""
===============
doi_database.py
===============
Contains classes and functions for interfacing with the local transaction
database (SQLite3).
"""
import sqlite3
from collections import OrderedDict
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from sqlite3 import Error
from pds_doi_service.core.entities.doi import DoiStatus
from pds_doi_service.core.entities.doi import ProductType
from pds_doi_service.core.util.config_parser import DOIConfigUtil
from pds_doi_service.core.util.general_util import get_logger
# Get the common logger and set the level for this file.
logger = get_logger(__name__)
class DOIDataBase:
    """
    Provides a mechanism to write, update and read rows to/from a local SQLite3
    database.
    """

    # Key order here defines the physical column order of the transaction table.
    DOI_DB_SCHEMA = OrderedDict(
        {
            "identifier": "TEXT NOT NULL",  # PDS identifier (any version)
            "doi": "TEXT",  # DOI (may be null for pending or draft)
            "status": "TEXT NOT NULL",  # current status
            "title": "TEXT",  # title used for the DOI
            "submitter": "TEXT",  # email of the submitter of the DOI
            "type": "TEXT",  # product type
            "subtype": "TEXT",  # subtype of the product
            "node_id": "TEXT NOT NULL",  # steward discipline node ID
            "date_added": "INT",  # as Unix epoch seconds
            "date_updated": "INT NOT NULL",  # as Unix epoch seconds
            "transaction_key": "TEXT NOT NULL",  # transaction (key is node id/datetime)
            "is_latest": "BOOLEAN",  # whether the transaction is the latest
        }
    )
    """
    The schema used to define the DOI DB table. Each key corresponds to a column
    name, and each value corresponds to the data type and column constraint as
    expected by the Sqlite3 CREATE TABLE statement.
    """

    EXPECTED_NUM_COLS = len(DOI_DB_SCHEMA)
    """"The expected number of columns as defined by the schema."""
    def __init__(self, db_file):
        """Create a wrapper around the SQLite file *db_file* (connection is lazy)."""
        self._config = DOIConfigUtil().get_config()
        self.m_database_name = db_file
        self.m_default_table_name = "doi"
        self.m_my_conn = None  # sqlite3.Connection, created on first use
    def get_database_name(self):
        """Returns the file name of the SQLite database."""
        return self.m_database_name
def close_database(self):
"""Close connection to the SQLite database."""
logger.debug("Closing database %s", self.m_database_name)
if self.m_my_conn:
self.m_my_conn.close()
# Set m_my_conn to None to signify that there is no connection.
self.m_my_conn = None
else:
logger.warn("Database connection to %s has not been started or is " "already closed", self.m_database_name)
    def create_connection(self):
        """Create a connection to the SQLite database and store it on
        ``self.m_my_conn`` (nothing is returned; failures are logged)."""
        if self.m_my_conn is not None:
            logger.warning("There is already an open database connection, " "closing existing connection.")
            self.close_database()

        # NOTE(review): sqlite3.version (the DB-API module's own version) is
        # deprecated since Python 3.12 — consider sqlite3.sqlite_version.
        logger.info("Connecting to SQLite3 (ver %s) database %s", sqlite3.version, self.m_database_name)

        try:
            self.m_my_conn = sqlite3.connect(self.m_database_name)
        except Error as my_error:
            # connection failure is logged, not raised; m_my_conn stays None
            logger.error("Failed to connect to database, reason: %s", my_error)
def get_connection(self, table_name=None):
    """
    Return a connection to the SQLite database, creating one via the
    default database file if none exists yet. The requested table is
    also created on first use if it does not already exist.
    """
    table_name = table_name or self.m_default_table_name
    if not self.m_my_conn:
        self.create_connection()
        if not self.check_if_table_exists(table_name):
            self.create_table(table_name)
    return self.m_my_conn
def check_if_table_exists(self, table_name):
    """
    Check if the expected default table exists in the current database.
    If a database connection has not been made yet, one is created by
    this method.

    Parameters
    ----------
    table_name : str
        Name of the table to check for.

    Returns
    -------
    o_table_exists_flag : bool
        True if the table exists, False otherwise.
    """
    logger.info("Checking for existence of DOI table %s", table_name)
    o_table_exists_flag = False
    if self.m_my_conn is None:
        # Fix: logger.warn() is a deprecated alias for logger.warning().
        logger.warning("Not connected to %s, establishing new connection...", self.m_database_name)
        self.create_connection()
    table_pointer = self.m_my_conn.cursor()
    # Get the count of tables with the given name. Fix: bind the table
    # name as a query parameter instead of interpolating it into the SQL.
    query_string = "SELECT count(name) FROM sqlite_master WHERE type='table' AND name=?"
    logger.info("Executing query: %s", query_string)
    table_pointer.execute(query_string, (table_name,))
    # If the count is 1, then table exists.
    if table_pointer.fetchone()[0] == 1:
        o_table_exists_flag = True
    logger.debug("o_table_exists_flag: %s", o_table_exists_flag)
    return o_table_exists_flag
def drop_table(self, table_name):
    """Delete the given table from the SQLite database, if connected."""
    if not self.m_my_conn:
        return
    logger.debug("Executing query: DROP TABLE %s", table_name)
    self.m_my_conn.execute(f"DROP TABLE {table_name}")
def query_string_for_table_creation(self, table_name):
    """
    Build the query string to create a transaction table in the SQLite
    database.

    Parameters
    ----------
    table_name : str
        Name of the table to build the query for.

    Returns
    -------
    o_query_string : str
        The Sqlite3 query string used to create the transaction table within
        the database.
    """
    # Each column definition is "name TYPE CONSTRAINTS", drawn from the
    # class-level schema mapping.
    column_definitions = ",".join(
        f"{column} {constraints}" for column, constraints in self.DOI_DB_SCHEMA.items()
    )
    o_query_string = f"CREATE TABLE {table_name} ({column_definitions});"
    logger.debug("CREATE o_query_string: %s", o_query_string)
    return o_query_string
def query_string_for_transaction_insert(self, table_name):
    """
    Build the query string used to insert a transaction row into the SQLite
    database table.

    Parameters
    ----------
    table_name : str
        Name of the table to build the query for.

    Returns
    -------
    o_query_string : str
        The Sqlite3 query string used to insert a new row into the database.
    """
    # Column order mirrors the schema mapping; one "?" placeholder per column.
    column_names = ",".join(self.DOI_DB_SCHEMA)
    placeholders = ",".join(["?"] * self.EXPECTED_NUM_COLS)
    o_query_string = f"INSERT INTO {table_name} ({column_names}) " f"VALUES ({placeholders})"
    logger.debug("INSERT o_query_string: %s", o_query_string)
    return o_query_string
def query_string_for_is_latest_update(self, table_name, primary_key_column):
    """
    Build the query string to set the is_latest to False (0) for rows
    in the table having a specified primary key (identifier) value.

    Parameters
    ----------
    table_name : str
        Name of the table to build the query for.
    primary_key_column: str
        Name of the database column designated as the primary key.

    Returns
    -------
    o_query_string : str
        The Sqlite3 query string used to perform the is_latest update.
    """
    # Setting is_latest to 0 marks all previous rows for this key as
    # superseded. The trailing semi-colon is required for SQL to work.
    o_query_string = f"UPDATE {table_name} SET is_latest = 0 WHERE {primary_key_column} = ?;"
    logger.debug("UPDATE o_query_string: %s", o_query_string)
    return o_query_string
def create_table(self, table_name):
    """Create the named transaction table in the SQLite database."""
    logger.info('Creating SQLite table "%s"', table_name)
    self.m_my_conn = self.get_connection()
    creation_query = self.query_string_for_table_creation(table_name)
    self.m_my_conn.execute(creation_query)
    logger.info("Table created successfully")
def write_doi_info_to_database(
    self,
    identifier,
    transaction_key,
    doi=None,
    date_added=None,
    date_updated=None,
    status=DoiStatus.Unknown,
    title="",
    product_type=ProductType.Collection,
    product_type_specific="",
    submitter="",
    discipline_node="",
):
    """
    Write a new row to the Sqlite3 transaction database with the provided
    DOI entry information.

    Parameters
    ----------
    identifier : str
        The PDS identifier to associate as the primary key for the new row.
    transaction_key : str
        Path to the local transaction history location associated with the
        new row.
    doi : str, optional
        The DOI value to associate with the new row. Defaults to None.
    date_added : datetime, optional
        Time that the row was initially added to the database. Defaults
        to the current time, resolved at call time.
    date_updated : datetime, optional
        Time that the row was last updated. Defaults to the current time,
        resolved at call time.
    status : DoiStatus
        The status of the transaction. Defaults to DoiStatus.Unknown.
    title : str
        The title associated with the transaction. Defaults to an empty string.
    product_type : ProductType
        The product type associated with the transaction. Defaults to
        ProductType.Collection.
    product_type_specific : str
        The specific product type associated with the transaction.
        Defaults to an empty string.
    submitter : str
        The submitter email associated with the transaction. Defaults
        to an empty string.
    discipline_node : str
        The discipline node ID associated with the transaction. Defaults
        to an empty string.

    Raises
    ------
    RuntimeError
        If the database transaction cannot be committed for any reason.
    """
    self.m_my_conn = self.get_connection()

    # Bug fix: datetime.now() as a default argument is evaluated once at
    # function-definition time, freezing "now" at module import. Resolve
    # the default timestamps at call time instead.
    if date_added is None:
        date_added = datetime.now()
    if date_updated is None:
        date_updated = datetime.now()

    # Convert timestamps to Unix epoch floats for simpler table storage
    date_added = date_added.replace(tzinfo=timezone.utc).timestamp()
    date_updated = date_updated.replace(tzinfo=timezone.utc).timestamp()

    # Map the inputs to the appropriate column names. By doing so, we
    # can ignore database column ordering for now.
    data = {
        "identifier": identifier,
        "status": status,
        "date_added": date_added,
        "date_updated": date_updated,
        "submitter": submitter,
        "title": title,
        "type": product_type,
        "subtype": product_type_specific,
        "node_id": discipline_node,
        "doi": doi,
        "transaction_key": transaction_key,
        "is_latest": True,
    }

    try:
        # Create and execute the query to unset the is_latest field for all
        # records with the same identifier field.
        query_string = self.query_string_for_is_latest_update(
            self.m_default_table_name, primary_key_column="identifier"
        )
        self.m_my_conn.execute(query_string, (identifier,))
        self.m_my_conn.commit()
    except sqlite3.Error as err:
        msg = f"Failed to update is_latest field for identifier {identifier}, " f"reason: {err}"
        logger.error(msg)
        # Chain the original sqlite3 error for a complete traceback.
        raise RuntimeError(msg) from err

    try:
        # Combine the insert and update here so the commit can be applied to both actions.
        query_string = self.query_string_for_transaction_insert(self.m_default_table_name)
        # Create the named parameters tuple in the order expected by the
        # database schema
        data_tuple = tuple(data[column] for column in self.DOI_DB_SCHEMA)
        self.m_my_conn.execute(query_string, data_tuple)
        self.m_my_conn.commit()
    except sqlite3.Error as err:
        msg = f"Failed to commit transaction for identifier {identifier}, " f"reason: {err}"
        logger.error(msg)
        raise RuntimeError(msg) from err
def _normalize_rows(self, columns, rows):
    """
    Normalize columns from each row to be the data types we expect,
    rather than the types which are convenient for table storage.

    Parameters
    ----------
    columns : list of str
        Column names, in the same order as the values within each row.
    rows : list of list
        Raw rows as read from the database; modified in place.

    Returns
    -------
    rows : list of list
        The same row objects with times and status/type values converted.
    """
    for row in rows:
        # Convert the add/update times from Unix epoch back to datetime,
        # accounting for the expected (PST) timezone
        for time_column in ("date_added", "date_updated"):
            time_val = row[columns.index(time_column)]
            # Bug fix: the offset was written as --8.0 (which evaluates to
            # +8.0), labelling the times UTC+8 rather than the PST (UTC-8)
            # stated above.
            time_val = datetime.fromtimestamp(time_val, tz=timezone.utc).replace(
                tzinfo=timezone(timedelta(hours=-8.0))
            )
            row[columns.index(time_column)] = time_val
        # Convert status/product type back to Enums
        row[columns.index("status")] = DoiStatus(row[columns.index("status")].lower())
        row[columns.index("type")] = ProductType(row[columns.index("type")].capitalize())
    return rows
def select_rows(self, query_criterias, table_name=None):
    """Return (columns, rows) for rows matching the provided criteria."""
    table_name = table_name or self.m_default_table_name
    self.m_my_conn = self.get_connection(table_name)
    criterias_str, criteria_dict = DOIDataBase.parse_criteria(query_criterias)
    query_string = f"SELECT * FROM {table_name}"
    if query_criterias:
        query_string += f" WHERE {criterias_str}"
    query_string += "; "
    logger.debug("SELECT query_string: %s", query_string)
    cursor = self.m_my_conn.cursor()
    cursor.execute(query_string, criteria_dict)
    columns = [description[0] for description in cursor.description]
    rows = self._normalize_rows(columns, [list(entry) for entry in cursor])
    logger.debug("Query returned %d result(s)", len(rows))
    return columns, rows
def select_latest_rows(self, query_criterias, table_name=None):
    """Return (columns, rows) for rows flagged as latest (is_latest=1)."""
    table_name = table_name or self.m_default_table_name
    self.m_my_conn = self.get_connection(table_name)
    criterias_str, criteria_dict = DOIDataBase.parse_criteria(query_criterias)
    query_string = f"SELECT * from {table_name} " f"WHERE is_latest=1 {criterias_str} ORDER BY date_updated"
    logger.debug("SELECT query_string: %s", query_string)
    cursor = self.m_my_conn.cursor()
    cursor.execute(query_string, criteria_dict)
    columns = [description[0] for description in cursor.description]
    rows = self._normalize_rows(columns, [list(entry) for entry in cursor])
    logger.debug("Query returned %d result(s)", len(rows))
    return columns, rows
def select_all_rows(self, table_name=None):
    """Return (columns, rows) for every row in the given table."""
    table_name = table_name or self.m_default_table_name
    self.m_my_conn = self.get_connection(table_name)
    query_string = f"SELECT * FROM {table_name};"
    logger.debug("SELECT query_string %s", query_string)
    cursor = self.m_my_conn.cursor()
    cursor.execute(query_string)
    columns = [description[0] for description in cursor.description]
    rows = self._normalize_rows(columns, [list(entry) for entry in cursor])
    logger.debug("Query returned %d result(s)", len(rows))
    return columns, rows
def update_rows(self, query_criterias, update_list, table_name=None):
    """
    Update all rows and fields (specified in update_list) that match
    the provided query criteria.

    Parameters
    ----------
    query_criterias : list of str
        WHERE-clause fragments, AND'ed together. An empty list updates
        every row in the table.
    update_list : list of str
        SET-clause assignment fragments (e.g. "status = 'pending'").
    table_name : str, optional
        Name of the table to update; defaults to the default table.
    """
    if not table_name:
        table_name = self.m_default_table_name
    self.m_my_conn = self.get_connection(table_name)
    # Idiom: join the caller-provided fragments instead of hand-rolled
    # index loops with leading-comma bookkeeping.
    query_string = f"UPDATE {table_name} SET " + ",".join(update_list)
    if len(query_criterias) > 0:
        query_string += " WHERE " + " AND ".join(query_criterias)
    logger.debug("UPDATE query_string: %s", query_string)
    # NOTE(review): the statement is executed but never committed here,
    # unlike write_doi_info_to_database — confirm a caller commits.
    # The SQL is assembled from caller-supplied fragments; callers must
    # not pass untrusted input.
    self.m_my_conn.execute(query_string)
@staticmethod
def _form_query_with_wildcards(column_name, search_tokens):
    """
    Helper method to form a portion of an SQL WHERE clause that returns
    matches from the specified column using the provided list of tokens.

    The list of tokens may either contain fully specified identifiers, or
    identifiers containing Unix-style wildcards (*), aka globs. The method
    partitions the tokens accordingly, and forms the appropriate clause
    to capture all results.

    Parameters
    ----------
    column_name : str
        Name of the SQL table column name that will be searched by the
        returned query.
    search_tokens : list of str
        List of tokens to search for. Tokens may either be full identifiers,
        or contain one or more wildcards (*).

    Returns
    -------
    where_subclause : str
        Query portion which can be used with a WHERE clause to find the
        requested set of tokens. This subclause is parameterized, and should
        be used with the returned named parameter dictionary.
    named_parameter_values : dict
        The dictionary mapping the named parameters in the returned subclause
        with the actual values to use.
    """
    # Partition the tokens containing wildcards from the fully specified ones
    wildcard_tokens = [token for token in search_tokens if "*" in token]
    full_tokens = list(set(search_tokens) - set(wildcard_tokens))

    # Strip characters from the column name that would make it unusable
    # as a named-parameter placeholder prefix
    named_param_id = column_name
    for filter_char in (" ", "'", ":", "|"):
        named_param_id = named_param_id.replace(filter_char, "")

    # Named parameters for the IN portion of the WHERE, covering the
    # fully specified tokens
    named_parameters = ",".join(f":{named_param_id}_{i}" for i in range(len(full_tokens)))
    named_parameter_values = {
        f"{named_param_id}_{i}": token for i, token in enumerate(full_tokens)
    }

    # Named parameters for the GLOB portion of the WHERE, covering the
    # tokens containing wildcards
    glob_parameters = " OR ".join(
        f"{column_name} GLOB :{named_param_id}_glob_{i}" for i in range(len(wildcard_tokens))
    )
    named_parameter_values.update(
        {f"{named_param_id}_glob_{i}": token for i, token in enumerate(wildcard_tokens)}
    )

    # Assemble the WHERE-clause portion combining both kinds of match
    clause_parts = ["AND ("]
    if full_tokens:
        clause_parts.append(f"{column_name} IN ({named_parameters}) ")
    if full_tokens and wildcard_tokens:
        clause_parts.append(" OR ")
    if wildcard_tokens:
        clause_parts.append(glob_parameters)
    clause_parts.append(")")
    where_subclause = "".join(clause_parts)
    logger.debug("WHERE subclause: %s", where_subclause)

    return where_subclause, named_parameter_values
@staticmethod
def _get_simple_in_criteria(v, column):
named_parameters = ",".join([":" + column + "_" + str(i) for i in range(len(v))])
named_parameter_values = {column + "_" + str(i): v[i].lower() for i in range(len(v))}
return f" AND lower({column}) IN ({named_parameters})", named_parameter_values
@staticmethod
def _get_query_criteria_title(v):
    """Build the WHERE fragment matching any of the given titles."""
    return DOIDataBase._get_simple_in_criteria(v, "title")

@staticmethod
def _get_query_criteria_doi(v):
    """Build the WHERE fragment matching any of the given DOI values."""
    return DOIDataBase._get_simple_in_criteria(v, "doi")

@staticmethod
def _get_query_criteria_ids(v):
    """Build the WHERE fragment matching the given PDS identifiers (wildcards allowed)."""
    return DOIDataBase._form_query_with_wildcards("identifier", v)

@staticmethod
def _get_query_criteria_submitter(v):
    """Build the WHERE fragment matching any of the given submitter emails."""
    return DOIDataBase._get_simple_in_criteria(v, "submitter")

@staticmethod
def _get_query_criteria_node(v):
    """Build the WHERE fragment matching any of the given discipline node IDs."""
    return DOIDataBase._get_simple_in_criteria(v, "node_id")

@staticmethod
def _get_query_criteria_status(v):
    """Build the WHERE fragment matching any of the given status values."""
    return DOIDataBase._get_simple_in_criteria(v, "status")
@staticmethod
def _get_query_criteria_start_update(v):
return (" AND date_updated >= :start_update", {"start_update": v.replace(tzinfo=timezone.utc).timestamp()})
@staticmethod
def _get_query_criteria_end_update(v):
return (" AND date_updated <= :end_update", {"end_update": v.replace(tzinfo=timezone.utc).timestamp()})
@staticmethod
def parse_criteria(query_criterias):
    """
    Combine the per-criterion WHERE fragments and named parameters by
    dispatching each criterion name to its _get_query_criteria_* builder.
    """
    criterias_str = ""
    criteria_dict = {}
    for criteria_name, criteria_value in query_criterias.items():
        logger.debug("Calling get_query_criteria_%s with value %s", criteria_name, criteria_value)
        builder = getattr(DOIDataBase, "_get_query_criteria_" + criteria_name)
        criteria_str, dict_entry = builder(criteria_value)
        logger.debug("criteria_str: %s", criteria_str)
        logger.debug("dict_entry: %s", dict_entry)
        criterias_str += criteria_str
        criteria_dict.update(dict_entry)
    return criterias_str, criteria_dict
| 22,972 | 6,471 |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from amnesia.modules.tag import Tag
from amnesia.modules.search import SearchResource
def includeme(config):
    """Pyramid ``includeme`` hook: scan this module to register its views."""
    config.scan(__name__)
@view_config(context=SearchResource, name='tag', request_method='GET',
             renderer='amnesia:templates/search/tag.pt')
def tag(context, request):
    """
    Search view listing content for a single tag.

    Looks up the Tag from the ``id`` GET parameter; 404s when no such
    tag exists, otherwise runs the tag search (capped at 500 results).
    """
    requested_id = request.GET.get('id', '').strip()
    found_tag = request.dbsession.get(Tag, requested_id)
    if not found_tag:
        raise HTTPNotFound()
    search_query = context.tag_id(found_tag, limit=500)
    return {
        'results': search_query.result,
        'count': search_query.count,
        'tag': found_tag,
    }
| 786 | 268 |