| id | content |
|---|---|
122410
|
import os
# from sklearn.metrics import log_loss, roc_auc_score
import time
from librerank.utils import *
from librerank.reranker import *
from librerank.rl_reranker import *
def eval(model, data, l2_reg, batch_size, isrank, metric_scope, _print=False):
preds = []
# labels = []
losses = []
data_size = len(data[0])
batch_num = data_size // batch_size
print('eval', batch_size, batch_num)
t = time.time()
for batch_no in range(batch_num):
data_batch = get_aggregated_batch(data, batch_size=batch_size, batch_no=batch_no)
pred, loss = model.eval(data_batch, l2_reg)
preds.extend(pred)
# labels.extend(label)
losses.append(loss)
loss = sum(losses) / len(losses)
# cates = np.reshape(np.array(data[1])[:, :, 1], [-1, max_time_len]).tolist()
labels = data[4]
# print(preds[0], labels[0])
# poss = data[-2]
res = evaluate_multi(labels, preds, metric_scope, isrank, _print)
print("EVAL TIME: %.4fs" % (time.time() - t))
# return loss, res_low, res_high
return loss, res
def train(train_file, test_file, feature_size, max_time_len, itm_spar_fnum, itm_dens_fnum, profile_num, params):
tf.reset_default_graph()
# gpu settings
gpu_options = tf.GPUOptions(allow_growth=True)
perlist = False
if params.model_type == 'PRM':
model = PRM(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'SetRank':
model = SetRank(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'DLCM':
model = DLCM(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'GSF':
model = GSF(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm, group_size=params.group_size)
elif params.model_type == 'miDNN':
model = miDNN(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'EGR_evaluator':
model = EGR_evaluator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'EGR_generator':
model = PPOModel(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm, rep_num=params.rep_num)
# discriminator = EGR_discriminator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
# profile_num, max_norm=params.max_norm)
evaluator = EGR_evaluator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
with evaluator.graph.as_default() as g:
sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
evaluator.set_sess(sess)
sess.run(tf.global_variables_initializer())
evaluator.load(params.evaluator_path)
# with discriminator.graph.as_default() as g:
# sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
# discriminator.set_sess(sess)
# sess.run(tf.global_variables_initializer())
elif params.model_type == 'Seq2Slate':
# model = Seq2Slate(feature_size, eb_dim, hidden_size, max_time_len, max_seq_len, item_fnum, num_cat, mu)
model = SLModel(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
else:
print('No Such Model', params.model_type)
exit(0)
with model.graph.as_default() as g:
sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
model.set_sess(sess)
# training_monitor = {
# 'train_loss': [],
# 'vali_loss': [],
# 'map_l': [],
# 'ndcg_l': [],
# 'clicks_l': [],
# 'utility_l': [],
# 'map_h':[],
# 'ndcg_h': [],
# 'clicks_h': [],
# 'utility_h': [],
# }
training_monitor = {
'train_loss': [],
'vali_loss': [],
'map_l': [],
'ndcg_l': [],
'clicks_l': [],
# 'utility_l': [],
# 'de_ndcg_l': [],
}
model_name = '{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(params.timestamp, initial_ranker, params.model_type, params.batch_size,
params.lr, params.l2_reg, params.hidden_size, params.eb_dim, params.keep_prob)
if not os.path.exists('{}/logs_{}/{}'.format(parse.save_dir, data_set_name, max_time_len)):
os.makedirs('{}/logs_{}/{}'.format(parse.save_dir, data_set_name, max_time_len))
if not os.path.exists('{}/save_model_{}/{}/{}/'.format(parse.save_dir, data_set_name, max_time_len, model_name)):
os.makedirs('{}/save_model_{}/{}/{}/'.format(parse.save_dir, data_set_name, max_time_len, model_name))
save_path = '{}/save_model_{}/{}/{}/ckpt'.format(parse.save_dir, data_set_name, max_time_len, model_name)
log_save_path = '{}/logs_{}/{}/{}.metrics'.format(parse.save_dir, data_set_name, max_time_len, model_name)
# training process
# with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
train_losses_step = []
# before training process
step = 0
vali_loss, res = eval(model, test_file, params.l2_reg, params.batch_size, False, params.metric_scope)
training_monitor['train_loss'].append(None)
training_monitor['vali_loss'].append(None)
training_monitor['map_l'].append(res[0][0])
training_monitor['ndcg_l'].append(res[1][0])
# training_monitor['de_ndcg_l'].append(res[2][0])
training_monitor['clicks_l'].append(res[2][0])
# training_monitor['utility_l'].append(res[4][0])
print("STEP %d INTIAL RANKER | LOSS VALI: NULL" % step)
for i, s in enumerate(params.metric_scope):
print("@%d MAP: %.4f NDCG: %.4f CLICKS: %.4f " % (s, res[0][i], res[1][i], res[2][i]))
early_stop = False
data = train_file
data_size = len(data[0])
batch_num = data_size // params.batch_size
eval_iter_num = (data_size // 5) // params.batch_size
print('train', data_size, batch_num)
# begin training process
for epoch in range(params.epoch_num):
# if early_stop:
# break
for batch_no in range(batch_num):
data_batch = get_aggregated_batch(data, batch_size=params.batch_size, batch_no=batch_no)
# if early_stop:
# break
if params.model_type == 'EGR_generator':
data_batch = repeat_data(data_batch, params.rep_num)
act_idx_out, act_probs_one, rl_sp_outputs, rl_de_outputs, mask_arr, lp_sp_data, lp_de_data, _\
= model.predict(data_batch, params.l2_reg)
pred = evaluator.predict(rl_sp_outputs, rl_de_outputs, data_batch[6])
# d_preds, d_rewards = discriminator.predict(rl_sp_outputs, rl_de_outputs, data_batch[6])
# rewards = pred + d_rewards.reshape((-1, max_time_len)) * c_rewards_d
rewards = pred
# rewards = pred
# train rl-rerank
# for _ in range(update_steps):
loss, mean_return = model.train(data_batch, rl_sp_outputs, rl_de_outputs, act_probs_one, act_idx_out,
rewards, mask_arr, params.c_entropy, params.lr, params.l2_reg, params.keep_prob)
# train discriminator
# if step % (update_rate_d * int(update_steps)) == 0:
# d_label = np.array([1] * lp_sp_data.shape[0] + [0] * rl_sp_outputs.shape[0])
# spar_data = np.concatenate([lp_sp_data, rl_sp_outputs], axis=0)
# dens_data = np.concatenate([lp_de_data, rl_de_outputs], axis=0)
# seq_len = np.array(data_batch[6] + data_batch[6])
# d_total_loss = discriminator.train([spar_data, dens_data, d_label, seq_len], lr, l2_reg)
# print('dis, step: %d' % (step), 'loss', d_total_loss)
elif params.model_type == 'Seq2Slate':
act_idx_out, act_probs_one, rl_sp_outputs, rl_de_outputs, mask_arr, lp_sp_data, lp_de_data, _ \
= model.predict(data_batch, params.l2_reg)
loss = model.train(data_batch, rl_sp_outputs, rl_de_outputs, mask_arr, params.lr,
params.l2_reg, params.keep_prob)
else:
loss = model.train(data_batch, params.lr, params.l2_reg, params.keep_prob)
step += 1
train_losses_step.append(loss)
if step % eval_iter_num == 0:
train_loss = sum(train_losses_step) / len(train_losses_step)
training_monitor['train_loss'].append(train_loss)
train_losses_step = []
vali_loss, res = eval(model, test_file, params.l2_reg, params.batch_size, True,
params.metric_scope, False)
training_monitor['vali_loss'].append(vali_loss)
training_monitor['map_l'].append(res[0][0])
training_monitor['ndcg_l'].append(res[1][0])
# training_monitor['de_ndcg_l'].append(res[2][0])
training_monitor['clicks_l'].append(res[2][0])
# training_monitor['utility_l'].append(res[4][0])
print("EPOCH %d STEP %d LOSS TRAIN: %.4f | LOSS VALI: %.4f" % (epoch, step, train_loss, vali_loss))
for i, s in enumerate(params.metric_scope):
print("@%d MAP: %.4f NDCG: %.4f CLICKS: %.4f " % (s, res[0][i], res[1][i], res[2][i]))
if training_monitor['map_l'][-1] > max(training_monitor['map_l'][:-1]):
# save model
model.save(save_path)
pkl.dump(res[-1], open(log_save_path, 'wb'))
print('model saved')
if len(training_monitor['map_l']) > 2 and epoch > 0:
# if (training_monitor['vali_loss'][-1] > training_monitor['vali_loss'][-2] and
# training_monitor['vali_loss'][-2] > training_monitor['vali_loss'][-3]):
# early_stop = True
if (training_monitor['map_l'][-2] - training_monitor['map_l'][-1]) <= 0.01 and (
training_monitor['map_l'][-3] - training_monitor['map_l'][-2]) <= 0.01:
early_stop = True
# generate log
if not os.path.exists('{}/logs_{}/{}/'.format(parse.save_dir, data_set_name, max_time_len)):
os.makedirs('{}/logs_{}/{}/'.format(parse.save_dir, data_set_name, max_time_len))
with open('{}/logs_{}/{}/{}.monitor.pkl'.format(parse.save_dir, data_set_name, max_time_len, model_name), 'wb') as f:
pkl.dump(training_monitor, f)
def reranker_parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--max_time_len', default=10, type=int, help='max time length')
parser.add_argument('--save_dir', type=str, default='./', help='dir that saves logs and model')
parser.add_argument('--data_dir', type=str, default='./data/toy/', help='data dir')
parser.add_argument('--model_type', default='PRM', choices=['PRM', 'DLCM', 'SetRank', 'GSF', 'miDNN', 'Seq2Slate', 'EGR_evaluator', 'EGR_generator'],
type=str, help='algorithm name, including PRM, DLCM, SetRank, GSF, miDNN, Seq2Slate, EGR_evaluator, EGR_generator')
parser.add_argument('--data_set_name', default='ad', type=str, help='name of dataset, including ad and prm')
parser.add_argument('--initial_ranker', default='lambdaMART', choices=['DNN', 'lambdaMART'], type=str, help='name of initial ranker, including DNN, lambdaMART')
parser.add_argument('--epoch_num', default=30, type=int, help='epochs of each iteration.')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--rep_num', default=5, type=int, help='samples repeat number')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--l2_reg', default=1e-4, type=float, help='l2 loss scale')
parser.add_argument('--keep_prob', default=0.8, type=float, help='keep probability')
parser.add_argument('--eb_dim', default=16, type=int, help='size of embedding')
parser.add_argument('--hidden_size', default=64, type=int, help='hidden size')
parser.add_argument('--group_size', default=1, type=int, help='group size for GSF')
parser.add_argument('--metric_scope', default=[1, 3, 5, 10], type=int, nargs='+', help='the scope of metrics')
parser.add_argument('--max_norm', default=0, type=float, help='max norm of gradient')
parser.add_argument('--c_entropy', default=0.001, type=float, help='entropy coefficient in loss')
# parser.add_argument('--decay_steps', default=3000, type=int, help='learning rate decay steps')
# parser.add_argument('--decay_rate', default=1.0, type=float, help='learning rate decay rate')
parser.add_argument('--timestamp', type=str, default=datetime.datetime.now().strftime("%Y%m%d%H%M"))
parser.add_argument('--evaluator_path', type=str, default='', help='evaluator ckpt dir')
parser.add_argument('--reload_path', type=str, default='', help='model ckpt dir')
parser.add_argument('--setting_path', type=str, default='./config/prm_setting.json', help='setting dir')
FLAGS, _ = parser.parse_known_args()
return FLAGS
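# Example invocation (the script name is hypothetical; all flags are defined above):
#   python run_reranker.py --model_type PRM --data_set_name ad \
#       --data_dir ./data/toy/ --initial_ranker lambdaMART --epoch_num 30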
if __name__ == '__main__':
# parameters
random.seed(1234)
parse = reranker_parse_args()
if parse.setting_path:
parse = load_parse_from_json(parse, parse.setting_path)
data_set_name = parse.data_set_name
processed_dir = parse.data_dir
stat_dir = os.path.join(processed_dir, 'data.stat')
max_time_len = parse.max_time_len
initial_ranker = parse.initial_ranker
if data_set_name == 'prm' and parse.max_time_len > 30:
max_time_len = 30
print(parse)
with open(stat_dir, 'r') as f:
stat = json.load(f)
num_item, num_cate, num_ft, profile_fnum, itm_spar_fnum, itm_dens_fnum = stat['item_num'], stat['cate_num'], \
stat['ft_num'], stat['profile_fnum'], stat['itm_spar_fnum'], stat['itm_dens_fnum']
print('num of item', num_item, 'num of list', stat['train_num'] + stat['val_num'] + stat['test_num'],
'profile num', profile_fnum, 'spar num', itm_spar_fnum, 'dens num', itm_dens_fnum)
# train_file, val_file, test_file = pkl.load(open(os.path.join(processed_dir, 'data.data'), 'rb'))
# props = pkl.load(open(os.path.join(processed_dir, 'prop'), 'rb'))
# props[0] = [1e-6 for i in range(max_time_len)]
# profile = pkl.load(open(os.path.join(processed_dir, 'user.profile'), 'rb'))
# construct training files
train_dir = os.path.join(processed_dir, initial_ranker + '.data.train')
if os.path.isfile(train_dir):
train_lists = pkl.load(open(train_dir, 'rb'))
else:
train_lists = construct_list(os.path.join(processed_dir, initial_ranker + '.rankings.train'), max_time_len)
pkl.dump(train_lists, open(train_dir, 'wb'))
# construct test files
test_dir = os.path.join(processed_dir, initial_ranker + '.data.test')
if os.path.isfile(test_dir):
test_lists = pkl.load(open(test_dir, 'rb'))
else:
test_lists = construct_list(os.path.join(processed_dir, initial_ranker + '.rankings.test'), max_time_len)
pkl.dump(test_lists, open(test_dir, 'wb'))
train(train_lists, test_lists, num_ft, max_time_len, itm_spar_fnum, itm_dens_fnum, profile_fnum, parse)
|
122424
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant'},
{'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),
'mode': 'constant'},
{'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode': 'constant'}
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestPadDefault(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, self.mode)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, self.mode)
return y_expected.astype(self.dtype),
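# For reference, pad_width follows the numpy.pad convention: for a (2, 3) input,
# pad_width=((1, 2), (3, 4)) adds 1 row before / 2 after axis 0 and 3 columns
# before / 4 after axis 1, yielding a (5, 10) output.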
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',
'constant_values': 1},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',
'constant_values': (1, 2)},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant',
'constant_values': ((1, 2), (3, 4))},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
# Old numpy does not work with multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y,
testing.run_module(__name__, __file__)
|
122437
|
import pytest
import pandas as pd
from iguanas.rule_selection._base_filter import _BaseFilter
from iguanas.rules import Rules
@pytest.fixture
def _create_data():
X_rules = pd.DataFrame({
'A': [1, 0, 1],
'B': [1, 1, 1]
})
return X_rules
def test_transform(_create_data):
X_rules = _create_data
bf = _BaseFilter(rules_to_keep=['A'], rules=None)
X_rules_ = bf.transform(X_rules)
pd.testing.assert_frame_equal(X_rules_, X_rules[['A']])
# With rules
rules = Rules(
rule_strings={
'A': "X['a']>1",
'B': "X['b']>1"
}
)
bf = _BaseFilter(rules_to_keep=['A'], rules=rules)
X_rules_ = bf.transform(X_rules)
pd.testing.assert_frame_equal(X_rules_, X_rules[['A']])
assert bf.rules.rule_strings == {'A': "X['a']>1"}
def test_fit_transform(_create_data):
bf = _BaseFilter(rules_to_keep=['A'], rules=None)
# Just create dummy fit method for testing
bf.fit = lambda X_rules, y, sample_weight: None
X_rules = _create_data
bf.rules_to_keep = ['A']
X_rules_ = bf.fit_transform(X_rules)
pd.testing.assert_frame_equal(X_rules_, X_rules[['A']])
# With rules
rules = Rules(
rule_strings={
'A': "X['a']>1",
'B': "X['b']>1"
}
)
bf = _BaseFilter(rules_to_keep=['A'], rules=rules)
# Just create dummy fit method for testing
bf.fit = lambda X_rules, y, sample_weight: None
X_rules_ = bf.fit_transform(X_rules)
pd.testing.assert_frame_equal(X_rules_, X_rules[['A']])
assert bf.rules.rule_strings == {'A': "X['a']>1"}
|
122446
|
import param
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# from gym_film.utils.convert_reward import to_single_reward
# from matplotlib.patches import Patch
# Uses the following methods/attributes from env:
# - O, R (observation and reward)
# - jets_power
# - system_state
# - reward, t
matplotlib.rcParams.update({'font.size': 15})
control = param.show_control
NO_LEGEND = True
show = 1
save = 0
class FilmRender():
def __init__(self, env, PLOT_JETS=True):
self.PLOT_JETS = PLOT_JETS
self.env = env
self.Ob = self.env.Ob
self.R = self.env.R
self.blit = False
self.setup_plot()
def setup_h_plot(self, plot_regions=False):
# Plot h
self.hlines, = self.hax.plot(np.linspace(
0, param.L-param.dx, param.NUM), self.env.system_state[0],
label="y = h(x)", linewidth=2.5)
self.qlines, = self.hax.plot(np.linspace(
0, param.L, param.NUM), self.env.system_state[1], alpha=0.5,
label="y = q(x)", linestyle='--', linewidth=2.5)
# Add lims on axes
self.hax.set_xlim(param.start_h_plot, param.L)
self.hax.set_ylim(param.hq_base_value-param.max_h,
param.hq_base_value+param.max_h)
# self.hax.grid()
if self.PLOT_JETS:
# Plot jets
self.setup_jet(self.hax)
# # legend
# legend = self.hax.legend(["y = h(x)", "y = q(x)", "jets position",
# "observation space", "reward space", "jets power"],
# loc='lower left', ncol=2)
handles, labels = self.hax.get_legend_handles_labels()
# sort both labels and handles by labels
order = [0, 1, 4, 2, 3, 5]
self.hax.legend([handles[idx] for idx in order], [labels[idx]
for idx in order],
loc="lower left", ncol=2)
# ax = legend.axes
# handles, labels = ax.get_legend_handles_labels()
# # obs label
# handles.append(Patch(facecolor='orange', edgecolor='r'))
# labels.append("observation domain")
# # reward label
# handles.append(Patch(facecolor='orange', edgecolor='r'))
# labels.append("reward domain")
# legend._legend_box = None
# legend._init_legend_box(handles, labels)
# legend._set_loc(legend._loc)
# legend.set_title(legend.get_title().get_text())
else:
# legend
self.hax.legend(["y = h(x)", "y = q(x)"], loc='lower left')
if NO_LEGEND:
self.hax.get_legend().remove()
# self.text = self.hax.text(1.1, 0.1, 't = '+str(int(round(float(self.env.t)))))
self.text = self.hax.text(1.1, 0.1, 't = '+str(int(round(float(self.env.current_step*param.dt)))))
if plot_regions:
self.plot_regions(self.hax)
# adding x and y labels
no_x_label = False
if not no_x_label:
self.hax.set_xlabel('x')
no_y_label = False
if not no_y_label:
self.hax_ylabel = self.hax.set_ylabel('h, q', labelpad=5)
# changing color of ticks
self.hax.tick_params(colors='black')
no_ticks_x = False
if no_ticks_x:
# removing ticks
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
no_ticks_y = False
if no_ticks_y:
# removing ticks
plt.tick_params(
axis='y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
labelbottom=False) # labels along the bottom edge are off
self.hax.set_yticklabels([])
def plot_regions(self, ax):
x1 = 160
ax.axvspan(0, x1, facecolor='blue', alpha=0.1)
ax.axvline(x=x1, ymin=0.0, ymax=1.0, color='k',
linestyle='--', alpha=0.3)
x2 = 270
ax.axvspan(x1, x2, facecolor='green', alpha=0.1)
ax.axvline(x=x2, ymin=0.0, ymax=1.0, color='k',
linestyle='--', alpha=0.3)
ax.axvspan(x2, 340, facecolor='red', alpha=0.1)
textstr1 = "Exponential instability growth region"
textstr2 = "Pseudo-periodic region"
textstr3 = "Fully-developped\nchaotic region"
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.09, 0.95, textstr1, transform=ax.transAxes,
verticalalignment='top', bbox=props)
ax.text(0.54, 0.95, textstr2, transform=ax.transAxes,
verticalalignment='top', bbox=props)
ax.text(0.83, 0.95, textstr3, transform=ax.transAxes,
verticalalignment='top', bbox=props)
def setup_jet(self, ax):
# self.jet_plot = ax.scatter(param.jets_position*param.dx, np.zeros(len(param.jets_position)), s=100*self.env.jets_power)
self.jet_spots = ax.scatter(
param.jets_position*param.dx,
[(0.9 - 0.095*(k)) for k in range(param.n_jets)],
label='jets position', s=30)
plt.plot([], [], label="observation domain", color="green")
plt.plot([], [], label="reward domain", color="red")
self.jet_rect = ax.bar(param.jets_position*param.dx,
self.env.jets_power, param.JET_WIDTH*param.dx, label="jets power", bottom=1)
# show the zone where the control is done as well
x_control_spots = np.array([self.Ob.obs_points+param.jets_position[i]
for i in range(param.n_jets)]).flatten()*param.dx
y_control_spots = np.concatenate(np.array(
[(np.zeros(len(self.Ob.obs_points)) + 0.9 - 0.095*(k)) for k in range(param.n_jets)]))
self.control_spots = ax.scatter(
x_control_spots, y_control_spots, s=1)
# show the zone where the reward is calculated
x_reward_spots = np.array([self.R.obs_points_to_reward+param.jets_position[i]
for i in range(param.n_jets)]).flatten()*param.dx
y_reward_spots = np.concatenate(np.array([(np.zeros(len(
self.R.obs_points_to_reward)) + 0.89 - 0.095*(k)) for k in range(param.n_jets)]))
self.reward_spots = ax.scatter(
x_reward_spots, y_reward_spots, s=1)
def update_h_plot(self):
self.hlines.set_ydata(self.env.system_state[0])
self.qlines.set_ydata(self.env.system_state[1])
def update_plot_jet(self, ax):
# self.jet_plot.remove()
# self.jet_plot = ax.scatter(param.jets_position * param.dx, np.zeros(len(param.jets_position)), s=100 * self.env.jets_power)
if self.render_plot:
for i in range(param.n_jets):
self.jet_rect[i].set_height(self.env.jets_power[i])
def setup_control_plot(self):
# Plot control/h as a function of time
self.control_ax.set_ylim(-1.5, 1.5)
self.control_ax.set_xlim(0, param.MAX_TIMEFRAME_CONTROL_PLOT-1)
self.x_t = np.arange(0, param.MAX_TIMEFRAME_CONTROL_PLOT)
self.y_sensor = [0 for i in range(param.MAX_TIMEFRAME_CONTROL_PLOT)]
self.y_control = [0 for i in range(param.MAX_TIMEFRAME_CONTROL_PLOT)]
self.y_reward = [0 for i in range(param.MAX_TIMEFRAME_CONTROL_PLOT)]
self.sensor_lines, = self.control_ax.plot(self.x_t, self.y_sensor)
self.control_lines, = self.control_ax.plot(self.x_t, self.y_control)
self.reward_lines, = self.control_ax.plot(self.x_t, self.y_reward)
# legend
self.control_ax.set_title(
"Some values at x=jets_position[0] as a function of time")
self.control_ax.legend(["y(t) = {}*h(x_jet, t)".format(param.obs_at_jet_render_param),
"jet power (proportion of max jet power)", "{} * reward".format(param.reward_multiplier_render)], loc='lower left')
def update_control_plot(self):
self.y_sensor.append(param.obs_at_jet_render_param*(
self.env.system_state[0, param.jets_position[0]]-param.hq_base_value))
self.y_sensor.pop(0)
self.y_control.append(self.env.jets_power[0])
self.y_control.pop(0)
self.y_reward.append(param.reward_multiplier_render *
self.reward_process(self.env.reward))
self.y_reward.pop(0)
if self.render_plot:
self.control_lines.set_ydata(self.y_control)
self.sensor_lines.set_ydata(self.y_sensor)
self.reward_lines.set_ydata(self.y_reward)
# self.control_ax.set_xlim(max(0, self.env.current_step-max_timeframe), self.env.current_step)
# setup everything - calls setup_jets and everything
def reward_process(self, reward):
if type(reward) is dict:
return np.mean([reward.get(
jet_position) for jet_position in sorted(reward.keys())])
return reward
def setup_plot(self):
standard_size = {'width': 10, 'height': 3}
divide_by = 1
self.figure = plt.figure(figsize=(standard_size.get(
'width')/divide_by, standard_size.get('height')/divide_by))
self.hax = self.figure.add_subplot(1, 1, 1)
# self.figure.subplots_adjust(wspace=0.2)
if control:
self.control_ax = self.figure.add_subplot(2, 1, 2)
self.figure.subplots_adjust(hspace=1)
# Plot h
self.setup_h_plot()
# Plot control
# self.setup_control_plot()
if self.blit:
# cache the background
self.haxbackground = self.figure.canvas.copy_from_bbox(
self.hax.bbox)
self.control_axbackground = self.figure.canvas.copy_from_bbox(
self.control_ax.bbox)
self.figure.canvas.draw()
if show:
plt.show(block=False)
self.counter = 0
# self.hax.set_title('t = {} \n global_reward = {}'.format(
# self.env.t, (to_single_reward(list(self.env.reward.values())) if param.method == '1env_1jet' else self.env.reward)))
self.save = save
if self.save:
self.save_fig()
def save_fig(self):
if self.counter == 0 or self.counter % param.SAVE_PERIOD == 0:
plt.tight_layout()
plt.savefig('fig'+str(self.counter)+str(id(self.env))[:2]+'.png')
self.counter += 1
# update everything
def update_plot(self):
# self.render_plot = self.render_clock == param.RENDER_PERIOD
self.render_plot = True
# # Update time value
# self.figure.suptitle('t = {} \n Reward : {}'.format(
# self.env.t, self.env.reward), fontsize=16)
# self.hax.set_title('t = {} \n global_reward = {}'.format(
# int(round(self.env.t)), (to_single_reward(list(self.env.reward.values())) if param.method == '1env_1jet' else self.env.reward)))
if self.save:
self.save_fig()
# Update data h
self.update_h_plot()
if self.PLOT_JETS:
# Update jet
self.update_plot_jet(self.hax)
# Update control as a function of time
if control:
self.update_control_plot()
self.text.set_text('t = '+str(int(round(float(self.env.current_step*param.simulation_step_time)))))
if self.render_plot:
if self.blit:
# restore background
self.figure.canvas.restore_region(self.haxbackground)
self.figure.canvas.restore_region(self.control_axbackground)
# redraw just the points
self.hax.draw_artist(self.hlines)
self.hax.draw_artist(self.qlines)
self.control_ax.draw_artist(self.sensor_lines)
self.control_ax.draw_artist(self.control_lines)
self.control_ax.draw_artist(self.reward_lines)
# fill in the axes rectangle
self.figure.canvas.blit(self.hax.bbox)
self.figure.canvas.blit(self.control_ax.bbox)
else:
# We draw here
self.figure.canvas.draw()
self.figure.canvas.flush_events()
self.render_clock = 0
self.render_clock += 1
|
122478
|
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base
from config import create_new_sqla
from helpers import (get_video_douban_ids,
get_celebrity_douban_ids,
get_animation_bilibili_ids)
test_database_url = 'sqlite:///test.db'
@pytest.fixture(scope='session')
def session(request):
sqla = create_new_sqla(test_database_url, echo=False)
session = sqla['session']
engine = sqla['engine']
Base.metadata.create_all(engine)
def teardown():
Base.metadata.drop_all(engine)
request.addfinalizer(teardown)
return session
@pytest.fixture
def douban_movie_ids():
return list(get_video_douban_ids())
@pytest.fixture
def douban_celebrity_ids():
return list(get_celebrity_douban_ids())
@pytest.fixture
def bilibili_animation_ids():
return list(get_animation_bilibili_ids())
|
122485
|
import logging
import random
from flask import request
from flask_restplus import Resource, Namespace, fields
from ..managers import copy_job_manager
from .. import tasks
from ..exceptions import HTTP_EXCEPTION
api = Namespace('copy-jobs', description='CopyJob related operations')
dto = api.model('copy-job', {
'id': fields.Integer(readonly=True, example=1234),
'description': fields.String(required=True, example='Task Description'),
'src_cloud_id': fields.Integer(required=False, example=1),
'src_resource_path': fields.String(required=True, example='/tmp'),
'dst_cloud_id': fields.Integer(required=False, example=2),
'dst_resource_path': fields.String(required=True, example='/trash'),
'copy_links': fields.Boolean(required=True, example=True),
'notification_email': fields.String(required=False, example='<EMAIL>'),
'owner': fields.String(required=False, example='owner'),
'progress_state': fields.String(readonly=True, example='PENDING'),
'progress_text': fields.String(readonly=True, example='Multi\nLine\nText'),
'progress_error_text': fields.String(readonly=True, example='Multi\nLine\nText'),
'progress_current': fields.Integer(readonly=True, example=45),
'progress_total': fields.Integer(readonly=True, example=100),
'progress_error': fields.String(readonly=True),
'progress_execution_time': fields.Integer(readonly=True, example=3600),
})
@api.route('/')
class CopyJobList(Resource):
@api.marshal_list_with(dto)
def get(self):
"""
List all Copy Jobs
"""
try:
return copy_job_manager.list()
except HTTP_EXCEPTION as e:
api.abort(e.code, e.payload)
except Exception as e:
logging.exception(e, exc_info=True)
api.abort(500, str(e))
@api.expect(dto, validate=True)
@api.marshal_with(dto, code=201)
def post(self):
"""
Create a new Copy Job
"""
try:
return copy_job_manager.create(request.json), 201
except HTTP_EXCEPTION as e:
api.abort(e.code, e.payload)
except Exception as e:
logging.exception(e, exc_info=True)
api.abort(500, str(e))
@api.route('/<id>')
@api.param('id', 'The Copy Job Identifier')
@api.response(404, 'Copy Job not found.')
class CopyJob(Resource):
@api.marshal_with(dto, code=200)
def get(self, id):
"""
Get a specific Copy Job
"""
try:
return copy_job_manager.retrieve(id), 200
except HTTP_EXCEPTION as e:
api.abort(e.code, e.payload)
except Exception as e:
logging.exception(e, exc_info=True)
api.abort(500, str(e))
@api.route('/<id>/stop/')
@api.param('id', 'The Copy Job Identifier')
@api.response(404, 'Copy Job not found.')
class CopyJobStop(Resource):
@api.marshal_with(dto, code=202)
def put(self, id):
"""
Stop the Copy Job
"""
try:
return copy_job_manager.stop(id), 202
except HTTP_EXCEPTION as e:
api.abort(e.code, e.payload)
except Exception as e:
logging.exception(e, exc_info=True)
api.abort(500, str(e))
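# Example requests against this namespace (host and URL prefix are hypothetical):
#   GET  /copy-jobs/            -> list all copy jobs
#   POST /copy-jobs/            -> create a copy job (body matching the dto above)
#   GET  /copy-jobs/1234        -> retrieve a single copy job
#   PUT  /copy-jobs/1234/stop/  -> stop a running copy job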
|
122487
|
import sys
src = sys.argv[1]
trg = sys.argv[2]
with open(src, "rb") as f:
contents = f.read()
with open(trg, "wb") as f:
f.write(contents)
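# Usage (file names are hypothetical): python copy_file.py source.bin target.bin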
|
122604
|
from django.db import models
from datetime import datetime
class TestModel(models.Model):
date = models.DateField(default=datetime.today)  # pass the callable so the default is evaluated per row, not once at import
|
122611
|
import numpy as np
import pandas as pd
import streamlit as st
import altair as alt
from matplotlib import pyplot as plt
import config, dataset, main, utils
# Matplotlib params
plt.style.use("seaborn")
plt.rcParams["figure.dpi"] = 300
def get_altair_hist_plot(series, name, bin_min, bin_max, bin_step):
"""
Plot the given Pandas Series as a histogram using Altair
"""
hist, bin_edges = np.histogram(
series,
bins=np.arange(bin_min, bin_max, bin_step),
)
print(bin_edges)
data = pd.DataFrame({name: bin_edges[:-1], "Count": hist})
return (
alt.Chart(data)
.mark_bar()
.encode(
alt.X(f"{name}:Q", bin=alt.Bin(extent=[bin_min, bin_max], step=bin_step)), y="Count"
)
)
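# Note: np.arange(bin_min, bin_max, bin_step) excludes bin_max itself, so the
# histogram's top edge is bin_max - bin_step and values above it are dropped.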
# Title section
st.set_page_config(
page_title="3D Bin Packing",
)
st.header("3D Bin Packing")
# Dataset section
st.header("Dataset")
product_dataset = dataset.ProductDataset(
"data/products.pkl",
config.NUM_PRODUCTS,
config.MIN_PRODUCT_WIDTH,
config.MAX_PRODUCT_WIDTH,
config.MIN_PRODUCT_DEPTH,
config.MAX_PRODUCT_DEPTH,
config.MIN_PRODUCT_HEIGHT,
config.MAX_PRODUCT_HEIGHT,
config.MIN_PRODUCT_WEIGHT,
config.MAX_PRODUCT_WEIGHT,
force_overload=False,
)
# Plot depth over width ratio in the dataset
dw_ratio_plot = get_altair_hist_plot(
product_dataset.products.depth / product_dataset.products.width,
"D/W Ratio",
0,
1,
0.01,
)
st.altair_chart(dw_ratio_plot, use_container_width=True)
# Plot height over width ratio in the dataset
hw_ratio_plot = get_altair_hist_plot(
product_dataset.products.height / product_dataset.products.width,
"H/W Ratio",
0,
2,
0.05,
)
st.altair_chart(hw_ratio_plot, use_container_width=True)
# Plot volume distribution in the dataset
volume_plot = get_altair_hist_plot(product_dataset.products.volume / 1e6, "Volume", 0, 100, 1)
st.altair_chart(volume_plot, use_container_width=True)
# Plot weight distribution in the dataset
weight_plot = get_altair_hist_plot(product_dataset.products.weight, "Weight", 0, 100, 5)
st.altair_chart(weight_plot, use_container_width=True)
# Order section
st.header("Order")
# Select number of products and get random order
ordered_products = st.slider("Ordered products", 0, 1000, value=10, step=5)
order = product_dataset.get_order(ordered_products)
# Show the order as a table
st.dataframe(order)
# Show lower bounds on bins for the selected order
# on the sidebar of the dashboard
lower_bound = st.sidebar.selectbox(
f"Lower bounds for the selected {ordered_products}-products order", ("L0", "L1", "L2")
)
if lower_bound == "L0":
lb = utils.get_l0_lb(order, config.PALLET_DIMS)
elif lower_bound == "L1":
lb, _, _, _ = utils.get_l1_lb(order, config.PALLET_DIMS)
elif lower_bound == "L2":
lb, _, _, _ = utils.get_l2_lb(order, config.PALLET_DIMS)
st.sidebar.write(f"Martello's {lower_bound} lower bound: {lb}")
# Solutions section
st.header("Solution")
# Select parameters
st.subheader("Parameters")
solution_type = st.selectbox(
"Select the algorithm you'd like to test",
("Baseline", "Maxrects", "Column generation"),
index=1,
)
tlim = st.slider("Time limits", 0, 100, value=10, step=5)
max_iters = st.slider("Maximum re-iterations", 0, 5, value=1, step=1)
superitems_horizontal = st.radio("Add horizontal superitems", ("Yes", "No"))
# Compute solution
if solution_type == "Baseline" or solution_type == "Maxrects":
bin_pool = main.main(
order,
procedure="bl" if solution_type == "Baseline" else "mr",
max_iters=max_iters,
tlim=tlim,
superitems_horizontal=(superitems_horizontal == "Yes"),
)
elif solution_type == "Column generation":
cg_use_height_groups = st.radio(
"Call column generation by height groups", ("Yes", "No"), index=1
)
cg_mr_warm_start = st.radio(
"Use maxrects as a warm-start for column generation", ("Yes", "No"), index=1
)
cg_max_iters = st.slider("Column generation maximum iterations", 0, 100, value=20, step=5)
cg_max_stag_iters = st.slider(
"Column generation early stopping iterations", 0, 100, value=3, step=1
)
cg_sp_mr = st.radio(
"Use maxrects for the pricing subproblem in column generation", ("Yes", "No"), index=1
)
cg_sp_np_type = st.selectbox(
"Select the approach to use in the subproblem no-placement for column generation",
("MIP", "CP"),
index=0,
)
cg_sp_p_type = st.selectbox(
"Select the approach to use in the subproblem placement for column generation",
("Maxrects", "MIP", "CP"),
index=0,
)
bin_pool = main.main(
order,
procedure="cg",
max_iters=max_iters,
tlim=tlim,
superitems_horizontal=(superitems_horizontal == "Yes"),
cg_use_height_groups=(cg_use_height_groups == "Yes"),
cg_mr_warm_start=(cg_mr_warm_start == "Yes"),
cg_max_iters=cg_max_iters,
cg_max_stag_iters=cg_max_stag_iters,
cg_sp_mr=(cg_sp_mr == "Yes"),
cg_sp_np_type=cg_sp_np_type.lower(),
cg_sp_p_type="mr" if cg_sp_p_type == "Maxrects" else cg_sp_p_type.lower(),
)
# Show original layer pool (before compacting)
st.subheader("Original layer pool")
st.dataframe(bin_pool.get_original_layer_pool().to_dataframe())
# Show original bin pool (before compacting)
st.subheader("Original bin pool")
original_bin_pool = bin_pool.get_original_bin_pool()
for i, bin in enumerate(original_bin_pool):
st.write(f"Bin #{i + 1}")
st.dataframe(bin.layer_pool.describe())
ax = bin.plot()
st.pyplot(plt.gcf())
# Show compact bin pool
st.subheader("Compact bin pool")
for i, bin in enumerate(bin_pool.compact_bins):
st.write(f"Bin #{i + 1}")
ax = bin.plot()
st.pyplot(plt.gcf())
# Success message
st.success("Bin packing procedure successfully completed")
|
122641
|
from __future__ import print_function
import networkx as nx
from semnav.lib.categories import room2category, behavior_id2category
from semnav.lib.sem_graph import Node, Edge
class SubGraph(object):
"""Sub-graph of SemGraph used for graph networks.
"""
def __init__(self, sem_graph, cur_position, n_neighbor_dist, cur_behavior_id=None):
"""Build a local subgraph from sem_graph centered around the cur_position node or
cur_behavior_id edge (which is an outgoing edge of the cur_position node). The subgraph
extends to a max of n_neighbor_dist nodes or edges away from the current node/edge.
Args:
sem_graph: Semantic graph.
cur_position: Node or Edge representing the current position in the graph, or a str
representing the node name of a Node.
n_neighbor_dist: Int representing the max distance from the current position that the subgraph
will contain.
cur_behavior_id: Current edge behavior ID as a string (e.g. 'tr'). If this is None, then
the subgraph will be cropped centered around the current node (cur_position). If
cur_behavior_id is NOT None, then the subgraph will be centered around the
current edge (which is computed based on cur_position and cur_behavior_id).
"""
self.nxG, self.nodes, self.edges, self.nxG_compact = self._build_subgraph(
sem_graph,
cur_position,
n_neighbor_dist,
cur_behavior_id=cur_behavior_id
)
@classmethod
def get_successors(cls, cur_node, depth, forward):
if depth <= 0:
return [], []  # no further nodes or edges beyond this depth
if forward is True: # Find successors
neighboring_edges = cur_node.outgoing_edges
start_or_end_node = 'end_node'
elif forward is False: # Find predecessors
neighboring_edges = cur_node.incoming_edges
start_or_end_node = 'start_node'
else:
raise ValueError('The forward parameter must be a boolean.')
successor_nodes = []
for edge in neighboring_edges:
successor_nodes.append(getattr(edge, start_or_end_node))
all_future_edges = []
if depth > 1:
all_future_successors = []
for successor_node in successor_nodes:
future_successors, future_edges = cls.get_successors(successor_node, depth - 1, forward=forward)
all_future_successors.extend(future_successors)
all_future_edges.extend(future_edges)
successor_nodes.extend(all_future_successors)
successor_edges = neighboring_edges + all_future_edges
return successor_nodes, successor_edges
def _build_subgraph(self, sem_graph, cur_position, n_neighbor_dist, cur_behavior_id=None):
"""Build a list of nodes (nbunch) and let networkx extract the subgraph.
"""
if isinstance(cur_position, str):
cur_position = sem_graph.nodes[cur_position]
if cur_behavior_id is not None: # Set cur_position to be an Edge
assert isinstance(cur_position, Node)
for edge in cur_position.outgoing_edges:
if edge.behavior_id == cur_behavior_id:
cur_position = edge
break
if isinstance(cur_position, Node): # Build local graph centered around node
cur_node = cur_position
forward_dist = n_neighbor_dist
elif isinstance(cur_position, Edge): # Build local graph centered around edge
cur_node = cur_position.start_node
forward_dist = n_neighbor_dist + 1
else:
raise ValueError('Invalid current position input type.')
nbunch = [cur_node]
# Look at successors and successors of successors
successor_nodes, successor_edges = self.get_successors(cur_node, depth=forward_dist, forward=True)
nbunch += successor_nodes
# Look at predecessors and predecessors of predecessors
predecessor_nodes, predecessor_edges = self.get_successors(cur_node, depth=n_neighbor_dist, forward=False)
nbunch += predecessor_nodes
nbunch_edges = successor_edges + predecessor_edges
nx_subgraph = nx.OrderedDiGraph()
nx_subgraph.add_nodes_from(nbunch)
for u, v, cur_obj in sem_graph.nxG.edges(data='object'):
if (u in nx_subgraph) and (v in nx_subgraph):
nx_subgraph.add_edge(u, v, object=cur_obj)
nodes = {node.name: node for node in nx_subgraph.nodes}
edges = [edge_tuple[2] for edge_tuple in nx_subgraph.edges(data='object')]
nxG_compact = self.convert_to_compact(nx_subgraph, store_features=False)
return nx_subgraph, nodes, edges, nxG_compact
def convert_to_compact(self, G, store_features):
"""Generates a compact networkx graph for the input graph. The features of each node and
edge are converted to room categories and behavior categories, which may result in a loss
of information (e.g. s_r -> s category) in the networkx compact graph.
"""
nxG_compact = nx.OrderedDiGraph()
# Create nodes
for node in G.nodes:
# Create node
if store_features is True: # Store a features attribute
nxG_compact.add_node(node.name, room_category=room2category(node.name),
features=node['features'])
else:
nxG_compact.add_node(node.name, room_category=room2category(node.name))
# Create edges
for u, v, edge in G.edges(data='object'):
if store_features is True:
nxG_compact.add_edge(u.name, v.name,
behavior_category=behavior_id2category(edge.behavior_id),
features=edge['features'])
else:
nxG_compact.add_edge(u.name, v.name,
behavior_category=behavior_id2category(edge.behavior_id))
assert len(nxG_compact.nodes) == len(G.nodes)
assert len(nxG_compact.edges) == len(G.edges)
return nxG_compact
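# A minimal usage sketch (the sem_graph instance and node name are hypothetical;
# assumes the SemGraph API used above):
#   sub = SubGraph(sem_graph, 'room_1', n_neighbor_dist=2, cur_behavior_id='tr')
#   print(len(sub.nodes), len(sub.edges), sub.nxG_compact.number_of_nodes())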
|
122741
|
import os
import re
import bpy
import logging
import json
from . import bpy_helper
from . import ifc_helper
from . import io
from . import exporter
def create(project, ifc_type=None, group_name='', prefix='', **kwargs):
i = 0
if ifc_type:
for elem in ifc_helper.elements_by_type(project, ifc_type):
i += 1
grp_name = prefix + elem.name
logging.info('{}: create group {}'.format(i, grp_name))
grp = bpy.data.groups.new(grp_name)
else:
grp_name = prefix + group_name
logging.info('{}: create group {}'.format(i, grp_name))
grp = bpy.data.groups.new(grp_name)
def children_by_regex(project, ifc_target_groups, source_regex, target_regex,
force_int=False, source_prefix='', prefix='', **kwargs):
i = 0
# get all storeys
target_names = {}
for group in ifc_helper.elements_by_type(project, ifc_target_groups):
m = re.match(target_regex, group.name)
if m:
name = m.group('name')
if force_int:
name = int(name)
target_names[name] = prefix+group.name
# find blender groups that are not assigned to the target
for source_group in bpy.data.groups:
source_name = source_group.name
m = re.match(source_regex, source_name)
if m:
group_nr = m.group('name')
if force_int:
group_nr = int(group_nr)
target_name = target_names[group_nr]
logging.info('assign all elements of group {} to {}'.format(
source_name, target_name
))
for obj in bpy.data.groups[source_name].objects:
if not obj.name in bpy.data.groups[target_name].objects:
bpy.data.groups[target_name].objects.link(obj)
i += 1
#logging.info('assign {} to {}'.format(
# obj.name, target_name
#))
logging.info("assigned {} objects to group {}".format(i, target_name))
def _by_elevation(building, prefix=''):
min_elevation = -20
max_elevation = 1000
elevations = {}
for storey in building.storeys:
elevations[storey.name] = [storey.elevation, 0]
# calculate min and max for elevation
ele = [e[0] for e in elevations.values()] + [max_elevation]
for elevation in elevations.values():
elevation[1] = min([e for e in ele if e > elevation[0]])
# extend the lowest storey's floor down to min_elevation so objects below it are still captured
min(elevations.values())[0] = min_elevation
logging.info("elevations: " + json.dumps(elevations, indent=2))
i = 0
for obj in bpy.data.objects:
num_groups = sum([obj.name in g.objects for g in bpy.data.groups])
if num_groups == 0:
if obj.type != 'MESH':
continue
if not obj.data.vertices:
continue
vcos = [ obj.matrix_world * v.co for v in obj.data.vertices ]
findCenter = lambda l: ( max(l) + min(l) ) / 2
x,y,z = [ [ v[i] for v in vcos ] for i in range(3) ]
center = [ findCenter(axis) for axis in [x,y,z] ]
z = center[2]
grp_name = [n for n, e in elevations.items() if e[0] <= z < e[1]]
if grp_name:
grp_name = prefix + grp_name[0]
# logging.info('assign {} to {} - {}'.format(
# obj.name, grp_name, z
# ))
bpy.data.groups[grp_name].objects.link(obj)
i += 1
logging.info("assigned {} objects to group by elevation".format(i))
def by_elevation(project, prefix='', **kwargs):
for site in project.sites:
for building in site.buildings:
_by_elevation(building, prefix)
def by_regex(project, ifc_type_group, group_name, regex, **kwargs):
i = 0
for elem in ifc_helper.elements_by_type(project, ifc_type_group):
m = re.search(regex, elem.name)
if not m:
continue
obj = bpy_helper.find_object(elem)
print(obj)
print(elem.name)
if not obj.name in bpy.data.groups[group_name].objects:
bpy.data.groups[group_name].objects.link(obj)
i += 1
#logging.info('assign {} to {}'.format(
# obj.name, group_name
#))
logging.info("assigned {} objects to group {}".format(i, group_name))
def unlink(project, regex, **kwargs):
i = 0
for obj in bpy.data.objects:
m = re.match(regex, obj.name)
if not m:
continue
for grp in obj.users_group:
grp.objects.unlink(obj)
i += 1
logging.info("unlinked {} objects from groups, {} '{}'".format(i, len(bpy.data.objects), regex))
def add(project, ifc_type_group, prefix='', ifc_children=None, **kwargs):
# get group
for group in ifc_helper.elements_by_type(project, ifc_type_group):
i = 0
j = 0
grp_name = prefix + group.name
# assign self to group
if not ifc_children:
continue
# assign item to group
for elem in getattr(group, ifc_children):
i += 1
obj = bpy_helper.find_object(elem)
if not obj:
# logging.error('can not assign {} to group {}'.format(elem.name, grp_name))
j += 1
continue
if obj.name in bpy.data.groups[grp_name].objects:
logging.warn('DUPLICATE: object {} already in group {}'.format(elem.name, grp_name))
continue
# logging.info('{}: link {} to group {}'.format(i, elem.name, grp_name))
bpy.data.groups[grp_name].objects.link(obj)
logging.info("linked {} objects to group {}".format(i, grp_name))
if j:
logging.warn("could not assign {} objects to group {}".format(j, grp_name))
# get all products
#for elem in ifc_helper.elements_by_type(project, ifc_type_group):
# # find out which room/storey they are assigned to
#for item in ifc_helper.elements_by_type(project, ifc_type):
# ifc_helper.elements_by_type(project, 'product')
"""
split into multiple .blend-files based on blender groups and ifc_type
"""
def split(project, ifc_type, outpath, blend_file, export=None, **kwargs):
elements = ifc_helper.elements_by_type(project, ifc_type)
for elem in elements:
i = 0
io.load(blend_file)
logging.info('delete all but {}'.format(elem.name))
for other in elements:
if other == elem:
continue
# remove all other groups
for obj in bpy.data.groups[other.name].objects:
# logging.info('delete {} from group {}'.format(obj.name, other.name))
i += 1
bpy.data.objects.remove(obj, True)
logging.info('deleted {} objects outside group {}, kept {}'.format(i, elem.name, len(bpy.data.objects)))
filepath = os.path.join(outpath, elem.name)
io.save(filepath)
if export:
exporter.export(project, filepath, export)
|
122761
|
import re
from datetime import datetime
from pathlib import Path
from subprocess import Popen, check_output
from tempfile import TemporaryDirectory
from django.core.files.base import ContentFile
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django_rq import job
from tqdm import trange
from accounts.models import HierarchicUser
from common.utils.cache import unlock_user
from common.utils.export import send_pdf, send_export
from common.utils.file import FileAnalyzer
from libretto.export import EvenementExporter, Source
from libretto.models import Evenement
def create_image_from_pdf(pdf_path, page_index, image_path: Path):
assert image_path.suffix == '.jpg'
p = Popen([
'pdftoppm',
'-r', '300', # Use a 300 dpi density to ensure high quality.
'-scale-to', '3000', # Make sure the final image does not exceed
# 3000 pixels in width or height.
'-cropbox', # Use the PDF's CropBox to determine the dimensions
# of the displayed part of the page.
'-jpeg',
# Progressive JPEGs reduce the size while making a nicer browser load.
'-jpegopt', 'quality=85,progressive=y',
# Only the current page.
'-f', str(page_index + 1), '-l', str(page_index + 1), '-singlefile',
pdf_path, str(image_path.with_suffix('')),
])
p.wait()
def get_pdf_num_pages(path):
return int(
re.search(
r'^Pages:\s+(\d+)$',
check_output(['pdfinfo', path]).decode(),
flags=re.MULTILINE
).group(1)
)
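# pdfinfo prints a line such as "Pages:          12"; the regex above extracts
# that page count.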
@job
@transaction.atomic
def split_pdf(source_pk, user_pk):
try:
source = Source.objects.get(pk=source_pk)
assert source.is_pdf()
assert not source.children.exists()
f = source.fichier
num_pages = get_pdf_num_pages(f.path)
with TemporaryDirectory() as tmp:
for i in trange(num_pages):
image_path = Path(tmp) / f'{Path(f.name).stem}_{i}.jpg'
create_image_from_pdf(f.path, i, image_path)
cf = ContentFile(image_path.read_bytes(), image_path.name)
page = i + 1
source_page = Source.objects.create(
parent=source, position=page, page=page, type=source.type,
fichier=cf, type_fichier=FileAnalyzer.IMAGE,
owner=source.owner,
)
# Generates thumbnails.
source_page.small_thumbnail
source_page.medium_thumbnail
finally:
unlock_user(HierarchicUser.objects.get(pk=user_pk))
@job
def events_to_pdf(pk_list, user_pk, site_pk, language_code):
evenements = Evenement.objects.filter(pk__in=pk_list)
context = {'evenements': evenements.prefetch_all}
template_name = 'libretto/evenement_list.tex'
n = len(pk_list)
subject = _('de %s événements') % n
filename = '%s-evenements_%s' % (n, datetime.today().date().isoformat())
send_pdf(context, template_name, subject, filename, user_pk, site_pk,
language_code)
def export_events(extension):
def inner(pk_list, user_pk, site_pk, language_code):
exporter = EvenementExporter(Evenement.objects.filter(pk__in=pk_list))
n = len(pk_list)
subject = _('de %s événements') % n
filename = f'{n}-evenements_{datetime.today().date().isoformat()}'
send_export(exporter, extension, subject, filename, user_pk,
language_code)
return inner
@job
def events_to_json(*args):
export_events('json')(*args)
@job
def events_to_csv(*args):
export_events('csv')(*args)
@job
def events_to_xlsx(*args):
export_events('xlsx')(*args)
|
122771
|
import tensorflow as tf
tf.enable_eager_execution()
import numpy as np
import pandas as pd
import os
# import argparse
def read_tf(tfrecord_path):
"""
read in the tensors
:param tfrecord_path: the path to the tensor
:return: the image and the label
"""
raw_image_dataset = tf.data.TFRecordDataset(tfrecord_path)
# Create a dictionary describing the features.
image_feature_description = {
'data_vol': tf.io.FixedLenFeature([], tf.string),
'label_vol': tf.io.FixedLenFeature([], tf.string),
}
def _parse_image_function(example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_single_example(example_proto, image_feature_description)
parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
for parser in parsed_image_dataset:
data_vol = tf.decode_raw(parser['data_vol'], tf.float32)
label_vol = tf.decode_raw(parser['label_vol'], tf.float32)
image_raw1 = data_vol.numpy()
image_raw2 = label_vol.numpy()
image_raw1 = image_raw1.reshape((256, 256, 3))
image_raw2 = np.expand_dims(image_raw2.reshape((256, 256, 3))[..., 0], -1)
return image_raw1, image_raw2
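# Each record stores one flattened 256x256x3 float32 slice; only the first
# channel of the label volume is kept (the label channels are assumed redundant).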
def tf_to_numpy(tf_path='../../input/'):
"""
convert tensor to numpy array and save it
:param tf_path: the path to the csv file that save all the path to the tensors
:return:
"""
for data_name in ["ct_train", "ct_val", "mr_train", "mr_val"]:
df_train = pd.read_csv(os.path.join(tf_path, '{}_list.csv'.format(data_name)))
ids_train = df_train['img']
folder_tosave = os.path.join(tf_path, 'PnpAda_release_data/{}/'.format(data_name))
if not os.path.exists(folder_tosave):
os.mkdir(folder_tosave)
if not os.path.exists(os.path.join(folder_tosave, 'img')):
os.mkdir(os.path.join(folder_tosave, 'img/'))
if not os.path.exists(os.path.join(folder_tosave, 'mask')):
os.mkdir(os.path.join(folder_tosave, 'mask/'))
for i, id in enumerate(ids_train):
if i % 100 == 0:
print(id)
if not os.path.exists(os.path.join(folder_tosave, 'img', id)):
img_path = '../../input/PnpAda_release_data/train_n_val/{}_tfs/{}'.format(data_name, id)
img, mask = read_tf(img_path)
np.save(os.path.join(folder_tosave, 'img', id), img)
np.save(os.path.join(folder_tosave, 'mask', id), mask)
print('**************** {} finished ****************'.format(data_name))
if __name__ == '__main__':
# tf_to_numpy()
# print("################ all the processes finished ################")
img, mask = read_tf('../../input/PnpAda_release_data/train_n_val/ct_train_tfs/ct_train_slice{}.tfrecords'.format(0))
print(img.shape, mask.shape)
print(np.mean(img), np.std(img))
print(img.min(), img.max())
img2 = (img - img.min()) * 255 / (img.max() - img.min())
img2 = np.array(img2, dtype=int)
print(img2.min(), img2.max())
from matplotlib import pyplot as plt
plt.imshow(img2[128-112:128+112,128-112:128+112], cmap='gray')
plt.show()
plt.imshow(mask[128-112:128+112,128-112:128+112,0], cmap='gray')
plt.show()
img, mask = read_tf('../../input/PnpAda_release_data/train_n_val/ct_val_tfs/ct_val_slice{}.tfrecords'.format(1))
print(img.shape, mask.shape)
print(np.mean(img), np.std(img))
print(img.min(), img.max())
img2 = (img - img.min()) * 255 / (img.max() - img.min())
img2 = np.array(img2, dtype=int)
print(img2.min(), img2.max())
plt.imshow(img2[128 - 112:128 + 112, 128 - 112:128 + 112], cmap='gray')
plt.show()
plt.imshow(mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0], cmap='gray')
plt.show()
|
122784
|
import threading
from functools import wraps
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
_kwd_mark = object()
_data_lock = threading.Lock()
def cache(f):
"""
Decorator that caches the function's return value, so cached RDDs,
DataFrames, and other objects can be shared between calls to tasks.
"""
@wraps(f)
def wrapper(self, *args, **kwargs):
with _data_lock:
try:
self._cache
except AttributeError:
self._cache = {}
# function call key adapted from http://stackoverflow.com/a/10220908/1236542
key = (f,) + args + (_kwd_mark,) + tuple(sorted(kwargs.items()))
if key in self._cache:
return self._cache[key]
else:
from pyspark.rdd import RDD
from pyspark.sql import DataFrame
result = f(self, *args, **kwargs)
self._cache[key] = result
if isinstance(result, RDD):
st = result.getStorageLevel()
if not st.useDisk and not st.useMemory and not st.useOffHeap:
raise ValueError('An RDD returned by a @cache function should be persisted with .cache() or .persist().')
elif isinstance(result, DataFrame):
st = result.storageLevel
if not st.useDisk and not st.useMemory and not st.useOffHeap:
raise ValueError('A DataFrame returned by a @cache function should be persisted with .cache() or .persist().')
return result
return wrapper
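# A minimal usage sketch (the task class is hypothetical; the decorated method
# must return a persisted RDD/DataFrame, as enforced above):
# class EventTask:
#     @cache
#     def load_events(self, sc, path):
#         return sc.textFile(path).cache()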
|
122786
|
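# Rough import-time profiling: print a timestamp after each heavy import to see
# how long each one takes to load.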
import datetime
print(1,datetime.datetime.now())
import apscheduler
print(2,datetime.datetime.now())
import gevent
print(3,datetime.datetime.now())
import eventlet
print(4,datetime.datetime.now())
import asyncio
print(5,datetime.datetime.now())
import threading
print(6,datetime.datetime.now())
import pymongo
print(7,datetime.datetime.now())
import redis
print(8,datetime.datetime.now())
import pysnooper
print(9,datetime.datetime.now())
|
122794
|
import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from moabb.pipelines.utils import FilterBank
parameters = {"C": np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel="linear"), parameters)
fb = FilterBank(make_pipeline(Covariances(estimator="oas"), CSP(nfilter=4)))
pipe = make_pipeline(fb, SelectKBest(score_func=mutual_info_classif, k=10), clf)
# this is what will be loaded
PIPELINE = {
"name": "FBCSP + optSVM",
"paradigms": ["FilterBankMotorImagery"],
"pipeline": pipe,
}
|
122825
|
import torch
from torch import nn
from torch.nn import functional as F
from typing import Dict, Optional
from utils.misc_utils import combine_first_ax
from slowfast.models.video_model_builder import SlowFast, ResNet
from fairseq.models.transformer import (
TransformerEncoder,
TransformerDecoder,
# EncoderOut,
)
from utils.transformer_code import Transformer as TxCodeEnc
from vidsitu_code.seq_gen import SeqGenCustom, EncoderOut
from transformers import GPT2LMHeadModel
from vidsitu_code.hf_gpt2_fseq import HuggingFaceGPT2Decoder
class SlowFast_FeatModel(SlowFast):
def forward_features(self, x):
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
x = self.s5(x)
return x
    def forward(self, x, bboxes=None):
        x = self.forward_features(x)
if self.enable_detection:
x = self.head(x, bboxes)
else:
x = self.head(x)
return x
class ResNet_FeatModel(ResNet):
def forward_features(self, x):
x = self.s1(x)
x = self.s2(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s4(x)
x = self.s5(x)
return x
    def forward(self, x, bboxes=None):
        x = self.forward_features(x)
if self.enable_detection:
x = self.head(x, bboxes)
else:
x = self.head(x)
return x
class ResNetBasicHead_Trimmed(nn.Module):
"""
ResNe(X)t 3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(self, dim_in, pool_size):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetBasicHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
"""
super().__init__()
assert (
len({len(pool_size), len(dim_in)}) == 1
), "pathway dimensions are not consistent."
self.num_pathways = len(pool_size)
self.dim_in = dim_in
for pathway in range(self.num_pathways):
if pool_size[pathway] is None:
avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
# avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
avg_pool = nn.AvgPool3d(pool_size[pathway])
self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
def forward(self, inputs):
assert (
len(inputs) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
m = getattr(self, "pathway{}_avgpool".format(pathway))
pool_out.append(m(inputs[pathway]))
x = torch.cat(pool_out, 1)
return x
class SFBase(nn.Module):
def __init__(self, cfg, comm):
super(SFBase, self).__init__()
self.full_cfg = cfg
self.sf_cfg = cfg.sf_mdl
self.cfg = cfg.mdl
self.comm = comm
self.build_model()
def build_model(self):
self.build_sf_model(self.sf_cfg)
self.build_head(self.sf_cfg)
self.build_projection_head(self.sf_cfg)
def build_sf_model(self, cfg):
mdl_name = cfg.MODEL.MODEL_NAME
if mdl_name == "SlowFast":
mdl = SlowFast_FeatModel(cfg)
elif mdl_name == "ResNet":
mdl = ResNet_FeatModel(cfg)
else:
raise NotImplementedError
self.sf_mdl = mdl
return
def build_head(self, cfg):
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
# pool_size = _POOL1[cfg.MODEL.ARCH]
if self.comm.path_type == "multi":
self.head = ResNetBasicHead_Trimmed(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
pool_size=[None, None], # None for AdaptiveAvgPool3d((1, 1, 1))
)
elif self.comm.path_type == "single":
self.head = ResNetBasicHead_Trimmed(
dim_in=[width_per_group * 32],
pool_size=[None], # None for AdaptiveAvgPool3d((1, 1, 1))
)
return
def build_projection_head(self, cfg, out_dim=None):
if out_dim is None:
out_dim = len(self.comm.vb_id_vocab)
din = sum(self.head.dim_in)
self.proj_head = nn.Sequential(
*[nn.Linear(din, din // 2), nn.ReLU(), nn.Linear(din // 2, out_dim)]
)
def get_feats(self, inp):
if self.comm.path_type == "multi":
feat_slow = combine_first_ax(inp["frms_ev_slow_tensor"])
feat_fast = combine_first_ax(inp["frms_ev_fast_tensor"])
feats_used = [feat_slow, feat_fast]
elif self.comm.path_type == "single":
feat_fast = combine_first_ax(inp["frms_ev_fast_tensor"])
feats_used = [feat_fast]
else:
raise NotImplementedError
return feats_used
def forward_encoder(self, inp):
feats_used = self.get_feats(inp)
nfeats_used = len(feats_used)
feat_out = self.sf_mdl.forward_features(feats_used)
assert len(feat_out) == nfeats_used
return feat_out
def forward_decoder(self, enc_out, inp):
# enc_out: List
# len(enc_out) = nfeats_used
# enc_out[0]: B x C x T x H x W
head_out = self.head(enc_out)
# (B, C, T, H, W) -> (B, T, H, W, C).
head_out = head_out.permute((0, 2, 3, 4, 1))
# B = len(inp["vseg_idx"])
# assert head_out.size(1) == 1
# assert head_out.size(2) == 1
# assert head_out.size(3) == 1
# out = head_out.view(B, 5, -1)
# import pdb
# pdb.set_trace()
proj_out = self.proj_head(head_out)
B = len(inp["vseg_idx"])
out = proj_out.view(B, 5, -1)
assert out.size(-1) == len(self.comm.vb_id_vocab)
return out
def forward(self, inp: Dict):
feat_out = self.forward_encoder(inp)
mdl_out = self.forward_decoder(feat_out, inp)
return {"mdl_out": mdl_out}
class LossB(nn.Module):
def __init__(self, cfg, comm):
super().__init__()
self.cfg = cfg
self.comm = comm
self.loss_keys = ["loss"]
def forward(self, mdl_out, inp):
labels_c1 = combine_first_ax(inp["label_tensor"])
mdl_preds = mdl_out["mdl_out"]
mdl_preds_c1 = combine_first_ax(mdl_preds)
loss = F.cross_entropy(mdl_preds_c1, labels_c1)
return {"loss": loss}
class LossLambda(nn.Module):
def __init__(self, cfg, comm):
super().__init__()
self.cfg = cfg
self.comm = comm
self.loss_keys = ["loss"]
def forward(self, mdl_out, inp):
assert "loss" in mdl_out
return {"loss": mdl_out["loss"]}
class TxEncoderOld(TransformerEncoder):
def __init__(self, cfg, comm):
self.full_cfg = cfg
self.comm = comm
# dictionary = comm.vb_id_vocab
dct_id = comm.dct_id
dictionary = comm[dct_id]
num_embeddings = len(dictionary)
padding_idx = dictionary.pad_token_id
args = cfg.tx_dec
embed_dim = args.encoder_embed_dim
embed_toks = nn.Embedding(num_embeddings, embed_dim, padding_idx)
super().__init__(args, dictionary, embed_toks)
self.after_init()
def after_init(self):
return
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
class TxEncoderNew(TxCodeEnc):
def __init__(self, cfg, comm):
self.full_cfg = cfg
self.comm = comm
# dictionary = comm.vb_id_vocab
# dictionary = comm.gpt2_hf_tok
# num_embeddings = len(dictionary)
# padding_idx = dictionary.pad_token_id
args = cfg.tx_dec
# embed_dim = args.encoder_embed_dim
# embed_toks = nn.Embedding(num_embeddings, embed_dim, padding_idx)
super().__init__(
d_model=1024,
n_vocab_src=0,
vocab_trg=0,
d_hidden=1024,
n_layers=args.encoder_layers,
n_heads=args.encoder_attention_heads,
drop_ratio=args.dropout,
pe=False,
)
def forward(
self,
src_tokens=None,
src_lengths=None,
return_all_hiddens=False,
token_embeddings=None,
):
assert token_embeddings is not None
enc_out = self.encoder(token_embeddings)[-1]
return EncoderOut(
encoder_out=enc_out.transpose(0, 1).contiguous(),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def get_enc_out_base(enc_out):
return EncoderOut(
encoder_out=enc_out, # T x B x C
encoder_padding_mask=None, # B x T
encoder_embedding=None, # B x T x C
encoder_states=None, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
class TxEncoderNew_Conc(TxEncoderOld):
def after_init(self):
self.orig_tx_out_comb = nn.Sequential(
*[nn.Linear(2048, 1024), nn.ReLU(), nn.Linear(1024, 1024)]
)
return
def forward(
self,
src_tokens,
src_lengths,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
tx_out = super().forward(
src_tokens=src_tokens,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
token_embeddings=token_embeddings,
)
# B x T x C
enc_out = tx_out.encoder_out.transpose(0, 1).contiguous()
enc_out2 = torch.cat([token_embeddings, enc_out], dim=-1)
enc_out3 = self.orig_tx_out_comb(enc_out2)
return get_enc_out_base(enc_out=enc_out3.transpose(0, 1).contiguous())
def TxEncoder(cfg, comm):
if cfg.mdl.tx_enc_type == "old":
return TxEncoderOld(cfg, comm)
elif cfg.mdl.tx_enc_type == "new":
return TxEncoderNew(cfg, comm)
elif cfg.mdl.tx_enc_type == "new_conc":
return TxEncoderNew_Conc(cfg, comm)
else:
raise NotImplementedError
class TxDecoderReal(TransformerDecoder):
def __init__(self, cfg, comm):
self.full_cfg = cfg
self.comm = comm
dictionary = comm.gpt2_hf_tok
num_embeddings = len(dictionary)
padding_idx = dictionary.pad_token_id
args = cfg.tx_dec
embed_dim = args.decoder_embed_dim
embed_toks = nn.Embedding(num_embeddings, embed_dim, padding_idx)
super().__init__(args, dictionary, embed_toks)
class GPT2_hf_fseqDec(HuggingFaceGPT2Decoder):
def __init__(self, cfg, comm):
self.full_cfg = cfg
self.comm = comm
dictionary = comm.gpt2_hf_tok
args = cfg
super().__init__(args, dictionary)
def TxDecoder(full_cfg, comm):
if full_cfg.mdl.tx_dec_type == "gpt2":
return GPT2_hf_fseqDec(full_cfg, comm)
elif full_cfg.mdl.tx_dec_type == "txdec":
return TxDecoderReal(full_cfg, comm)
else:
raise NotImplementedError
class Simple_GPT2(nn.Module):
"""
Simply Run a GPT2 model
Assumes Verbs are given
"""
def __init__(self, cfg, comm):
super().__init__()
self.full_cfg = cfg
self.cfg = cfg.mdl
self.comm = comm
self.build_model()
def build_model(self):
self.gpt2_mdl = GPT2LMHeadModel.from_pretrained(self.cfg.gpt2_mdl_name)
self.voc_size = len(self.comm.gpt2_hf_tok)
self.gpt2_mdl.resize_token_embeddings(self.voc_size)
self.pad_index = self.comm.gpt2_hf_tok.pad_token_id
self.bos_index = self.comm.gpt2_hf_tok.eos_token_id
return
def forward_gen(self, inp, *args):
src_toks1 = inp["seq_out_by_ev"][:, :, [0], :]
B, num_ev, num_seq_eg, seq_len = src_toks1.shape
src_toks = src_toks1.view(B * num_ev, num_seq_eg * seq_len)
inp_ids = src_toks[..., :1].contiguous()
wvoc = self.comm.gpt2_hf_tok
out_sents = self.gpt2_mdl.generate(
input_ids=inp_ids,
max_length=60,
use_cache=True,
num_beams=1,
num_return_sequences=1,
do_sample=False,
pad_token_id=wvoc.pad_token_id,
)
out_sents = out_sents.view(B, num_ev, num_seq_eg, -1)
return out_sents
def forward(self, inp):
src_toks1 = inp["seq_out_by_ev"][:, :, [0], :]
src_attn1 = inp["seq_out_lens_by_ev"][:, :, [0], :]
B, num_ev, num_seq_eg, seq_len = src_toks1.shape
assert num_seq_eg == 1
src_toks = src_toks1.view(B * num_ev, num_seq_eg * seq_len)
src_attn_mask = src_attn1.view(B * num_ev, num_seq_eg * seq_len)
out = self.gpt2_mdl(
input_ids=src_toks, attention_mask=src_attn_mask, return_dict=True,
)
# B*num_ev x num_seq_eg*seq_len x vocab_size
logits = out["logits"]
# out contains logits, past_key_vals
# logits of shape: B x seq_len x vocab_size
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = src_toks[..., 1:].contiguous()
loss = F.cross_entropy(
shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1),
ignore_index=self.pad_index,
)
out["loss"] = loss
return out
class GPT2_New(GPT2LMHeadModel):
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, **kwargs
):
# only last token for inputs_ids if past is defined in kwargs
if past is None:
if "vid_emb" in kwargs:
vid_emb = kwargs.pop("vid_emb")
input_embs = self.transformer.wte(input_ids)
input_embs_new = torch.cat([vid_emb, input_embs], dim=1)
return {
"inputs_embeds": input_embs_new,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
}
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
}
class Simple_GPT2_New(Simple_GPT2):
def build_model(self):
self.gpt2_mdl = GPT2_New.from_pretrained(self.cfg.gpt2_mdl_name)
self.voc_size = len(self.comm.gpt2_hf_tok)
self.gpt2_mdl.resize_token_embeddings(self.voc_size)
self.pad_index = self.comm.gpt2_hf_tok.pad_token_id
self.bos_index = self.comm.gpt2_hf_tok.eos_token_id
return
def forward_gen(self, inp, *args):
src_toks1 = inp["seq_out_by_ev"][:, :, [0], :]
B, num_ev, num_seq_eg, seq_len = src_toks1.shape
src_toks = src_toks1.view(B * num_ev, num_seq_eg * seq_len)
inp_ids = src_toks[..., :1].contiguous()
wvoc = self.comm.gpt2_hf_tok
out_sents = self.gpt2_mdl.generate(
input_ids=inp_ids,
max_length=60 + inp_ids.size(-1),
use_cache=True,
num_beams=1,
num_return_sequences=1,
do_sample=False,
pad_token_id=wvoc.pad_token_id,
)
out_sents = out_sents.view(B, num_ev, num_seq_eg, -1)
return out_sents
class Simple_TxDec(nn.Module):
def __init__(self, cfg, comm):
super(Simple_TxDec, self).__init__()
self.full_cfg = cfg
self.cfg = cfg.mdl
self.sf_cfg = cfg.sf_mdl
self.comm = comm
self.use_encoder = False
self.build_model()
def build_model(self):
self.decoder = TxDecoder(self.full_cfg, self.comm)
self.pad_index = self.comm.gpt2_hf_tok.pad_token_id
self.bos_index = self.comm.gpt2_hf_tok.eos_token_id
self.max_decoder_positions = lambda: 1024
self.get_normalized_probs = self.decoder.get_normalized_probs
return
def forward_encoder(self, inp):
return None
def prepare_prev_toks_inp(self, inp):
dst_toks1 = inp["seq_out_by_ev"][:, :, [0], :]
dst_attn1 = inp["seq_out_lens_by_ev"][:, :, [0], :]
vb_toks1 = inp["vb_out_by_ev"][:, :, [0], :]
B, num_ev, num_seq_eg, seq_len = dst_toks1.shape
assert num_seq_eg == 1
dst_toks = dst_toks1.view(B * num_ev, num_seq_eg * seq_len)
dst_attn_mask = dst_attn1.view(B * num_ev, num_seq_eg * seq_len)
dst_lens = dst_attn_mask.sum(dim=-1)
vb_toks = vb_toks1.view(B * num_ev, num_seq_eg * vb_toks1.size(-1))
return {"dst_toks": dst_toks, "dst_lens": dst_lens, "vb_only_tokens": vb_toks}
def forward_decoder(
self, prev_tokens, encoder_out, incremental_state=None, temperature=None
):
if isinstance(encoder_out, list) and len(encoder_out) == 0:
encoder_out = None
decoder_out = self.decoder(
prev_tokens, encoder_out=encoder_out, incremental_state=incremental_state
)
return decoder_out
def forward(self, inp):
inp_prep = self.prepare_prev_toks_inp(inp)
encoder_out = self.forward_encoder(inp)
prev_tokens = inp_prep["dst_toks"]
decoder_out = self.forward_decoder(
prev_tokens=prev_tokens, encoder_out=encoder_out
)
logits = decoder_out[0]
shift_logits = logits[..., :-1, :].contiguous()
labels = inp_prep["dst_toks"]
shifted_labels = labels[..., 1:].contiguous()
loss = F.cross_entropy(
shift_logits.view(-1, logits.size(-1)),
shifted_labels.view(-1),
ignore_index=self.pad_index,
)
out_dct = {"loss": loss, "logits": logits}
return out_dct
def forward_gen(self, inp, seq_gen: SeqGenCustom):
inp_prep = self.prepare_prev_toks_inp(inp)
inp["src_tokens"] = inp_prep["dst_toks"][..., :1]
inp["src_lengths"] = inp_prep["dst_lens"]
inp_ids = inp_prep["dst_toks"][..., :1]
out_sents = seq_gen._generate(inp, prefix_tokens=inp_ids)
src_toks1 = inp["seq_out_by_ev"][:, :, [0], :]
B, num_ev, num_seq_eg, seq_len = src_toks1.shape
max_len = max([len(o[0]["tokens"]) for o in out_sents])
B1 = inp_ids.size(0)
out_sents_tensor = inp_ids.new_full((B1, max_len), self.pad_index)
for ix in range(B1):
xtoks = out_sents[ix][0]["tokens"]
out_sents_tensor[ix, : len(xtoks)] = xtoks
out_sents1 = out_sents_tensor.view(B, num_ev, num_seq_eg, -1)
return out_sents1
class Simple_TxEncDec(Simple_TxDec):
def build_model(self):
super().build_model()
self.encoder = TxEncoder(self.full_cfg, self.comm)
self.use_encoder = True
return
def forward_encoder(self, inp):
src_toks = inp["src_tokens"]
src_lens = inp["src_lengths"]
encoder_out = self.encoder(
src_toks, src_lengths=src_lens, return_all_hiddens=True
)
return encoder_out
class Reorderer:
def reorder_encoder_out(self, encoder_out: EncoderOut, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
"""
Since encoder_padding_mask and encoder_embedding are both of type
Optional[Tensor] in EncoderOut, they need to be copied as local
variables for Torchscript Optional refinement
"""
encoder_padding_mask = encoder_out.encoder_padding_mask
encoder_embedding = encoder_out.encoder_embedding
new_encoder_out = (
encoder_out.encoder_out
if encoder_out.encoder_out is None
else encoder_out.encoder_out.index_select(1, new_order)
)
new_encoder_padding_mask = (
encoder_padding_mask
if encoder_padding_mask is None
else encoder_padding_mask.index_select(0, new_order)
)
new_encoder_embedding = (
encoder_embedding
if encoder_embedding is None
else encoder_embedding.index_select(0, new_order)
)
src_tokens = encoder_out.src_tokens
if src_tokens is not None:
src_tokens = src_tokens.index_select(0, new_order)
src_lengths = encoder_out.src_lengths
if src_lengths is not None:
src_lengths = src_lengths.index_select(0, new_order)
encoder_states = encoder_out.encoder_states
if encoder_states is not None:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return EncoderOut(
encoder_out=new_encoder_out, # T x B x C
encoder_padding_mask=new_encoder_padding_mask, # B x T
encoder_embedding=new_encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=src_tokens, # B x T
src_lengths=src_lengths, # B x 1
)
def get_head_dim(full_cfg) -> int:
if "i3d" in full_cfg.ds.vsitu.vsit_frm_feats_dir:
head_dim = 2048
elif ("slow_fast" in full_cfg.ds.vsitu.vsit_frm_feats_dir) or (
"sfast" in full_cfg.ds.vsitu.vsit_frm_feats_dir
):
head_dim = 2304
else:
raise NotImplementedError
return head_dim
class SFPreFeats_TxDec(Simple_TxDec, Reorderer):
def build_model(self):
super().build_model()
head_dim = get_head_dim(self.full_cfg)
self.vid_feat_encoder = nn.Sequential(
*[nn.Linear(head_dim, 1024), nn.ReLU(), nn.Linear(1024, 1024)]
)
self.use_encoder = True
return
def forward_encoder(self, inp):
frm_feats = inp["frm_feats"]
B = inp["vseg_idx"].size(0)
assert frm_feats.size(1) == 5
out = self.vid_feat_encoder(frm_feats)
out = out.view(B * 5, 1, -1)
encoder_out = EncoderOut(
encoder_out=out.transpose(0, 1).contiguous(), # 5 x B x vdim,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
return encoder_out
class SFPreFeats_TxEncDec(Simple_TxDec, Reorderer):
def build_model(self):
super().build_model()
head_dim = get_head_dim(self.full_cfg)
self.vid_feat_encoder = nn.Sequential(
*[nn.Linear(head_dim, 1024), nn.ReLU(), nn.Linear(1024, 1024)]
)
self.use_encoder = True
self.vid_feat_txenc = TxEncoder(self.full_cfg, self.comm)
return
def forward_encoder(self, inp):
frm_feats = inp["frm_feats"]
B = inp["vseg_idx"].size(0)
assert frm_feats.size(1) == 5
out = self.vid_feat_encoder(frm_feats)
out = out.view(B, 5, -1)
tx_out = self.vid_feat_txenc(
src_tokens=out[..., 0],
src_lengths=None,
return_all_hiddens=True,
token_embeddings=out,
)
enc_out_batch1 = tx_out.encoder_out.transpose(0, 1).contiguous()
enc_out2 = enc_out_batch1.view(B * 5, 1, -1)
enc_out3 = enc_out2.transpose(0, 1).contiguous()
encoder_out = EncoderOut(
encoder_out=enc_out3, # 1 x 5*B x vdim,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
return encoder_out
|
122847
|
from bltk.langtools.taggertools import adjective_suffix, noun_suffix, verb_suffix, pronouns
from bltk.langtools.pos_tagger import PosTagger
class UgraStemmer:
def __init__(self):
self.pos_tagger = PosTagger()
self.pronoun_values = list(pronouns.values())
self.pronoun_keys = list(pronouns.keys())
def get_tag(self, sentence: list):
tagged_sentence = self.pos_tagger.pos_tag(sentence)
return tagged_sentence
def stem(self, sentence: list):
stemmed = []
if len(sentence) == 0:
return None
elif len(sentence) > 0:
tagged_sentence = self.get_tag(sentence)
for word, tag in tagged_sentence:
if (str(word).endswith('ও') or str(word).endswith('ই')) and len(word) > 2:
                    word = word[:-1]  # drop the trailing 'ও'/'ই' particle
if tag in ["JJ", "JQ"]:
flag = False
for suf in adjective_suffix:
if str(word).endswith(suf) and (len(word) - len(suf) >= 2):
                            my_stem = str(word)[:-len(suf)]  # strip the matched suffix from the end
stemmed.append(my_stem)
flag = True
break
if not flag:
my_stem = word
stemmed.append(my_stem)
elif tag in ["NC", "NP", "NV", "NST"]:
flag = False
for suf in noun_suffix:
if str(word).endswith(suf) and (len(word) - len(suf) >= 2):
                            my_stem = str(word)[:-len(suf)]  # strip the matched suffix from the end
stemmed.append(my_stem)
flag = True
break
if not flag:
my_stem = word
stemmed.append(my_stem)
elif tag in ["VA", "VM"]:
flag = False
for suf in verb_suffix:
if str(word).endswith(suf) and (len(word) - len(suf) >= 2):
                            my_stem = str(word)[:-len(suf)]  # strip the matched suffix from the end
stemmed.append(my_stem)
flag = True
break
if not flag:
my_stem = word
stemmed.append(my_stem)
elif tag in ["PPR", "PRF", "PRC", "PRL", "PWH"]:
flag = False
for pronoun in self.pronoun_values:
if word in pronoun:
my_stem = self.pronoun_keys[self.pronoun_values.index(pronoun)]
stemmed.append(my_stem)
flag = True
break
if not flag:
my_stem = word
stemmed.append(my_stem)
else:
my_stem = word
stemmed.append(my_stem)
return stemmed
|
122851
|
import FWCore.ParameterSet.Config as cms
# Trigger Primitive Producer
from SimCalorimetry.EcalTrigPrimProducers.ecalTriggerPrimitiveDigis_readDBOffline_cfi import *
|
122873
|
from games.abstract_game import AbstractGame
import subprocess
from constants import *
import json
import platform
class Mario(AbstractGame):
"""
Represents a single Mario game.
"""
def __init__(self, model, game_batch_size, seed, level=None, vis_on=False, use_visualization_tool=False, test=False):
"""
        Initializes a new instance of the Mario game.
        :param model: Model which will be playing this game.
        :param game_batch_size: Number of games that will be played back to back within the single game
        instance. The result is averaged.
        :param seed: A random seed for the game's random generator.
        :param level: Level for the Mario game, e.g. 'gombas' or 'spikes'; used in combination with
        use_visualization_tool set to True.
        :param vis_on: Determines whether Mario will have visual output. Used in combination with
        use_visualization_tool set to True.
        :param use_visualization_tool: Determines whether to use the specific visualization tool. Starts a different subprocess.
        :param test: Indicates whether the game is in testing mode.
"""
super(Mario, self).__init__()
self.model = model
self.game_batch_size = game_batch_size
self.seed = seed
self.use_visualization_tool = use_visualization_tool
self.vis_on = "0"
if vis_on:
self.vis_on = "1"
self.level = level
def init_process(self):
"""
        Initializes a subprocess with the game and returns the first state of the game.
"""
windows = platform.system() == "Windows"
if self.use_visualization_tool:
params = ["java", "-cp", MARIO_CP, MARIO_VISUALISATION_CLASS, str(self.game_batch_size), str(self.level),
str(self.vis_on)]
if windows:
command = "{} {} {} {} {} {} {}".format(*params)
else:
command = params
else:
params = ["java", "-cp", MARIO_CP, MARIO_CLASS, str(self.seed), str(self.game_batch_size)]
if windows:
command = "{} {} {} {} {} {}".format(*params)
else:
command = params
self.process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
data = self.get_process_data()
return data["state"], data["current_phase"]
def get_process_data(self):
"""
        Gets the next data (line) from the subprocess.
        :return: the next data (line) from the subprocess.
"""
line = " "
# Skip non-json file outputs from mario
while line == '' or line[0] != '{':
# print("line: '{}'".format(line))
line = self.process.stdout.readline().decode('ascii')
return json.loads(line)
|
122885
|
from moto.core.exceptions import RESTError
class InvalidParameterValueError(RESTError):
def __init__(self, message):
super(InvalidParameterValueError, self).__init__(
"InvalidParameterValue", message
)
class ResourceNotFoundException(RESTError):
def __init__(self, message):
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException", message
)
|
122933
|
def dfs(at, graph, visited):
if visited[at]:
return
visited[at] = True
print(at, end=" -> ")
    neighbours = graph[at]
    for neighbour in neighbours:
        dfs(neighbour, graph, visited)
if __name__ == "__main__":
n = int(input("No. of Nodes : "))
graph = []
for i in range(n):
graph.append(list(map(int, input("Nodes linked with {} : ".format(i)).split())))
start_node = int(input("Starting Node : "))
# graph = [[1, 2], [3], [1], [2], [3, 5], [5]]
visited = [False] * n
print("\nNodes in DFS are : ", end=" ")
dfs(start_node, graph, visited)
print(" / ")
"""
Input :
No. of Nodes : 6
Nodes linked with 0 : 1 2
Nodes linked with 1 : 3
Nodes linked with 2 : 1
Nodes linked with 3 : 2
Nodes linked with 4 : 3 5
Nodes linked with 5 : 5
Starting Node : 4
Output:
Nodes in DFS are : 4 -> 3 -> 2 -> 1 -> 5 -> /
"""
|
122947
|
from ltypes import i32
def test_list_i32():
a: list[i32] = [1]
a.append(2)
a.append(3)
a.append(4)
a.append(5)
print(a[1])
assert a[1] == 2 or a[1] == 3
test_list_i32()
|
122961
|
import os
from pathlib import Path
import time
class Source_file_generator:
"""
Library to easily generate proximal functions
"""
number_of_spaces_in_tab=4
def __init__(self,location,function_type):
"""
Parameters
---------
location : target location
function_type : should be either "g" or "proxg"
"""
self._location=location
self._function_type=function_type
def open(self):
"""
        Open file stream
"""
file = Path(self._location)
if (file.exists()):
print(self._location + " already exists, removing it before adding the new file")
self._source_file = open(self._location, 'w')
self._source_file.write("/* file generated on " + time.strftime("%x") +
" at " + time.strftime("%H:%M:%S") + " */" + "\n\n")
if (self._function_type == "g"):
print("generating g-type function")
self._source_file.write("real_t casadi_interface_g(const real_t* state){\n")
elif (self._function_type == "proxg"):
print("generating proxg-type function")
self._source_file.write("void casadi_interface_proxg(real_t* state){\n")
        else:
            print("ERROR: wrong function_type, pick either 'g' or 'proxg'")
            self._source_file.close()
    def start_for(self,iterator_name,length,indent):
        self.write_line("size_t "+iterator_name+";",indent)
        self.write_line("for("+iterator_name+"=0;"+iterator_name+"<"+str(length)+";"+iterator_name+"++){",indent)
def close_for(self,indent):
self.write_line("}",indent)
def write_line(self,line,indent):
string_indent = " "*indent*Source_file_generator.number_of_spaces_in_tab
self._source_file.write(string_indent+line+"\n")
def write_define(self,name,value,indent):
self.write_line("#define "+name+" "+str(value),indent)
def write_comment_line(self,line,indent):
self.write_line("/* "+line+" */",indent)
def set_output(self,output_index,value,indent):
self.write_line("state["+str(output_index)+"]="+str(value)+";",indent)
def close(self):
self._source_file.write("\n}\n")
self._source_file.close()
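# Usage sketch (hypothetical target path; mirrors the API above):
#
# gen = Source_file_generator("/tmp/casadi_interface_g.c", "g")
# gen.open()
# gen.write_comment_line("generated evaluation loop", 1)
# gen.start_for("i", 10, 1)
# gen.set_output(0, 0.0, 2)
# gen.close_for(1)
# gen.close()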
|
122965
|
from conftest import rvo_output, rvo_err
from click.testing import CliRunner
from rvo import cli
def test_delete_yes():
options = ['delete', '569e5eed6815b47ce7bdb583', '--yes']
output = ["Removed"]
rvo_output(options,output)
def test_delete_input_yes():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '569e5eed6815b47ce7bdb583'], input="y\n")
assert not result.exception
assert result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_input_no():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '569e5eed6815b47ce7bdb583'], input="n\n")
assert not result.exception
assert not result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_input_default():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '569e5eed6815b47ce7bdb583'], input="\n")
assert not result.exception
assert not result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_nonexistent():
options = ['delete', '769e5eed6815b47ce7bdb583']
rvo_err(options)
def test_delete_shortid_yes():
options = ['delete', '2', '--yes']
output = ["Removed"]
rvo_output(options,output)
def test_delete_shortid_input_yes():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '1'], input="y\n")
assert not result.exception
assert result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_shortid_input_no():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '1'], input="n\n")
assert not result.exception
assert not result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_shortid_input_default():
runner = CliRunner()
result = runner.invoke(cli.cli, ['delete', '1'], input="\n")
assert not result.exception
assert not result.output.strip().endswith('Removed Nutella, Coffee, ninja')
def test_delete_shortid_nonexistent():
options = ['delete', '7']
rvo_err(options)
|
122982
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
    # Load the scikit-learn digits dataset (8x8 images)
digits = load_digits()
# Show some random digits
selection = np.random.randint(0, 1797, size=100)
fig, ax = plt.subplots(10, 10, figsize=(10, 10))
samples = [digits.data[x].reshape((8, 8)) for x in selection]
for i in range(10):
for j in range(10):
ax[i, j].set_axis_off()
            ax[i, j].imshow(samples[(i * 10) + j], cmap='gray')
plt.show()
# Perform a PCA on the digits dataset
pca = PCA(n_components=36, whiten=True)
    X_pca = pca.fit_transform(digits.data / 16.0)  # pixel values in this dataset range from 0 to 16
print('Explained variance ratio')
print(pca.explained_variance_ratio_)
# Plot the explained variance ratio
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
ax[0].set_xlabel('Component')
ax[0].set_ylabel('Variance ratio (%)')
ax[0].bar(np.arange(36), pca.explained_variance_ratio_ * 100.0)
ax[1].set_xlabel('Component')
ax[1].set_ylabel('Cumulative variance (%)')
    ax[1].bar(np.arange(36), np.cumsum(pca.explained_variance_ratio_) * 100.0)
plt.show()
# Rebuild from PCA and show the result
fig, ax = plt.subplots(10, 10, figsize=(10, 10))
samples = [pca.inverse_transform(X_pca[x]).reshape((8, 8)) for x in selection]
for i in range(10):
for j in range(10):
ax[i, j].set_axis_off()
            ax[i, j].imshow(samples[(i * 10) + j], cmap='gray')
plt.show()
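    # A common follow-up (sketch): a float n_components with the 'full' SVD
    # solver makes scikit-learn keep just enough components to reach that
    # fraction of explained variance.
    pca_95 = PCA(n_components=0.95, svd_solver='full')
    X_pca_95 = pca_95.fit_transform(digits.data / 16.0)
    print('Components needed for 95%% variance: %d' % pca_95.n_components_)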
|
123088
|
from hivemind.client.expert import RemoteExpert
from hivemind.client.moe import RemoteMixtureOfExperts
from hivemind.client.switch_moe import RemoteSwitchMixtureOfExperts
from hivemind.client.averaging import DecentralizedAverager
from hivemind.client.averaging.training import TrainingAverager
|
123131
|
import c4d
from c4d import documents
from . import Utilities as util
from .CustomIterators import ObjectIterator
from .Utilities import dazToC4Dutils
from .IkMax import applyDazIK, ikmaxUtils
from .AllSceneToZero import AllSceneToZero
dazName = "Object_"
class DazToC4D:
def figureFixBrute(self):
"""Hard Coded Fixes to Rig to Deal with Larger Character"""
doc = c4d.documents.GetActiveDocument()
def checkIfBrute():
isBrute = False
docMaterials = doc.GetMaterials()
for mat in docMaterials:
mapDiffuse = ""
try:
mapDiffuse = mat[c4d.MATERIAL_COLOR_SHADER][
c4d.BITMAPSHADER_FILENAME
]
                except Exception:
                    pass
if "Brute8" in mapDiffuse:
isBrute = True
return isBrute
def nullSize(nullName, rad=1, ratio=1):
dazName = "Genesis8Male_"
obj = doc.SearchObject(dazName + nullName)
if obj:
obj[c4d.NULLOBJECT_RADIUS] = rad
obj[c4d.NULLOBJECT_ASPECTRATIO] = ratio
c4d.EventAdd()
if checkIfBrute(): # If BRUTE8! Change Null Sizes!
nullSize("Pelvis_ctrl", 40, 0.8)
nullSize("Spine_ctrl", 30, 0.8)
nullSize("Chest_ctrl", 30, 0.8)
nullSize("Foot_PlatformBase", 9.3, 1.52)
nullSize("Foot_PlatformBase___R", 9.3, 1.52)
nullSize("Collar_ctrl", 20, 0.3)
nullSize("Collar_ctrl___R", 20, 0.3)
nullSize("ForearmTwist_ctrl", 11, 1.0)
nullSize("ForearmTwist_ctrl___R", 11, 1.0)
nullSize("IK_Hand", 7, 1.4)
nullSize("IK_Hand___R", 7, 1.4)
def freezeTwistBones(self):
doc = c4d.documents.GetActiveDocument()
nullForeArm = doc.SearchObject(dazName + "ForearmTwist_ctrl")
nullForeArmR = doc.SearchObject(dazName + "ForearmTwist_ctrl___R")
if nullForeArm:
nullForeArm.SetFrozenPos(nullForeArm.GetAbsPos())
nullForeArm.SetFrozenRot(nullForeArm.GetAbsRot())
nullForeArmR.SetFrozenPos(nullForeArmR.GetAbsPos())
nullForeArmR.SetFrozenRot(nullForeArmR.GetAbsRot())
nullForeArm.SetRelPos(c4d.Vector(0, 0, 0))
nullForeArm.SetRelRot(c4d.Vector(0, 0, 0))
nullForeArmR.SetRelPos(c4d.Vector(0, 0, 0))
nullForeArmR.SetRelRot(c4d.Vector(0, 0, 0))
def lockAllModels(self):
doc = documents.GetActiveDocument()
obj = doc.GetFirstObject()
scene = ObjectIterator(obj)
for obj in scene:
if (obj.GetType() == 5100) or (obj.GetType() == 1007455):
lockLayer = ikmaxUtils().layerSettings(obj, 1, 1)
def limitFloorContact(self):
doc = documents.GetActiveDocument()
obj = doc.GetFirstObject()
def addProtTag(obj):
xtag = c4d.BaseTag(c4d.Tprotection)
xtag[c4d.PROTECTION_P] = 2
xtag[c4d.PROTECTION_S] = False
xtag[c4d.PROTECTION_R] = False
xtag[c4d.PROTECTION_P_X] = False
xtag[c4d.PROTECTION_P_Y] = True
xtag[c4d.PROTECTION_P_Z] = False
xtag[c4d.PROTECTION_P_MIN_Y] = 0
xtag[c4d.PROTECTION_P_MAX_Y] = 1000000
obj.InsertTag(xtag)
doc = documents.GetActiveDocument()
obj = doc.GetFirstObject()
scene = ObjectIterator(obj)
for obj in scene:
if "Foot_PlatformBase" in obj.GetName():
addProtTag(obj)
c4d.EventAdd()
def protectTwist(self):
doc = c4d.documents.GetActiveDocument()
dazName = util.get_daz_name() + "_"
def addProtTag(obj):
xtag = c4d.BaseTag(c4d.Tprotection)
xtag[c4d.PROTECTION_P] = 1
xtag[c4d.PROTECTION_S] = False
xtag[c4d.PROTECTION_R] = 1
xtag[c4d.PROTECTION_R_X] = True
xtag[c4d.PROTECTION_R_Y] = False
xtag[c4d.PROTECTION_R_Z] = True
obj.InsertTag(xtag)
c4d.EventAdd()
nullForeArm = doc.SearchObject(dazName + "ForearmTwist_ctrl")
nullForeArmR = doc.SearchObject(dazName + "ForearmTwist_ctrl___R")
addProtTag(nullForeArm)
addProtTag(nullForeArmR)
def protectIKMControls(self):
def protectTag(jointName, protectPreset):
doc = documents.GetActiveDocument()
obj = doc.SearchObject(jointName)
tagProtec = c4d.BaseTag(5629) # Protection Tag
if protectPreset == "finger":
if obj:
obj[c4d.ID_BASEOBJECT_ROTATION_ORDER] = 6
tagProtec[c4d.PROTECTION_P_X] = False
tagProtec[c4d.PROTECTION_P_Y] = False
tagProtec[c4d.PROTECTION_P_Z] = False
tagProtec[c4d.PROTECTION_S_X] = False
tagProtec[c4d.PROTECTION_S_Y] = False
tagProtec[c4d.PROTECTION_S_Z] = False
tagProtec[c4d.PROTECTION_R_X] = True
tagProtec[c4d.PROTECTION_R_Y] = False
tagProtec[c4d.PROTECTION_R_Z] = True
if protectPreset == "position":
tagProtec[c4d.PROTECTION_P_X] = True
tagProtec[c4d.PROTECTION_P_Y] = True
tagProtec[c4d.PROTECTION_P_Z] = True
tagProtec[c4d.PROTECTION_S_X] = False
tagProtec[c4d.PROTECTION_S_Y] = False
tagProtec[c4d.PROTECTION_S_Z] = False
tagProtec[c4d.PROTECTION_R_X] = False
tagProtec[c4d.PROTECTION_R_Y] = False
tagProtec[c4d.PROTECTION_R_Z] = False
if protectPreset == "twist":
tagProtec[c4d.PROTECTION_P_X] = True
tagProtec[c4d.PROTECTION_P_Y] = True
tagProtec[c4d.PROTECTION_P_Z] = True
tagProtec[c4d.PROTECTION_S_X] = False
tagProtec[c4d.PROTECTION_S_Y] = False
tagProtec[c4d.PROTECTION_S_Z] = False
tagProtec[c4d.PROTECTION_R_X] = True
tagProtec[c4d.PROTECTION_R_Y] = False
tagProtec[c4d.PROTECTION_R_Z] = True
if obj:
obj.InsertTag(tagProtec)
c4d.EventAdd()
dazName = util.get_daz_name() + "_"
# LEFT
# protectTag(dazName + "jMiddle2", "finger")
# protectTag(dazName + "jMiddle3", "finger")
# protectTag(dazName + "jMiddle4", "finger")
# protectTag(dazName + "jRing2", "finger")
# protectTag(dazName + "jRing3", "finger")
# protectTag(dazName + "jRing4", "finger")
# protectTag(dazName + "jPink2", "finger")
# protectTag(dazName + "jPink3", "finger")
# protectTag(dazName + "jPink4", "finger")
# protectTag(dazName + "jIndex2", "finger")
# protectTag(dazName + "jIndex3", "finger")
# protectTag(dazName + "jIndex4", "finger")
# # RIGHT
# protectTag(dazName + "jMiddle2___R", "finger")
# protectTag(dazName + "jMiddle3___R", "finger")
# protectTag(dazName + "jMiddle4___R", "finger")
# protectTag(dazName + "jRing2___R", "finger")
# protectTag(dazName + "jRing3___R", "finger")
# protectTag(dazName + "jRing4___R", "finger")
# protectTag(dazName + "jPink2___R", "finger")
# protectTag(dazName + "jPink3___R", "finger")
# protectTag(dazName + "jPink4___R", "finger")
# protectTag(dazName + "jIndex2___R", "finger")
# protectTag(dazName + "jIndex3___R", "finger")
# protectTag(dazName + "jIndex4___R", "finger")
# MIDDLE
# protectTag(dazName + "Spine_ctrl", "position")
# protectTag(dazName + "Chest_ctrl", "position")
# protectTag(dazName + "Neck_ctrl", "position")
# protectTag(dazName + "Head_ctrl", "position")
def unhideProps(self):
doc = documents.GetActiveDocument()
obj = doc.SearchObject("hip")
        for o in ObjectIterator(obj):
if o.GetType() == 5100 or o.GetType() == 5140:
o[c4d.ID_BASEOBJECT_VISIBILITY_EDITOR] = 0
o[c4d.ID_BASEOBJECT_VISIBILITY_RENDER] = 0
c4d.EventAdd()
def autoIK(self, var):
doc = c4d.documents.GetActiveDocument()
obj = doc.SearchObject("hip")
if obj:
AllSceneToZero().sceneToZero()
applyDazIK(var)
dazToC4Dutils().changeSkinType()
self.unhideProps()
c4d.EventAdd()
c4d.DrawViews(
c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW
| c4d.DRAWFLAGS_NO_THREAD
| c4d.DRAWFLAGS_STATICBREAK
)
self.protectIKMControls()
self.limitFloorContact()
self.freezeTwistBones()
self.figureFixBrute()
self.protectTwist()
|
123132
|
import json
from enum import Enum
from test.util import TestCase
from OpenCast.app.command.video import CreateVideo
from OpenCast.app.tool.json_encoder import (
EnhancedJSONEncoder,
EventEncoder,
ModelEncoder,
)
from OpenCast.domain.event.video import VideoCreated
from OpenCast.domain.model.player import Player
from OpenCast.domain.model.video import Path, Stream, Video
from OpenCast.domain.service.identity import IdentityService
class EnhancedEncoderTest(TestCase):
def test_encode_id(self):
id = IdentityService.random()
json.dumps({"id": id}, cls=EnhancedJSONEncoder)
def test_encode_path(self):
path = Path(".")
json.dumps({"path": path}, cls=EnhancedJSONEncoder)
def test_encode_enum(self):
class Color(Enum):
RED = 1
color = Color.RED
json.dumps({"color": color}, cls=EnhancedJSONEncoder)
class ModelEncoderTest(TestCase):
def test_encode_player(self):
player_id = IdentityService.id_player()
queue_id = IdentityService.id_playlist()
player = Player(player_id, queue_id)
json.dumps({"id": IdentityService.random(), "player": player}, cls=ModelEncoder)
def test_encode_video(self):
video_id = IdentityService.id_video("source")
video = Video(video_id, "source", "protocol", "title", "album", "thumbnail")
video.location = "/tmp/video.mp4"
video.streams = [Stream(0, "audio", "en")]
video.subtitle = Path("/tmp/video.srt")
json.dumps({"id": IdentityService.random(), "video": video}, cls=ModelEncoder)
class EventEncoderTest(TestCase):
def test_encode_event(self):
video_id = IdentityService.id_video("source")
collection_id = None
cmd_id = IdentityService.id_command(CreateVideo, video_id)
event = VideoCreated(
cmd_id,
video_id,
"source",
collection_id,
"album",
"title",
"protocol",
"thumbnail",
)
json.dumps({"id": IdentityService.random(), "event": event}, cls=EventEncoder)
|
123134
|
import subprocess, re, sys, os, os.path, shutil, time, glob
ROOT="/home/blackie/dump/KDABViewer"
BUILDROOT=ROOT+"/build"
ITERATIONS=5
FOREAL=1
CCACHE="/usr/lib/ccache"
def runCommand(cmd):
print(" ".join(cmd))
if FOREAL:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
res = process.communicate()[0]
#print(res)
def nukeBuildDir():
if FOREAL:
shutil.rmtree(BUILDROOT)
os.mkdir(BUILDROOT)
def clearCCache():
runCommand(["/usr/bin/ccache", "-C"])
def runCMake(clang, ninja, define=None):
command=["cmake"]
if clang:
command = command + ["-DCMAKE_CXX_COMPILER=clang++"]
if ninja:
command = command + ["-G", "Ninja"]
if define:
command = command + ["-DCMAKE_CXX_FLAGS=-D" + define]
command = command + [".."]
runCommand(command)
def compile(ninja):
os.environ["MAKEFLAGS"]="-j 16"
command = ["make", "-j", "16"]
if ninja:
command = ["ninja"]
runCommand(command)
def setOutputFile(filename):
global TIMINGFILE
TIMINGFILE="/home/blackie/profiling/"+filename
writeHeader()
def writeHeader():
    with open(TIMINGFILE, "w") as FILE:
        FILE.write("compiler,build system,ccache on,stage,time\n")
def addOutputLine(clang,ninja,step,time):
    ccacheon = "ccache" in os.environ["PATH"]
    with open(TIMINGFILE, "a+") as FILE:
        FILE.write("%s,%s,%s,%s,%s\n" % ("clang" if clang else "gcc", "ninja" if ninja else "make", "yes" if ccacheon else "no", step, int(round(time))))
def makeClean(ninja):
runCommand(["ninja" if ninja else "make", "clean"])
def timeAndWrite(clang,ninja,step):
start=time.time()
compile(ninja)
end = time.time()
addOutputLine(clang,ninja, step, end-start)
def setOrUnsetCCacheInPath(doInclude):
    path = os.environ["PATH"].split(":")
    # filter() returns an iterator in Python 3, so materialize it as a list
    # before concatenating below
    path = [item for item in path if "ccache" not in item]
    if doInclude:
        path = [CCACHE] + path
    os.environ["PATH"] = ":".join(path)
# ---------------------------- Test functions
def oneGoWithCompilerAndBuildSystem(clang,ninja):
clearCCache()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=clang, ninja=ninja)
timeAndWrite(clang, ninja, "full build")
# rebuild after make clean
clearCCache()
makeClean(ninja)
timeAndWrite(clang, ninja, "clean build")
def compileAndBuildSystemTest():
setOutputFile("compilerAndBuild.csv")
setOrUnsetCCacheInPath(0)
for round in range(ITERATIONS):
print("compileAndBuildSystemTest------> Round %d of %d" % (round+1, ITERATIONS))
for ninja in [0, 1]:
for clang in [0,1]:
                oneGoWithCompilerAndBuildSystem(clang=clang,ninja=ninja)
def ccacheTest():
setOutputFile("ccache.csv")
for useccache in [0,1]:
setOrUnsetCCacheInPath(useccache)
for round in range(ITERATIONS):
print("ccacheTest------> Round %d of %d (using CCache=%s)" % (useccache*ITERATIONS + round+1, ITERATIONS*2,"yes" if useccache else "no"))
        oneGoWithCompilerAndBuildSystem(clang=1, ninja=1)
def runPCHMutation(headers):
for index in range(len(headers)+1):
subList = headers[:index]
if FOREAL:
FILE = open(ROOT + "/KDABViewer_pch.h","w")
for x in subList:
FILE.write("#include <%s>\n" % x)
FILE.close()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchTest------> Round %d of %d" % (index*ITERATIONS + round+1, ITERATIONS*len(headers)+1))
clearCCache()
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="/" + "/".join(subList))
def pchTest():
setOutputFile("PCH.csv")
setOrUnsetCCacheInPath(0)
runPCHMutation(["QtWidgets", "QtGui", "QtCore", "KDChart", "memory", "functional"]) # "chrono", "cmath", "optional", "mutex", "array", "vector", "numeric", "algorithm"
runPCHMutation(["QtCore", "QtGui", "QtWidgets"])
# -------- pchMostUsedTest
REGEXP = re.compile("^#include *<(Q.*/)?([a-zA-Z_]+)>")
def countIncludes(filename, map):
with open(filename) as fp:
for line in fp.readlines():
match = REGEXP.match(line)
if match:
str = match.group(2)
if str in map:
map[str]= map[str]+1
else:
map[str] = 1
def findSystemIncludes():
map = {}
for filename in glob.glob(ROOT + "/**/*.cpp", recursive=1)+ glob.glob(ROOT + "/**/*.h",recursive=1) :
if "3rdparty" in filename or "prefix" in filename or "xternal" in filename:
continue
countIncludes(filename, map)
list = sorted(map.items(), key=lambda x: x[1])
list.reverse()
print(list)
return [key for (key,count) in list]
def pchMostUsedTest():
setOutputFile("PCH-most-used.csv")
setOrUnsetCCacheInPath(0)
nukeBuildDir()
os.chdir(BUILDROOT)
    # We need to build it all first, so we get all the ui_* files into existence
runCMake(clang=1,ninja=1)
compile(ninja=1)
list = findSystemIncludes()
steps=len(list)
for stage in range(steps):
with open(ROOT + "/KDABViewer_pch.h","w") as FILE:
for i in range(stage):
FILE.write("#include<%s>\n" % list[i])
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchMostUsedTest------> Round %d of %d" % (stage*ITERATIONS + round+1, ITERATIONS*steps))
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="%d" % stage)
#compileAndBuildSystemTest()
#ccacheTest()
#pchTest()
#pchMostUsedTest()
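# Sketch: the CSV files written above can be aggregated afterwards, e.g. with
# pandas (assumed installed; column names match writeHeader above):
#
# import pandas as pd
# df = pd.read_csv("/home/blackie/profiling/compilerAndBuild.csv")
# print(df.groupby(["compiler", "build system", "stage"])["time"].mean())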
|
123147
|
import os, math
import errno
import stat
from py.builtin import sorted
from pypy.tool import udir
from pypy.rpython.test.test_rbuiltin import BaseTestRbuiltin
from pypy.rpython.module.test.test_ll_time import BaseTestTime as llBaseTestTime
class BaseTestBuiltin(BaseTestRbuiltin):
def test_os_flags(self):
from pypy.translator.oosupport.support import NT_OS
def fn():
return os.O_CREAT
assert self.interpret(fn, []) == NT_OS['O_CREAT']
def test_os_read_binary_crlf(self):
tmpfile = str(udir.udir.join("os_read_test"))
def fn(flag):
if flag:
fd = os.open(tmpfile, os.O_RDONLY|os.O_BINARY, 0666)
else:
fd = os.open(tmpfile, os.O_RDONLY, 0666)
res = os.read(fd, 4096)
os.close(fd)
return res
f = file(tmpfile, 'w')
f.write('Hello\nWorld')
f.close()
res = self.ll_to_string(self.interpret(fn, [True]))
assert res == file(tmpfile, 'rb').read()
res = self.ll_to_string(self.interpret(fn, [False]))
assert res == file(tmpfile, 'r').read()
def test_os_dup_oo(self):
tmpdir = str(udir.udir.join("os_dup_oo"))
def fn():
fd = os.open(tmpdir, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0777)
os.write(fd, "hello world")
fd2 = os.dup(fd)
os.write(fd2, " (dupped)")
os.close(fd)
try:
os.write(fd2, " (uh oh)")
except OSError, e:
return e.errno
return -1
assert self.interpret(fn, []) == 5 # EIO
assert file(tmpdir).read() == 'hello world (dupped)'
# the following tests can't be executed with gencli because they
# returns file descriptors, and cli code is executed in another
# process. Instead of those, there are new tests that opens and
# write to a file all in the same process.
def test_os_dup(self):
pass
def test_os_write(self):
pass
def test_os_write_single_char(self):
pass
def test_os_open(self):
pass
def test_os_open_write(self):
tmpdir = str(udir.udir.join("os_write_test"))
def fn():
fd = os.open(tmpdir, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0777)
os.write(fd, "hello world")
os.close(fd)
self.interpret(fn, [])
assert file(tmpdir).read() == 'hello world'
def test_os_write_magic(self):
MAGIC = 62061 | (ord('\r')<<16) | (ord('\n')<<24)
tmpfile = str(udir.udir.join("os_write_test"))
def long2str(x):
a = x & 0xff
x >>= 8
b = x & 0xff
x >>= 8
c = x & 0xff
x >>= 8
d = x & 0xff
return chr(a) + chr(b) + chr(c) + chr(d)
def fn(magic):
fd = os.open(tmpfile, os.O_BINARY|os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0777)
os.write(fd, long2str(magic))
os.close(fd)
self.interpret(fn, [MAGIC])
contents = file(tmpfile, 'rb').read()
assert contents == long2str(MAGIC)
def test_os_stat(self):
def fn(flag):
if flag:
return os.stat('.')[0]
else:
return os.stat('.').st_mode
mode = self.interpret(fn, [0])
assert stat.S_ISDIR(mode)
mode = self.interpret(fn, [1])
assert stat.S_ISDIR(mode)
def test_os_stat_oserror(self):
def fn():
return os.stat('/directory/unlikely/to/exists')[0]
self.interpret_raises(OSError, fn, [])
def test_os_strerror(self):
def fn():
return os.strerror(errno.ENOTDIR)
res = self.ll_to_string(self.interpret(fn, []))
# XXX assert something about res
def test_environ(self):
def fn():
os.environ['PYPY_TEST_ENVIRON'] = '42'
return os.environ['PYPY_TEST_ENVIRON']
assert self.interpret(fn, []) == '42'
def test_environ_items(self):
def fn():
env = os.environ.items()
env2 = []
for key in os.environ.keys():
env2.append((key, os.environ[key]))
assert env == env2
self.interpret(fn, [])
def test_os_listdir(self):
def fn():
return os.listdir('.')
res = self.ll_to_list(self.interpret(fn, []))
res = [self.ll_to_string(s) for s in res]
res.sort()
assert res == sorted(os.listdir('.'))
# XXX: remember to test ll_os_readlink and ll_os_pipe as soon as
# they are implemented
def test_math_modf(self):
def fn(x):
return math.modf(x)
for x in (.5, 1, 1.5):
for y in (1, -1):
act_res = self.interpret(fn, [x*y])
exp_res = math.modf(x*y)
assert act_res.item0 == exp_res[0]
assert act_res.item1 == exp_res[1]
class BaseTestTime(llBaseTestTime):
def test_time_clock(self):
import time
def f():
return time.clock(), time.clock(), time.clock()
res = self.interpret(f, [])
t1, t2, t3 = self.ll_to_tuple(res)
assert 0 <= t1 <= t2 <= t3
|
123205
|
import numpy
import json
import os
import sys
import time
import sh_common
if len(sys.argv) != 2:
print("import_vgg7.py JSONPATH")
print(" i.e. import_vgg7.py /home/you/Documents/External/waifu2x/models/vgg_7/art/scale2.0x_model.json")
sys.exit(1)
try:
    os.mkdir("model-kipper")
except FileExistsError:
    pass
data_list = json.load(open(sys.argv[1], "rb"))
idx = 0
for i in range(7):
layer = data_list[i]
w = numpy.array(layer["weight"])
    # reshape/transpose return new arrays, so keep the result
    w = w.reshape((-1, 3, 3)).transpose((0, 2, 1))
b = numpy.array(layer["bias"])
sh_common.save_param("kipper", idx, w)
idx += 1
sh_common.save_param("kipper", idx, b)
idx += 1
|
123241
|
import time
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
class RebootTest(oeRuntimeTest):
'''Reboot target device
@class RebootTest
'''
def setUp(self):
'''pre condition check
@fn setUp
@param self
@return
'''
self.assertTrue(self._alive(), msg="device is not alive before test")
def _alive(self):
'''check if device alive
@fn _alive
@param self
@return
'''
#ret = shell_cmd_timeout("ssh -o ConnectTimeout=5 -o UserKnownHostsFile=/dev/null \
# -o StrictHostKeyChecking=no root@%s '/bin/true'" % self.target.ip)[0]
(ret, output) = self.target.run('/bin/true', 10)
return True if ret == 0 else False
def _wait_offline(self):
'''wait till device offline
@fn _wait_offline
@param self
@return
'''
for _ in range(60):
if not self._alive():
return True
time.sleep(1)
return False
def _wait_online(self):
'''wait till device online
@fn _wait_online
@param self
@return
'''
for _ in range(60):
if self._alive():
return True
time.sleep(1)
return False
def test_reboot(self):
'''reboot target device
@fn test_reboot
@param self
@return
'''
for cnt in range(1):
print "Reboot %d time" % cnt
ret = self.target.run('reboot &', 5)[0]
# self.assertEqual(ret, 0, msg="Fail to trigger reboot command")
time.sleep(4)
status = self._wait_offline()
##
# TESTPOINT: #1, test_reboot
#
self.assertTrue(status, msg="Fail to drive system off")
time.sleep(4)
status = self._wait_online()
##
# TESTPOINT: #2, test_reboot
#
self.assertTrue(status, msg="Fail to bring up system")
|
123263
|
import pytest
from django.contrib.auth import get_user_model
from django.core.management import call_command
from parkings.models import ParkingArea, PaymentZone, PermitArea
from ..management.commands import (
import_parking_areas, import_payment_zones, import_permit_areas)
from .request_mocking import mocked_requests
@pytest.mark.django_db
def test_import_parking_areas():
with mocked_requests():
call_command(import_parking_areas.Command())
assert ParkingArea.objects.count() == 1
@pytest.mark.django_db
def test_import_payment_zones():
with mocked_requests():
call_command(import_payment_zones.Command())
assert PaymentZone.objects.count() == 1
@pytest.mark.django_db
def test_permit_area_importer():
with mocked_requests():
test_user = get_user_model().objects.create(username='TEST_USER', is_staff=True)
call_command(import_permit_areas.Command(), test_user.username)
assert PermitArea.objects.count() == 1
|
123267
|
import json
import pytest
import requests
from mjolnir.kafka.msearch_daemon import Daemon, FlexibleInterval, MetricMonitor, StreamingEMA
def test_consume_nothing(mocker):
mocker.patch('kafka.KafkaProducer')
# Test it doesn't blow up
Daemon(None).consume([])
def test_consume_end_sigil(mocker, monkeypatch):
# simple mock we can observe
monkeypatch.setattr('kafka.KafkaProducer', MockProducer)
# Fetching result offsets uses the consumer
mock = mocker.patch('kafka.KafkaConsumer')
mock.partitions_for_topic.return_value = [0]
mock.position.return_value = 42
# Finally we can run something
daemon = Daemon(None)
daemon.consume([
{'run_id': 'abc', 'meta': {}, 'complete': True, 'partition': 0}
])
assert len(daemon.producer.sent) == 0
assert len(daemon.ack_all_producer.sent) == 1
sent_topic, sent_message = daemon.ack_all_producer.sent[0]
sent_message = json.loads(sent_message.decode('utf8'))
assert sent_message['run_id'] == 'abc'
def test_consume_msearch_req(mocker, monkeypatch):
# Mock out the actual search request
make_req_mock = mocker.patch('mjolnir.cirrus.make_request')
make_req_mock.return_value = requests.models.Response()
# simple mock we can observe
monkeypatch.setattr('kafka.KafkaProducer', MockProducer)
daemon = Daemon(None)
daemon.consume([
{'run_id': 'zyx', 'meta': {}, 'request': 'invalid unit test req'}
])
assert len(daemon.producer.sent) == 1
assert len(daemon.ack_all_producer.sent) == 0
sent_topic, sent_message = daemon.producer.sent[0]
sent_message = json.loads(sent_message.decode('utf8'))
assert sent_message['run_id'] == 'zyx'
class MockFuture(object):
def add_errback(self, fn):
pass
def get(self):
pass
class MockProducer(object):
def __init__(self, *args, **kwargs):
self.sent = []
def send(self, topic, message):
self.sent.append((topic, message))
return MockFuture()
def close(self):
pass
def flush(self):
pass
def test_streaming_ema():
# Starts in a not-ready state
now = 0
alpha = 0.1
ema = StreamingEMA(alpha=alpha, max_sec_valid=60, clock=lambda: now)
assert not ema.is_below_threshold(100)
# Providing a single value isn't enough to initialize the state
ema.update(42)
assert not ema.is_below_threshold(100)
# Providing a second value gets everything going
now += 1
ema.update(43)
assert ema.is_below_threshold(100)
assert ema.value == 1
# Simple check of ema calculation
now += 1
ema.update(45)
assert ema.value == (alpha * 2) + ((1 - alpha) * 1)
# Waiting multiple seconds between updates spreads the count out.
# 200 ops in 10 seconds, 20ops/s.
now += 10
ema.update(245)
assert ema.value == pytest.approx(13.41, abs=0.01)
assert ema.is_below_threshold(100)
# The metric is only valid without updates for max_sec_valid
now += 60
assert not ema.is_valid
assert not ema.is_below_threshold(100)
# Updating with large values moves above threshold
ema.update(10000)
assert ema.is_valid
assert not ema.is_below_threshold(100)
    # When the counter resets (shard move?) we do not become invalid
    # and the metric increases.
now += 15
old_value = ema.value
ema.update(0)
assert ema.is_valid
assert ema.value > old_value
assert not ema.is_below_threshold(100)
# We can transition back below threshold
now += 60
ema.update(60)
assert ema.is_valid
assert ema.is_below_threshold(100)
class LatestValue(object):
is_valid = False
value = None
def is_below_threshold(self, threshold):
return self.is_valid and self.value < threshold
def update(self, value):
self.value = value
self.is_valid = True
def test_prod_load_monitor(mocker):
mocker.patch('mjolnir.kafka.msearch_daemon.FlexibleInterval')
stat = 1000
monitor = MetricMonitor(lambda: stat, LatestValue(), threshold=916)
# Starts in a disabled state
assert not monitor.is_below_threshold
# Transition to valid state
stat = 408
monitor.update_metric()
assert monitor.is_below_threshold
# Transition back to disabled state
stat = 1000
monitor.update_metric()
assert not monitor.is_below_threshold
# New monitor with values above threshold does not activate
monitor = MetricMonitor(lambda: stat, LatestValue(), threshold=917)
monitor.update_metric()
assert not monitor.is_below_threshold
def test_flexible_interval():
now = 0
def clock():
return now
interval = FlexibleInterval(min_value=20, max_value=200, ratio=0.5, clock=clock)
assert interval.value == 20
interval.decrease()
assert interval.value == 20
interval.increase()
assert interval.value == 30
interval.increase()
assert interval.value == 45
interval.value = 200
interval.increase()
assert interval.value == 200
interval.decrease()
assert interval.value == 20
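# Hedged sketch: a minimal FlexibleInterval consistent with the behaviour
# asserted above (the real implementation lives in
# mjolnir.kafka.msearch_daemon and may differ). It assumes increase()
# multiplies the value by (1 + ratio), capped at max_value, and decrease()
# snaps back to min_value.
class SketchFlexibleInterval(object):
    def __init__(self, min_value, max_value, ratio, clock):
        self.min_value = min_value
        self.max_value = max_value
        self.ratio = ratio
        self.clock = clock
        self.value = min_value
    def increase(self):
        # 20 -> 30 -> 45 -> ... capped at max_value
        self.value = min(self.max_value, self.value * (1 + self.ratio))
    def decrease(self):
        # resets straight back to the minimum
        self.value = self.min_value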
|
123303
|
import carla
import os
import sys
import cv2
import json
import numpy as np
CARLA_ROOT = os.getenv("CARLA_ROOT")
if CARLA_ROOT is None:
raise ValueError("CARLA_ROOT must be defined.")
scriptdir = os.path.join(CARLA_ROOT, "PythonAPI/")
sys.path.append(scriptdir)
from examples.synchronous_mode import CarlaSyncMode
scriptdir = os.path.abspath(__file__).split('scripts')[0] + 'scripts/carla/'
sys.path.append(scriptdir)
from scenarios.run_intersection_scenario import CarlaParams, DroneVizParams, VehicleParams, PredictionParams, RunIntersectionScenario
def setup_intersection_scenario(scenario_dict, ego_init_dict, savedir):
    # This is only used to start up the scenario with vehicles determining the route.
    # The route is queried for overlays; the actual policies are never run here.
carla_params = CarlaParams(**scenario_dict["carla_params"])
drone_viz_params = DroneVizParams(**scenario_dict["drone_viz_params"])
pred_params = PredictionParams()
vehicles_params_list = []
policy_type = "lk_pi"
for vp_dict in scenario_dict["vehicle_params"]:
if vp_dict["role"] == "static":
# Not generating static vehicles
# vehicles_params_list.append( VehicleParams(**vp_dict) )
continue
elif vp_dict["role"] == "target":
vp_dict["policy_type"] = policy_type
vehicles_params_list.append( VehicleParams(**vp_dict) )
elif vp_dict["role"] == "ego":
vp_dict.update(ego_init_dict)
vp_dict["policy_type"] = policy_type
vehicles_params_list.append( VehicleParams(**vp_dict) )
else:
raise ValueError(f"Invalid vehicle role: {vp_dict['role']}")
runner = RunIntersectionScenario(carla_params,
drone_viz_params,
vehicles_params_list,
pred_params,
savedir)
return runner
def get_drone_snapshot(runner):
# Get a single drone image on which to overlay trajectories.
img_drone = None
with CarlaSyncMode(runner.world, runner.drone, fps=runner.carla_fps) as sync_mode:
_, img = sync_mode.tick(timeout=runner.timeout)
img_drone = np.frombuffer(img.raw_data, dtype=np.uint8)
img_drone = np.reshape(img_drone, (img.height, img.width, 4))
img_drone = img_drone[:, :, :3]
img_drone = cv2.resize(img_drone, (runner.viz_params.img_width, runner.viz_params.img_height), interpolation = cv2.INTER_AREA)
return img_drone
def overlay_trajectories(img, runner, line_thickness=5, goal_radius=10):
# Code to overlay the reference trajectories for every agent.
def xy_to_px_center(xy):
px = runner.A_world_to_drone @ xy + runner.b_world_to_drone
center_x = int(px[0])
center_y = int(px[1])
return center_x, center_y
for (veh_policy, veh_color) in zip(runner.vehicle_policies, runner.vehicle_colors):
veh_color = veh_color[::-1] # RGB to BGR
xy_traj = veh_policy._frenet_traj.trajectory[:, 1:3]
pts = [xy_to_px_center(xy) for xy in xy_traj]
for px_ind in range(len(pts)-1):
cv2.line(img, pts[px_ind], pts[px_ind+1], veh_color, thickness=line_thickness)
cv2.circle(img, pts[-1], goal_radius, veh_color, thickness=-1)
if __name__ == '__main__':
TOWN_NUM = 7 # 5 or 7
if TOWN_NUM == 5:
scenario_suffix = ""
scenarios_to_overlay = [1, 2, 3]
elif TOWN_NUM == 7:
scenario_suffix = "_t7"
scenarios_to_overlay = [1, 2, 3, 4]
else:
raise ValueError(TOWN_NUM)
img = None
for scenario_num in scenarios_to_overlay:
# Loading + Setup.
scenario_path = os.path.join(scriptdir, f"scenarios/scenario_{scenario_num:02d}{scenario_suffix}.json")
ego_init_path = os.path.join(scriptdir, "scenarios/ego_init_00.json")
scenario_dict = json.load(open(scenario_path, "r"))
ego_init_dict = json.load(open(ego_init_path, "r"))
scenario_name = scenario_path.split("/")[-1].split('.json')[0]
savedir = os.path.join( os.path.abspath(__file__).split("scripts")[0], "results/route_viz/" )
runner = None
try:
runner = setup_intersection_scenario(scenario_dict, ego_init_dict, savedir)
img = get_drone_snapshot(runner)
overlay_trajectories(img, runner)
except Exception as e:
print(e)
finally:
if runner:
for actor in runner.vehicle_actors:
actor.destroy()
runner.drone.destroy()
cv2.destroyAllWindows()
        if img is not None:
            cv2.imwrite(os.path.join(savedir, f"scenario_route{scenario_suffix}_{scenario_num}.png"), img)
|
123331
|
from decimal import Decimal
from unittest import TestCase
import copy
import unittest
import os
import sys
from mock import Mock, call
from mockextras import stub
sys.path = [os.path.abspath(os.path.join('..', os.pardir))] + sys.path
from digesters.charges.charge_card_digester import ChargeCardDigester
PIMORONI_CHARGE = {
1460185000: {
"amt": Decimal(4.00),
"type": "Charge",
"curr": "GBP",
"vendor": "Pimoroni",
"card": "Amex 1234"
}
}
PIMORONI_CHARGE_WITH_WHEN_STR = copy.deepcopy(PIMORONI_CHARGE)
PIMORONI_CHARGE_WITH_WHEN_STR[1460185000]['when_str'] = 'Apr---09 02:56'
PIHUT_CHARGE = {
1460184000: {
"amt": Decimal(5.00),
"type": "Charge",
"curr": "USD",
"vendor": "PiHut",
"card": "Amex 1234"
}
}
PIHUT_CHARGE_WITH_WHEN_STR = copy.deepcopy(PIHUT_CHARGE)
PIHUT_CHARGE_WITH_WHEN_STR[1460184000]['when_str'] = 'Apr---09 02:40'
PIHUT_AND_PIMORONI_CHARGE_WITH_WHEN_STR = copy.deepcopy(PIMORONI_CHARGE_WITH_WHEN_STR)
PIHUT_AND_PIMORONI_CHARGE_WITH_WHEN_STR[1460184000] = copy.deepcopy(PIHUT_CHARGE_WITH_WHEN_STR[1460184000])
MAIL_HDR = """From: "Charge Cards" <<EMAIL>>
Date: Sat, 09 Apr 2016 06:56:40 -0000
Content-Transfer-Encoding: 8bit
Content-Type: multipart/alternative; boundary="---NOTIFICATION_BOUNDARY-5678"
MIME-Version: 1.0
This is a multi-part message in MIME format.
-----NOTIFICATION_BOUNDARY-5678
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 8bit
"""
class TestChargeCardDigester(TestCase):
def test_no_previous_email_yet_one_old_and_one_new_charge_yields_only_the_newer_charge_in_the_email(self):
store_writer = Mock()
store_writer.get_from_binary.side_effect = stub(
(call('charges'), {
"charges": PIHUT_CHARGE,
"most_recent_seen": 1460184000
}),
(call('most-recently-seen'), 1460184000)
)
store_writer.store_as_binary.side_effect = stub(
(call('charges', {
'charges': PIMORONI_CHARGE_WITH_WHEN_STR,
'most_recent_seen': 1460184000
}), True),
(call('most-recently-seen', 1460184000), True)
)
digester = ChargeCardDigester(store_writer) ## What we are testing
expected_payload = """<table>
<tr style="background-color: #acf;">
<th>Type</th><th>Vendor</th><th>When</th><th>Curr</th><th>Amt</th><th>Card</th>
</tr>
<tr style="">
<td>Charge</td>
<td>Pimoroni</td>
<td>Apr 09 02:56</td>
<td>GBP</td>
<td style="text-align: right;"><b>4</b></td>
<td>Amex 1234</td>
</tr>
</table>"""
expected_message = ("Subject: Spending Digest\n"
+ MAIL_HDR + expected_payload + "\n\n-----NOTIFICATION_BOUNDARY-5678")
digest_inbox_proxy = Mock()
digest_inbox_proxy.delete_previous_message.side_effect = stub((call(), True))
digest_inbox_proxy.append.side_effect = stub((call(expected_message), True))
digester.new_charge_summary = PIMORONI_CHARGE
digester.notification_boundary_rand = "5678"
digester.rewrite_digest_emails(digest_inbox_proxy, False, False, "<EMAIL>")
self.assertEqual(digest_inbox_proxy.mock_calls,
[call.append(expected_message)])
calls = store_writer.mock_calls
self.assertEqual(calls, [
call.get_from_binary('charges'),
call.store_as_binary('charges', {
'charges': PIMORONI_CHARGE_WITH_WHEN_STR,
'most_recent_seen': 1460184000
})
])
def test_with_a_previous_email_and_one_old_and_one_new_charge_yields_both_charges_in_the_email(self):
store_writer = Mock()
store_writer.get_from_binary.side_effect = stub(
(call('charges'), {
"charges": PIHUT_CHARGE,
"most_recent_seen": 1460184000
}),
(call('most-recently-seen'), 1460184000)
)
store_writer.store_as_binary.side_effect = stub(
(call('charges', {
'charges': PIHUT_AND_PIMORONI_CHARGE_WITH_WHEN_STR,
'most_recent_seen': 1460184000
}), True),
(call('most-recently-seen', 1460184000), True)
)
digester = ChargeCardDigester(store_writer) ## What we are testing
expected_payload = """<table>
<tr style="background-color: #acf;">
<th>Type</th><th>Vendor</th><th>When</th><th>Curr</th><th>Amt</th><th>Card</th>
</tr>
<tr style="">
<td>Charge</td>
<td>Pimoroni</td>
<td>Apr 09 02:56</td>
<td>GBP</td>
<td style="text-align: right;"><b>4</b></td>
<td>Amex 1234</td>
</tr>
<tr>
<td colspan="6" style="color:red; text-align: center; border-bottom: 1pt solid red; border-top: 1pt solid red;">
^ New Charges Since You Last checked ^
</td>
</tr>
<tr style="background-color: #def;">
<td>Charge</td>
<td>PiHut</td>
<td>Apr 09 02:40</td>
<td>USD</td>
<td style="text-align: right;"><b>5</b></td>
<td>Amex 1234</td>
</tr>
</table>"""
expected_message = ("Subject: Spending Digest\n"
+ MAIL_HDR + expected_payload + "\n\n-----NOTIFICATION_BOUNDARY-5678")
digest_inbox_proxy = Mock()
digest_inbox_proxy.delete_previous_message.side_effect = stub((call(), True))
digest_inbox_proxy.append.side_effect = stub((call(expected_message), True))
digester.new_charge_summary = PIMORONI_CHARGE
digester.notification_boundary_rand = "5678"
digester.rewrite_digest_emails(digest_inbox_proxy, True, False, "<EMAIL>")
self.assertEqual(digest_inbox_proxy.mock_calls,
[call.delete_previous_message(), call.append(expected_message)])
calls = store_writer.mock_calls
self.assertEqual(calls, [
call.get_from_binary('charges'),
call.store_as_binary('charges', {
'charges': PIHUT_AND_PIMORONI_CHARGE_WITH_WHEN_STR,
'most_recent_seen': 1460184000
})
])
if __name__ == '__main__':
unittest.main()
|
123357
|
from bokeh.models import ActionTool
class ParallelResetTool(ActionTool):
""" Tool to reset only plot axes and not selections
"""
__implementation__ = 'parallel_reset.ts'
|
123372
|
from django.db.models.signals import post_save
from .models import Demand, DemandHub
from profiles.models import ProfileHub
from hubs.models import HubGeolocation
from utils.utils import coordinates_calculation, distance_calculation
def demand_created_or_updated(sender, update_fields, **kwargs):
instance = kwargs['instance']
if kwargs['created']:
lat_cal, lng_cal = coordinates_calculation(
instance.number,
instance.street,
instance.postal_code,
instance.city
)
        demand_geo = DemandHub.objects.create(demand=instance, lat=lat_cal, lng=lng_cal)
hub_selected = ProfileHub.objects.get(profile=instance.requester).hub
hub_geo = HubGeolocation.objects.get(hub=hub_selected)
distance = distance_calculation(demand_geo, hub_geo)
demand_geo.hub = hub_selected
demand_geo.distance_km = distance
demand_geo.save()
post_save.connect(demand_created_or_updated, sender=Demand)
|
123494
|
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.integration.soc_sdram import *
from liteeth.common import convert_ip
from liteeth.core import LiteEthUDPIPCore
from liteeth.frontend.etherbone import LiteEthEtherbone
from liteeth.mac import LiteEthMAC
from liteeth.phy import LiteEthPHY
from targets.arty.base import SoC as BaseSoC
class EtherboneSoC(BaseSoC):
def __init__(self, platform, *args, **kwargs):
# Need a larger integrated ROM on or1k to fit the BIOS with TFTP support.
if 'integrated_rom_size' not in kwargs and kwargs.get('cpu_type', 'lm32') != 'lm32':
kwargs['integrated_rom_size'] = 0x10000
BaseSoC.__init__(self, platform, *args, **kwargs)
# Ethernet ---------------------------------------------------------------------------------
# Ethernet Phy
self.submodules.ethphy = LiteEthPHY(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
self.add_csr("ethphy")
# Ethernet Core
etherbone_mac_address = 0x10e2d5000000
etherbone_ip_address = "192.168.100.50"
self.submodules.ethcore = LiteEthUDPIPCore(
phy = self.ethphy,
mac_address = etherbone_mac_address,
ip_address = etherbone_ip_address,
clk_freq = self.clk_freq)
# Etherbone Core
self.submodules.etherbone = LiteEthEtherbone(self.ethcore.udp, 1234)
self.add_wb_master(self.etherbone.wishbone.bus)
# timing constraints
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_rx.clk, 1e9/25e6)
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_tx.clk, 1e9/25e6)
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.ethphy.crg.cd_eth_rx.clk,
self.ethphy.crg.cd_eth_tx.clk)
# Analyzer ---------------------------------------------------------------------------------
#analyzer_signals = [
# # FIXME: find interesting signals to probe
# self.cpu.ibus,
# self.cpu.dbus
#]
#self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 512)
#self.add_csr("analyzer")
def configure_iprange(self, iprange):
iprange = [int(x) for x in iprange.split(".")]
while len(iprange) < 4:
iprange.append(0)
# Our IP address
self._configure_ip("LOCALIP", iprange[:-1]+[50])
# IP address of tftp host
self._configure_ip("REMOTEIP", iprange[:-1]+[100])
def _configure_ip(self, ip_type, ip):
for i, e in enumerate(ip):
s = ip_type + str(i + 1)
s = s.upper()
self.add_constant(s, e)
SoC = EtherboneSoC
|
123499
|
from html.parser import HTMLParser
# create a subclass and override the handler methods
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print("Start :", tag)
for li in attrs:
print('->', li[0], '>', li[1])
def handle_endtag(self, tag):
print("End :", tag)
def handle_startendtag(self, tag, attrs):
print("Empty :", tag)
for li in attrs:
print('->', li[0], '>', li[1])
if __name__ == '__main__':
n = int(input())
s = ''
for i in range(n):
temp = input()
s += temp
obj = MyHTMLParser()
obj.feed(s)
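# Example: feeding '<html><head><a href="x"></a></head></html>' prints:
#   Start : html
#   Start : head
#   Start : a
#   -> href > x
#   End : a
#   End : head
#   End : html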
|
123522
|
import json
import os
import sys
from glob import glob
from tasks.utils import *
TASK = sys.argv[1]
MODEL = sys.argv[2]
METHOD = sys.argv[3]
SPECIAL_METRICS = {
'cb' : 'f1',
'mrpc' : 'f1',
'cola' : 'matthews_correlation',
'stsb' : 'combined_score'
}
METRIC = "accuracy"
if TASK in SPECIAL_METRICS:
METRIC = SPECIAL_METRICS[TASK]
best_score = 0
best_metrics = None
best_file_name = None
files = glob(f"./checkpoints/{TASK}-{MODEL}-search{METHOD}/*/predict_results.json")
for f in files:
metrics = json.load(open(f, 'r'))
if metrics["predict_"+METRIC] > best_score:
best_score = metrics["predict_"+METRIC]
best_metrics = metrics
best_file_name = f
print(f"best_{METRIC}: {best_score}")
print(f"best_metrics: {best_metrics}")
print(f"best_file: {best_file_name}")
|
123556
|
from ..errors.validation import Validation
from . import element as element_module, section_element
from .element_base import ElementBase
from .missing import missing_empty
from .missing import missing_field
from .missing import missing_fieldset
from .missing import missing_list
from .missing import missing_section
from .missing import missing_section_element
from ..constants import (
DOCUMENT,
EMPTY,
FIELD,
FIELDSET,
FIELD_OR_FIELDSET_OR_LIST,
LIST,
MULTILINE_FIELD_BEGIN,
SECTION
)
class Section(ElementBase):
def __init__(self, context, instruction, parent=None):
super().__init__(context, instruction, parent)
self._all_elements_required = parent._all_elements_required if parent else False
def __repr__(self):
if self._instruction['type'] == DOCUMENT:
return f"<class Section document elements={len(self._elements())}>"
return f"<class Section key=\"{self._instruction['key']}\" elements={len(self._elements())}>"
def _element(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_element')
elif required is None:
return missing_section_element.MissingSectionElement(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_element'
)
return elements[0]
def _elements(self, as_map=False):
if not hasattr(self, '_instantiated_elements'):
self._instantiated_elements = []
self._instantiated_elements_map = {}
self._instantiate_elements(self._instruction)
return self._instantiated_elements_map if as_map else self._instantiated_elements
def _empty(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_empty')
elif required is None:
return missing_empty.MissingEmpty(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_empty'
)
element = elements[0]
if element._instruction['type'] != EMPTY:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_empty')
return element.to_empty()
def _field(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_field')
elif required is None:
return missing_field.MissingField(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_field'
)
element = elements[0]
if (element._instruction['type'] != FIELD and
element._instruction['type'] != MULTILINE_FIELD_BEGIN and
element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST):
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_field')
return element.to_field()
def _fieldset(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_fieldset')
elif required is None:
return missing_fieldset.MissingFieldset(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_fieldset'
)
element = elements[0]
if element._instruction['type'] != FIELDSET and element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_fieldset')
return element.to_fieldset()
def _instantiate_elements(self, section):
if 'mirror' in section:
self._instantiate_elements(section['mirror'])
else:
def instantiate_and_index(element):
instance = section_element.SectionElement(self._context, element, self)
if element['key'] in self._instantiated_elements_map:
self._instantiated_elements_map[element['key']].append(instance)
else:
self._instantiated_elements_map[element['key']] = [instance]
return instance
filtered = [element for element in section['elements'] if element['key'] not in self._instantiated_elements_map]
self._instantiated_elements.extend(instantiate_and_index(element) for element in filtered) # TODO: Probably needs to come AFTER 'if 'extend' in section:' below because otherwise the order is incorrect?
if 'extend' in section:
self._instantiate_elements(section['extend'])
def _list(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_list')
elif required is None:
return missing_list.MissingList(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_list'
)
element = elements[0]
if element._instruction['type'] != LIST and element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_list')
return element.to_list()
def _missing_error(self, element):
if isinstance(element, missing_field.MissingField):
raise Validation.missing_element(self._context, element._key, self._instruction, 'missing_field')
elif isinstance(element, missing_fieldset.MissingFieldset):
raise Validation.missing_element(self._context, element._key, self._instruction, 'missing_fieldset')
elif isinstance(element, missing_list.MissingList):
raise Validation.missing_element(self._context, element._key, self._instruction, 'missing_list')
elif isinstance(element, missing_section.MissingSection):
raise Validation.missing_element(self._context, element._key, self._instruction, 'missing_section')
else:
raise Validation.missing_element(self._context, element._key, self._instruction, 'missing_element')
def _section(self, key, *, required=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
if len(elements) == 0:
if required or required is None and self._all_elements_required:
raise Validation.missing_element(self._context, key, self._instruction, 'missing_section')
elif required is None:
return missing_section.MissingSection(key, self)
else:
return None
if len(elements) > 1:
raise Validation.unexpected_multiple_elements(
self._context,
key,
[element._instruction for element in elements],
'expected_single_section'
)
element = elements[0]
if element._instruction['type'] != SECTION:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_section')
return element.to_section()
def _untouched(self):
if not hasattr(self, '_touched'):
return self._instruction
for element in self._elements():
untouched_element = element._untouched()
if untouched_element:
return untouched_element
return False
def all_elements_required(self, required=True):
self._all_elements_required = required
for element in self._elements():
if element._instruction['type'] == SECTION and element._yielded:
element.to_section().all_elements_required(required)
elif element._instruction['type'] == FIELDSET and element._yielded:
element.to_fieldset().all_entries_required(required)
def assert_all_touched(self, message=None, *, only=None, skip=None):
elements_map = self._elements(True)
for key, elements in elements_map.items():
if (skip and key in skip) or (only and key not in only):
continue
for element in elements:
untouched = element._untouched()
if untouched:
if callable(message):
message = message(element_module.Element(self._context, untouched, self))
raise Validation.unexpected_element(self._context, message, untouched)
def element(self, key=None):
return self._element(key)
def elements(self, key=None):
self._touched = True
if key:
elements_map = self._elements(True)
return elements_map[key] if key in elements_map else []
return self._elements()
def empty(self, key=None):
return self._empty(key)
def field(self, key=None):
return self._field(key)
def fields(self, key=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
def cast(element):
if (element._instruction['type'] != FIELD and
element._instruction['type'] != MULTILINE_FIELD_BEGIN and
element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST):
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_fields')
return element.to_field()
return [cast(element) for element in elements]
def fieldset(self, key=None):
return self._fieldset(key)
def fieldsets(self, key=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
def cast(element):
if element._instruction['type'] != FIELDSET and element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_fieldsets')
return element.to_fieldset()
return [cast(element) for element in elements]
def list(self, key=None):
return self._list(key)
def lists(self, key=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
def cast(element):
if element._instruction['type'] != LIST and element._instruction['type'] != FIELD_OR_FIELDSET_OR_LIST:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_lists')
return element.to_list()
return [cast(element) for element in elements]
def optional_element(self, key):
return self._element(key, required=False)
def optional_empty(self, key):
return self._empty(key, required=False)
def optional_field(self, key):
return self._field(key, required=False)
def optional_fieldset(self, key):
return self._fieldset(key, required=False)
def optional_list(self, key):
return self._list(key, required=False)
def optional_section(self, key):
return self._section(key, required=False)
def parent(self):
if self._instruction['type'] == DOCUMENT:
return None
return self._parent or Section(self._context, self._instruction['parent'])
def required_element(self, key=None):
return self._element(key, required=True)
def required_empty(self, key=None):
return self._empty(key, required=True)
def required_field(self, key=None):
return self._field(key, required=True)
def required_fieldset(self, key=None):
return self._fieldset(key, required=True)
def required_list(self, key=None):
return self._list(key, required=True)
def required_section(self, key=None):
return self._section(key, required=True)
def section(self, key=None):
return self._section(key)
def sections(self, key=None):
self._touched = True
if not key:
elements = self._elements()
else:
elements_map = self._elements(True)
elements = elements_map[key] if key in elements_map else []
def cast(element):
if element._instruction['type'] != SECTION:
raise Validation.unexpected_element_type(self._context, key, element._instruction, 'expected_sections')
return element.to_section()
return [cast(element) for element in elements]
def touch(self):
self._touched = True
for element in self._elements():
element.touch()
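# Hedged usage sketch (parse() below is a hypothetical entry point that
# yields a document whose root is a Section):
#   document = parse(input_text)
#   title = document.required_field('title')    # raises if 'title' is missing
#   author = document.optional_field('author')  # None if missing
#   for entry in document.sections('entry'):    # every section keyed 'entry'
#       ...
#   document.assert_all_touched()               # flag any unqueried elements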
|
123590
|
from .session import ClientSession
from .endpoints import (
LiveEndpointsMixin,
VREndpointsMixin,
RoomEndpointsMixin,
UserEndpointsMixin,
OtherEndpointsMixin
)
from json import JSONDecodeError
import time
from showroom.api.utils import get_csrf_token
from requests.exceptions import HTTPError
import logging
_base_url = 'https://www.showroom-live.com'
# TODO: logging, warnings
# TODO: load auth or credentials from file or dict
# TODO: save auth or credentials to file
client_logger = logging.getLogger('showroom.client')
class ShowroomClient(
LiveEndpointsMixin,
UserEndpointsMixin,
RoomEndpointsMixin,
VREndpointsMixin,
OtherEndpointsMixin
):
"""
Client for interacting with the Showroom API.
:param cookies: dict containing stored cookies
:ivar cookies: Reference to the underlying session's cookies.
"""
def __init__(self, cookies=None):
self._session = ClientSession()
self._auth = None
self.cookies = self._session.cookies
if cookies:
self.cookies.update(cookies)
expiry = self.cookies.expires_earliest
if expiry and int(time.time()) >= expiry:
# TODO: more information, more specific error
raise ValueError('A cookie has expired')
# TODO: does this actually mean we're logged in? if no, how do I check?
self._auth = self.cookies.get('sr_id')
# TODO: request responses in different languages
# to force japanese text in responses:
# self.session.cookies.update({'lang': 'ja'})
# this doesn't always seem to work? it worked until i manually set lang:en, then switching back failed
self.__csrf_token = None
self._last_response = None
@property
def _csrf_token(self):
if not self.__csrf_token:
self._update_csrf_token(_base_url)
return self.__csrf_token
def _update_csrf_token(self, url):
r = self._session.get(url)
self.__csrf_token = get_csrf_token(r.text)
def _api_get(self, endpoint, params=None, return_response=False, default=None, raise_error=True):
try:
r = self._session.get(_base_url + endpoint, params=params)
except HTTPError as e:
r = e.response
if raise_error:
raise
self._last_response = r
if return_response:
return r
else:
try:
return r.json()
except JSONDecodeError as e:
client_logger.error('JSON decoding error while getting {}: {}'.format(r.request.url, e))
return default or {}
def _api_post(self, endpoint, params=None, data=None, return_response=None, default=None):
try:
r = self._session.post(_base_url + endpoint, params=params, data=data)
except HTTPError as e:
r = e.response
self._last_response = r
# TODO: check for expired csrf_token
if return_response:
return r
else:
try:
return r.json()
except JSONDecodeError as e:
client_logger.error('JSON decoding error while posting to {}: {}'.format(r.request.url, e))
return default or {}
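# Hedged usage sketch: the endpoint methods themselves come from the
# *EndpointsMixin classes; _api_get/_api_post above are the shared transport.
#   client = ShowroomClient()                  # anonymous session
#   client = ShowroomClient(cookies=stored)    # or resume a stored session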
|
123610
|
from os import path
from django.core.management.base import BaseCommand
from uwsgiconf.sysinit import get_config, TYPE_SYSTEMD
from uwsgiconf.utils import Finder
from ...toolbox import SectionMutator
class Command(BaseCommand):
help = 'Generates configuration files for Systemd, Upstart, etc.'
def add_arguments(self, parser): # pragma: nocover
super().add_arguments(parser)
parser.add_argument(
'--systype', dest='systype',
help='System type alias to make configuration for. E.g.: systemd, upstart.',
)
parser.add_argument(
'--nostatic', action='store_true', dest='nostatic',
help='Do not serve static and media files.',
)
parser.add_argument(
'--noruntimes', action='store_true', dest='noruntimes',
help='Do not automatically use a runtime directory to store pid and fifo files.',
)
parser.add_argument(
'--noerrpages', action='store_true', dest='noerrpages',
            help='Do not configure custom error pages (403, 404, 500).',
)
def handle(self, *args, **options):
systype = options['systype'] or TYPE_SYSTEMD
mutator = SectionMutator.spawn()
command = 'manage.py uwsgi_run'
for opt in ('nostatic', 'noruntimes', 'noerrpages'):
if options.get(opt, False):
command = command + f' --{opt}'
config = get_config(
systype,
conf=mutator.section,
conf_path=path.join(mutator.dir_base, command),
runner=Finder.python(),
)
print(config)
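# Hedged usage (the management command name is taken from this module's
# filename, which is not shown here; flags as defined in add_arguments above):
#   python manage.py <sysinit_command> --systype systemd --nostatic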
|
123634
|
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from gpflow import settings
float_type = settings.float_type
jitter_level = settings.jitter
class EulerMaruyama:
    def __init__(self, f, total_time, nsteps):
        self.ts = np.linspace(0, total_time, nsteps)
        self.f = f
def forward(self,y0,save_intermediate=False):
time_grid = ops.convert_to_tensor(self.ts, preferred_dtype=float_type, name='t')
y0 = ops.convert_to_tensor(y0, name='y0')
time_delta_grid = time_grid[1:] - time_grid[:-1]
time_grid = time_grid[1:]
time_combined = tf.concat([time_grid[:,None],time_delta_grid[:,None]],axis=1)
scan_func = self._make_scan_func(self.f)
if save_intermediate:
y_grid = functional_ops.scan(scan_func, time_combined,y0)
y_s = array_ops.concat([[y0], y_grid], axis=0)
y_t = y_s[-1,:,:,:]
return y_t, y_s
else:
y_t = functional_ops.foldl(scan_func, time_combined,y0)
return y_t, None
    def _step_func(self, evol_func, t_and_dt, y):
        # One Euler-Maruyama step: dy = mu(y, t) * dt + sqrt(dt * var(y, t)) * eps
        # with eps ~ N(0, I); evol_func returns the drift mu and variance var.
        t = t_and_dt[0]
        dt = t_and_dt[1]
        mu, var = evol_func(y, t)  # N x D, N x D
        if var.get_shape().ndims == 3:  # full covariance is not supported
            raise NotImplementedError
        dt_cast = math_ops.cast(dt, y.dtype)
        dy = mu * dt_cast + tf.sqrt(dt_cast) * tf.sqrt(var) * tf.random_normal(
            shape=[tf.shape(y)[0], tf.shape(y)[1]], dtype=y.dtype)  # N x D
        return dy
def _make_scan_func(self, evol_func):
def scan_func(y, t_and_dt):
dy = self._step_func(evol_func, t_and_dt, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy
return scan_func
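# Hedged usage sketch (my_sde is a placeholder): f(y, t) must return the
# drift mu and the variance var of the SDE dy = mu dt + sqrt(var) dW,
# both shaped like y.
#   solver = EulerMaruyama(f=my_sde, total_time=1.0, nsteps=100)
#   y_T, _ = solver.forward(y0)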
|
123637
|
import numpy as np
class ReplayMemory(object):
def __init__(self, max_size, obs_dim, act_dim):
self.max_size = int(max_size)
self.obs = np.zeros((max_size, ) + obs_dim, dtype='float32')
self.action = np.zeros((max_size, act_dim), dtype='float32')
self.reward = np.zeros((max_size,), dtype='float32')
self.terminal = np.zeros((max_size,), dtype='bool')
self.next_obs = np.zeros((max_size, ) + obs_dim, dtype='float32')
self._curr_size = 0
self._curr_pos = 0
def sample_batch(self, batch_size):
batch_idx = np.random.randint(self._curr_size - 300 - 1, size=batch_size)
obs = self.obs[batch_idx]
reward = self.reward[batch_idx]
action = self.action[batch_idx]
next_obs = self.next_obs[batch_idx]
terminal = self.terminal[batch_idx]
return obs, action, reward, next_obs, terminal
def append(self, obs, act, reward, next_obs, terminal):
if self._curr_size < self.max_size:
self._curr_size += 1
self.obs[self._curr_pos] = obs
self.action[self._curr_pos] = act
self.reward[self._curr_pos] = reward
self.next_obs[self._curr_pos] = next_obs
self.terminal[self._curr_pos] = terminal
self._curr_pos = (self._curr_pos + 1) % self.max_size
def size(self):
return self._curr_size
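# Hedged usage sketch (dimensions are illustrative). Note that sample_batch()
# only draws from the first size() - 301 slots, so the buffer must hold
# comfortably more than 301 transitions before sampling.
#   memory = ReplayMemory(max_size=int(1e6), obs_dim=(17,), act_dim=6)
#   memory.append(obs, act, reward, next_obs, terminal)
#   obs, act, rew, next_obs, done = memory.sample_batch(batch_size=64)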
|
123643
|
import json
import os
from pathlib import Path
import logging
def read_json(loc : str):
'''
:param loc: path to file
    :return: the JSON file contents as a dictionary
'''
with open(loc) as f:
data = json.load(f)
return data
def write_json(data, loc):
with open(loc, 'w') as json_file:
json.dump(data, json_file)
def read_from_root(file: str):
cwd = os.getcwd()
home_path = str(Path.home()) + '/.tenbagger'
os.chdir(home_path)
data = read_json(loc=file)
os.chdir(cwd)
return data
def create_hidden_folder(name: str):
    cwd = os.getcwd()
    home_path = Path.home()
    os.chdir(home_path)
    if os.path.exists(f".{name}"):
        logging.info("Folder already exists")
    else:
        os.mkdir(f".{name}")
    os.chdir(cwd)
|
123676
|
import re
inName = "./char_order.txt"
with open(inName, 'r') as inFile:
    inLines = inFile.readlines()
charDict = dict()
puncDict = dict()
for line in inLines:
    seq = line.split()
    count = int(seq[2][1:])
    if re.match(r"[a-zA-Z.,'\"-]", seq[1]):
        charDict[seq[1]] = charDict.get(seq[1], 0) + count
    elif re.match(r"\d", seq[1]):
        puncDict['num'] = puncDict.get('num', 0) + count
    else:
        puncDict[seq[1]] = puncDict.get(seq[1], 0) + count
charList = sorted(charDict.items(), key=lambda d: d[1])
puncList = sorted(puncDict.items(), key=lambda d: d[1])
print(len(charList))
for t in charList:
    print(t)
print(len(puncList))
for t in puncList:
    print(t)
s = "["
for t in puncList:
    s += "\\" + t[0]
s += "]"
print(s)
newDict = dict()
for line in inLines:
    seq = line.split()
    count = int(seq[2][1:])
    if re.match(r"[&*\]\[/+;#)(:!?\d]", seq[1]):
        newDict["None"] = newDict.get("None", 0) + count
    else:
        newDict[seq[1]] = newDict.get(seq[1], 0) + count
newList = sorted(newDict.items(), key=lambda d: d[1])
print()
print(len(newList))
for t in newList:
    print(t)
|
123722
|
from EasyLogin import EasyLogin
a=EasyLogin()
def get_question(qid):
global a
html=a.get("http://gre.kmf.com/question/{qid}.html".format(qid=qid), cache=True)
soup = a.b
question_type = soup.find("input",{"id":"GlobeQUESTIONNAME"})["value"]
question_from = soup.find("span",{"class":"gray"}).text
good_count = soup.find("span",{"class":"actionbg good-ico"}).find_next_sibling("span",{"class":"count"}).text
bad_count = soup.find("span",{"class":"actionbg bad-ico"}).find_next_sibling("span",{"class":"count"}).text
passage = ""
    if question_type=='填空':  # fill-in-the-blank questions
div_body = "exa-question"
next_link = soup.find("div",{"class":"clearfix exa-que-bottom"}).find("a")["href"]
    elif question_type=='阅读':  # reading comprehension questions
div_body = "queanswer"
passage = soup.find("div",{"class":"quecontent"}).find("div",{"class":"content"}).text.strip()
next_link = soup.find("div",{"class":"queanswer"}).find("li",{"class":"current"}).find_next_sibling("li")
if next_link is not None:
next_link = next_link.find("a")["href"]
question_body = soup.find("div",{"class":div_body}).find("div",{"class":"mb20"}).text.strip()
question_selections = [""]
try:
        # ordinary single-choice / multiple-choice question
for i in soup.find("form", {"id":"QuestionSubmit"}).find_all("li"):
question_selections.append(". ".join((i.find("strong").text, i.text.lstrip(i.find("strong").text))))
except:
        # question with multiple blanks
t = 0
for ul in soup.find("div",{"id":"QuestionSubmit"}).find_all("ul"):
t+=1
question_selections.append("Blank"+str(t))
question_selections.extend([". ".join((i.find("strong").text, i.text.lstrip(i.find("strong").text))) for i in ul.find_all("li")])
question_body += "\n".join(question_selections)
answer = soup.find("b",{"class":"que-anser-right"}).text
explain = soup.find("div",{"id":"Explain"}).text.strip()
return {
"id": qid,
"type": question_type,
"from": question_from,
"good_count": good_count,
"bad_count": bad_count,
"passage":passage,
"body": question_body,
"answer": answer,
"explain": explain,
"next_id": next_link.replace("/question/","").replace(".html","") if next_link else None
}
from pprint import pprint
import string
theloop = (string.digits+string.ascii_lowercase)[::-1]
data = get_question("42nkhj")#92nm4j
while data["next_id"]:
data = get_question(data["next_id"])
print(data["body"][:20].replace("题真题","日真题").replace("(","\t").replace(")","\t").replace("年","\t").replace("月","\t").replace("日","\t"))
#for x1 in theloop[theloop.find('k'):]:
# for x2 in theloop:
# id = "a2n{x1}{x2}j".format(**locals())
# print(id)
# data = get_question(id)
# print(data["body"][:20])
|
123745
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kompassi.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
123763
|
import os
# hydra
import hydra
from omegaconf import DictConfig, OmegaConf
# pytorch-lightning related imports
from pytorch_lightning import Trainer
import pytorch_lightning.loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor
# own modules
from dataloader import PL_DataModule
from method import LitModel
def setup_cuda(cfg: DictConfig):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.trainer.cuda_number
def get_dataloader(cfg: DictConfig):
return PL_DataModule(cfg.dataloader)
@hydra.main(config_path='./configs', config_name='defaults')
def main(cfg: DictConfig):
setup_cuda(cfg)
print(OmegaConf.to_yaml(cfg))
    # Configure Weights & Biases logger
logger = pl_loggers.WandbLogger(
project=cfg.wadb.logger_project_name,
name=cfg.wadb.logger_name,
entity=cfg.wadb.entity)
    # Configure trainer
    trainer = Trainer(
        gpus=cfg.trainer.gpus,
logger=logger if cfg.trainer.is_logger_enabled else False,
num_sanity_val_steps=cfg.trainer.num_sanity_val_steps,
check_val_every_n_epoch=cfg.trainer.check_val_every_n_epoch,
max_epochs=cfg.model.opt.max_epochs,
log_every_n_steps=cfg.trainer.log_every_n_steps,
callbacks=[LearningRateMonitor("step")] if cfg.trainer.is_logger_enabled else [],)
# Setup dataloader and model
datamodule = get_dataloader(cfg)
model = LitModel(cfg=cfg.model)
# Train
trainer.fit(model, datamodule)
if __name__ == "__main__":
main()
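# Hedged sketch of the config layout this script expects
# (configs/defaults.yaml; keys inferred from the cfg accesses above,
# values illustrative):
#   trainer:
#     cuda_number: "0"
#     gpus: 1
#     is_logger_enabled: true
#     num_sanity_val_steps: 2
#     check_val_every_n_epoch: 1
#     log_every_n_steps: 50
#   wadb:
#     logger_project_name: my-project
#     logger_name: my-run
#     entity: my-team
#   dataloader: {}
#   model:
#     opt:
#       max_epochs: 100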
|
123883
|
import _optimize
def _optimize_fold_load_constants(nn_layers):
"""
Fold load constants that interact through 'add', 'multiply', 'activation',
'slice', 'reduce' or 'unary' layers.
In other words, evaluate any sub-graph that involves only 'load_constant',
'multiply', 'add', 'activation', 'slice', 'reduce'
or 'unary' layer types and replace it with a single load constant layer.
"""
_optimize._fold_constants(nn_layers)
def _optimize_conv_mul_add(nn_layers):
"""
    Detect multiply or add layers after a convolution and recast them as a
    batchnorm layer so that they can be fused by the framework.
"""
_optimize._fuse_conv_mul_add(nn_layers)
def _optimize_spatial_reduce_operation(nn_layers):
"""
Find a reduce layer with mode = 'average'/'max' and axis = 'HW'
and replace it with global average/max pooling layer.
"""
_optimize._spatial_reduce_as_global_pool(nn_layers)
def _optimize_disconnected_components(builder):
"""
Removes from the CoreML NN graph all the layers that are not connected
to the output nodes.
"""
_optimize._remove_disconnected_components(builder)
def optimize_nn_spec(builder):
"""
Call a specific set of network optimizations
"""
_optimize_fold_load_constants(builder.nn_spec.layers)
_optimize_spatial_reduce_operation(builder.nn_spec.layers)
_optimize_conv_mul_add(builder.nn_spec.layers)
_optimize_disconnected_components(builder)
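# Hedged illustration of the conv/mul/add fusion above: an elementwise scale s
# and shift b applied after a convolution,
#     y = conv(x) * s + b
# is algebraically a batchnorm with gamma = s, beta = b, mean = 0 and
# variance = 1 (ignoring epsilon), which can then be folded into the
# convolution weights.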
|
123891
|
from nesi.devices.softbox.api import db
class ServicePort(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(64))
box_id = db.Column(db.Integer, db.ForeignKey('box.id'))
admin_state = db.Column(db.Enum('0', '1', '2'), default='0') # Alcatel: 0 => down, 1 => up, 2 => not-appl; Huawei: 0 => down, 1 => up
operational_state = db.Column(db.Enum('0', '1'), default='0') # Alcatel: 0 => down, 1 => up; Huawei: 0 => down, 1 => up
connected_id = db.Column(db.Integer(), nullable=False)
connected_type = db.Column(db.Enum('port', 'ont', 'cpe'), nullable=False)
# Alcatel data
pvid = db.Column(db.Integer(), default=None, nullable=True)
max_unicast_mac = db.Column(db.Integer(), default=None, nullable=True)
qos_profile_id = db.Column(db.Integer(), default=None, nullable=True)
pvc = db.Column(db.Boolean(), default=False)
# Huawei Data
vpi = db.Column(db.String(), default='-')
vci = db.Column(db.String(), default='-')
flow_type = db.Column(db.Enum('vlan', 'encap', '-'), default='vlan')
flow_para = db.Column(db.Enum('untag', 'pppoe', '-'), default='untag')
rx = db.Column(db.Integer(), default=560)
tx = db.Column(db.Integer(), default=520)
rx_cttr = db.Column(db.String(), default='-')
tx_cttr = db.Column(db.String(), default='-')
max_mac_count = db.Column(db.Integer(), default=600)
support_down_multicast_stream = db.Column(db.String(), default='disable')
support_igmp_packet = db.Column(db.String(), default='disable')
bytes_us = db.Column(db.Integer(), default=448203129)
packets_us = db.Column(db.Integer(), default=6386689)
bytes_ds = db.Column(db.Integer(), default=430667320)
packets_ds = db.Column(db.Integer(), default=6493472)
inbound_table_name = db.Column(db.String(), default='ip-traffic-table_520')
outbound_table_name = db.Column(db.String(), default='ip-traffic-table_560')
label = db.Column(db.String(), default='-')
priority = db.Column(db.String(), default='-')
pvc_bundle = db.Column(db.Enum('yes', 'no'), default='no')
tag_transforms = db.Column(db.String(), default='default')
description = db.Column(db.String(), nullable=True, default='')
remote_description = db.Column(db.String(), nullable=True, default='')
service_port_bundle = db.Column(db.String(), default='-')
cos = db.Column(db.String(), default='-')
static_mac = db.Column(db.String(), nullable=True, default='')
ip_address = db.Column(db.String(), nullable=True, default='')
|
123962
|
import json
from pathlib import Path
import aiohttp
import pytest
from gentools import sendreturn
import ns
import snug
live = pytest.config.getoption('--live')
CRED_PATH = Path('~/.snug/ns.json').expanduser()
auth = json.loads(CRED_PATH.read_bytes())
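# The offline assertions below drive the snug query protocol by hand:
# iter(query) yields the snug.Request to send, and gentools.sendreturn()
# feeds a snug.Response back into the generator and returns its parsed result.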
@pytest.fixture(scope='module')
async def exec():
async with aiohttp.ClientSession() as client:
yield ns.async_executor(auth=auth, client=client)
@pytest.mark.asyncio
async def test_all_stations(exec):
all_stations = ns.stations()
if live:
stations = await exec(all_stations)
assert isinstance(stations, list)
amsterdam_stations = [s for s in stations
if s.full_name.startswith('Amsterdam')]
assert len(amsterdam_stations) == 11
den_bosch = stations[0]
assert den_bosch.synonyms == ["Hertogenbosch ('s)", 'Den Bosch']
# offline test
query = iter(all_stations)
assert next(query).url.endswith('stations-v2')
result = sendreturn(query, snug.Response(200, content=STATIONS_SAMPLE))
assert len(result) == 4
assert result[3].full_name == '<NAME>'
@pytest.mark.asyncio
async def test_departures(exec):
departures = ns.departures(station='Amsterdam')
if live:
deps = await exec(departures)
assert len(deps) >= 10
departure = deps[0]
assert isinstance(departure, ns.Departure)
# offline test
query = iter(departures)
req = next(query)
assert req.url.endswith('avt')
assert req.params == {'station': 'Amsterdam'}
result = sendreturn(query, snug.Response(200, content=DEPARTURES_SAMPLE))
assert len(result)
assert result[1].platform_changed
@pytest.mark.asyncio
async def test_journey_options(exec):
travel_options = ns.journey_options(origin='Breda',
destination='Amsterdam')
travel_options_no_hsl = travel_options.replace(hsl='false')
if live:
options = await exec(travel_options)
assert len(options) >= 10
assert isinstance(options[0], ns.Journey)
# offline test
query = iter(travel_options)
assert next(query).params == {'fromStation': 'Breda',
'toStation': 'Amsterdam'}
result = sendreturn(query, snug.Response(200, content=JOURNEYS_SAMPLE))
assert len(result) == 3
assert result[0].components[1].stops[-1].platform == '8a'
assert next(iter(travel_options_no_hsl)).params == {
'fromStation': 'Breda',
'toStation': 'Amsterdam',
'hslAllowed': 'false'}
STATIONS_SAMPLE = b'''\
<Stations>
<Station>
<Code>HT</Code>
<Type>knooppuntIntercitystation</Type>
<Namen>
<Kort>Den Bosch</Kort>
<Middel>'s-Hertogenbosch</Middel>
<Lang>'s-Hertogenbosch</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400319</UICCode>
<Lat>51.69048</Lat>
<Lon>5.29362</Lon>
<Synoniemen>
<Synoniem>Hertogenbosch ('s)</Synoniem>
<Synoniem>Den Bosch</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>HTO</Code>
<Type>stoptreinstation</Type>
<Namen>
<Kort>Dn Bosch O</Kort>
<Middel>Hertogenb. Oost</Middel>
<Lang>'s-Hertogenbosch Oost</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400320</UICCode>
<Lat>51.700553894043</Lat>
<Lon>5.3183331489563</Lon>
<Synoniemen>
<Synoniem>Hertogenbosch Oost ('s)</Synoniem>
<Synoniem>Den Bosch Oost</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>HDE</Code>
<Type>stoptreinstation</Type>
<Namen>
<Kort>'t Harde</Kort>
<Middel>'t Harde</Middel>
<Lang>'t Harde</Lang>
</Namen>
<Land>NL</Land>
<UICCode>8400388</UICCode>
<Lat>52.4091682</Lat>
<Lon>5.893611</Lon>
<Synoniemen>
<Synoniem>Harde ('t)</Synoniem>
</Synoniemen>
</Station>
<Station>
<Code>AHBF</Code>
<Type>knooppuntIntercitystation</Type>
<Namen>
<Kort>Aachen</Kort>
<Middel>Aachen Hbf</Middel>
<Lang>Aachen Hbf</Lang>
</Namen>
<Land>D</Land>
<UICCode>8015345</UICCode>
<Lat>50.7678</Lat>
<Lon>6.091499</Lon>
<Synoniemen>
</Synoniemen>
</Station>
</Stations>
'''
DEPARTURES_SAMPLE = b'''\
<ActueleVertrekTijden>
<VertrekkendeTrein>
<RitNummer>2187</RitNummer>
<VertrekTijd>2018-01-22T21:49:00+0100</VertrekTijd>
<EindBestemming>Den Haag Centraal</EindBestemming>
<TreinSoort>Intercity</TreinSoort>
<RouteTekst>A'dam Sloterdijk, Haarlem, Leiden C.</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">2a</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>4083</RitNummer>
<VertrekTijd>2018-01-22T21:49:00+0100</VertrekTijd>
<EindBestemming>Rotterdam Centraal</EindBestemming>
<TreinSoort>Sprinter</TreinSoort>
<RouteTekst>Duivendrecht, Bijlmer ArenA, Breukelen</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="true">4b</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>2974</RitNummer>
<VertrekTijd>2018-01-22T21:53:00+0100</VertrekTijd>
<EindBestemming>Enkhuizen</EindBestemming>
<TreinSoort>Intercity</TreinSoort>
<RouteTekst>A'dam Sloterdijk, Hoorn</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">8a</VertrekSpoor>
</VertrekkendeTrein>
<VertrekkendeTrein>
<RitNummer>14681</RitNummer>
<VertrekTijd>2018-01-22T21:53:00+0100</VertrekTijd>
<EindBestemming>Zwolle</EindBestemming>
<TreinSoort>Sprinter</TreinSoort>
<RouteTekst>Weesp, Lelystad C.</RouteTekst>
<Vervoerder>NS</Vervoerder>
<VertrekSpoor wijziging="false">10b</VertrekSpoor>
</VertrekkendeTrein>
</ActueleVertrekTijden>
'''
JOURNEYS_SAMPLE = b'''\
<ReisMogelijkheden>
<ReisMogelijkheid>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:29</GeplandeReisTijd>
<ActueleReisTijd>1:29</ActueleReisTijd>
<GeplandeVertrekTijd>2018-01-22T20:20:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:20:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T21:49:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T21:49:00+0100</ActueleAankomstTijd>
<Status>NIEUW</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>3674</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:20:00+0100</Tijd>
<Spoor wijziging="false">3</Spoor>
</ReisStop>
<ReisStop>
<Naam>Tilburg</Naam>
<Tijd>2018-01-22T20:34:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>'s-Hertogenbosch</Naam>
<Tijd>2018-01-22T20:49:00+0100</Tijd>
<Spoor wijziging="false">1</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>2974</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>'s-Hertogenbosch</Naam>
<Tijd>2018-01-22T20:54:00+0100</Tijd>
<Spoor wijziging="false">3</Spoor>
</ReisStop>
<ReisStop>
<Naam>Utrecht Centraal</Naam>
<Tijd>2018-01-22T21:23:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Amstel</Naam>
<Tijd>2018-01-22T21:41:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T21:49:00+0100</Tijd>
<Spoor wijziging="false">8a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
<ReisMogelijkheid>
<Melding>
<Id></Id>
<Ernstig>true</Ernstig>
<Text>Dit reisadvies vervalt</Text>
</Melding>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:14</GeplandeReisTijd>
<ActueleReisTijd>1:14</ActueleReisTijd>
<Optimaal>false</Optimaal>
<GeplandeVertrekTijd>2018-01-22T20:23:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:23:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T21:37:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T21:37:00+0100</ActueleAankomstTijd>
<Status>NIET-MOGELIJK</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>1170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:23:00+0100</Tijd>
<Spoor wijziging="false">7</Spoor>
</ReisStop>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:47:00+0100</Tijd>
<Spoor wijziging="false">9</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity direct</VervoerType>
<RitNummer>1061</RitNummer>
<Status>GEANNULEERD</Status>
<Reisdetails>
<Reisdetail>Toeslag Schiphol-Rotterdam vv</Reisdetail>
</Reisdetails>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:57:00+0100</Tijd>
<Spoor wijziging="false">12</Spoor>
</ReisStop>
<ReisStop>
<Naam>Schiphol Airport</Naam>
<Tijd>2018-01-22T21:23:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T21:37:00+0100</Tijd>
<Spoor wijziging="false">14a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
<ReisMogelijkheid>
<Melding>
<Id></Id>
<Ernstig>false</Ernstig>
<Text>Dit is een aangepast reisadvies</Text>
</Melding>
<AantalOverstappen>1</AantalOverstappen>
<GeplandeReisTijd>1:47</GeplandeReisTijd>
<ActueleReisTijd>1:47</ActueleReisTijd>
<Optimaal>false</Optimaal>
<GeplandeVertrekTijd>2018-01-22T20:23:00+0100</GeplandeVertrekTijd>
<ActueleVertrekTijd>2018-01-22T20:23:00+0100</ActueleVertrekTijd>
<GeplandeAankomstTijd>2018-01-22T22:10:00+0100</GeplandeAankomstTijd>
<ActueleAankomstTijd>2018-01-22T22:10:00+0100</ActueleAankomstTijd>
<Status>GEWIJZIGD</Status>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>1170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Breda</Naam>
<Tijd>2018-01-22T20:23:00+0100</Tijd>
<Spoor wijziging="false">7</Spoor>
</ReisStop>
<ReisStop>
<Naam>Rotterdam Centraal</Naam>
<Tijd>2018-01-22T20:48:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Delft</Naam>
<Tijd>2018-01-22T21:00:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam><NAME> HS</Naam>
<Tijd>2018-01-22T21:08:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Den Haag Centraal</Naam>
<Tijd>2018-01-22T21:12:00+0100</Tijd>
<Spoor wijziging="false">1</Spoor>
</ReisStop>
</ReisDeel>
<ReisDeel reisSoort="TRAIN">
<Vervoerder>NS</Vervoerder>
<VervoerType>Intercity</VervoerType>
<RitNummer>2170</RitNummer>
<Status>VOLGENS-PLAN</Status>
<ReisStop>
<Naam>Den Haag Centraal</Naam>
<Tijd>2018-01-22T21:18:00+0100</Tijd>
<Spoor wijziging="false">10</Spoor>
</ReisStop>
<ReisStop>
<Naam>Leiden Centraal</Naam>
<Tijd>2018-01-22T21:35:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Heemstede-Aerdenhout</Naam>
<Tijd>2018-01-22T21:49:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Haarlem</Naam>
<Tijd>2018-01-22T21:55:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam><NAME></Naam>
<Tijd>2018-01-22T22:04:00+0100</Tijd>
</ReisStop>
<ReisStop>
<Naam>Amsterdam Centraal</Naam>
<Tijd>2018-01-22T22:10:00+0100</Tijd>
<Spoor wijziging="false">7a</Spoor>
</ReisStop>
</ReisDeel>
</ReisMogelijkheid>
</ReisMogelijkheden>
'''
|
123988
|
import base64
import binascii
import decimal
import json
import os
import platform
import sys
import urllib.parse as urlparse
from http.client import HTTP_PORT, HTTPConnection
DEFAULT_USER_AGENT = "AuthServiceProxy/0.1"
DEFAULT_HTTP_TIMEOUT = 30
# (un)hexlify to/from unicode, needed for Python3
unhexlify = binascii.unhexlify
hexlify = binascii.hexlify
if sys.version > '3':
unhexlify = lambda h: binascii.unhexlify(h.encode('utf8'))
hexlify = lambda b: binascii.hexlify(b).decode('utf8')
class JSONRPCError(Exception):
"""JSON-RPC protocol error base class
Subclasses of this class also exist for specific types of errors; the set
of all subclasses is by no means complete.
"""
SUBCLS_BY_CODE = {}
@classmethod
def _register_subcls(cls, subcls):
cls.SUBCLS_BY_CODE[subcls.RPC_ERROR_CODE] = subcls
return subcls
def __new__(cls, rpc_error):
assert cls is JSONRPCError
cls = JSONRPCError.SUBCLS_BY_CODE.get(rpc_error['code'], cls)
self = Exception.__new__(cls)
super(JSONRPCError, self).__init__(
'msg: %r code: %r' %
(rpc_error['message'], rpc_error['code']))
self.error = rpc_error
return self
class BaseProxy(object):
"""Base JSON-RPC proxy class. Contains only private methods; do not use
directly."""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
# Create a dummy connection early on so if __init__() fails prior to
# __conn being created __del__() can detect the condition and handle it
# correctly.
self.__conn = None
if service_url is None:
# Figure out the path to the bitcoin.conf file
if btc_conf_file is None:
if platform.system() == 'Darwin':
btc_conf_file = os.path.expanduser('~/Library/Application Support/Bitcoin/')
elif platform.system() == 'Windows':
btc_conf_file = os.path.join(os.environ['APPDATA'], 'Bitcoin')
else:
btc_conf_file = os.path.expanduser('~/.bitcoin')
btc_conf_file = os.path.join(btc_conf_file, 'bitcoin.conf')
            # Bitcoin Core accepts an empty rpcuser when none is specified in btc_conf_file
conf = {'rpcuser': ""}
# Extract contents of bitcoin.conf to build service_url
try:
with open(btc_conf_file, 'r') as fd:
for line in fd.readlines():
if '#' in line:
line = line[:line.index('#')]
if '=' not in line:
continue
k, v = line.split('=', 1)
conf[k.strip()] = v.strip()
# Treat a missing bitcoin.conf as though it were empty
except FileNotFoundError:
pass
conf['rpcport'] = int(conf.get('rpcport', service_port))
conf['rpchost'] = conf.get('rpcconnect', 'localhost')
service_url = ('%s://%s:%d' %
('http', conf['rpchost'], conf['rpcport']))
cookie_dir = conf.get('datadir', os.path.dirname(btc_conf_file))
cookie_file = os.path.join(cookie_dir, ".cookie")
try:
with open(cookie_file, 'r') as fd:
authpair = fd.read()
except IOError as err:
if 'rpcpassword' in conf:
authpair = "%s:%s" % (conf['rpcuser'], conf['rpcpassword'])
else:
raise ValueError(
'Cookie file unusable (%s) and rpcpassword not specified in the configuration file: %r' % (
err, btc_conf_file))
        else:
            url = urlparse.urlparse(service_url)
            # Only build an auth pair when credentials are embedded in the URL;
            # otherwise leave it as None so no Authorization header is sent.
            if url.username is None and url.password is None:
                authpair = None
            else:
                authpair = "%s:%s" % (url.username, url.password)
self.__service_url = service_url
self.__url = urlparse.urlparse(service_url)
if self.__url.scheme not in ('http',):
raise ValueError('Unsupported URL scheme %r' % self.__url.scheme)
if self.__url.port is None:
port = HTTP_PORT
else:
port = self.__url.port
self.__id_count = 0
if authpair is None:
self.__auth_header = None
else:
authpair = authpair.encode('utf8')
self.__auth_header = b"Basic " + base64.b64encode(authpair)
self.__conn = HTTPConnection(self.__url.hostname, port=port,
timeout=timeout)
def _call(self, service_name, *args):
self.__id_count += 1
postdata = json.dumps({'version': '1.1',
'method': service_name,
'params': args,
'id': self.__id_count})
headers = {
'Host': self.__url.hostname,
'User-Agent': DEFAULT_USER_AGENT,
'Content-type': 'application/json',
}
if self.__auth_header is not None:
headers['Authorization'] = self.__auth_header
self.__conn.request('POST', self.__url.path, postdata, headers)
response = self._get_response()
if response['error'] is not None:
raise JSONRPCError(response['error'])
elif 'result' not in response:
raise JSONRPCError({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list))
headers = {
'Host': self.__url.hostname,
'User-Agent': DEFAULT_USER_AGENT,
'Content-type': 'application/json',
}
if self.__auth_header is not None:
headers['Authorization'] = self.__auth_header
self.__conn.request('POST', self.__url.path, postdata, headers)
return self._get_response()
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCError({
'code': -342, 'message': 'missing HTTP response from server'})
return json.loads(http_response.read().decode('utf8'),
parse_float=decimal.Decimal)
def close(self):
if self.__conn is not None:
self.__conn.close()
def __del__(self):
if self.__conn is not None:
self.__conn.close()
class RawProxy(BaseProxy):
"""Low-level proxy to a bitcoin JSON-RPC service
Unlike ``Proxy``, no conversion is done besides parsing JSON. As far as
Python is concerned, you can call any method; ``JSONRPCError`` will be
raised if the server does not recognize it.
"""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
super(RawProxy, self).__init__(service_url=service_url,
service_port=service_port,
btc_conf_file=btc_conf_file,
timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
f = lambda *args: self._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class Proxy(BaseProxy):
"""Proxy to a bitcoin RPC service
Unlike ``RawProxy``, data is passed as ``bitcoin.core`` objects or packed
bytes, rather than JSON or hex strings. Not all methods are implemented
yet; you can use ``call`` to access missing ones in a forward-compatible
way. Assumes Bitcoin Core version >= v0.16.0; older versions mostly work,
but there are a few incompatibilities.
"""
def __init__(self,
service_url=None,
service_port=None,
btc_conf_file=None,
timeout=DEFAULT_HTTP_TIMEOUT):
"""Create a proxy object
If ``service_url`` is not specified, the username and password are read
out of the file ``btc_conf_file``. If ``btc_conf_file`` is not
specified, ``~/.bitcoin/bitcoin.conf`` or equivalent is used by
default. The default port is set according to the chain parameters in
use: mainnet, testnet, or regtest.
Usually no arguments to ``Proxy()`` are needed; the local bitcoind will
be used.
``timeout`` - timeout in seconds before the HTTP interface times out
"""
super(Proxy, self).__init__(service_url=service_url,
service_port=service_port,
btc_conf_file=btc_conf_file,
timeout=timeout)
def call(self, service_name, *args):
"""Call an RPC method by name and raw (JSON encodable) arguments"""
return self._call(service_name, *args)
    def get_raw_mempool(self):
        results = self.call('getrawmempool', True)
        new_results = []
        for result_key, entry in results.items():
            # Drop nested fields we do not need; pop defensively in case the
            # bitcoind version omits some of them.
            entry.pop('fees', None)
            entry.pop('depends', None)
            entry.pop('spentby', None)
            entry['txid'] = result_key
            new_results.append(entry)
        return new_results
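# Illustrative usage (a sketch, not part of this module; assumes a local bitcoind
# whose credentials are discoverable via bitcoin.conf or the .cookie file):
#
#     proxy = RawProxy()
#     height = proxy.getblockcount()      # any RPC method name works via __getattr__
#     mempool = Proxy().get_raw_mempool()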
|
124015
|
from bson import json_util
slide_list_file = '/data08/shared/lehhou/necrosis_segmentation_workingdir/slide_list.txt'
# read the slide list from file
with open(slide_list_file) as f:
    content = f.readlines()
# print the second-to-last character of the last line (the last one is '\n')
print(content[-1][-2])
|
124022
|
class Base(object):
def __secret(self):
print("don't tell")
def public(self):
self.__secret()
class Derived(Base):
def __secret(self):
print("never ever")
if __name__ == "__main__":
print("Base class members:", dir(Base))
print("Derived class members:", dir(Derived))
print("Base.public() result:")
Base().public()
print("Derived.public() result:")
Derived().public()
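# Note on the output: name mangling rewrites self.__secret() inside Base.public()
# to self._Base__secret() at class-compile time, so both Base().public() and
# Derived().public() print "don't tell"; Derived's _Derived__secret is never
# reached through public(). The dir() listings above show both mangled names.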
|
124025
|
from kubernetes_manager.models import (
KubernetesBase,
KubernetesConfigMap,
KubernetesContainer,
KubernetesDeployment,
KubernetesIngress,
KubernetesJob,
KubernetesMetadataObjBase,
KubernetesNamespace,
KubernetesNetworkingBase,
KubernetesPodTemplate,
KubernetesService,
KubernetesVolume,
KubernetesVolumeMount,
TargetCluster,
)
from rest_framework import serializers
class TargetClusterSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = TargetCluster
fields = ["title", "api_endpoint", "telemetry_endpoint", "config"]
class KubernetesBaseSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = KubernetesBase
fields = ["title", "description", "cluster", "config"]
abstract = True
class KubernetesMetadataObjBaseSerializer(KubernetesBaseSerializer):
class Meta:
model = KubernetesMetadataObjBase
fields = KubernetesBaseSerializer.Meta.fields + ["labels", "annotations"]
abstract = True
class KubernetesNetworkingBaseSerializer(KubernetesMetadataObjBaseSerializer):
class Meta:
model = KubernetesNetworkingBase
fields = KubernetesMetadataObjBaseSerializer.Meta.fields + ["api_version", "kind", "port", "namespace", "kuid"]
abstract = True
class KubernetesVolumeSerializer(KubernetesBaseSerializer):
class Meta:
model = KubernetesVolume
fields = KubernetesBaseSerializer.Meta.fields
class KubernetesVolumeMountSerializer(KubernetesBaseSerializer):
class Meta:
model = KubernetesVolumeMount
fields = KubernetesBaseSerializer.Meta.fields + ["mount_path", "sub_path"]
class KubernetesNamespaceSerializer(KubernetesMetadataObjBaseSerializer):
class Meta:
model = KubernetesNamespace
fields = KubernetesMetadataObjBaseSerializer.Meta.fields + ["api_version", "kind", "exists"]
class KubernetesConfigMapSerializer(KubernetesMetadataObjBaseSerializer):
class Meta:
model = KubernetesConfigMap
fields = KubernetesMetadataObjBaseSerializer.Meta.fields + [
"data",
"kind",
]
class KubernetesContainerSerializer(KubernetesBaseSerializer):
class Meta:
model = KubernetesContainer
fields = KubernetesBaseSerializer.Meta.fields + ["image_name", "image_tag", "image_pull_policy", "command", "args", "port", "volume_mounts"]
class KubernetesPodTemplateSerializer(KubernetesMetadataObjBaseSerializer):
class Meta:
model = KubernetesPodTemplate
fields = KubernetesMetadataObjBaseSerializer.Meta.fields + ["volumes", "containers", "restart_policy"]
class KubernetesDeploymentSerializer(KubernetesNetworkingBaseSerializer):
class Meta:
model = KubernetesDeployment
fields = KubernetesNetworkingBaseSerializer.Meta.fields + ["selector", "replicas", "pod_template"]
class KubernetesJobSerializer(KubernetesNetworkingBaseSerializer):
class Meta:
model = KubernetesJob
fields = KubernetesNetworkingBaseSerializer.Meta.fields + ["backoff_limit", "pod_template"]
class KubernetesServiceSerializer(KubernetesNetworkingBaseSerializer):
class Meta:
model = KubernetesService
fields = KubernetesNetworkingBaseSerializer.Meta.fields + ["selector", "target_port"]
class KubernetesIngressSerializer(KubernetesNetworkingBaseSerializer):
class Meta:
model = KubernetesIngress
fields = KubernetesNetworkingBaseSerializer.Meta.fields + ["hostname", "path", "target_service"]
|
124041
|
import click
from pprint import pprint
from .decorators import onlineChain, unlockWallet
from .main import main
@main.command()
@click.pass_context
@onlineChain
@click.argument("members", nargs=-1)
@click.option("--account", help="Account that takes this action", type=str)
@unlockWallet
def approvecommittee(ctx, members, account):
""" Approve committee member(s)
"""
pprint(ctx.peerplays.approvecommittee(members, account=account))
@main.command()
@click.pass_context
@onlineChain
@click.argument("members", nargs=-1)
@click.option("--account", help="Account that takes this action", type=str)
@unlockWallet
def disapprovecommittee(ctx, members, account):
""" Disapprove committee member(s)
"""
pprint(ctx.peerplays.disapprovecommittee(members, account=account))
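# Illustrative invocation (hypothetical entry-point name and arguments; the
# actual binary depends on how `main` is installed):
#
#     $ peerplays approvecommittee committee-member-1 --account myaccount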
|
124042
|
from itertools import islice
from re import compile
from snowddl.blueprint import TableBlueprint, TableColumn, DataType, BaseDataType
from snowddl.resolver.abc_schema_object_resolver import AbstractSchemaObjectResolver, ResolveResult, ObjectType
cluster_by_syntax_re = compile(r'^(\w+)?\((.*)\)$')
class TableResolver(AbstractSchemaObjectResolver):
def get_object_type(self) -> ObjectType:
return ObjectType.TABLE
def get_existing_objects_in_schema(self, schema: dict):
existing_objects = {}
cur = self.engine.execute_meta("SHOW TABLES IN SCHEMA {database:i}.{schema:i}", {
"database": schema['database'],
"schema": schema['schema'],
})
for r in cur:
# Skip external tables
if r['is_external'] == 'Y':
continue
full_name = f"{r['database_name']}.{r['schema_name']}.{r['name']}"
existing_objects[full_name] = {
"database": r['database_name'],
"schema": r['schema_name'],
"name": r['name'],
"owner": r['owner'],
"is_transient": r['kind'] == 'TRANSIENT',
"cluster_by": r['cluster_by'] if r['cluster_by'] else None,
"change_tracking": bool(r['change_tracking'] == 'ON'),
"search_optimization": bool(r.get('search_optimization') == 'ON'),
"comment": r['comment'] if r['comment'] else None,
}
return existing_objects
def get_blueprints(self):
return self.config.get_blueprints_by_type(TableBlueprint)
def create_object(self, bp: TableBlueprint):
query = self._build_create_table(bp)
self.engine.execute_safe_ddl(query)
if bp.search_optimization:
self.engine.execute_safe_ddl("ALTER TABLE {full_name:i} ADD SEARCH OPTIMIZATION", {
"full_name": bp.full_name,
})
return ResolveResult.CREATE
def compare_object(self, bp: TableBlueprint, row: dict):
alters = []
is_replace_required = False
bp_cols = {str(c.name): c for c in bp.columns}
snow_cols = self._get_existing_columns(bp)
remaining_col_names = list(snow_cols.keys())
for col_name, snow_c in snow_cols.items():
# Drop columns which do not exist in blueprint
if col_name not in bp_cols:
alters.append(self.engine.format("DROP COLUMN {col_name:i}", {
"col_name": col_name,
}))
remaining_col_names.remove(col_name)
continue
bp_c = bp_cols[col_name]
# Set or drop NOT NULL constraint
if snow_c.not_null and not bp_c.not_null:
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} DROP NOT NULL", {
"col_name": col_name,
}))
elif not snow_c.not_null and bp_c.not_null:
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} SET NOT NULL", {
"col_name": col_name,
}))
# Default
if snow_c.default != bp_c.default:
# DROP DEFAULT is supported
if snow_c.default is not None and bp_c.default is None:
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} DROP DEFAULT", {
"col_name": col_name,
}))
# Switch to another sequence is supported
elif isinstance(snow_c.default, str) and snow_c.default.upper().endswith('.NEXTVAL') \
and isinstance(bp_c.default, str) and bp_c.default.upper().endswith('.NEXTVAL'):
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} SET DEFAULT {default:r}", {
"col_name": col_name,
"default": bp_c.default,
}))
# All other DEFAULT changes are not supported
else:
is_replace_required = True
# Comments
if snow_c.comment != bp_c.comment:
            # UNSET COMMENT is currently not supported for columns; we can only set it to an empty string
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} COMMENT {comment}", {
"col_name": col_name,
"comment": bp_c.comment if bp_c.comment else '',
}))
# If type matches exactly, skip all other checks
if snow_c.type == bp_c.type:
continue
# Only a few optimized MODIFY COLUMN ... TYPE actions are supported
# https://docs.snowflake.com/en/sql-reference/sql/alter-table-column.html
if snow_c.type.base_type == bp_c.type.base_type:
# Increase or decrease precision of NUMBER, but not scale
                if snow_c.type.base_type == BaseDataType.NUMBER \
                and snow_c.type.val1 != bp_c.type.val1 \
                and snow_c.type.val2 == bp_c.type.val2:
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} TYPE {col_type:r}", {
"col_name": col_name,
"col_type": bp_c.type,
}))
continue
if snow_c.type.base_type == BaseDataType.VARCHAR \
and snow_c.type.val1 < bp_c.type.val1:
alters.append(self.engine.format("MODIFY COLUMN {col_name:i} TYPE {col_type:r}", {
"col_name": col_name,
"col_type": bp_c.type,
}))
continue
# All other transformations require full table replace
is_replace_required = True
# Remaining column names exactly match initial part of blueprint column names
if remaining_col_names == list(islice(bp_cols.keys(), 0, len(remaining_col_names))):
# Get remaining part of blueprint columns
for col_name, bp_c in islice(bp_cols.items(), len(remaining_col_names), None):
query = self.engine.query_builder()
query.append("ADD COLUMN {col_name:i} {col_type:r}", {
"col_name": col_name,
"col_type": bp_c.type,
})
if bp_c.default is not None:
query.append("DEFAULT {default}", {
"default": bp_c.default,
})
if bp_c.not_null:
query.append("NOT NULL")
if bp_c.comment:
query.append("COMMENT {comment}", {
"comment": bp_c.comment,
})
alters.append(query)
else:
# Reordering of columns is not supported
is_replace_required = True
        # Changing a TRANSIENT table to permanent and back is not supported
if bp.is_transient != row['is_transient']:
is_replace_required = True
# Clustering key
if not self._compare_cluster_by(bp, row):
if bp.cluster_by:
alters.append(self.engine.format("CLUSTER BY ({cluster_by:r})", {
"cluster_by": bp.cluster_by,
}))
else:
alters.append(self.engine.format("DROP CLUSTERING KEY"))
# Change tracking
if bp.change_tracking != row['change_tracking']:
alters.append(self.engine.format("SET CHANGE_TRACKING = {change_tracking:b}", {
"change_tracking": bp.change_tracking,
}))
# Search optimization
if bp.search_optimization and not row['search_optimization']:
alters.append("ADD SEARCH OPTIMIZATION")
elif not bp.search_optimization and row['search_optimization']:
alters.append("DROP SEARCH OPTIMIZATION")
# Comment
if bp.comment != row['comment']:
if bp.comment:
alters.append(self.engine.format("SET COMMENT = {comment}", {
"comment": bp.comment,
}))
else:
alters.append(self.engine.format("UNSET COMMENT"))
if is_replace_required:
self.engine.execute_unsafe_ddl(self._build_create_table(bp, snow_cols), condition=self.engine.settings.execute_replace_table)
if bp.search_optimization:
self.engine.execute_safe_ddl("ALTER TABLE {full_name:i} ADD SEARCH OPTIMIZATION", {
"full_name": bp.full_name,
})
return ResolveResult.REPLACE
elif alters:
for alter in alters:
self.engine.execute_unsafe_ddl("ALTER TABLE {full_name:i} {alter:r}", {
"full_name": bp.full_name,
"alter": alter,
})
return ResolveResult.ALTER
return ResolveResult.NOCHANGE
def drop_object(self, row: dict):
self.engine.execute_unsafe_ddl("DROP TABLE {database:i}.{schema:i}.{table_name:i}", {
"database": row['database'],
"schema": row['schema'],
"table_name": row['name'],
})
return ResolveResult.DROP
def _get_existing_columns(self, bp: TableBlueprint):
existing_columns = {}
cur = self.engine.execute_meta("DESC TABLE {full_name:i}", {
"full_name": bp.full_name,
})
for r in cur:
existing_columns[r['name']] = TableColumn(
name=r['name'],
type=DataType(r['type']),
not_null=bool(r['null?'] == 'N'),
default=r['default'] if r['default'] else None,
comment=r['comment'] if r['comment'] else None,
)
return existing_columns
def _build_create_table(self, bp: TableBlueprint, snow_cols=None):
query = self.engine.query_builder()
query.append("CREATE")
if snow_cols:
query.append("OR REPLACE")
if bp.is_transient:
query.append("TRANSIENT")
query.append("TABLE {full_name:i}", {
"full_name": bp.full_name,
})
query.append_nl("(")
for idx, c in enumerate(bp.columns):
query.append_nl(" {comma:r}{col_name:i} {col_type:r}", {
"comma": " " if idx == 0 else ", ",
"col_name": c.name,
"col_type": c.type,
})
if c.default is not None:
query.append("DEFAULT {default:r}", {
"default": c.default,
})
if c.not_null:
query.append("NOT NULL")
if c.comment:
query.append("COMMENT {comment}", {
"comment": c.comment,
})
query.append_nl(")")
if bp.cluster_by:
query.append_nl("CLUSTER BY ({cluster_by:r})", {
"cluster_by": bp.cluster_by,
})
if bp.change_tracking:
query.append_nl("CHANGE_TRACKING = TRUE")
if bp.retention_time is not None:
query.append_nl("DATA_RETENTION_TIME_IN_DAYS = {retention_time:d}", {
"retention_time": bp.retention_time
})
if bp.comment:
query.append_nl("COMMENT = {comment}", {
"comment": bp.comment,
})
if snow_cols:
query.append_nl("COPY GRANTS")
query.append_nl("AS")
query.append_nl("SELECT")
for idx, c in enumerate(bp.columns):
if str(c.name) in snow_cols:
query.append_nl(" {comma:r}{col_name:i}::{col_type:r} AS {col_name:i}", {
"comma": " " if idx == 0 else ", ",
"col_name": c.name,
"col_type": c.type,
})
else:
query.append_nl(" {comma:r}{col_val}::{col_type:r} AS {col_name:i}", {
"comma": " " if idx == 0 else ", ",
"col_name": c.name,
"col_type": c.type,
"col_val": c.default,
})
query.append_nl("FROM {full_name:i}", {
"full_name": bp.full_name,
})
return query
def _compare_cluster_by(self, bp: TableBlueprint, row: dict):
bp_cluster_by = ', '.join(bp.cluster_by) if bp.cluster_by else None
snow_cluster_by = cluster_by_syntax_re.sub(r'\2', row['cluster_by']) if row['cluster_by'] else None
return bp_cluster_by == snow_cluster_by
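# For illustration, _build_create_table for a two-column blueprint emits DDL
# shaped roughly like the following (a sketch; actual quoting and spacing depend
# on the engine's formatter):
#
#     CREATE TABLE "DB"."SCHEMA"."T"
#     (
#          "ID" NUMBER(38,0) NOT NULL
#         , "NAME" VARCHAR(100) COMMENT 'display name'
#     )
#     COMMENT = 'example table'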
|
124043
|
from aqt import gui_hooks
from aqt.utils import showWarning
opened = False
def startup():
global opened
if opened:
warning_text = "\n".join((
"Pokemanki does not support opening a second profile in one session.",
"Please close Anki and reopen it again to the desired profile.",
"Pokemanki may behave strangely"
))
showWarning(warning_text, title="Pokemanki won't function properly")
return
opened = True
from . import main
gui_hooks.profile_did_open.append(startup)
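# Note: profile_did_open fires every time a profile is opened, so the `opened`
# flag ensures `main` is imported only for the first profile of the session and
# a warning is shown for any subsequent one.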
|
124046
|
import grpc
import pytest
from uuid import uuid4
from multiprocessing import Event
from google.protobuf import json_format
from google.protobuf.empty_pb2 import Empty
from common.cryptographer import Cryptographer
from teos.watcher import Watcher
from teos.responder import Responder
from teos.gatekeeper import UserInfo
from teos.internal_api import (
InternalAPI,
SubscriptionExpired,
AppointmentLimitReached,
AppointmentAlreadyTriggered,
AppointmentNotFound,
AppointmentStatus,
)
from teos.protobuf.tower_services_pb2 import GetTowerInfoResponse
from teos.protobuf.tower_services_pb2_grpc import TowerServicesStub
from teos.protobuf.user_pb2 import (
RegisterRequest,
RegisterResponse,
GetUsersResponse,
GetUserRequest,
GetUserResponse,
GetSubscriptionInfoRequest,
)
from teos.protobuf.appointment_pb2 import (
Appointment,
AddAppointmentRequest,
AddAppointmentResponse,
GetAppointmentRequest,
GetAppointmentResponse,
GetAllAppointmentsResponse,
)
from test.teos.conftest import config
from test.teos.unit.mocks import AppointmentsDBM as DBManagerMock
from test.teos.unit.conftest import (
generate_keypair,
get_random_value_hex,
mock_connection_refused_return,
raise_invalid_parameter,
raise_auth_failure,
raise_not_enough_slots,
)
internal_api_endpoint = "{}:{}".format(config.get("INTERNAL_API_HOST"), config.get("INTERNAL_API_PORT"))
MAX_APPOINTMENTS = 100
teos_sk, teos_pk = generate_keypair()
teos_id = Cryptographer.get_compressed_pk(teos_pk)
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
def raise_subscription_expired(*args, **kwargs):
# Message is passed in the API response
raise SubscriptionExpired("Your subscription expired at")
def raise_appointment_limit_reached(*args, **kwargs):
raise AppointmentLimitReached("")
def raise_appointment_already_triggered(*args, **kwargs):
raise AppointmentAlreadyTriggered("")
def raise_appointment_not_found(*args, **kwargs):
raise AppointmentNotFound("")
@pytest.fixture(scope="module")
def internal_api(gatekeeper_mock, carrier_mock):
db_manager = DBManagerMock()
responder = Responder(db_manager, gatekeeper_mock, carrier_mock, gatekeeper_mock.block_processor)
watcher = Watcher(
db_manager,
gatekeeper_mock,
gatekeeper_mock.block_processor,
responder,
teos_sk,
MAX_APPOINTMENTS,
config.get("LOCATOR_CACHE_SIZE"),
)
i_api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
i_api.rpc_server.start()
yield i_api
i_api.rpc_server.stop(None)
@pytest.fixture()
def stub():
return TowerServicesStub(grpc.insecure_channel(internal_api_endpoint))
def send_appointment(stub, appointment, signature):
response = stub.add_appointment(
AddAppointmentRequest(
appointment=Appointment(
locator=appointment.locator,
encrypted_blob=appointment.encrypted_blob,
to_self_delay=appointment.to_self_delay,
),
signature=signature,
)
)
return response
def send_wrong_appointment(stub, appointment, signature):
with pytest.raises(grpc.RpcError) as e:
send_appointment(stub, appointment, signature)
return e
# METHODS ACCESSIBLE BY THE CLIENT
# The following tests cover methods that clients can reach and that, therefore, must be properly
# authenticated at the application level and must validate their input data
def test_register(internal_api, stub, monkeypatch):
# Normal request should work just fine
# Monkeypatch the response from the Watcher
slots = 100
expiry = 1000
sig = get_random_value_hex(73)
monkeypatch.setattr(internal_api.watcher, "register", lambda x: (slots, expiry, sig))
response = stub.register(RegisterRequest(user_id=user_id))
assert isinstance(response, RegisterResponse)
def test_register_wrong_user_id(internal_api, stub, monkeypatch):
# If the user id is wrong we should get INVALID_ARGUMENT with the proper message
wrong_user_id = get_random_value_hex(32)
# Monkeypatch the response from the Watcher
monkeypatch.setattr(internal_api.watcher, "register", raise_invalid_parameter)
with pytest.raises(grpc.RpcError) as e:
stub.register(RegisterRequest(user_id=wrong_user_id))
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert "Provided public key does not match expected format" in e.value.details()
def test_add_appointment(internal_api, stub, generate_dummy_appointment, monkeypatch):
# Normal request should work just fine
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
# Mock the return from the Watcher
data = {
"locator": appointment.locator,
"start_block": 100,
"signature": get_random_value_hex(71),
"available_slots": 100,
"subscription_expiry": 1000,
}
monkeypatch.setattr(internal_api.watcher, "add_appointment", lambda x, y: data)
response = send_appointment(stub, appointment, appointment_signature)
assert isinstance(response, AddAppointmentResponse)
def test_add_appointment_non_registered(internal_api, stub, generate_dummy_appointment, monkeypatch):
# If the user is not registered we should get UNAUTHENTICATED + the proper error message
# Mock not registered user
monkeypatch.setattr(internal_api.watcher, "add_appointment", raise_auth_failure)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Invalid signature or user does not have enough slots available" in e.value.details()
def test_add_appointment_not_enough_slots(internal_api, stub, generate_dummy_appointment, monkeypatch):
    # UNAUTHENTICATED should also be returned if the user does not have enough appointment slots
# Mock user with 0 slots
monkeypatch.setattr(internal_api.watcher, "add_appointment", raise_not_enough_slots)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Invalid signature or user does not have enough slots available" in e.value.details()
def test_add_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment, monkeypatch):
# UNAUTHENTICATED is returned if the subscription has expired
# Mock a user with an expired subscription
monkeypatch.setattr(internal_api.watcher, "add_appointment", raise_subscription_expired)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
def test_add_appointment_limit_reached(internal_api, stub, generate_dummy_appointment, monkeypatch):
# If the tower appointment limit is reached RESOURCE_EXHAUSTED should be returned
# Mock the Watcher's return
monkeypatch.setattr(internal_api.watcher, "add_appointment", raise_appointment_limit_reached)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.RESOURCE_EXHAUSTED
assert "Appointment limit reached" in e.value.details()
def test_add_appointment_already_triggered(internal_api, stub, generate_dummy_appointment, monkeypatch):
    # If the appointment has already been triggered we should get ALREADY_EXISTS
# Mock the Watcher's return
monkeypatch.setattr(internal_api.watcher, "add_appointment", raise_appointment_already_triggered)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
e = send_wrong_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.ALREADY_EXISTS
assert "The provided appointment has already been triggered" in e.value.details()
def test_get_appointment(internal_api, stub, generate_dummy_appointment, monkeypatch):
# Requests should work provided the user is registered and the appointment exists for him
# Create an appointment and mock the return from the Watcher (the appointment status is not relevant here)
appointment = generate_dummy_appointment()
monkeypatch.setattr(
internal_api.watcher, "get_appointment", lambda x, y: (appointment.to_dict(), AppointmentStatus.BEING_WATCHED)
)
# Request it back
message = f"get appointment {appointment.locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_appointment(GetAppointmentRequest(locator=appointment.locator, signature=request_signature))
assert isinstance(response, GetAppointmentResponse)
def test_get_appointment_non_registered(internal_api, stub, generate_dummy_appointment, monkeypatch):
# If the user is not registered or the appointment does not belong to him the response should be NOT_FOUND
# Mock the response from the Watcher
monkeypatch.setattr(internal_api.watcher, "get_appointment", raise_auth_failure)
    # Send the request as a non-registered user
locator = get_random_value_hex(32)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
def test_get_appointment_non_existent(internal_api, stub, monkeypatch):
# Non-existing appointment will also return NOT_FOUND
# Mock the response from the Watcher
monkeypatch.setattr(internal_api.watcher, "get_appointment", raise_appointment_not_found)
# Request a non-existing appointment
locator = get_random_value_hex(16)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "Appointment not found" in e.value.details()
def test_get_appointment_subscription_expired(internal_api, stub, generate_dummy_appointment, monkeypatch):
# UNAUTHENTICATED is returned if the subscription has expired
# Mock a user with an expired subscription
monkeypatch.setattr(internal_api.watcher, "get_appointment", raise_subscription_expired)
# Request the data
locator = get_random_value_hex(32)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
def test_get_subscription_info(internal_api, stub, monkeypatch):
# Requesting the subscription info for a registered user should work
# Mock the user being there. Data is not relevant since we only care about the type of response.
subscription_info = UserInfo(100, [], 1000)
monkeypatch.setattr(internal_api.watcher, "get_subscription_info", lambda x: (subscription_info, []))
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
response = stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert isinstance(response, GetUserResponse)
def test_get_subscription_info_non_registered(internal_api, stub, monkeypatch):
# Requesting the subscription info for a non-registered user should fail
# Mock the user not being there.
monkeypatch.setattr(internal_api.watcher, "get_subscription_info", raise_auth_failure)
message = "get subscription info"
signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "User not found. Have you registered?" in e.value.details()
def test_get_subscription_info_expired(internal_api, stub, monkeypatch):
# Requesting the subscription info for expired users should fail
# Mock the user not being there.
monkeypatch.setattr(internal_api.watcher, "get_subscription_info", raise_subscription_expired)
# Request subscription details
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAUTHENTICATED
assert "Your subscription expired at" in e.value.details()
# METHODS ACCESSIBLE BY THE CLI
# The following tests cover methods that the CLI can reach and that, therefore, have a softer
# security model than the previous set. Note that there is currently no authentication for the CLI (FIXME: #230)
def test_get_all_appointments(internal_api, stub, generate_dummy_appointment, generate_dummy_tracker, monkeypatch):
# get_all_appointments should return a dict with the appointments in the Watcher and Responder
# Mock the Watcher's response to get_all_watcher_appointments and get_all_responder_trackers
local_appointments = {uuid4().hex: generate_dummy_appointment().to_dict() for _ in range(4)}
local_trackers = {uuid4().hex: generate_dummy_tracker().to_dict() for _ in range(2)}
monkeypatch.setattr(internal_api.watcher, "get_all_watcher_appointments", lambda: local_appointments)
monkeypatch.setattr(internal_api.watcher, "get_all_responder_trackers", lambda: local_trackers)
# Get the response and cast it to dict
response = stub.get_all_appointments(Empty())
assert isinstance(response, GetAllAppointmentsResponse)
appointments = json_format.MessageToDict(response.appointments)
for uuid, appointment in local_appointments.items():
assert dict(appointments.get("watcher_appointments")[uuid]) == appointment
for uuid, tracker in local_trackers.items():
assert dict(appointments.get("responder_trackers")[uuid]) == tracker
def test_get_all_appointments_watcher(internal_api, stub, generate_dummy_appointment, monkeypatch):
# Mock data being only present in the Watcher
local_appointments = {uuid4().hex: generate_dummy_appointment().to_dict()}
monkeypatch.setattr(internal_api.watcher, "get_all_watcher_appointments", lambda: local_appointments)
monkeypatch.setattr(internal_api.watcher, "get_all_responder_trackers", lambda: {})
# Get the response and cast it to dict
response = stub.get_all_appointments(Empty())
assert isinstance(response, GetAllAppointmentsResponse)
appointments = json_format.MessageToDict(response.appointments)
assert len(appointments.get("responder_trackers")) == 0
for uuid, appointment in local_appointments.items():
assert appointments.get("watcher_appointments")[uuid] == appointment
def test_get_all_appointments_responder(internal_api, stub, generate_dummy_tracker, monkeypatch):
    # Mock data being only present in the Responder
local_trackers = {uuid4().hex: generate_dummy_tracker().to_dict()}
monkeypatch.setattr(internal_api.watcher, "get_all_watcher_appointments", lambda: {})
monkeypatch.setattr(internal_api.watcher, "get_all_responder_trackers", lambda: local_trackers)
# Get the response and cast it to dict
response = stub.get_all_appointments(Empty())
assert isinstance(response, GetAllAppointmentsResponse)
appointments = json_format.MessageToDict(response.appointments)
assert len(appointments.get("watcher_appointments")) == 0
for uuid, tracker in local_trackers.items():
assert dict(appointments.get("responder_trackers")[uuid]) == tracker
def test_get_tower_info_empty(internal_api, stub):
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == teos_id
assert response.n_registered_users == 0
assert response.n_watcher_appointments == 0
assert response.n_responder_trackers == 0
def test_get_tower_info(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher.gatekeeper, "registered_users", {"uid1": {}})
monkeypatch.setattr(
internal_api.watcher,
"appointments",
{
"uid1": {"locator": "locator1", "user_id": "user_id1"},
"uid2": {"locator": "locator2", "user_id": "user_id2"},
},
)
monkeypatch.setattr(
internal_api.watcher.responder,
"trackers",
{
"uid1": {"penalty_txid": "txid1", "locator": "locator1", "user_id": "user_id1"},
"uid2": {"penalty_txid": "txid2", "locator": "locator2", "user_id": "user_id2"},
"uid3": {"penalty_txid": "txid3", "locator": "locator2", "user_id": "user_id3"},
},
)
response = stub.get_tower_info(Empty())
assert isinstance(response, GetTowerInfoResponse)
assert response.tower_id == Cryptographer.get_compressed_pk(internal_api.watcher.signing_key.public_key)
assert response.n_registered_users == 1
assert response.n_watcher_appointments == 2
assert response.n_responder_trackers == 3
def test_get_users(internal_api, stub, monkeypatch):
    # Mock user data (for the sake of the test it does not matter that it is not properly formatted)
mock_users = ["user1", "user2", "user3"]
monkeypatch.setattr(
internal_api.watcher, "get_registered_user_ids", lambda: {"user1": dict(), "user2": dict(), "user3": dict()},
)
# Check we receive the same list of users
response = stub.get_users(Empty())
assert isinstance(response, GetUsersResponse)
assert response.user_ids == mock_users
def test_get_user(internal_api, stub, monkeypatch):
# Mock the Watcher's call return
mock_user_id = "02c73bad28b78dd7e3bcad609d330e0d60b97fa0e08ca1cf486cb6cab8dd6140ac"
mock_available_slots = 100
mock_subscription_expiry = 1234
mock_user_info = UserInfo(mock_available_slots, mock_subscription_expiry)
monkeypatch.setattr(internal_api.watcher, "get_user_info", lambda x: mock_user_info)
response = stub.get_user(GetUserRequest(user_id=mock_user_id))
assert isinstance(response, GetUserResponse)
# Numbers are currently returned as floats, even if they are integers. This is due to gRPC.
assert json_format.MessageToDict(response.user) == {
"appointments": [],
"available_slots": float(mock_available_slots),
"subscription_expiry": float(mock_subscription_expiry),
}
def test_get_user_not_found(internal_api, stub, monkeypatch):
# Mock a non-registered user response
monkeypatch.setattr(internal_api.watcher, "get_user_info", lambda x: None)
with pytest.raises(grpc.RpcError) as e:
stub.get_user(GetUserRequest(user_id=get_random_value_hex(32)))
assert e.value.code() == grpc.StatusCode.NOT_FOUND
assert "User not found" in e.value.details()
def test_stop(internal_api, stub):
# Test how the event changes when stop is called
assert not internal_api.stop_command_event.is_set()
stub.stop(Empty())
assert internal_api.stop_command_event.is_set()
# TESTS WITH BITCOIND UNREACHABLE
def test_register_bitcoind_crash(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher, "register", mock_connection_refused_return)
with pytest.raises(grpc.RpcError) as e:
stub.register(RegisterRequest(user_id=user_id))
assert e.value.code() == grpc.StatusCode.UNAVAILABLE
assert "Service unavailable" in e.value.details()
def test_add_appointment_bitcoind_crash(internal_api, stub, generate_dummy_appointment, monkeypatch):
monkeypatch.setattr(internal_api.watcher, "add_appointment", mock_connection_refused_return)
appointment = generate_dummy_appointment()
appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
with pytest.raises(grpc.RpcError) as e:
send_appointment(stub, appointment, appointment_signature)
assert e.value.code() == grpc.StatusCode.UNAVAILABLE
assert "Service unavailable" in e.value.details()
def test_get_appointment_bitcoind_crash(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher, "get_appointment", mock_connection_refused_return)
locator = get_random_value_hex(32)
message = f"get appointment {locator}"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_appointment(GetAppointmentRequest(locator=locator, signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAVAILABLE
assert "Service unavailable" in e.value.details()
def test_get_subscription_info_bitcoind_crash(internal_api, stub, monkeypatch):
monkeypatch.setattr(internal_api.watcher, "get_subscription_info", mock_connection_refused_return)
message = "get subscription info"
request_signature = Cryptographer.sign(message.encode("utf-8"), user_sk)
with pytest.raises(grpc.RpcError) as e:
stub.get_subscription_info(GetSubscriptionInfoRequest(signature=request_signature))
assert e.value.code() == grpc.StatusCode.UNAVAILABLE
assert "Service unavailable" in e.value.details()
|
124064
|
import cv2
from skimage.feature import hog
from sklearn.decomposition import PCA
class FeatureSelection:
def __init__(self):
print("\n----------------------------------------------------------")
print("--------------P-R-O-C-E-S-S-I-N-G---D-A-T-A---------------")
print("----------------------------------------------------------\n")
    def choose(self, function, image_path, size, n_feature):
        if function == "resize":
            return self.resize_normalization(image_path, size)
        if function == "pca":
            return self.pca(image_path, size, n_feature)
        if function == "hog":
            return self.hog(image_path, size)
        # fail fast instead of silently returning None for unknown selections
        raise ValueError("Unknown feature function: %s" % function)
def resize_normalization(self, image_path, size):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (size, size), interpolation=cv2.INTER_AREA)
_, im_bw = cv2.threshold(resized, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
flatten_feature = list(im_bw.flatten())
return flatten_feature
def hog(self, image_path, size):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (size, size), interpolation=cv2.INTER_AREA)
_, im_bw = cv2.threshold(resized, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
fd, hog_image = hog(im_bw, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualize=True, multichannel=False)
im_bw = list(hog_image.flatten())
return im_bw
def pca(self, image_path, size, n_feature):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (size, size), interpolation=cv2.INTER_AREA)
_, im_bw = cv2.threshold(resized, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# print("bw: ", len(list(im_bw.flatten())))
pca = PCA(n_components=n_feature)
pca.fit(im_bw)
X = pca.transform(im_bw)
flatten_feature = list(X.flatten())
# print("PCA: ", len(flatten_feature))
return flatten_feature
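# Illustrative usage (hypothetical image path; `size` is the square resize
# target and `n_feature` the PCA component count, unused by resize/hog):
#
#     fs = FeatureSelection()
#     hog_vec = fs.choose("hog", "digit.png", size=64, n_feature=None)
#     pca_vec = fs.choose("pca", "digit.png", size=64, n_feature=16)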
|
124101
|
from tulip import hybrid
import polytope
import scipy.io
"""
Contains functions that will read a .mat file exported by the MATLAB function
mpt2python and import it to either a PwaSysDyn or a LtiSysDyn.
<NAME>, June 2014
"""
def load(filename):
data = scipy.io.loadmat(filename)
islti = bool(data['islti'][0][0])
ispwa = bool(data['ispwa'][0][0])
if islti:
        sys = load_lti(data['A'], data['B'], data['K'], data['domainA'],
                       data['domainB'], data['UsetA'], data['UsetB'])
elif ispwa:
nlti = len(data['A'][0])
lti_systems = []
for i in range(nlti):
A = data['A'][0][i]
B = data['B'][0][i]
K = data['K'][0][i]
domainA = data['domainA'][0][i]
domainB = data['domainB'][0][i]
UsetA = data['UsetA'][0][i]
UsetB = data['UsetB'][0][i]
ltisys = load_lti(A, B, K, domainA, domainB, UsetA, UsetB)
lti_systems.append(ltisys)
cts_ss = polytope.Polytope(data['ctsA'], data['ctsB'])
sys = hybrid.PwaSysDyn(list_subsys=lti_systems, domain=cts_ss)
return sys
def load_lti(A, B, K, domainA, domainB, UsetA, UsetB):
domain = polytope.Polytope(domainA, domainB)
Uset = polytope.Polytope(UsetA, UsetB)
lti = hybrid.LtiSysDyn(A=A, B=B, K=K, domain=domain, Uset=Uset)
return lti
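# Illustrative usage (hypothetical .mat file exported by MATLAB's mpt2python):
#
#     sys = load('pwa_system.mat')   # returns a PwaSysDyn or an LtiSysDyn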
|
124107
|
from ._base import DanubeCloudCommand, CommandError
class Command(DanubeCloudCommand):
help = 'Initialize the virtual environments.'
def handle(self, *args, **options):
raise CommandError('Use ctl.sh directly')
|
124148
|
import FWCore.ParameterSet.Config as cms
import EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi
rpcunpacker = EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi.rpcUnpackingModule.clone()
rpcunpacker.InputLabel = cms.InputTag("rawDataCollector")
rpcunpacker.doSynchro = cms.bool(True)
|
124151
|
from luigi.contrib.s3 import S3Target
from ob_pipelines.batch import BatchTask, LoggingTaskWrapper
from ob_pipelines.config import cfg
from ob_pipelines.entities.sample import Sample
from ob_pipelines.pipelines.xenograft.tasks.star_by_species import StarBySpecies
class DisambiguateHumanMouse(BatchTask, LoggingTaskWrapper, Sample):
job_definition = 'disambiguate'
@property
def parameters(self):
outdir = '{}/{}/disambiguate/'.format(cfg['S3_BUCKET'], self.sample_folder)
return {
'outdir': outdir,
'sample': self.sample_id,
'aligner': 'star',
'A': self.input()['human']['bam'].path,
'B': self.input()['mouse']['bam'].path
}
def requires(self):
return {
'human': StarBySpecies(sample_id=self.sample_id, species='human'),
'mouse': StarBySpecies(sample_id=self.sample_id, species='mouse')
}
def output(self):
output_files = {
'human': '{}{}.disambiguatedSpeciesA.bam',
'mouse': '{}{}.disambiguatedSpeciesB.bam',
'human_ambiguous': '{}{}.ambiguousSpeciesA.bam',
'mouse_ambiguous': '{}{}.ambiguousSpeciesB.bam',
'summary': '{}{}_summary.txt'
}
s3_paths = {k: v.format(self.parameters['outdir'], self.parameters['sample'])
for k, v in output_files.items()}
return {k: S3Target(path) for k, path in s3_paths.items()}
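# Illustrative invocation (a sketch; the module path and sample id below are
# hypothetical):
#
#     $ luigi --module ob_pipelines.pipelines.xenograft.tasks.disambiguate \
#         DisambiguateHumanMouse --sample-id SAMPLE_1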
|
124152
|
def test_metadata(system_config) -> None:
assert system_config.provider_code == "system"
assert system_config._prefix == "TEST"
def test_prefixize(system_config) -> None:
assert system_config.prefixize("key1") == "TEST_KEY1"
assert system_config.unprefixize("TEST_KEY1") == "key1"
def test_get_variable(monkeypatch, system_config) -> None:
monkeypatch.setenv("TEST_KEY1", "1")
monkeypatch.setenv("TEST_KEY2", "2")
assert system_config.get("key1") == "1"
assert system_config.get("key2") == "2"
monkeypatch.undo()
def test_get_variables_list(monkeypatch, system_config) -> None:
monkeypatch.setenv("TEST_KEY1", "1")
monkeypatch.setenv("TEST_KEY2", "2")
monkeypatch.setenv("TEZT_T1", "1")
monkeypatch.setenv("TEZT_T2", "2")
assert "key1" in system_config.keys()
assert "key2" in system_config.keys()
assert "t1" not in system_config.keys()
assert "t2" not in system_config.keys()
monkeypatch.undo()
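# Note: the `system_config` fixture is provided elsewhere (e.g. conftest.py).
# A minimal sketch of what these tests assume (hypothetical class name):
#
#     @pytest.fixture
#     def system_config():
#         return SystemConfig(prefix="TEST")   # provider_code == "system"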
|
124181
|
from functools import partial
from textwrap import dedent
from io import StringIO
import pytest
import pandas.testing as pdtest
import numpy
import pandas
from wqio.utils import misc
from wqio.tests import helpers
@pytest.fixture
def basic_data():
testcsv = """\
Date,A,B,C,D
X,1,2,3,4
Y,5,6,7,8
Z,9,0,1,2
"""
return pandas.read_csv(StringIO(dedent(testcsv)), index_col=["Date"])
@pytest.fixture
def multiindex_df():
index = pandas.MultiIndex.from_product(
[["A", "B", "C"], ["mg/L"]], names=["loc", "units"]
)
return pandas.DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["a", "b"])
class mockDataset(object):
def __init__(self, inflow, outflow):
self.inflow = mockLocation(inflow)
self.outflow = mockLocation(outflow)
class mockLocation(object):
def __init__(self, data):
self.data = data
self.stats = mockSummary(data)
class mockSummary(object):
def __init__(self, data):
self.N = len(data)
self.max = max(data)
self.min = min(data)
self.nonething = None
def test_add_column_level(basic_data):
known_cols = pandas.MultiIndex.from_tuples(
[(u"test", u"A"), (u"test", u"B"), (u"test", u"C"), (u"test", u"D")]
)
newdata = misc.add_column_level(basic_data, "test", "testlevel")
assert known_cols.tolist() == newdata.columns.tolist()
# can only add levels to non-MultiIndex columns
with helpers.raises(ValueError):
misc.add_column_level(newdata, "test2", "testlevel2")
@pytest.mark.parametrize("L1", [0, "loc"])
@pytest.mark.parametrize("L2", [2, "units"])
def test_swap_column_levels(multiindex_df, L1, L2):
columns = pandas.MultiIndex.from_product(
[["A", "B", "C"], ["res", "cen"], ["mg/L"]], names=["loc", "value", "units"]
)
data = numpy.arange(len(columns) * 10).reshape((10, len(columns)))
df = pandas.DataFrame(data, columns=columns).pipe(misc.swap_column_levels, L1, L2)
expected_columns = pandas.MultiIndex.from_product(
[["mg/L"], ["cen", "res"], ["A", "B", "C"]], names=["units", "value", "loc"]
)
pdtest.assert_index_equal(df.columns, expected_columns)
def test_flatten_columns(multiindex_df, basic_data):
expected = ["A_mg/L", "B_mg/L", "C_mg/L"]
flat = misc.flatten_columns(multiindex_df.T)
assert flat.columns.tolist() == expected
assert (
misc.flatten_columns(basic_data).columns.tolist() == basic_data.columns.tolist()
)
def test_expand_columns():
x = numpy.arange(12).reshape(3, 4)
df = pandas.DataFrame(x, columns=("A_a", "A_b", "B_a", "B_c"))
res_cols = pandas.MultiIndex(
levels=[["A", "B"], ["a", "b", "c"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 2]],
names=["top", "bottom"],
)
expected = pandas.DataFrame(x, columns=res_cols)
result = misc.expand_columns(df, ["top", "bottom"])
pdtest.assert_frame_equal(result, expected)
@pytest.mark.parametrize("criteria", [None, lambda row: row[0] in ["A", "B"]])
@pytest.mark.parametrize("dropold", [True, False])
def test_redefine_index_level(multiindex_df, criteria, dropold):
expected_cols = ["a", "b"]
if dropold:
expected_value = [[1, 2], [3, 4], [5, 6]]
if criteria:
expected_index = [("A", "ug/L"), ("B", "ug/L"), ("C", "mg/L")]
else:
expected_index = [("A", "ug/L"), ("B", "ug/L"), ("C", "ug/L")]
else:
if criteria:
expected_value = [[1, 2], [1, 2], [3, 4], [3, 4], [5, 6]]
expected_index = [
("A", "mg/L"),
("A", "ug/L"),
("B", "mg/L"),
("B", "ug/L"),
("C", "mg/L"),
]
else:
expected_value = [[1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6]]
expected_index = [
("A", "mg/L"),
("A", "ug/L"),
("B", "mg/L"),
("B", "ug/L"),
("C", "mg/L"),
("C", "ug/L"),
]
result = misc.redefine_index_level(
multiindex_df, "units", "ug/L", criteria=criteria, dropold=dropold
)
expected = pandas.DataFrame(
data=expected_value,
index=pandas.MultiIndex.from_tuples(expected_index, names=["loc", "units"]),
columns=expected_cols,
)
pdtest.assert_frame_equal(result, expected)
@pytest.fixture
def basic_dataset():
inflow = [1, 3, 4, 12.57]
outflow = [2, 5, 7, 15.17]
return mockDataset(inflow, outflow)
def test_nested_getattr(basic_dataset):
result = misc.nested_getattr(basic_dataset, "inflow.stats.max")
expected = basic_dataset.inflow.stats.max
assert result == expected
@pytest.mark.parametrize(
("strformat", "expected", "attribute"),
[
("%d", "4", "inflow.stats.N"),
("%0.2f", "15.17", "outflow.stats.max"),
(None, "--", "inflow.stats.nonething"),
],
)
def test_stringify(basic_dataset, strformat, expected, attribute):
result = misc.stringify(basic_dataset, strformat, attribute=attribute)
assert result == expected
def test_categorize_columns():
csvdata = StringIO(
dedent(
"""\
parameter,units,season,lower,NSQD Median,upper
Cadmium (Cd),ug/L,autumn,0.117,0.361,0.52
Cadmium (Cd),ug/L,spring,0.172,0.352,0.53
Cadmium (Cd),ug/L,summer,0.304,0.411,0.476
Cadmium (Cd),ug/L,winter,0.355,0.559,1.125
Dissolved Chloride (Cl),mg/L,autumn,0.342,2.3,5.8
Dissolved Chloride (Cl),mg/L,spring,2.5,2.5,2.5
Dissolved Chloride (Cl),mg/L,summer,0.308,0.762,1.24
Escherichia coli,MPN/100 mL,autumn,1200.0,15500.0,24000.0
Escherichia coli,MPN/100 mL,spring,10.0,630.0,810.0
Escherichia coli,MPN/100 mL,summer,21000.0,27000.0,35000.0
<NAME>,MPN/100 mL,winter,20.0,200.0,800.0
"""
)
)
df = pandas.read_csv(csvdata)
df2 = misc.categorize_columns(df, "parameter", "units", "season")
# check pandas API that the first df has object columns
assert object in df.dtypes.values
# confirm that all of those objects are gone
assert object not in df2.dtypes.values
with helpers.raises(ValueError):
misc.categorize_columns(df, "parameter", "upper")
@pytest.mark.parametrize(
("value", "expected"), [(3, "<5"), (17, "15 - 20"), (25, "20 - 25"), (46, ">35")]
)
@pytest.mark.parametrize("units", [None, "mm"])
def test_classifier(value, units, expected):
bins = numpy.arange(5, 36, 5)
if units is not None:
expected = "{} {}".format(expected, units)
result = misc.classifier(value, bins, units=units)
assert result == expected
assert numpy.isnan(misc.classifier(numpy.nan, bins, units=units))
def test_unique_categories():
bins = [5, 10, 15]
classifier = partial(misc.classifier, bins=bins, units="mm")
known_categories = ["<5 mm", "5 - 10 mm", "10 - 15 mm", ">15 mm"]
result_categories = misc.unique_categories(classifier, bins)
assert result_categories == known_categories
def test_pop_many():
some_dict = dict(zip(list("ABCDE"), range(5)))
expected = {"C": 2, "D": 3}
assert misc.pop_many(some_dict, "A", "B", "E") == expected
def test_selector():
x = numpy.arange(10)
expected = numpy.array(list("AAABBBCCZZ"))
result = misc.selector("Z", (x <= 2, "A"), (x < 6, "B"), (x <= 7, "C"))
assert all(result == expected)
@pytest.mark.parametrize("input_values", ["A", 4, ("this", "5")])
def test_non_filter(input_values):
assert misc.non_filter(input_values)
@pytest.mark.parametrize("input_values", ["A", 4, ("this", "5")])
def test_no_op(input_values):
assert misc.no_op(input_values) == input_values
@pytest.mark.parametrize("value", [4, lambda x: 4])
def test_assign_multilevel_column(value):
df = pandas.DataFrame(
data=1,
index=pandas.MultiIndex.from_product([list("ABCD"), [1, 2, 3, 4]]),
columns=pandas.MultiIndex.from_product([list("abc"), [1, 2, 3]]),
)
result = misc.assign_multilevel_column(df, value, "d", 1)
expected = pandas.Series(4, index=df.index, name=("d", 1))
pdtest.assert_series_equal(result[("d", 1)], expected)
@pytest.mark.parametrize("join_char", [None, "-"])
def test_symbolize_bools(join_char):
df = pandas.DataFrame(
{
"A": [True, False, False],
"B": [False, True, True],
"C": [False, True, numpy.nan],
}
)
result = misc.symbolize_bools(
df, true_symbol="◆", false_symbol="◇", other_symbol="✖", join_char=join_char
)
if not join_char:
expected = pandas.DataFrame(
{
"A": {0: "◆", 1: "◇", 2: "◇"},
"B": {0: "◇", 1: "◆", 2: "◆"},
"C": {0: "◇", 1: "◆", 2: "✖"},
}
)
pdtest.assert_frame_equal(result, expected)
else:
expected = pandas.Series(["◆-◇-◇", "◇-◆-◆", "◇-◆-✖"])
pdtest.assert_series_equal(result, expected)
|
124236
|
class NoDataError(Exception):
def __init__(self, field, obj, module):
message = "Missing field '" + field + "' in the object " + str(obj) + " needed in " + module
super(NoDataError, self).__init__(message)
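# Illustrative usage:
#
#     raise NoDataError("username", {"id": 7}, "auth_module")
#     # NoDataError: Missing field 'username' in the object {'id': 7} needed in auth_module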
|
124240
|
from absl import logging
import os
from lib import dataset
from libs import settings
import tensorflow as tf
def generate_tf_record(
data_dir,
raw_data=False,
tfrecord_path="serialized_dataset",
num_shards=8):
teacher_sett = settings.Settings(use_student_settings=False)
student_sett = settings.Settings(use_student_settings=True)
dataset_args = teacher_sett["dataset"]
if dataset_args["name"].lower().strip() == "div2k":
assert len(data_dir) == 2
ds = dataset.load_div2k_dataset(
data_dir[0],
data_dir[1],
student_sett["hr_size"],
shuffle=True)
elif raw_data:
ds = dataset.load_dataset_directory(
dataset_args["name"],
data_dir,
dataset.scale_down(
method=dataset_args["scale_method"],
size=student_sett["hr_size"]))
else:
ds = dataset.load_dataset(
dataset_args["name"],
dataset.scale_down(
method=dataset_args["scale_method"],
size=student_sett["hr_size"]),
data_dir=data_dir)
to_tfrecord(ds, tfrecord_path, num_shards)
def load_dataset(tfrecord_path, lr_size, hr_size):
def _parse_tf_record(serialized_example):
features = {
"low_res_image": tf.io.FixedLenFeature([], dtype=tf.string),
"high_res_image": tf.io.FixedLenFeature([], dtype=tf.string)}
example = tf.io.parse_single_example(serialized_example, features)
lr_image = tf.io.parse_tensor(
example["low_res_image"],
out_type=tf.float32)
lr_image = tf.reshape(lr_image, lr_size)
hr_image = tf.io.parse_tensor(
example["high_res_image"],
out_type=tf.float32)
hr_image = tf.reshape(hr_image, hr_size)
return lr_image, hr_image
files = tf.io.gfile.glob(
os.path.join(tfrecord_path, "*.tfrecord"))
    if len(files) == 0:
        raise ValueError("No .tfrecord files found in the given path")
ds = tf.data.TFRecordDataset(files).map(_parse_tf_record)
    if len(files) == 1:
        # Disable auto-sharding when there is a single shard. Note: with_options
        # returns a new dataset and must receive the options object (the original
        # code passed the dataset itself); the attribute path below assumes a
        # TF 2.x API.
        option = tf.data.Options()
        option.experimental_distribute.auto_shard_policy = \
            tf.data.experimental.AutoShardPolicy.OFF
        ds = ds.with_options(option)
ds = ds.shuffle(128, reshuffle_each_iteration=True)
return ds
def to_tfrecord(ds, tfrecord_path, num_shards=8):
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_to_string(image_lr, image_hr):
features = {
"low_res_image": _bytes_feature(
tf.io.serialize_tensor(image_lr).numpy()),
"high_res_image": _bytes_feature(
tf.io.serialize_tensor(image_hr).numpy())}
example_proto = tf.train.Example(
features=tf.train.Features(feature=features))
return example_proto.SerializeToString()
def write_to_tfrecord(shard_id, ds):
filename = tf.strings.join(
[tfrecord_path, "/dataset.", tf.strings.as_string(shard_id),
".tfrecord"])
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(ds.map(lambda _, x: x))
return tf.data.Dataset.from_tensors(filename)
def map_serialize_to_string(image_lr, image_hr):
map_fn = tf.py_function(
serialize_to_string,
(image_lr, image_hr),
tf.string)
return tf.reshape(map_fn, ())
ds = ds.map(map_serialize_to_string)
ds = ds.enumerate()
ds = ds.apply(tf.data.experimental.group_by_window(
lambda i, _: i % num_shards,
write_to_tfrecord,
tf.int64.max))
for data in ds:
logging.info("Written to: %s" % data.numpy())
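# A minimal end-to-end sketch (all paths and shapes below are hypothetical; the
# two-directory input assumes the configured dataset name is "div2k", and
# lr_size/hr_size must match the tensors that were serialized):
#
#     generate_tf_record(["path/to/div2k/hr", "path/to/div2k/lr"],
#                        tfrecord_path="serialized_dataset", num_shards=8)
#     ds = load_dataset("serialized_dataset", lr_size=[64, 64, 3], hr_size=[256, 256, 3])
#     for lr_image, hr_image in ds.take(1):
#         print(lr_image.shape, hr_image.shape)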
|
124252
|
from lrabbit_scrapy.spider import LrabbitSpider
from lrabbit_scrapy.common_utils.network_helper import RequestSession
from lrabbit_scrapy.common_utils.print_log_helper import LogUtils
from lrabbit_scrapy.common_utils.all_in_one import FileStore
import os
from lrabbit_scrapy.common_utils.mysql_helper import MysqlClient
from parsel import Selector
class Spider(LrabbitSpider):
"""
spider_name : lrabbit blog spider
"""
# unique spider name
spider_name = "lrabbit_blog"
# max thread worker numbers
max_thread_num = 2
    # whether to open one MySQL connection per thread; enable this if max_thread_num exceeds 10 and your worker code runs MySQL queries
thread_mysql_open = True
    # reset the whole task list; every program restart re-initializes the task list
reset_task_config = True
    # loop init_task_list: when all tasks are finished and you want to run them again, enable this
loop_task_config = True
    # removal confirmation option; if enabled, you must confirm when the task list is initialized
remove_confirm_config = True
    # config_env_name: the environment variable holding the config path; on Linux, run: export config_path="crawl.ini"
config_env_name = "config_path"
# redis db_num
redis_db_config = 0
# debug log ,open tracback log
debug_config = False
def __init__(self):
super().__init__()
self.session = RequestSession()
self.proxy_session = RequestSession(proxies=None)
csv_path = os.path.join(os.path.abspath(os.getcwd()), f"{self.spider_name}.csv")
self.field_names = ['id', 'title', 'datetime']
self.blog_file = FileStore(file_path=csv_path, filed_name=self.field_names)
def worker(self, *args):
task = args[0]
mysql_client: MysqlClient
if len(args) == 2:
mysql_client = args[1]
mysql_client.execute("select id from rookie limit 100")
# mysql_client.execute("")
url = f'http://www.lrabbit.life/post_detail/?id={task}'
LogUtils.log_running(url)
res = self.session.send_request(method='GET', url=url)
selector = Selector(res.text)
title = selector.css(".detail-title h1::text").get()
datetime = selector.css(".detail-info span::text").get()
if title:
post_data = {"id": task, "title": title, 'datetime': datetime}
self.blog_file.write(post_data)
        # after successfully fetching the content, update the Redis stats
self.update_stat_redis()
LogUtils.log_finish(task)
def init_task_list(self):
# you can get init task from mysql
res = self.mysql_client.query("select id from rookie limit 100 ")
return [task['id'] for task in res]
# return [i for i in range(100)]
if __name__ == '__main__':
spider = Spider()
spider.run()
|
124275
|
from keyboard_alike import reader
class BarCodeReader(reader.Reader):
"""
    This class supports the Lindy USB bar code scanner configured to work as a keyboard
http://www.lindy.co.uk/accessories-c9/input-devices-c357/barcode-scanners-c360/barcode-scanner-ccd-usb-p1352
"""
pass
if __name__ == "__main__":
    barcode_reader = BarCodeReader(0x03eb, 0x6201, 84, 6, should_reset=True)
    barcode_reader.initialize()
    print(barcode_reader.read().strip())
    barcode_reader.disconnect()
|
124312
|
import requests
import json
import base64
import redis
import os
def handle(st):
# parse Github event
req = json.loads(st)
minutes = 1
minutes_val = os.getenv("cache-minutes")
    if minutes_val is not None:
minutes = int(minutes_val)
loginName = req["sender"]["login"]
try:
redis_client = redis.StrictRedis("redis")
redis_key = "tweet-" + loginName
cached = redis_client.get(redis_key)
        if cached is not None:
print(loginName + " attempted to trigger event again before cache expired. Extending cache timeout.")
redis_client.setex(redis_key, 60 * minutes, "1")
return
redis_client.setex(redis_key, 60 * minutes, "1")
except Exception:
print("Redis may be down or errored")
# download the avatar binary using getavatar function
r = requests.post("http://gateway:8080/function/get-avatar", json=req)
res = r.json()
# Figure out the correct extension for the avatar.
ext = ".jpg"
if res["contentType"] == "image/png":
ext = ".png"
# Take the encoded image and turn into binary bytes
imageData = base64.standard_b64decode(res["content"])
put_url = "http://minio-shim:8080/put/" + loginName + ext
# Store in the fan-club photo gallery
    r1 = requests.post(put_url, data=imageData)
gazer = {}
gazer["login"] = loginName
gazer["filename"] = loginName + ext
r2 = requests.post("http://gateway:8080/function/tweetstargazer", json.dumps(gazer))
club_res = {}
club_res["put_url"] = put_url
club_res["tweet_result"] = r2.text
club_res["status"] = "success"
club_res["username"] = req["sender"]["login"]
club_res["bytes"] = len(imageData)
# Useful for logging, GitHub's invoker will receive this string
print(json.dumps(club_res))
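# A minimal invocation sketch (the payload mirrors the GitHub webhook fields read
# above; the login value is hypothetical):
#
#     handle(json.dumps({"sender": {"login": "octocat"}}))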
|
124317
|
from py2neo import authenticate, Graph
import os.path
import bleach
import sys
from flask import Flask
app = Flask(__name__)
def FindSimilarRepositories(InputrepoK):
#Sanitize input
print("got ......",InputrepoK)
sys.stdout.flush()
Inputrepo = bleach.clean(InputrepoK).strip()
host = os.environ['LOCALNEO4JIPPORT']
login = os.environ['LOCALNEO4JLOGIN']
password = os.environ['<PASSWORD>']
authenticate(host,login,password)
graph = Graph(os.environ['neoURLlocal'])
output = ""
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\" class=\"repositoryinfo\">"
path3 = "</a>"
#Find similar repository > 1 connections
query1="MATCH (a {id:\"" + Inputrepo + "\"})"
query2="-[r1:IS_ACTOR|IN_ORGANIZATION]->(match)<-[r2:IS_ACTOR|IN_ORGANIZATION]-(b) "
query3="with b, collect (distinct match.id) as connections, collect (distinct type(r1)) as rel1 "
query4="where length(connections) >= 1 return b.id,length(connections) as count,length(rel1) as rel "
query5="order by length(connections) desc limit 5"
query = query1 + query2 + query3 + query4 + query5
#print query
a = graph.cypher.execute(query)
for record in a:
if (record['rel'] < 2):
output += "<li>" + path1 + record['b.id'] + path2 + record['b.id'] + path3 + ": " + str(record['count']) + " contributors in common</li>"
else:
output += "<li>" + path1 + record['b.id'] + path2 + record['b.id'] + path3 + ": " + str(record['count']-1) + " contributors in common & belong to same organization</li>"
if (len(output) > 0):
return ("<ul>" + output + "</ul>")
else:
#Nothing found!
return "<span class=\"text-danger\">You got me stumped!</span>"
|
124322
|
from graphwar.attack.flip_attacker import FlipAttacker
class UntargetedAttacker(FlipAttacker):
r"""Base class for adversarial non-targeted attack.
Parameters
----------
data : Data
PyG-like data denoting the input graph
device : str, optional
the device of the attack running on, by default "cpu"
seed : Optional[int], optional
the random seed for reproducing the attack, by default None
name : Optional[str], optional
name of the attacker, if None, it would be :obj:`__class__.__name__`,
by default None
kwargs : additional arguments of :class:`graphwar.attack.Attacker`,
Raises
------
TypeError
unexpected keyword argument in :obj:`kwargs`
Note
----
    :class:`graphwar.attack.untargeted.UntargetedAttacker` is a subclass of
    :class:`graphwar.attack.FlipAttacker`.
It belongs to graph modification attack (GMA).
"""
def reset(self) -> "UntargetedAttacker":
"""Reset the state of the Attacker
Returns
-------
UntargetedAttacker
the attacker itself
"""
super().reset()
self.num_budgets = None
self.structure_attack = None
self.feature_attack = None
return self
def attack(self, num_budgets, structure_attack, feature_attack) -> "UntargetedAttacker":
"""Base method that describes the adversarial untargeted attack
Parameters
----------
        num_budgets : int (0 < num_budgets <= :attr:`max_perturbations`) or float (0 < num_budgets <= 1)
            Case 1:
                `int` : the number of attack budgets,
                i.e., how many edges can be perturbed.
            Case 2:
                `float`: the number of attack budgets is
                the ratio of :attr:`max_perturbations`.
            See :attr:`max_perturbations`.
structure_attack : bool
whether to conduct structure attack, i.e., modify the graph structure (edges)
feature_attack : bool
whether to conduct feature attack, i.e., modify the node features
"""
_is_setup = getattr(self, "_is_setup", True)
if not _is_setup:
raise RuntimeError(
f'{self.__class__.__name__} requires a surrogate model to conduct attack. '
'Use `attacker.setup_surrogate(surrogate_model)`.')
if not self._is_reset:
raise RuntimeError(
'Before calling attack, you must reset your attacker. Use `attacker.reset()`.'
)
if not (structure_attack or feature_attack):
raise RuntimeError(
'Either `structure_attack` or `feature_attack` must be True.')
if feature_attack and not self._allow_feature_attack:
raise RuntimeError(
f"{self.name} does NOT support attacking features."
" If the model can conduct feature attack, please call `attacker.set_allow_feature_attack(True)`."
)
if structure_attack and not self._allow_structure_attack:
raise RuntimeError(
f"{self.name} does NOT support attacking structures."
" If the model can conduct structure attack, please call `attacker.set_allow_structure_attack(True)`."
)
num_budgets = self._check_budget(
num_budgets, max_perturbations=self.num_edges//2)
self.num_budgets = num_budgets
self.structure_attack = structure_attack
self.feature_attack = feature_attack
self._is_reset = False
return self
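# A minimal usage sketch (RandomAttack stands in for any concrete subclass and is
# not defined here; the data and budget values are hypothetical):
#
#     attacker = RandomAttack(data, device="cpu", seed=42)
#     attacker.reset()
#     attacker.attack(0.05, structure_attack=True, feature_attack=False)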
|
124328
|
import torch
import warnings
from torch.optim.optimizer import Optimizer, required
import math
import itertools as it
import torch.optim as optim
warnings.filterwarnings("once")
def get_optimizer(optimizer: str = 'Adam',
lookahead: bool = False,
model=None,
separate_decoder: bool = True,
lr: float = 1e-3,
lr_e: float = 1e-3):
"""
# https://github.com/lonePatient/lookahead_pytorch/blob/master/run.py
:param optimizer:
:param lookahead:
:param model:
:param separate_decoder:
:param lr:
:param lr_e:
:return:
"""
if separate_decoder:
params = [
{'params': model.cls_head.parameters(), 'lr': lr
},
{'params': model.encoder.parameters(), 'lr': lr_e},
]
else:
params = [{'params': model.parameters(), 'lr': lr}]
if optimizer == 'Adam':
optimizer = optim.Adam(params, lr=lr)
elif optimizer == 'RAdam':
optimizer = RAdam(params, lr=lr)
elif optimizer == 'Ralamb':
optimizer = Ralamb(params, lr=lr)
elif optimizer == 'AdamW':
optimizer = AdamW(params, lr=lr)
elif optimizer == 'diffGrad':
optimizer = diffGrad(params, lr=lr)
elif optimizer == 'diffRGrad':
optimizer = diffRGrad(params, lr=lr)
else:
raise ValueError('unknown base optimizer type')
if lookahead:
optimizer = Lookahead(base_optimizer=optimizer, k=5, alpha=0.5)
return optimizer
class diffGrad(Optimizer):
r"""Implements diffGrad algorithm. It is modified from the pytorch implementation of Adam.
It has been proposed in `diffGrad: An Optimization Method for Convolutional Neural Networks`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _diffGrad: An Optimization Method for Convolutional Neural Networks:
https://arxiv.org/abs/1909.11015
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, version=0, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
#save version
self.version = version
def __setstate__(self, state):
super().__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('diffGrad does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Previous gradient
state['previous_grad'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq, previous_grad = state['exp_avg'], state['exp_avg_sq'], state['previous_grad']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# compute diffgrad coefficient (dfc)
if self.version==0:
diff = abs(previous_grad - grad)
elif self.version ==1:
diff = previous_grad-grad
elif self.version ==2:
diff = .5*abs(previous_grad - grad)
if self.version==0 or self.version==1:
dfc = 1. / (1. + torch.exp(-diff))
elif self.version==2:
                    dfc = 9. / (1. + torch.exp(-diff)) - 4  # DFC2 = 9/(1+exp(-0.5*|g_prev - g|)) - 4, range (0.5, 5)
state['previous_grad'] = grad
# update momentum with dfc
exp_avg1 = exp_avg * dfc
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg1, denom)
return loss
class diffRGrad(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
version=0,
weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
self.version = version
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
super(diffRGrad, self).__init__(params, defaults)
def __setstate__(self, state):
super(diffRGrad, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('diffGRad does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
# Previous gradient
state['previous_grad'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
state['previous_grad'] = state['previous_grad'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
previous_grad = state['previous_grad']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
# compute diffgrad coefficient (dfc)
#print("grad = ",grad.size())
#print("prev_grad = ",previous_grad.size())
if self.version==0:
diff = abs(previous_grad - grad)
elif self.version ==1:
diff = previous_grad-grad
elif self.version ==2:
diff = .5*abs(previous_grad - grad)
if self.version==0 or self.version==1:
dfc = 1. / (1. + torch.exp(-diff))
elif self.version==2:
                    dfc = 9. / (1. + torch.exp(-diff)) - 4  # DFC2 = 9/(1+exp(-0.5*|g_prev - g|)) - 4, range (0.5, 5)
state['previous_grad'] = grad
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
denom = exp_avg_sq.sqrt().add_(group['eps'])
# update momentum with dfc
#print("dfc ",dfc.size())
#print("exp_avg ",exp_avg.size())
exp_avg1 = exp_avg * dfc.float()
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg1, denom)
p.data.copy_(p_data_fp32)
elif step_size > 0:
#print("exp_avg in elif",exp_avg.size())
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
class PlainRAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(PlainRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(PlainRAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class AdamW(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, warmup = warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
else:
scheduled_lr = group['lr']
step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss
class Ranger(Optimizer):
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer/blob/master/ranger.py
def __init__(self, params, lr=1e-3, alpha=0.5, k=6,
N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-5, weight_decay=0):
# parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
# parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
# N_sma_threshold of 5 seems better in testing than 4.
# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
# prep defaults and init torch.optim base
defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
# adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
# look ahead params
self.alpha = alpha
self.k = k
# radam buffer for state
self.radam_buffer = [[None,None,None] for ind in range(10)]
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
# Evaluate averages and grad, update param tensors
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ranger optimizer does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p] #get state dict for this param
if len(state) == 0: #if first time to run...init dictionary with our desired entries
#if self.first_run_check==0:
#self.first_run_check=1
#print("Initializing slow buffer...should not see this at load from saved model!")
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
#look ahead weight storage now in state dict
state['slow_buffer'] = torch.empty_like(p.data)
state['slow_buffer'].copy_(p.data)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
#begin computations
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
#compute variance mov avg
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
#compute mean moving avg
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.radam_buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
#integrated look ahead...
#we do it at the param level instead of group level
if state['step'] % group['k'] == 0:
slow_p = state['slow_buffer'] #get access to slow param tensor
slow_p.add_(self.alpha, p.data - slow_p) #(fast weights - slow weights) * alpha
p.data.copy_(slow_p) #copy interpolated weights to RAdam param tensor
return loss
# https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py
class Lookahead(Optimizer):
def __init__(self, base_optimizer,alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
self.optimizer = base_optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
for group in self.param_groups:
group["step_counter"] = 0
self.slow_weights = [[p.clone().detach() for p in group['params']]
for group in self.param_groups]
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
loss = self.optimizer.step()
for group,slow_weights in zip(self.param_groups,self.slow_weights):
group['step_counter'] += 1
if group['step_counter'] % self.k != 0:
continue
for p,q in zip(group['params'],slow_weights):
if p.grad is None:
continue
q.data.add_(self.alpha,p.data - q.data)
p.data.copy_(q.data)
return loss
class Ralamb(Optimizer):
'''
Ralamb optimizer (RAdam + LARS trick)
'''
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = radam_step
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
radam_norm = p_data_fp32.pow(2).sum().sqrt()
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-radam_step * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
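# A minimal usage sketch (the model is assumed to expose `cls_head` and `encoder`
# attributes, which get_optimizer requires when separate_decoder=True):
#
#     optimizer = get_optimizer('RAdam', lookahead=True, model=model,
#                               separate_decoder=True, lr=1e-3, lr_e=1e-4)
#     loss.backward()
#     optimizer.step()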
|
124377
|
import fnmatch
import json
import logging
import os
import re
from collections import Counter
from operator import itemgetter
import en_core_web_sm
from spacy.matcher import Matcher
from gamechangerml.configs.config import DefaultConfig as Config
import gamechangerml.src.modelzoo.semantic.term_extract.version_ as v
logger = logging.getLogger("gamechanger")
class TermExtractor(object):
noun, adj, prep = (
{"POS": "NOUN", "IS_PUNCT": False},
{"POS": "ADJ", "IS_PUNCT": False},
{"POS": "DET", "IS_PUNCT": False},
)
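    # note: despite its name, `prep` is bound to a determiner pattern (POS == "DET"),
    # so the third pattern below matches spans like "ADJ/NOUN* NOUN DET ADJ/NOUN* NOUN"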
patterns = [
[adj],
[{"POS": {"IN": ["ADJ", "NOUN"]}, "OP": "*", "IS_PUNCT": False}, noun],
[
{"POS": {"IN": ["ADJ", "NOUN"]}, "OP": "*", "IS_PUNCT": False},
noun,
prep,
{"POS": {"IN": ["ADJ", "NOUN"]}, "OP": "*", "IS_PUNCT": False},
noun,
],
]
entities = ["ORG"]
__version__ = v.__version__
def __init__(self, max_term_length=2, min_freq=2, ner=False):
"""
Term extraction using parts-of-speech.
Args:
max_term_length (int): extracts `patterns` up the length of
this argument
min_freq (int): minimum term frequency for constructing the
final output
ner (bool): if True, adds organization named entity to the
term counts. NB this adds significant time to the
processing
"""
if max_term_length < 2:
raise ValueError("max_tokens must be > 1")
if max_term_length > 5:
raise ValueError("max_tokens must be < 5")
if min_freq <= 0:
raise ValueError("min_freq must be > 0")
self.ner = ner
self.min_freq = min_freq
self.max_tokens = max_term_length
if self.ner:
self.nlp = en_core_web_sm.load(disable=["parser"])
else:
self.nlp = en_core_web_sm.load(disable=["ner", "parser"])
self.matcher = Matcher(self.nlp.vocab)
logger.info(
"{} version {}".format(self.__class__.__name__, self.__version__)
)
@staticmethod
def _word_length(string):
return string.count(" ") + 1
@staticmethod
def _clean(string):
cleaned = re.sub("\\n", " ", string)
cleaned = re.sub("[\u201c\u201d]+", "", cleaned, re.U)
cleaned = re.sub('"', "", cleaned)
cleaned = re.sub("(?:\\d+\\.)+", " ", cleaned)
cleaned = re.sub("- {2,}", " ", cleaned)
cleaned = re.sub("\\.\\.{2,}", " ", cleaned)
cleaned = re.sub("\\(\\w\\)", " ", cleaned, re.I)
cleaned = re.sub("\\w\\. ?", " ", cleaned, re.I)
return re.sub("\\s{2,}", " ", cleaned)
def count_from_document(self, document):
"""
Counts patterns and optionally, named entities from a single document.
Args:
document (str): input document
Returns:
Counter
"""
term_counter = Counter()
def add_to_counter(matcher, doc, idx, matches):
match_id, start, end = matches[idx]
candidate = str(doc[start:end])
if 1 < self._word_length(candidate) <= self.max_tokens:
term_counter[candidate] += 1
for i, pattern in enumerate(TermExtractor.patterns):
self.matcher.add("term_{}".format(i), add_to_counter, pattern)
doc = self.nlp(document)
_ = self.matcher(doc)
if self.ner:
ent_counter = self._count_entities(doc)
term_counter.update(ent_counter)
return term_counter
@staticmethod
def _count_entities(doc):
ents = [
re.sub("^the ", "", ent.text)
for ent in doc.ents
if ent.label_ in TermExtractor.entities
]
ent_counter = Counter({ent: 1 for ent in ents})
return ent_counter
@staticmethod
def gen_json(data_dir=Config.DATA_DIR):
"""
Generator to read and extract the `text` from a JSON file in the
`data_dir`.
Args:
data_dir (str): path to the JSON files
Yields:
str
Raises:
ValueError if the directory is not valid
JSONDecodeError if json.load() fails
IOError, RuntimeError if there is a problem
opening or reading a file
"""
if not os.path.isdir(data_dir):
raise ValueError("invalid data_dir, got {}".format(data_dir))
try:
for file_ in os.listdir(data_dir):
if fnmatch.fnmatch(file_, "*.json"):
with open(os.path.join(data_dir, file_)) as fp:
j_doc = json.load(fp)
if "text" in j_doc:
yield j_doc["text"]
else:
logger.warning("no 'text' key in {}".format(file_))
except (IOError, json.JSONDecodeError, RuntimeError) as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise
def generate_counts(self, data_dir=Config.DATA_DIR):
"""
Generator to read and process successive JSON files.
Args:
data_dir (str): path to the JSON files
Yields:
Counter
"""
technical_counts = Counter()
for text in self.gen_json(data_dir):
doc_count = self.count_from_document(self._clean(text.lower()))
technical_counts.update(doc_count)
yield technical_counts
@staticmethod
def _make_output(final_count, min_freq):
by_key = dict(sorted(final_count.items(), key=itemgetter(0)))
suggests = [
{"input": term, "weight": weight}
for term, weight in by_key.items()
if weight >= min_freq
]
return suggests
def count_from_dir(self, max_files=None, data_dir=Config.DATA_DIR):
"""
Counts patterns from JSON files in `data_dir`. For each 1-gram
prefix, a list of suffix, frequency tuples is created.
Args:
max_files (int|None): optional; maximum number of files to
consider for processing.
data_dir (str): path to the JSON files
Returns:
dict
"""
final_counter = dict()
f_count = 0
for tech_counts in self.generate_counts(data_dir):
final_counter = tech_counts
f_count += 1
if max_files is not None and f_count == max_files:
break
if f_count in [1, 5, 10] or f_count % 25 == 0:
logger.debug("processed {:>5,}".format(f_count))
logger.debug("total files processed : {:,}".format(f_count))
prefix_order = self._make_output(final_counter, self.min_freq)
logger.debug("total terms: {:,}".format(len(prefix_order)))
return prefix_order
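# A minimal usage sketch (the directory path is a placeholder; each JSON file there
# is expected to carry a top-level "text" key, per gen_json above):
#
#     extractor = TermExtractor(max_term_length=3, min_freq=2, ner=False)
#     suggestions = extractor.count_from_dir(max_files=100, data_dir="path/to/json_docs")
#     # -> [{"input": term, "weight": count}, ...]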
|
124392
|
from a_resnet_training_common_cli import Hyperparameters
from stanford_cars_augmentation_cli import AugmentHyperparameters, AugmentCLI
def generate_cli_hpo(parser):
"""Adding Hyperparameters to CLI arguments"""
parser.add_argument("--" + Hyperparameters.SCEDULER_RATE.value, dest=Hyperparameters.SCEDULER_RATE.value,
type=float,
help="number of epochs to wait before annealing learning rate", required=True)
parser.add_argument("--" + Hyperparameters.LEARNING_RATE.value, dest=Hyperparameters.LEARNING_RATE.value,
type=float,
help="learning rate to use", required=True)
parser.add_argument("--" + Hyperparameters.BATCH_SIZE.value, dest=Hyperparameters.BATCH_SIZE.value, type=int,
help="batch size to use", required=True)
parser.add_argument("--" + Hyperparameters.LEARNING_RATE_SCHEDULER.value,
dest=Hyperparameters.LEARNING_RATE_SCHEDULER.value, type=float,
help="annealing schedule rate to use. multiplied to learning rate", required=True)
parser.add_argument("--" + Hyperparameters.WEIGHT_DECAY.value, dest=Hyperparameters.WEIGHT_DECAY.value, type=float,
help="weight decay to use", required=True)
parser.add_argument("--" + Hyperparameters.MOMENTUM.value, dest=Hyperparameters.MOMENTUM.value, type=float,
help="momentum to use", required=True)
parser.add_argument("--" + Hyperparameters.NESTEROV.value, dest=Hyperparameters.NESTEROV.value, action='store_true',
help="use Nesterov")
parser.add_argument("--" + "no-" + Hyperparameters.NESTEROV.value, dest=Hyperparameters.NESTEROV.value,
action='store_false',
help="do not use Nesterov")
return parser
def generate_cli_hpo_augment(parser):
parser.add_argument("--" + AugmentHyperparameters.BRIGHTNESS.value, dest=AugmentHyperparameters.BRIGHTNESS.value,
type=float,
help="brightness factor. recommended range 0 - 9", required=True, default=3.2907)
parser.add_argument("--" + AugmentHyperparameters.CONTRAST.value, dest=AugmentHyperparameters.CONTRAST.value,
type=float,
help="contrast factor. recommended range 0-100", required=True, default=56.793)
parser.add_argument("--" + AugmentHyperparameters.HUE.value, dest=AugmentHyperparameters.HUE.value,
type=float,
help="hue factor. recommend range -0.5 - 0.5", required=True, default=-0.01286)
parser.add_argument("--" + AugmentHyperparameters.SATURATION.value, dest=AugmentHyperparameters.SATURATION.value,
type=float,
help="saturation factor. recommended range 0-100", required=True, default=2.36640)
return parser
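# A minimal wiring sketch (argparse is the standard library; the flag names come
# from the Hyperparameters/AugmentHyperparameters enums imported above):
#
#     import argparse
#     parser = generate_cli_hpo_augment(generate_cli_hpo(argparse.ArgumentParser()))
#     args = parser.parse_args()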
|
124417
|
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
length = len(num)
        # use a dict mapping value -> index + 1 (indices are 1-based)
        # since there is exactly one solution, overwriting a duplicate value's index is safe
dic = {}
for i in xrange(0, length):
val = num[i]
if (target - val) in dic:
return (dic[target - val], i + 1)
dic[val] = i + 1
## test code
# num=[2, 7, 11, 15]
# t= 26
# s = Solution()
# print s.twoSum(num, t)
|
124482
|
from .lop_setup import *
def test_matrix_1():
A = jnp.ones([4, 4])
op = lop.matrix(A)
x = jnp.ones(4)
assert_allclose(op.times(x), A @ x)
assert_allclose(op.trans(x), A.T @ x)
def test_matrix_2():
A = jnp.reshape(jnp.arange(20), (4,5))
op = lop.matrix(A)
x = jnp.ones(4)
y = jnp.ones(5)
assert_allclose(op.times(y), A @ y)
assert_allclose(op.trans(x), A.T @ x)
assert lop.dot_test_real(keys[0], op)
def test_matrix_3():
m, n, k = 10, 20, 5
# axis=0 [process column wise]
A = random.normal(cnb.KEYS[0], (m,n))
T = lop.jit(lop.matrix(A))
X = random.normal(cnb.KEYS[1], (n,k))
assert_allclose(T.times(X), A @ X)
Y = random.normal(cnb.KEYS[2], (m,k))
assert_allclose(T.trans(Y), A.T @ Y, atol=atol, rtol=rtol)
# axis=1 [process row wise]
T = lop.jit(lop.matrix(A, axis=1))
X = X.T
Y = Y.T
assert_allclose(T.times(X), (X @ A.T), atol=atol)
assert_allclose(T.trans(Y), (Y @ A), atol=atol)
def test_matrix_4():
# complex case
m, n, k = 10, 20, 5
# axis=0 [process column wise]
Ar = random.normal(cnb.KEYS[0], (m,n))
Ac = random.normal(cnb.KEYS[1], (m,n))
A = Ar + Ac * 1j
T = lop.jit(lop.matrix(A))
Xr = random.normal(cnb.KEYS[2], (n,k))
Xc = random.normal(cnb.KEYS[3], (n,k))
X = Xr + Xc * 1j
assert_allclose(T.times(X), A @ X)
Yr = random.normal(cnb.KEYS[4], (m,k))
Yc = random.normal(cnb.KEYS[5], (m,k))
Y = Yr + Yc * 1j
assert_allclose(T.trans(Y), cnb.hermitian(A) @ Y)
# axis=1 [process row wise]
T = lop.jit(lop.matrix(A, axis=1))
X = X.T
Y = Y.T
assert_allclose(T.times(X), (X @ A.T), atol=atol)
assert_allclose(T.trans(Y), (Y @ jnp.conjugate(A)), atol=atol)
|
124497
|
import os.path
import os
import sys
import math
import argparse
import time
import random
from collections import OrderedDict
import torch
import options.options as option
from utils import util
from data import create_dataloader, create_dataset
from models import create_model
from utils.logger import Logger, PrintLogger
from sampler import generate_code_samples
import numpy as np
def validate(val_loader, opt, model, current_step, epoch, logger):
print('---------- validation -------------')
start_time = time.time()
avg_psnr = 0.0
avg_lpips = 0.0
idx = 0
for val_data in val_loader:
idx += 1
img_name = os.path.splitext(os.path.basename(val_data['HR_path'][0]))[0]
img_dir = os.path.join(opt['path']['val_images'], img_name)
util.mkdir(img_dir)
tensor_type = torch.zeros if opt['train']['zero_code'] else torch.randn
code = model.gen_code(val_data['network_input'][0].shape[0],
val_data['network_input'][0].shape[2],
val_data['network_input'][0].shape[3],
tensor_type=tensor_type)
model.feed_data(val_data, code=code)
model.test()
visuals = model.get_current_visuals()
        # HR_pred: the predicted colorized image in RGB color space
        # HR: the original input in RGB color space
sr_img = util.tensor2img(visuals['HR_pred']) # uint8
gt_img = util.tensor2img(visuals['HR']) # uint8
# Save generated images for reference
save_img_path = os.path.join(img_dir, '{:s}_{:s}_{:d}.png'.format(opt['name'], img_name, current_step))
util.save_img(sr_img, save_img_path)
        # calculate PSNR
        avg_psnr += util.psnr(sr_img, gt_img)
avg_lpips += torch.sum(model.get_loss(level=-1))
if current_step == 0:
print('Saving the model at the end of iter {:d}.'.format(current_step))
model.save(current_step)
avg_psnr = avg_psnr / idx
avg_lpips = avg_lpips / idx
time_elapsed = time.time() - start_time
# Save to log
print_rlt = OrderedDict()
print_rlt['model'] = opt['model']
print_rlt['epoch'] = epoch
print_rlt['iters'] = current_step
print_rlt['time'] = time_elapsed
print_rlt['psnr'] = avg_psnr
if opt['train']['pixel_weight'] > 0:
print_rlt[opt['train']['pixel_criterion']] = avg_lpips
else:
print_rlt['lpips'] = avg_lpips
logger.print_format_results('val', print_rlt)
print('-----------------------------------')
def main():
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=True)
    util.mkdir_and_rename(opt['path']['experiments_root'])  # rename the old experiments folder if it exists
util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root' and
not key == 'pretrain_model_G'))
option.save(opt)
    opt = option.dict_to_nonedict(opt)  # convert to NoneDict, which returns None for missing keys
# print to file and std_out simultaneously
sys.stdout = PrintLogger(opt['path']['log'])
# random seed
seed = opt['train']['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
print("Random Seed: ", seed)
random.seed(seed)
torch.manual_seed(seed)
# LAB setup settings
print("Color output mode: ", util.color_output_mode)
print("AB range: ", util.AB_range)
# create train and val dataloader
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = create_dataset(dataset_opt)
train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size_per_month']))
print('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
num_months = int(opt['train']['num_months'])
num_days = int(opt['train']['num_days'])
total_iters = int(num_months * num_days)
print('Total epochs needed: {:d} for iters {:,d}'.format(num_months, total_iters))
train_loader = create_dataloader(train_set, dataset_opt)
batch_size_per_month = dataset_opt['batch_size_per_month']
batch_size_per_day = int(opt['datasets']['train']['batch_size_per_day'])
use_dci = opt['train']['use_dci']
inter_supervision = opt['train']['inter_supervision']
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(val_set, dataset_opt)
print('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'], len(val_set)))
else:
raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
assert train_loader is not None
# Create model
model = create_model(opt)
# create logger
logger = Logger(opt)
current_step = 0
start_time = time.time()
print('---------- Start training -------------')
validate(val_loader, opt, model, current_step, 0, logger)
for epoch in range(num_months):
for i, train_data in enumerate(train_loader):
# Sample the codes used for training of the month
if use_dci:
cur_month_code = generate_code_samples(model, train_data, opt)
else:
tensor_type = torch.zeros if opt['train']['zero_code'] else torch.randn
cur_month_code = model.gen_code(train_data['network_input'][0].shape[0],
train_data['network_input'][0].shape[2],
train_data['network_input'][0].shape[3],
tensor_type=tensor_type)
# clear projection matrix to save memory
model.clear_projection()
for j in range(num_days):
current_step += 1
cur_month_batch_size = min(batch_size_per_month, train_data['network_input'][0].shape[0])
# get the sliced data
cur_day_batch_start_idx = (j * batch_size_per_day) % cur_month_batch_size
cur_day_batch_end_idx = cur_day_batch_start_idx + batch_size_per_day
if cur_day_batch_end_idx > cur_month_batch_size:
cur_day_batch_idx = np.hstack((np.arange(cur_day_batch_start_idx, cur_month_batch_size),
np.arange(cur_day_batch_end_idx - cur_month_batch_size)))
else:
cur_day_batch_idx = slice(cur_day_batch_start_idx, cur_day_batch_end_idx)
cur_day_train_data = {key: val[cur_day_batch_idx] for key, val in train_data.items()}
code = [gen_code[cur_day_batch_idx] for gen_code in cur_month_code]
cur_day_train_data['network_input'] = []
for net_inp in range(len(train_data['network_input'])):
cur_day_train_data['network_input'].append(train_data['network_input'][net_inp][cur_day_batch_idx])
if 'rarity_masks' in train_data.keys():
cur_day_train_data['rarity_masks'] = []
for rar_msk in range(len(train_data['rarity_masks'])):
cur_day_train_data['rarity_masks'].append(
train_data['rarity_masks'][rar_msk][cur_day_batch_idx])
# training
model.feed_data(cur_day_train_data, code=code)
model.optimize_parameters(current_step, inter_supervision=inter_supervision)
time_elapsed = time.time() - start_time
start_time = time.time()
# log
if current_step % opt['logger']['print_freq'] == 0 or current_step == 1:
logs = model.get_current_log()
print_rlt = OrderedDict()
print_rlt['model'] = opt['model']
print_rlt['epoch'] = epoch
print_rlt['iters'] = current_step
print_rlt['time'] = time_elapsed
for k, v in logs.items():
print_rlt[k] = v
print_rlt['lr'] = model.get_current_learning_rate()
logger.print_format_results('train', print_rlt)
# save models
if current_step % opt['logger']['save_checkpoint_freq'] == 0:
print('Saving the model at the end of iter {:d}.'.format(current_step))
model.save(current_step)
# validation
if current_step % opt['train']['val_freq'] == 0:
validate(val_loader, opt, model, current_step, epoch, logger)
# update learning rate
model.update_learning_rate()
print('Saving the final model.')
model.save('latest')
print('End of training.')
if __name__ == '__main__':
main()
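# A typical invocation sketch (the option-file path below is a placeholder, not
# from the source):
#
#     python train.py -opt options/train/example.json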
|
124499
|
from PIL import Image, ImageDraw
img = Image.new("RGB", (100, 100), (88, 88, 88))
draw = ImageDraw.Draw(img)
if 0:
draw.line((0, 0) + img.size, fill="red")
draw.line((0, img.size[1], img.size[0], 0), fill="blue")
if 0:
draw.point((75, 50), fill="red")
draw.rectangle([20, 50, 30, 65], fill="green", outline="red", width=2)
if 0:
draw.text([20, 50], "Beer", fill="black")
img.save("z.png", "PNG")
|
124505
|
import numpy as np
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full((2, 3), 2)
print(a2)
# [[2 2 2]
# [2 2 2]]
print(np.block([a1, a2]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]]
print(np.block([[a1], [a2]]))
# [[1 1 1]
# [1 1 1]
# [2 2 2]
# [2 2 2]]
print(np.block([[a1, a2], [a2, a1]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [2 2 2 1 1 1]
# [2 2 2 1 1 1]]
print(np.block([[[a1]], [[a2]]]))
# [[[1 1 1]
# [1 1 1]]
#
# [[2 2 2]
# [2 2 2]]]
print(np.block([[[a1]], [[a2]]]).shape)
# (2, 2, 3)
a3 = np.full(6, 3)
print(a3)
# [3 3 3 3 3 3]
print(np.block([[a1, a2], [a3]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [3 3 3 3 3 3]]
# print(np.block([[a1, a2], a3]))
# ValueError: List depths are mismatched. First element was at depth 2, but there is an element at depth 1 (arrays[1])
# print(np.block([[a1, a2, a3]]))
# ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
124547
|
from tkinter import *
import vote_bot
from functools import partial
import threading
from utils import loadCredentials
def start(mail,password,option,delay,url):
#print(mail.get(),password.get(),url.get())
process = threading.Thread(target=vote_bot.start_bot,args=(int(option.get()),mail.get(),password.get(),url.get(),int(delay.get()),))
process.start()
if __name__ == '__main__':
cred = loadCredentials()
m = Tk()
m.geometry("230x300")
m.title("BBBot")
m.iconphoto(False, PhotoImage(file='logo.png'))
m.resizable(False,False)
usernameLabel = Label(m,text="Usuário")
usernameLabel.place(x=20,y=10)
usernameEntry = Entry(m,width=30)
usernameEntry.insert(0,cred['mail'])
usernameEntry.place(x=20,y=30)
passwordLabel = Label(m,text="<PASSWORD>ha")
passwordLabel.place(x=20,y=50)
passwordEntry = Entry(m,width=30,show="*")
passwordEntry.insert(0,cred['password'])
passwordEntry.place(x=20,y=70)
urlLabel = Label(m,text="URL votação")
urlLabel.place(x=20,y=90)
urlEntry = Entry(m,width=30)
urlEntry.insert(0,cred['URL'])
urlEntry.place(x=20,y=110)
delayLabel = Label(m,text="Tempo entre cliques. Ajuste de acordo")
delayLabel.place(x=10,y=130)
delayLabel2 = Label(m,text="com a velocidade da internet")
delayLabel2.place(x=10,y=150)
delayEntry = Scale(m,from_=1, to=5,resolution=0.1,orient=HORIZONTAL)
delayEntry.set(2.5)
delayEntry.place(x=55,y=170)
optionLabel = Label(m,text="Eliminado (1 a 3 de cima para baixo)")
optionLabel.place(x=20,y=210)
optionEntry = Entry(m,width=30)
optionEntry.insert(0,cred['option'])
optionEntry.place(x=20,y=230)
action = partial(start,usernameEntry,passwordEntry,optionEntry,delayEntry,urlEntry)
startButton = Button(m,text="Começar", command=action)
startButton.place(x=80,y=260)
m.mainloop()
|
124571
|
import pandas as pd
import numpy as np
import py_entitymatching as em
from .magellan_modified_feature_generation import get_features
#Given a CANDIDATE SET and the list of ACTUAL duplicates (duplicates_df),
#this function adds the 1/0 labels (column name = GOLD) to the candset dataframe
def add_labels_to_candset(duplicates_df, candset_df, ltable_df, rtable_df):
    # We overwrite the column names - that's okay, as duplicates_df is not used anywhere else.
    duplicates_df.columns = ["ltable_id", "rtable_id"]
    # Merge the two DataFrames on the common attributes. The indicator column 'gold'
    # takes three values: 'both', 'left_only', 'right_only'.
    df_with_gold = pd.merge(candset_df, duplicates_df, on=['ltable_id', 'rtable_id'], how='left', indicator='gold')
    # A pair present in both is a duplicate, so set gold to 1, and to 0 otherwise.
    df_with_gold['gold'] = np.where(df_with_gold.gold == 'both', 1, 0)
#This is to handle some Magellan issues
em.set_key(df_with_gold, '_id')
em.set_property(df_with_gold,'ltable', ltable_df)
em.set_property(df_with_gold,'rtable', rtable_df)
em.set_property(df_with_gold,'fk_ltable', "ltable_id")
em.set_property(df_with_gold,'fk_rtable', "rtable_id")
return df_with_gold
def get_features_for_type(column_type):
"""
Get features to be generated for a type
"""
# First get the look up table
lookup_table = dict()
# Features for type str_eq_1w
lookup_table['STR_EQ_1W'] = [('lev_dist'), ('lev_sim'), ('jaro'),
('jaro_winkler'),
('exact_match'),
('jaccard', 'qgm_3', 'qgm_3')]
# Features for type str_bt_1w_5w
lookup_table['STR_BT_1W_5W'] = [('jaccard', 'qgm_3', 'qgm_3'),
('cosine', 'dlm_dc0', 'dlm_dc0'),
('jaccard', 'dlm_dc0', 'dlm_dc0'),
('monge_elkan'), ('lev_dist'), ('lev_sim'),
('needleman_wunsch'),
                                     ('smith_waterman')]  # dlm_dc0 is the space-delimited tokenizer
# Features for type str_bt_5w_10w
lookup_table['STR_BT_5W_10W'] = [('jaccard', 'qgm_3', 'qgm_3'),
('cosine', 'dlm_dc0', 'dlm_dc0'),
('monge_elkan'), ('lev_dist'), ('lev_sim')]
# Features for type str_gt_10w
lookup_table['STR_GT_10W'] = [('jaccard', 'qgm_3', 'qgm_3'),
('cosine', 'dlm_dc0', 'dlm_dc0')]
# Features for NUMERIC type
lookup_table['NUM'] = [('exact_match'), ('abs_norm'), ('lev_dist'),
('lev_sim')]
# Features for BOOLEAN type
lookup_table['BOOL'] = [('exact_match')]
# Features for un determined type
lookup_table['UN_DETERMINED'] = []
# Based on the column type, return the feature functions that should be
# generated.
    # Use == rather than 'is': string identity is an implementation detail
    # of interning and must not be relied on for value comparison.
    if column_type == 'str_eq_1w':
        features = lookup_table['STR_EQ_1W']
    elif column_type == 'str_bt_1w_5w':
        features = lookup_table['STR_BT_1W_5W']
    elif column_type == 'str_bt_5w_10w':
        features = lookup_table['STR_BT_5W_10W']
    elif column_type == 'str_gt_10w':
        features = lookup_table['STR_GT_10W']
    elif column_type == 'numeric':
        features = lookup_table['NUM']
    elif column_type == 'boolean':
        features = lookup_table['BOOL']
    elif column_type == 'un_determined':
        features = lookup_table['UN_DETERMINED']
    else:
        raise TypeError('Unknown type')
return features
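# Usage sketch: note that the single-parenthesized entries in the lookup table are
# plain strings (parentheses without a trailing comma do not make a tuple), while the
# three-element entries are (sim_function, left_tokenizer, right_tokenizer) tuples.
#   get_features_for_type('numeric')  # -> ['exact_match', 'abs_norm', 'lev_dist', 'lev_sim']
#   get_features_for_type('boolean')  # -> ['exact_match']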
def extract_features(ltable_df, rtable_df, candset_df):
tokenizers = em.get_tokenizers_for_matching()
sim_functions = em.get_sim_funs_for_matching()
left_attr_types = em.get_attr_types(ltable_df)
right_attr_types = em.get_attr_types(rtable_df)
correspondences = em.get_attr_corres(ltable_df, rtable_df)
feature_dict_list = []
attribute_type_rank = {'boolean':1, 'numeric':2, 'str_eq_1w':3, 'str_bt_1w_5w':4, 'str_bt_5w_10w':5, 'str_gt_10w':6, 'un_determined':7}
for c in correspondences['corres']:
if left_attr_types[c[0]] != right_attr_types[c[1]]:
if attribute_type_rank[left_attr_types[c[0]]] < attribute_type_rank[right_attr_types[c[1]]]:
left_attr_types[c[0]] = right_attr_types[c[1]]
else:
right_attr_types[c[1]] = left_attr_types[c[0]]
feature_records = get_features(ltable_df,rtable_df,left_attr_types, right_attr_types, correspondences, tokenizers, sim_functions)
#Remove all features based on id - they are often useless
feature_records = feature_records[feature_records.left_attribute !='id']
feature_records.reset_index(inplace=True,drop=True)
distance_functions = ["lev_dist", "rdf"]
non_normalized_functions = ["aff", "sw", "swn", "nmw"]
keep_features = [True]*feature_records.shape[0]
for i in range(feature_records.shape[0]):
feature = feature_records.loc[i,"feature_name"]
for func in distance_functions + non_normalized_functions:
if func in feature:
keep_features[i] = False
feature_records = feature_records.loc[keep_features,:]
print("\n\nExtracting the full set of features:")
candset_features_df = em.extract_feature_vecs(candset_df,feature_table=feature_records,attrs_after='gold',show_progress=True,n_jobs=-1)
candset_features_df.fillna(value=0, inplace=True)
return candset_features_df
def extract_features_auto(ltable_df, rtable_df, candset_df):
feature_list = em.get_features_for_matching(ltable_df,rtable_df,validate_inferred_attr_types=False)
#Remove all features based on id - they are often useless
feature_list = feature_list[feature_list.left_attribute !='id']
print("\n\nExtracting the full set of features:")
candset_features_df = em.extract_feature_vecs(candset_df,feature_table=feature_list,attrs_after='gold',show_progress=True)
candset_features_df.fillna(value=0, inplace=True)
return candset_features_df
#High level function which just adds labels and the complete set of features to candset
def gather_features_and_labels(ltable_df, rtable_df, labels_df, candset_df):
labels_df.columns = ["ltable_id", "rtable_id"]
labels_df["ltable_id"] = labels_df["ltable_id"].astype(str)
labels_df["rtable_id"] = labels_df["rtable_id"].astype(str)
candset_df["ltable_id"] = candset_df["ltable_id"].astype(str)
candset_df["rtable_id"] = candset_df["rtable_id"].astype(str)
ltable_df["id"] = ltable_df["id"].astype(str)
rtable_df["id"] = rtable_df["id"].astype(str)
candset_df = add_labels_to_candset(labels_df, candset_df, ltable_df, rtable_df)
candset_features_df = extract_features(ltable_df, rtable_df, candset_df)
return candset_features_df
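# Hedged end-to-end sketch of the function above (all file names are hypothetical):
#   ltable = pd.read_csv('tableA.csv'); rtable = pd.read_csv('tableB.csv')
#   labels = pd.read_csv('matches.csv'); cand = pd.read_csv('candset.csv')
#   feats = gather_features_and_labels(ltable, rtable, labels, cand)
#   X, y = feats.drop(['gold', '_id', 'ltable_id', 'rtable_id'], axis=1), feats['gold']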
#Filter out unhelpful features (distance-based, non-normalized, and single-valued columns), keeping only similarity features
def gather_similarity_features(candset_features_df, avged = False):
distance_functions = ["lev_dist", "rdf"]
non_normalized_functions = ["aff", "sw", "swn", "nmw"]
cols = candset_features_df.columns
cols_to_be_dropped = []
for col in cols:
for func in distance_functions + non_normalized_functions:
if func in col:
cols_to_be_dropped.append(col)
break
candset_similarity_features_df = candset_features_df.drop(cols_to_be_dropped, axis=1)
similarity_features_df = candset_similarity_features_df.drop(['gold', '_id', 'ltable_id', 'rtable_id'], axis=1)
# Dropping columns that have only one value
cols_to_be_dropped = []
col_count_map = similarity_features_df.nunique()
for col in similarity_features_df.columns:
if col_count_map[col] == 1:
cols_to_be_dropped.append(col)
similarity_features_df = similarity_features_df.drop(cols_to_be_dropped, axis=1)
    if not avged:
return similarity_features_df
headers= similarity_features_df.columns.values
attributes = []
for h in headers:
arr = h.split("_")
attributes.append(arr[0])
attributes = set(attributes)
avged_df = pd.DataFrame()
for attribute in attributes:
#print("\nFeatures for attribute:", attribute)
matches = np.zeros(candset_features_df.shape[0])
counts = 0
for h in headers:
if attribute in h:
#print(h)
matches = np.add(matches, candset_features_df[h].values)
counts += 1
matches = matches/counts
avged_df[attribute] = matches
return avged_df
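# Sketch of the avged=True path on toy columns (column names are hypothetical): every
# similarity column sharing an attribute prefix is averaged element-wise into one column.
#   sims = pd.DataFrame({'name_jac_qgm_3_qgm_3': [1.0, 0.0], 'name_cos_dlm_dc0_dlm_dc0': [0.5, 0.5]})
#   # averaging the two 'name' columns -> avged_df['name'] == [0.75, 0.25]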
|
124585
|
from contrib.views import char_count
from django.contrib import admin
from django.urls import path, re_path
from django.views.generic import TemplateView
urlpatterns = [
path("admin/", admin.site.urls),
path("char_count", char_count, name="char_count"),
re_path(".*", TemplateView.as_view(template_name="index.html")),
]
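# Note: the catch-all re_path(".*") must stay last; Django matches URL patterns in
# order, so any pattern placed after it would be unreachable.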
|
124599
|
import os
from scipy.io import loadmat
import h5py
import numpy as np
from tools.getDistSqrtVar import getDistSqrtVar
from tools.getCNNFeature import getCNNFeature
from tools.get_ilsvrimdb import readAnnotation as ilsvr_readAnnotation
from tools.get_cubimdb import readAnnotation as cub_readAnnotation
from tools.get_vocimdb import readAnnotation as voc_readAnnotation
def x2P(idx_h, idx_w, layerID, convnet):
idx_h = idx_h[np.newaxis, :]
idx_w = idx_w[np.newaxis, :]
pHW = np.concatenate((idx_h, idx_w), axis=0)
Stride = convnet['targetStride'][layerID-1]
centerStart = convnet['targetCenter'][layerID-1]
pHW = centerStart + (pHW-1) * Stride
return pHW
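# Worked example of the mapping above with hypothetical layer constants: if
# targetStride = 16 and targetCenter = 8.5 for this layer, the 1-based feature-map
# cell (h, w) = (1, 1) maps to image point 8.5 + (1 - 1) * 16 = (8.5, 8.5), and
# (2, 3) maps to (8.5 + 16, 8.5 + 32) = (24.5, 40.5).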
def computeStability(root_path,dataset,dataset_path, truthpart_path, label_name, net, model, convnet, layerID, epochnum, partList, partRate, imdb_mean, selectPatternRatio, patchNumPerPattern):
if "ilsvrcanimalpart" in dataset_path:
objset = ilsvr_readAnnotation(dataset_path, label_name)
elif "vocpart" in dataset_path:
objset = voc_readAnnotation(root_path, dataset, dataset_path, label_name)
elif "cub200" in dataset_path:
objset = cub_readAnnotation(dataset_path, label_name)
imgNum = len(objset)
partNum = len(partList)
validImg = np.zeros(imgNum)
for i in range(partNum):
partID = partList[i]
file_path = os.path.join(truthpart_path,label_name, "truth_part"+str(0) + str(partID)+'.mat')
a = h5py.File(file_path,'r')
truth_center = a['truth']['pHW_center']
for img in range(imgNum):
            if isinstance(a[truth_center[img][0]][0], np.ndarray):
validImg[img] = True
patNum = round(512*partRate)
selectedPatternNum = round(patNum*selectPatternRatio)
pos = np.zeros((2,patNum,imgNum))
score = np.zeros((patNum, imgNum))
isFlip = False
for imgID in range(imgNum):
if(validImg[imgID]==0):
continue
x,I = getCNNFeature(dataset_path,objset[imgID],net,isFlip,imdb_mean, epochnum, model) # get after conv_mask feature
x = x[:,0:patNum,:,:]
x = np.squeeze(x,axis=0)
xh = x.shape[1]
v = np.max(x, axis=1)
idx = np.argmax(x, axis=1)
tmp = np.argmax(v, axis=1)
v = np.max(v, axis=1)
idx = idx.reshape(idx.shape[0] * idx.shape[1])
idx_h = idx[tmp + np.array(range(0, patNum)) * xh] # idx_h.shape=(patNum,)
idx_w = tmp # idx_w.shape=(patNum,)
theScore = v # v.shape=(patNum,)
thePos = x2P(idx_h,idx_w,layerID,convnet)
pos[:,:,imgID] = thePos
score[:,imgID] = theScore
ih = I.shape[0]
iw = I.shape[1]
    distSqrtVar = getDistSqrtVar(truthpart_path, pos, score, patchNumPerPattern, partList, label_name)
    # keep only the valid (non-NaN) per-pattern deviations, sorted ascending
    distSqrtVar = np.sort(distSqrtVar[np.isnan(distSqrtVar) == 0])
    # stability: mean deviation of the best-ranked patterns, normalized by the image diagonal
    stability = np.mean(distSqrtVar[0:min(selectedPatternNum, len(distSqrtVar))])/np.sqrt(np.power(ih,2)+np.power(iw,2))
return stability
|
124633
|
import tempfile
import unittest
from pyspark import Row
from sourced.ml.models import DocumentFrequencies
from sourced.ml.tests import create_spark_for_test
from sourced.ml.transformers import Indexer
class IndexerTests(unittest.TestCase):
def setUp(self):
data = [Row(to_index="to_index%d" % i, value=i) for i in range(10)]
self.data = data
self.session = create_spark_for_test()
self.data_rdd = self.session.sparkContext \
.parallelize(range(len(data))) \
.map(lambda x: data[x])
def test_call(self):
indexer = Indexer("to_index")
res = indexer(self.data_rdd)
values = indexer.values()
data_reverse = res \
.map(lambda x: Row(to_index=values[x.to_index], value=x.value)) \
.collect()
self.assertEqual(self.data, data_reverse)
def test_save_load(self):
indexer = Indexer("to_index")
res = indexer(self.data_rdd)
with tempfile.NamedTemporaryFile(suffix="-index.asdf") as tmp:
cached_index_path = tmp.name
indexer.save_index(cached_index_path)
docfreq = DocumentFrequencies().load(source=cached_index_path)
document_index = {key: int(val) for (key, val) in docfreq}
indexer = Indexer("to_index", column2id=document_index)
self.assertEqual(res.collect(), indexer(self.data_rdd).collect())
if __name__ == "__main__":
unittest.main()
|
124635
|
class RenderedView(object):
def __init__(self, view_file: str, data):
self.view_file = view_file
self.data = data
|
124655
|
description = 'Email and SMS notifiers'
group = 'lowlevel'
devices = dict(
email = device('nicos.devices.notifiers.Mailer',
mailserver = 'mailhost.frm2.tum.de',
sender = '<EMAIL>',
copies = [
('<EMAIL>', 'all'),
('<EMAIL>', 'all'),
('<EMAIL>', 'all'),
('<EMAIL>', 'all'),
],
subject = '[KWS-1]',
),
smser = device('nicos.devices.notifiers.SMSer',
server = 'triton.admin.frm2',
receivers = [],
),
)
|
124664
|
from . import numpy_ndarray_as
def random(size, nulls=False):
"""Return random xnd.xnd instance of 64 bit floats.
"""
import xnd
import numpy as np
r = numpy_ndarray_as.random(size, nulls=nulls)
if nulls:
xr = xnd.xnd(r.tolist(), dtype='?float64')
for i in np.where(np.isnan(r))[0]:
xr[i] = None
return xr
return xnd.xnd(r.tolist(), dtype='float64')
def numpy_ndarray(xd_arr):
"""Return numpy.ndarray view of a xnd.xnd
"""
import numpy as np
if not xd_arr.dtype.isoptional():
return np.array(xd_arr, copy=False)
raise NotImplementedError(
'numpy.ndarray view of xnd.xnd with optional values')
def pandas_series(xd_arr):
"""Return pandas.Series view of a xnd.xnd
"""
import numpy as np
import pandas as pd
if not xd_arr.dtype.isoptional():
return pd.Series(np.array(xd_arr, copy=False), copy=False)
raise NotImplementedError(
'pandas.Series view of xnd.xnd with optional values')
def pyarrow_array(xd_arr):
"""Return pyarrow.Array view of a xnd.xnd
"""
import pyarrow as pa
if not xd_arr.dtype.isoptional():
pa_buf = pa.py_buffer(memoryview(xd_arr))
return pa.Array.from_buffers(
pa.from_numpy_dtype(str(xd_arr.dtype)),
xd_arr.type.datasize//xd_arr.type.itemsize,
[None, pa_buf])
raise NotImplementedError(
'pyarrow.Array view of xnd.xnd with optional values')
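if __name__ == '__main__':
    # Minimal usage sketch, assuming the xnd, numpy and pandas packages are installed
    # and that numpy_ndarray_as.random mirrors this module's random() signature.
    xr = random(5, nulls=True)    # optional '?float64' container, None where NaN fell
    print(xr)
    dense = random(5)             # dense 'float64' container
    print(numpy_ndarray(dense))   # zero-copy numpy view of the dense case
    print(pandas_series(dense))   # zero-copy pandas view of the dense case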
|
124667
|
import unittest
import socket
import test_data
import time
from lxml import etree
import uuid
from common_testing_tools import TCPClient
class TCPServiceTests(unittest.TestCase):
def setUp(self):
""" setup method to establish two sockets.
this method is run prior to any other tests running and will create the basic sockets
required for further testing.
"""
self.client_socket_a = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket_b = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def test_old_clientinformation_sent(self):
""" this method tests that most recent geo information is shared upon connection of new client
test procedure:
client A conn -> FTS
client A send geo update -> FTS
client B conn -> FTS
client B <-updated client A geo Information FTS
"""
client_a_object = TCPClient(ip='127.0.0.1', port=15777) # establish client A connection
time.sleep(1)
cot = test_data.TestCoTClient(uid= client_a_object.clientObj.uid).generate_cot()
client_a_object.send_specific_data(cot=cot)
client_b_object = TCPClient(ip='127.0.0.1', port=15777)
data = client_b_object.receive_specific_data(10000)
self.assertIn(cot, data)
def test_clientinformation_is_not_sent_issue(self):
""" this method tests that client data is not resent after disconnections
test procedure:
client A conn-> FTS
client A clientinfo-> FTS
client A close-> FTS
client B conn-> FTS
client B clientinfo-> FTS
client B <-otherclientinfo FTS
client B otherclientinfo != clientAinfo
client B close-> FTS
"""
client_a_object = self.connect_client_to_server(sock = self.client_socket_a, ip = '127.0.0.1', port = 15777, uid = '245397c8-69f7-4f68-979e-685ff7ee1451') # establish client A connection
time.sleep(1)
self.client_socket_a.close()
time.sleep(5) # if this delay is reduced the test fails as FTS doesn't have time to process the changes
client_b_object = self.connect_client_to_server(sock = self.client_socket_b, ip = '127.0.0.1', port = 15777) # establish client B connection
time.sleep(1)
# receive data and assert client_a_data not in resent data
start = time.time()
        delay = 3  # 3 second receive window
while time.time() < start+delay:
data = b''
self.client_socket_b.settimeout(0.1)
while b'</event>' not in data:
try:
data += self.client_socket_b.recv(1)
except socket.timeout:
break
if data == b'':
break
cot = etree.fromstring(data)
self.assertNotEqual(client_a_object.uid, cot.get('uid'))
self.client_socket_b.close() # disconnect final socket
def test_simple_client_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 15777))
client_object = test_data.TestCoTClient()
sock.send(client_object.generate_cot())
sock.close()
    def connect_client_to_server(self, sock: socket.socket = None, ip: str = '127.0.0.1', port: int = 15777, uid: str = None) -> test_data.TestCoTClient:
        """this method is used to connect a client to the server and send a basic connection message.
        The socket and uid defaults are created per call: Python evaluates default
        arguments once at definition time, so a socket() or uuid4() placed in the
        signature would be shared and frozen across all calls.
        """
        if sock is None:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if uid is None:
            uid = str(uuid.uuid4())
        sock.connect((ip, port))
        client_object = test_data.TestCoTClient(uid=uid)
        sock.send(client_object.generate_cot())
        return client_object
if __name__ == '__main__':
unittest.main()
|
124669
|
from doctest import testmod
import unittest
from julius import resample, fftconv, lowpass, bands, utils
class DocStringTest(unittest.TestCase):
def test_resample(self):
self.assertEqual(testmod(resample).failed, 0)
def test_fftconv(self):
self.assertEqual(testmod(fftconv).failed, 0)
def test_lowpass(self):
self.assertEqual(testmod(lowpass).failed, 0)
def test_bands(self):
self.assertEqual(testmod(bands).failed, 0)
def test_utils(self):
self.assertEqual(testmod(utils).failed, 0)
|
124701
|
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
start_time = time.time()
options = Options()
options.add_argument("--headless")
options.add_argument("--disable-gpu")
options.add_argument("--disable-extensions")
#driver = webdriver.Chrome(executable_path=r'/home/chromedriver/chromedriver',options=options)
driver = webdriver.Chrome(options=options)
params = {'behavior': 'allow', 'downloadPath': '/home/furas/projekty'}
driver.execute_cdp_cmd('Page.setDownloadBehavior', params)
# downloads are now enabled for this driver instance
driver.get('https://www.macrotrends.net/1476/copper-prices-historical-chart-data')
print('[INFO] loaded', time.time() - start_time)
time.sleep(5)
iframe = driver.find_element_by_xpath("//iframe[@id='chart_iframe']")
driver.switch_to.frame(iframe)
print('[INFO] switched', time.time() - start_time)
xpath = "//a[text()='All Years']"
driver.find_element_by_xpath(xpath).click()
xpath = "//button[@id='dataDownload']"
driver.find_element_by_xpath(xpath).click()
print('[INFO] clicked', time.time() - start_time)
time.sleep(10)
print('[INFO] closing', time.time() - start_time)
driver.close()
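# The headless-download pattern used above, in isolation: headless Chrome blocks
# downloads unless Page.setDownloadBehavior is sent over the DevTools protocol
# before the click (the path below is illustrative):
#   driver.execute_cdp_cmd('Page.setDownloadBehavior',
#                          {'behavior': 'allow', 'downloadPath': '/tmp/downloads'})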
|
124713
|
from tg_bot import bot
from app.constants import (
emoji, all_stations, personalization_answer, select_home_station,
select_univer_station
)
from app import new_functions as nf, db
from flask import g
import telebot_login
from tg_bot.keyboards import stations_keyboard, personalization_keyboard
# Personalization message
@bot.message_handler(
func=lambda mess: mess.text.title() == "Персонализация",
content_types=["text"]
)
@telebot_login.login_required_message
def personalisation_handler(message):
user = g.current_tbot_user
home_title = nf.get_key_by_value(all_stations, user.home_station_code)
univer_title = nf.get_key_by_value(all_stations, user.univer_station_code)
answer = personalization_answer.format(home_title, univer_title)
bot.send_message(
chat_id=user.tg_id,
text=answer,
reply_markup=personalization_keyboard(),
parse_mode="HTML"
)
# Choose station type callback
@bot.callback_query_handler(
func=lambda call_back: call_back.data == "Домашняя"
)
@bot.callback_query_handler(
func=lambda call_back: call_back.data == "Университетская"
)
@telebot_login.login_required_callback
def home_station_handler(call_back):
user = g.current_tbot_user
if call_back.data == "Домашняя":
answer = select_home_station
else:
answer = select_univer_station
bot.edit_message_text(
text=answer,
chat_id=user.tg_id,
message_id=call_back.message.message_id,
reply_markup=stations_keyboard()
)
# Choose station callback
@bot.callback_query_handler(
func=lambda call_back: call_back.message.text == select_home_station
)
@bot.callback_query_handler(
func=lambda call_back: call_back.message.text == select_univer_station
)
@telebot_login.login_required_callback
def change_home_station_handler(call_back):
user = g.current_tbot_user
is_both_changed = False
if "домашнюю" in call_back.message.text:
if call_back.data == user.univer_station_code:
user.univer_station_code = user.home_station_code
is_both_changed = True
user.home_station_code = call_back.data
type_station = "Домашняя"
else:
if call_back.data == user.home_station_code:
user.home_station_code = user.univer_station_code
is_both_changed = True
user.univer_station_code = call_back.data
type_station = "Университетская"
answer = "{0} станция изменена на <b>{1}</b>\n".format(
type_station,
nf.get_key_by_value(all_stations, call_back.data)
)
if is_both_changed:
inline_answer = "{0} Изменены обе станции!".format(emoji["warning"])
bot.answer_callback_query(
callback_query_id=call_back.id,
text=inline_answer,
show_alert=True
)
db.session.commit()
bot.edit_message_text(
text=answer,
chat_id=user.tg_id,
message_id=call_back.message.message_id,
parse_mode="HTML"
)
|
124719
|
import math
try:
from ulab import scipy, numpy as np
except ImportError:
import scipy
import numpy as np
A = np.array([[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]])
b = np.array([4, 2, 4, 2])
# forward substitution
result = scipy.linalg.solve_triangular(A, b, lower=True)
ref_result = np.array([1.333333333, -0.666666666, 2.666666666, -0.083333333])
for i in range(4):
print(math.isclose(result[i], ref_result[i], rel_tol=1E-6, abs_tol=1E-6))
# backward substitution
result = scipy.linalg.solve_triangular(A, b, lower=False)
ref_result = np.array([-1.166666666, 1.75, 3.0, 0.25])
for i in range(4):
print(math.isclose(result[i], ref_result[i], rel_tol=1E-6, abs_tol=1E-6))
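# Hand check of the forward-substitution result (only A's lower triangle is used):
#   x1 = 4/3                          = 1.333333...
#   x2 = (2 - 2*x1) / 1               = -0.666666...
#   x3 = (4 - 1*x1) / 1               = 2.666666...
#   x4 = (2 - x1 - 2*x2 - x3) / 8     = -0.083333...
# matching ref_result for the lower=True case above.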
|
124721
|
from .client import MarathonClient
from .models import MarathonResource, MarathonApp, MarathonTask, MarathonConstraint
from .exceptions import MarathonError, MarathonHttpError, NotFoundError, InvalidChoiceError
from .util import get_log
log = get_log()
|
124724
|
class Dictionary(object):
def __init__(self):
self.my_dict = {}
def look(self, key):
return self.my_dict.get(key, "Can't find entry for {}".format(key))
def newentry(self, key, value):
""" new_entry == PEP8 (forced by Codewars) """
self.my_dict[key] = value
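if __name__ == '__main__':
    # Tiny usage sketch of the kata API above:
    d = Dictionary()
    d.newentry('Apple', 'A fruit')
    print(d.look('Apple'))  # -> A fruit
    print(d.look('Pear'))   # -> Can't find entry for Pear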
|