| id | content |
|---|---|
11537182
|
from jinja2 import Environment, PackageLoader
from .. import constants
from . import utils
class HTMLLayout(object):
def __init__(self, template_path='templates/viz/layout.html.j2'):
self.srcdoc = None
self._env = Environment(
loader=PackageLoader('cartoframes', 'assets'),
autoescape=True
)
self._env.filters['quot'] = utils.quote_filter
self._env.filters['iframe_size'] = utils.iframe_size_filter
self._env.filters['clear_none'] = utils.clear_none_filter
self.html = None
self._template = self._env.get_template(template_path)
def set_content(self, maps, size=None, show_info=None, theme=None, _carto_vl_path=None,
_airship_path=None, title='CARTOframes', is_embed=False,
is_static=False, map_height=None, full_height=False, n_size=None, m_size=None):
self.html = self._parse_html_content(
maps, size, show_info, theme, _carto_vl_path, _airship_path, title,
is_embed, is_static, map_height, full_height, n_size, m_size)
def _parse_html_content(self, maps, size, show_info=None, theme=None, _carto_vl_path=None,
_airship_path=None, title=None, is_embed=False, is_static=False,
map_height=None, full_height=False, n_size=None, m_size=None):
if _carto_vl_path is None:
carto_vl_path = constants.CARTO_VL_URL
else:
carto_vl_path = _carto_vl_path + constants.CARTO_VL_DEV
if _airship_path is None:
airship_components_path = constants.AIRSHIP_COMPONENTS_URL
airship_bridge_path = constants.AIRSHIP_BRIDGE_URL
airship_module_path = constants.AIRSHIP_MODULE_URL
airship_styles_path = constants.AIRSHIP_STYLES_URL
airship_icons_path = constants.AIRSHIP_ICONS_URL
else:
airship_components_path = _airship_path + constants.AIRSHIP_COMPONENTS_DEV
airship_bridge_path = _airship_path + constants.AIRSHIP_BRIDGE_DEV
airship_module_path = _airship_path + constants.AIRSHIP_MODULE_DEV
airship_styles_path = _airship_path + constants.AIRSHIP_STYLES_DEV
airship_icons_path = _airship_path + constants.AIRSHIP_ICONS_DEV
return self._template.render(
width=size[0] if size is not None else None,
height=size[1] if size is not None else None,
maps=maps,
show_info=show_info,
theme=theme,
carto_vl_path=carto_vl_path,
airship_components_path=airship_components_path,
airship_module_path=airship_module_path,
airship_bridge_path=airship_bridge_path,
airship_styles_path=airship_styles_path,
airship_icons_path=airship_icons_path,
title=title,
is_embed=is_embed,
is_static=is_static,
map_height=map_height,
full_height=full_height,
n=n_size,
m=m_size
)
def _repr_html_(self):
return self.html
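# A minimal usage sketch (comments only; assumes the cartoframes package with
# its bundled 'templates/viz/layout.html.j2' asset, and a `maps` list built
# elsewhere by cartoframes.viz):
#
#   layout = HTMLLayout()
#   layout.set_content(maps=maps, size=(1024, 632), title='My layout')
#   layout.html  # rendered HTML string, also returned by _repr_html_()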
|
11537221
|
import random
import os
from glob import glob
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.python.training.moving_averages import assign_moving_average
import tensorflow.contrib.layers as ly
from modeling.model import Model
from modeling.loss import Loss
from dataset.parse import parse_trainset, parse_testset
import argparse
parser = argparse.ArgumentParser(description='Model training.')
# experiment
parser.add_argument('--date', type=str, default='0817')
parser.add_argument('--exp-index', type=int, default=2)
parser.add_argument('--f', action='store_true', default=False)
# gpu
parser.add_argument('--start-gpu', type=int, default=0)
parser.add_argument('--num-gpu', type=int, default=2)
# dataset
parser.add_argument('--trainset-path', type=str, default='./dataset/trainset.tfr')
parser.add_argument('--testset-path', type=str, default='./dataset/testset.tfr')
parser.add_argument('--trainset-length', type=int, default=5041)
parser.add_argument('--testset-length', type=int, default=2000) # we flip every image in testset
# training
parser.add_argument('--base-lr', type=float, default=0.0001)
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--weight-decay', type=float, default=0.00002)
parser.add_argument('--epoch', type=int, default=1500)
parser.add_argument('--lr-decay-epoch', type=int, default=1000)
parser.add_argument('--critic-steps', type=int, default=3)
parser.add_argument('--warmup-steps', type=int, default=1000)
parser.add_argument('--workers', type=int, default=2)
parser.add_argument('--clip-gradient', action='store_true', default=False)
parser.add_argument('--clip-gradient-value', type=float, default=0.1)
# modeling
parser.add_argument('--beta', type=float, default=0.9)
parser.add_argument('--lambda-gp', type=float, default=10)
parser.add_argument('--lambda-rec', type=float, default=0.998)
# checkpoint
parser.add_argument('--log-path', type=str, default='./logs/')
parser.add_argument('--checkpoint-path', type=str, default=None)
parser.add_argument('--resume-step', type=int, default=0)
args = parser.parse_args()
# prepare path
base_path = args.log_path
exp_date = args.date
if exp_date is None:
print('Exp date error!')
import sys
sys.exit()
exp_name = exp_date + '/' + str(args.exp_index)
print("Start Exp:", exp_name)
output_path = base_path + exp_name + '/'
model_path = output_path + 'models/'
tensorboard_path = output_path + 'log/'
result_path = output_path + 'results/'
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(tensorboard_path):
os.makedirs(tensorboard_path)
if not os.path.exists(result_path):
os.makedirs(result_path)
elif not args.f:
if args.checkpoint_path is None:
print('Exp exist!')
import sys
sys.exit()
else:
import shutil
shutil.rmtree(model_path)
os.makedirs(model_path)
shutil.rmtree(tensorboard_path)
os.makedirs(tensorboard_path)
# prepare gpu
num_gpu = args.num_gpu
start_gpu = args.start_gpu
gpu_id = str(start_gpu)
for i in range(num_gpu - 1):
gpu_id = gpu_id + ',' + str(start_gpu + i + 1)
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
args.batch_size_per_gpu = int(args.batch_size / args.num_gpu)
model = Model(args)
loss = Loss(args)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
print("Start building model...")
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
learning_rate = tf.placeholder(tf.float32, [])
lambda_rec = tf.placeholder(tf.float32, [])
train_op_G = tf.train.AdamOptimizer(
learning_rate=learning_rate, beta1=0.5, beta2=0.9)
train_op_D = tf.train.AdamOptimizer(
learning_rate=learning_rate, beta1=0.5, beta2=0.9)
trainset = tf.data.TFRecordDataset(filenames=[args.trainset_path])
trainset = trainset.shuffle(args.trainset_length)
trainset = trainset.map(parse_trainset, num_parallel_calls=args.workers)
trainset = trainset.batch(args.batch_size).repeat()
train_iterator = trainset.make_one_shot_iterator()
train_im = train_iterator.get_next()
testset = tf.data.TFRecordDataset(filenames=[args.testset_path])
testset = testset.map(parse_testset, num_parallel_calls=args.workers)
testset = testset.batch(args.batch_size).repeat()
test_iterator = testset.make_one_shot_iterator()
test_im = test_iterator.get_next()
print('build model on gpu tower')
models = []
params = []
for gpu_id in range(num_gpu):
with tf.device('/gpu:%d' % gpu_id):
print('tower_%d' % gpu_id)
with tf.name_scope('tower_%d' % gpu_id):
with tf.variable_scope('cpu_variables', reuse=gpu_id > 0):
groundtruth = tf.placeholder(
tf.float32, [args.batch_size_per_gpu, 128, 256, 3], name='groundtruth')
left_gt = tf.slice(groundtruth, [0, 0, 0, 0], [args.batch_size_per_gpu, 128, 128, 3])
reconstruction_ori, reconstruction = model.build_reconstruction(left_gt)
right_recon = tf.slice(reconstruction, [0, 0, 128, 0], [args.batch_size_per_gpu, 128, 128, 3])
loss_rec = loss.masked_reconstruction_loss(groundtruth, reconstruction)
loss_adv_G, loss_adv_D = loss.global_and_local_adv_loss(model, groundtruth, reconstruction)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_G = loss_adv_G * (1 - lambda_rec) + loss_rec * lambda_rec + sum(reg_losses)
loss_D = loss_adv_D
var_G = list(filter(lambda x: x.name.startswith(
'cpu_variables/GEN'), tf.trainable_variables()))
var_D = list(filter(lambda x: x.name.startswith(
'cpu_variables/DIS'), tf.trainable_variables()))
grad_g = train_op_G.compute_gradients(
loss_G, var_list=var_G)
grad_d = train_op_D.compute_gradients(
loss_D, var_list=var_D)
models.append((grad_g, grad_d, loss_G, loss_D, loss_adv_G, loss_rec, reconstruction))
params.append(groundtruth)
print('Done.')
print('Start reducing towers on cpu...')
grad_gs, grad_ds, loss_Gs, loss_Ds, loss_adv_Gs, loss_recs, reconstructions = zip(*models)
groundtruths = params
with tf.device('/gpu:0'):
aver_loss_g = tf.reduce_mean(loss_Gs)
aver_loss_d = tf.reduce_mean(loss_Ds)
aver_loss_ag = tf.reduce_mean(loss_adv_Gs)
aver_loss_rec = tf.reduce_mean(loss_recs)
train_op_G = train_op_G.apply_gradients(
loss.average_gradients(grad_gs))
train_op_D = train_op_D.apply_gradients(
loss.average_gradients(grad_ds))
groundtruths = tf.concat(groundtruths, axis=0)
reconstructions = tf.concat(reconstructions, axis=0)
tf.summary.scalar('loss_g', aver_loss_g)
tf.summary.scalar('loss_d', aver_loss_d)
tf.summary.scalar('loss_ag', aver_loss_ag)
tf.summary.scalar('loss_rec', aver_loss_rec)
tf.summary.image('groundtruth', groundtruths, 2)
tf.summary.image('reconstruction', reconstructions, 2)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
print('Done.')
iters = 0
saver = tf.train.Saver(max_to_keep=5)
if args.checkpoint_path is None:
sess.run(tf.global_variables_initializer())
else:
print('Start loading checkpoint...')
saver.restore(sess, args.checkpoint_path)
iters = args.resume_step
print('Done.')
print('Start training...')
for epoch in range(args.epoch):
if epoch > args.lr_decay_epoch:
learning_rate_val = args.base_lr / 10
else:
learning_rate_val = args.base_lr
for start, end in zip(
range(0, args.trainset_length, args.batch_size),
range(args.batch_size, args.trainset_length, args.batch_size)):
if iters == 0 and args.checkpoint_path is None:
print('Start pretraining G!')
for t in range(args.warmup_steps):
if t % 20 == 0:
print("Step:", t)
images = sess.run([train_im])[0]
if len(images) < args.batch_size:
images = sess.run([train_im])[0]
inp_dict = {}
inp_dict = loss.feed_all_gpu(inp_dict, args.num_gpu, args.batch_size_per_gpu, images, params)
inp_dict[learning_rate] = learning_rate_val
inp_dict[lambda_rec] = 1.
_ = sess.run(
[train_op_G],
feed_dict=inp_dict)
print('Pre-train G Done!')
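# Critic schedule (mirrors the common WGAN-GP recipe): train the discriminator
# for 30 steps during the first 25 iterations of a fresh run and on every 500th
# iteration, otherwise for args.critic_steps steps per generator update.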
if (iters < 25 and args.checkpoint_path is None) or iters % 500 == 0:
n_cir = 30
else:
n_cir = args.critic_steps
for t in range(n_cir):
images = sess.run([train_im])[0]
if len(images) < args.batch_size:
images = sess.run([train_im])[0]
inp_dict = {}
inp_dict = loss.feed_all_gpu(inp_dict, args.num_gpu, args.batch_size_per_gpu, images, params)
inp_dict[learning_rate] = learning_rate_val
inp_dict[lambda_rec] = args.lambda_rec
_ = sess.run(
[train_op_D],
feed_dict=inp_dict)
if iters % 50 == 0:
_, g_val, ag_val, rs, d_val = sess.run(
[train_op_G, aver_loss_g, aver_loss_ag, merged, aver_loss_d],
feed_dict=inp_dict)
writer.add_summary(rs, iters)
else:
_, g_val, ag_val, d_val = sess.run(
[train_op_G, aver_loss_g, aver_loss_ag, aver_loss_d],
feed_dict=inp_dict)
if iters % 20 == 0:
print("Iter:", iters, 'loss_g:', g_val, 'loss_d:', d_val, 'loss_adv_g:', ag_val)
iters += 1
saver.save(sess, model_path, global_step=iters)
# testing
if epoch > 0:
ii = 0
g_vals = 0
d_vals = 0
ag_vals = 0
n_batchs = 0
for _ in range(int(args.testset_length / args.batch_size)):
test_oris = sess.run([test_im])[0]
if len(test_oris) < args.batch_size:
test_oris = sess.run([test_im])[0]
inp_dict = {}
inp_dict = loss.feed_all_gpu(inp_dict, args.num_gpu, args.batch_size_per_gpu, test_oris, params)
inp_dict[learning_rate] = learning_rate_val
inp_dict[lambda_rec] = args.lambda_rec
reconstruction_vals, g_val, d_val, ag_val = sess.run(
[reconstruction, aver_loss_g, aver_loss_d, aver_loss_ag],
feed_dict=inp_dict)
g_vals += g_val
d_vals += d_val
ag_vals += ag_val
n_batchs += 1
# Save test result every 100 epochs
if epoch % 100 == 0:
for rec_val, test_ori in zip(reconstruction_vals, test_oris):
rec_hid = (255. * (rec_val + 1) /
2.).astype(np.uint8)
test_ori = (255. * (test_ori + 1) /
2.).astype(np.uint8)
Image.fromarray(rec_hid).save(os.path.join(
result_path, 'img_' + str(ii) + '.' + str(int(iters / 100)) + '.jpg'))
if epoch == 0:
Image.fromarray(test_ori).save(
os.path.join(result_path, 'img_' + str(ii) + '.' + str(int(iters / 100)) + '.ori.jpg'))
ii += 1
g_vals /= n_batchs
d_vals /= n_batchs
ag_vals /= n_batchs
summary = tf.Summary()
summary.value.add(tag='eval/g',
simple_value=g_vals)
summary.value.add(tag='eval/d',
simple_value=d_vals)
summary.value.add(tag='eval/ag',
simple_value=ag_vals)
writer.add_summary(summary, iters)
print("=========================================================================")
print('loss_g:', g_vals, 'loss_d:', d_vals, 'loss_adv_g:', ag_vals)
print("=========================================================================")
if np.isnan(reconstruction_vals.min()) or np.isnan(reconstruction_vals.max()):
print("NaN detected!!")
|
11537223
|
class Solution:
# def myPow(self, x, n):
# """
# :type x: float
# :type n: int
# :rtype: float
# """
# if n == 0:
# return 1
# temp = pow(x, n / 2)
# if n % 2 == 0:
# return temp * temp
# else:
# return temp * temp * x
def myPow(self, x, n):
# https://leetcode.com/discuss/93413/iterative-log-n-solution-with-clear-explanation
# 9 = 2^3 + 2^0 = 0b1001
# x^9 = x^(2^3) * x^(2^0)
# multiply by x^(2^i) whenever bit i of n is 1
if n == 0:
return 1
res, curr = 1, abs(n)
while curr > 0:
if curr & 1 == 1:
res *= x
curr >>= 1
x *= x
if n < 0:
return 1 / res
return res
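# A quick worked check of the bit-by-bit loop above (hypothetical driver, not
# part of the original kata file):
if __name__ == "__main__":
    s = Solution()
    print(s.myPow(2.0, 9))   # 512.0 -- 9 = 0b1001, so the result is x^(2^0) * x^(2^3)
    print(s.myPow(2.0, -2))  # 0.25  -- negative exponents return 1 / res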
|
11537244
|
import os
from pages.event_page.fixtures.helpers.create_fixture import create_fixture
import pages.event_page.fixtures.helpers.components as components
# An event page with only a title
def title():
page_data = {
"imported_revision_id": None,
"live": False,
"parent": components.home(),
"coa_global": False,
"title": "Event page with title",
"slug": "Event-page-with-title",
}
return create_fixture(page_data, os.path.basename(__file__))
|
11537260
|
import numpy as np
from spikeinterface.core.job_tools import ChunkRecordingExecutor, _shared_job_kwargs_doc
from spikeinterface.toolkit import get_noise_levels, get_channel_distances
def localize_peaks(recording, peaks, method='center_of_mass',
local_radius_um=150, ms_before=0.3, ms_after=0.6,
**job_kwargs):
"""
Localize peaks (spikes) in 2D or 3D, depending on the probe.ndim of the recording.
Parameters
----------
recording: RecordingExtractor
The recording extractor object
peaks: numpy array
Peak vector given by detect_peaks() in the "compact_numpy" form.
method: str
Method to be used ('center_of_mass')
local_radius_um: float
Radius in micrometers used to build the channel neighborhood
around the peak
ms_before: float
The length of the window before the peak, in milliseconds
ms_after: float
The length of the window after the peak, in milliseconds
{}
Returns
-------
peak_locations: np.array
Array with estimated x-y location for each spike
"""
assert method in ('center_of_mass',)
# find channel neighbours
assert local_radius_um is not None
channel_distance = get_channel_distances(recording)
neighbours_mask = channel_distance < local_radius_um
nbefore = int(ms_before * recording.get_sampling_frequency() / 1000.)
nafter = int(ms_after * recording.get_sampling_frequency() / 1000.)
contact_locations = recording.get_probe().contact_positions
# TODO
# make a memmap for peaks to avoid serialization
# and run
func = _localize_peaks_chunk
init_func = _init_worker_localize_peaks
init_args = (recording.to_dict(), peaks, method, nbefore, nafter, neighbours_mask, contact_locations)
processor = ChunkRecordingExecutor(recording, func, init_func, init_args, handle_returns=True,
job_name='localize peaks', **job_kwargs)
peak_locations = processor.run()
peak_locations = np.concatenate(peak_locations)
return peak_locations
localize_peaks.__doc__ = localize_peaks.__doc__.format(_shared_job_kwargs_doc)
def _init_worker_localize_peaks(recording, peaks, method, nbefore, nafter, neighbours_mask, contact_locations):
# create a local dict per worker
worker_ctx = {}
if isinstance(recording, dict):
from spikeinterface.core import load_extractor
recording = load_extractor(recording)
worker_ctx['recording'] = recording
worker_ctx['peaks'] = peaks
worker_ctx['method'] = method
worker_ctx['nbefore'] = nbefore
worker_ctx['nafter'] = nafter
worker_ctx['neighbours_mask'] = neighbours_mask
worker_ctx['contact_locations'] = contact_locations
return worker_ctx
def _localize_peaks_chunk(segment_index, start_frame, end_frame, worker_ctx):
# recover variables of the worker
recording = worker_ctx['recording']
peaks = worker_ctx['peaks']
method = worker_ctx['method']
nbefore = worker_ctx['nbefore']
nafter = worker_ctx['nafter']
neighbours_mask = worker_ctx['neighbours_mask']
contact_locations = worker_ctx['contact_locations']
# load trace in memory
traces = recording.get_traces(start_frame=start_frame, end_frame=end_frame, segment_index=segment_index)
# get local peaks (segment + start_frame/end_frame)
i0 = np.searchsorted(peaks['segment_ind'], segment_index)
i1 = np.searchsorted(peaks['segment_ind'], segment_index + 1)
peak_in_segment = peaks[i0:i1]
i0 = np.searchsorted(peak_in_segment['sample_ind'], start_frame)
i1 = np.searchsorted(peak_in_segment['sample_ind'], end_frame)
local_peaks = peak_in_segment[i0:i1]
# make sample index local to traces
local_peaks = local_peaks.copy()
local_peaks['sample_ind'] -= start_frame
if method == 'center_of_mass':
peak_locations = localize_peaks_center_of_mass(traces, local_peaks, contact_locations, neighbours_mask)
return peak_locations
def localize_peaks_center_of_mass(traces, local_peak, contact_locations, neighbours_mask):
ndim = contact_locations.shape[1]
peak_locations = np.zeros((local_peak.size, ndim), dtype='float64')
# TODO find something faster
for i, peak in enumerate(local_peak):
chan_mask = neighbours_mask[peak['channel_ind'], :]
chan_inds, = np.nonzero(chan_mask)
# TODO find the max between nbefore/nafter
amps = traces[peak['sample_ind'], chan_inds]
amps = np.abs(amps)
com = np.sum(amps[:, np.newaxis] * contact_locations[chan_inds, :], axis=0) / np.sum(amps)
peak_locations[i, :] = com
return peak_locations
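# A minimal usage sketch (comments only; names are illustrative and assume a
# spikeinterface RecordingExtractor plus a peaks vector from detect_peaks()):
#
#   peaks = detect_peaks(recording, ...)  # "compact_numpy" peak vector
#   locations = localize_peaks(recording, peaks, method='center_of_mass',
#                              local_radius_um=150, chunk_size=10000, n_jobs=4)
#   # locations has one row per peak with the probe's ndim coordinates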
|
11537264
|
import pytest
from code42cli.util import _PADDING_SIZE
from code42cli.util import does_user_agree
from code42cli.util import find_format_width
from code42cli.util import format_string_list_to_columns
from code42cli.util import get_url_parts
TEST_HEADER = {"key1": "Column 1", "key2": "Column 10", "key3": "Column 100"}
@pytest.fixture
def context_with_assume_yes(mocker, cli_state):
ctx = mocker.MagicMock()
ctx.obj = cli_state
cli_state.assume_yes = True
return mocker.patch("code42cli.util.get_current_context", return_value=ctx)
@pytest.fixture
def context_without_assume_yes(mocker, cli_state):
ctx = mocker.MagicMock()
ctx.obj = cli_state
cli_state.assume_yes = False
return mocker.patch("code42cli.util.get_current_context", return_value=ctx)
@pytest.fixture
def echo_output(mocker):
return mocker.patch("code42cli.util.echo")
_NAMESPACE = "code42cli.util"
def get_expected_row_width(max_col_len, max_width):
col_size = max_col_len + _PADDING_SIZE
num_cols = int(max_width / col_size) or 1
return col_size * num_cols
def test_does_user_agree_when_user_says_y_returns_true(
mocker, context_without_assume_yes
):
mocker.patch("builtins.input", return_value="y")
assert does_user_agree("Test Prompt")
def test_does_user_agree_when_user_says_capital_y_returns_true(
mocker, context_without_assume_yes
):
mocker.patch("builtins.input", return_value="Y")
assert does_user_agree("Test Prompt")
def test_does_user_agree_when_user_says_n_returns_false(
mocker, context_without_assume_yes
):
mocker.patch("builtins.input", return_value="n")
assert not does_user_agree("Test Prompt")
def test_does_user_agree_when_assume_yes_argument_passed_returns_true_and_does_not_print_prompt(
context_with_assume_yes, capsys
):
result = does_user_agree("Test Prompt")
output = capsys.readouterr()
assert result
assert output.out == output.err == ""
def test_find_format_width_when_zero_records_sets_width_to_header_length():
_, column_width = find_format_width([], TEST_HEADER)
assert column_width["key1"] == len(TEST_HEADER["key1"])
assert column_width["key2"] == len(TEST_HEADER["key2"])
assert column_width["key3"] == len(TEST_HEADER["key3"])
def test_find_format_width_when_records_sets_width_to_greater_of_data_or_header_length():
report = [
{"key1": "test 1", "key2": "value xyz test", "key3": "test test test test"},
{"key1": "1", "key2": "<KEY>", "key3": "test test test test"},
]
_, column_width = find_format_width(report, TEST_HEADER)
assert column_width["key1"] == len(TEST_HEADER["key1"])
assert column_width["key2"] == len(report[0]["key2"])
assert column_width["key3"] == len(report[1]["key3"])
def test_find_format_width_filters_keys_not_present_in_header():
report = [
{"key1": "test 1", "key2": "value xyz test", "key3": "test test test test"},
{"key1": "1", "key2": "<KEY>", "key3": "test test test test"},
]
header_with_subset_keys = {"key1": "Column 1", "key3": "Column 100"}
result, _ = find_format_width(report, header_with_subset_keys)
for item in result:
assert "key2" not in item.keys()
def test_format_string_list_to_columns_when_given_no_string_list_does_not_echo(
echo_output,
):
format_string_list_to_columns([], None)
format_string_list_to_columns(None, None)
assert not echo_output.call_count
def test_format_string_list_to_columns_when_not_given_max_uses_shell_size(
mocker, echo_output
):
terminal_size = mocker.patch("code42cli.util.shutil.get_terminal_size")
max_width = 30
terminal_size.return_value = (max_width, None) # Cols, Rows
columns = ["col1", "col2"]
format_string_list_to_columns(columns)
printed_row = echo_output.call_args_list[0][0][0]
assert len(printed_row) == get_expected_row_width(4, max_width)
assert printed_row == "col1 col2 "
def test_format_string_list_to_columns_when_given_small_max_width_prints_one_column_per_row(
echo_output,
):
max_width = 5
columns = ["col1", "col2"]
format_string_list_to_columns(columns, max_width)
expected_row_width = get_expected_row_width(4, max_width)
printed_row = echo_output.call_args_list[0][0][0]
assert len(printed_row) == expected_row_width
assert printed_row == "col1 "
printed_row = echo_output.call_args_list[1][0][0]
assert len(printed_row) == expected_row_width
assert printed_row == "col2 "
def test_format_string_list_to_columns_uses_width_of_longest_string(echo_output):
max_width = 5
columns = ["col1", "col2_that_is_really_long"]
format_string_list_to_columns(columns, max_width)
expected_row_width = get_expected_row_width(
len("col2_that_is_really_long"), max_width
)
printed_row = echo_output.call_args_list[0][0][0]
assert len(printed_row) == expected_row_width
assert printed_row == "col1 "
printed_row = echo_output.call_args_list[1][0][0]
assert len(printed_row) == expected_row_width
assert printed_row == "col2_that_is_really_long "
def test_url_parts():
server, port = get_url_parts("localhost:3000")
assert server == "localhost"
assert port == 3000
server, port = get_url_parts("localhost")
assert server == "localhost"
assert port is None
server, port = get_url_parts("127.0.0.1")
assert server == "127.0.0.1"
assert port is None
|
11537273
|
from random import random
from dagster import graph, op
@op
def start():
return 1
@op
def unreliable(num: int) -> int:
failure_rate = 0.5
if random() < failure_rate:
raise Exception("blah")
return num
@op
def end(_num: int):
pass
@graph
def unreliable_job():
end(unreliable(start()))
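# A minimal driver sketch (assumes dagster's execute_in_process API; since
# `unreliable` fails about half the time, raise_on_error=False lets the run
# finish either way):
if __name__ == "__main__":
    result = unreliable_job.execute_in_process(raise_on_error=False)
    print("run succeeded:", result.success)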
|
11537282
|
from django.db import models
from django.utils.text import slugify
class Ingredient(models.Model):
name = models.CharField(max_length=64, unique=True)
slug = models.SlugField(max_length=64, unique=True)
photo = models.URLField(max_length=255, null=True, blank=True)
measureValue = models.FloatField(null=True, blank=True)
measureUnit = models.CharField(null=True, blank=True, max_length=32)
defaultValue = models.FloatField(null=True, blank=True)
defaultUnit = models.CharField(max_length=32)
energy = models.FloatField()
protein = models.FloatField()
carb = models.FloatField()
fat = models.FloatField()
saturatedFat = models.FloatField()
sugar = models.FloatField()
fibre = models.FloatField()
cholesterol = models.FloatField()
calcium = models.FloatField()
iron = models.FloatField()
sodium = models.FloatField()
potassium = models.FloatField()
magnesium = models.FloatField()
phosphorus = models.FloatField()
thiamin = models.FloatField()
riboflavin = models.FloatField()
niacin = models.FloatField()
folate = models.FloatField()
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
self.full_clean()
super(Ingredient, self).save(*args, **kwargs)
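# A minimal usage sketch (comments only; assumes migrations exist and every
# required nutrition field is supplied):
#
#   ingredient = Ingredient(name="Olive Oil", defaultUnit="ml", energy=884.0, ...)
#   ingredient.save()
#   ingredient.slug  # -> "olive-oil"; save() slugifies the name and runs
#                    #    full_clean() before writing the row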
|
11537298
|
from matplotlib import pyplot
from shapely.geometry import Point
from descartes import PolygonPatch
from figures import SIZE, BLUE, GRAY, set_limits
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
a = Point(1, 1).buffer(1.5)
b = Point(2, 1).buffer(1.5)
# 1
ax = fig.add_subplot(121)
patch1 = PolygonPatch(a, fc=GRAY, ec=GRAY, alpha=0.2, zorder=1)
ax.add_patch(patch1)
patch2 = PolygonPatch(b, fc=GRAY, ec=GRAY, alpha=0.2, zorder=1)
ax.add_patch(patch2)
c = a.difference(b)
patchc = PolygonPatch(c, fc=BLUE, ec=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patchc)
ax.set_title('a.difference(b)')
set_limits(ax, -1, 4, -1, 3)
#2
ax = fig.add_subplot(122)
patch1 = PolygonPatch(a, fc=GRAY, ec=GRAY, alpha=0.2, zorder=1)
ax.add_patch(patch1)
patch2 = PolygonPatch(b, fc=GRAY, ec=GRAY, alpha=0.2, zorder=1)
ax.add_patch(patch2)
c = b.difference(a)
patchc = PolygonPatch(c, fc=BLUE, ec=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patchc)
ax.set_title('b.difference(a)')
set_limits(ax, -1, 4, -1, 3)
pyplot.show()
|
11537344
|
import unittest, os
from copy import copy
from ctypes import *
from comtypes.client import GetModule, CreateObject
from comtypes.partial import partial
# ./urlhist.tlb was downloaded somewhere from the internet (?)
GetModule(os.path.join(os.path.dirname(__file__), "urlhist.tlb"))
from comtypes.gen import urlhistLib
# The pwcsTitle and pwcsUrl fields of the _STATURL structure must be
# freed by the caller. The only way to do this without patching the
# generated code directly is to monkey-patch the
# _STATURL.__ctypes_from_outparam__ method like this.
class _(partial, urlhistLib._STATURL):
def __ctypes_from_outparam__(self):
from comtypes.util import cast_field
result = type(self)()
for n, _ in self._fields_:
setattr(result, n, getattr(self, n))
url, title = self.pwcsUrl, self.pwcsTitle
windll.ole32.CoTaskMemFree(cast_field(self, "pwcsUrl", c_void_p))
windll.ole32.CoTaskMemFree(cast_field(self, "pwcsTitle", c_void_p))
return result
from comtypes.test.find_memleak import find_memleak
class Test(unittest.TestCase):
def check_leaks(self, func):
bytes = find_memleak(func, (5, 10))
self.failIf(bytes, "Leaks %d bytes" % bytes)
def test_creation(self):
hist = CreateObject(urlhistLib.UrlHistory)
for x in hist.EnumURLS():
x.pwcsUrl, x.pwcsTitle
## print (x.pwcsUrl, x.pwcsTitle)
## print x
def doit():
for x in hist.EnumURLs():
pass
doit()
self.check_leaks(doit)
if __name__ == "__main__":
unittest.main()
|
11537360
|
import unittest
from katas.kyu_7.string_ends_with import solution
class StringEndsWithTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(solution('abc', 'bc'))
def test_false(self):
self.assertFalse(solution('abc', 'd'))
|
11537391
|
import json
none = "d3043820717d74d9a17694c176d39733"
# region EMR
class DeploymentAction:
"""
# Arguments
action_type: str
should_handle_all_batches: bool
draining_timeout: int
should_decrement_target_capacity: bool
"""
def __init__(
self,
action_type=none,
should_handle_all_batches=none,
draining_timeout=none,
should_decrement_target_capacity=none):
self.action_type = action_type
self.should_handle_all_batches = should_handle_all_batches
self.draining_timeout = draining_timeout
self.should_decrement_target_capacity = should_decrement_target_capacity
# endregion
class DeploymentActionRequest:
def __init__(self, roll):
self.roll = roll
def toJSON(self):
return json.dumps(self.roll, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
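# A minimal usage sketch (the field values below are hypothetical, chosen only
# to show the shape of the serialized request):
if __name__ == "__main__":
    action = DeploymentAction(
        action_type="DETACH_OLD",
        should_handle_all_batches=True,
        draining_timeout=120,
        should_decrement_target_capacity=True)
    print(DeploymentActionRequest(roll=action).toJSON())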
|
11537399
|
from alpha.platforms.huobi_usdt_swap.ws_utils import *
class WsMarket(WsUtils):
def __init__(self, host: str = None):
super(WsMarket, self).__init__("/linear-swap-ws", host)
def sub(self, data:dict, callback):
self._sub(json.dumps(data), callback)
def req(self, data:dict, callback):
self._req(json.dumps(data), callback)
|
11537433
|
import os
import errno
import wget
YEARS = [2017, 2016]
WEEKS = [i for i in range(1, 52)]
INDICES = ['VCI', 'TCI', 'VHI', 'SMN', 'SMT']
TEMPLATE = 'ftp://ftp.star.nesdis.noaa.gov/pub/corp/scsb/wguo/data/'\
'VHP_4km/geo_TIFF/VHP.G04.C07.{}.P{}{}.{}.{}.tif'
OUTPUT_DIRECTORY = 'data'
def file_type(index):
return 'SM' if index in ['SMN', 'SMT'] else 'VH'
def that_strange_code(year, week):
# There is no explicit logic behind these codes
if year == 2005:
if week < 23:
return 'NL'
else:
return 'NN'
year_codes = [
{'from': 1981, 'to': 1984, 'code': 'NC'},
{'from': 1985, 'to': 1988, 'code': 'NF'},
{'from': 1989, 'to': 1994, 'code': 'NH'},
{'from': 1995, 'to': 2000, 'code': 'NJ'},
{'from': 2001, 'to': 2005, 'code': 'NL'},
{'from': 2005, 'to': 2010, 'code': 'NN'},
{'from': 2011, 'to': 2017, 'code': 'NP'}]
for each in year_codes:
if year >= each['from'] and year <= each['to']:
return each['code']
def int_to_week_code(num):
strNum = str(num)
zeros = ''.join(['0' for _ in range(3 - len(strNum))])
return zeros + strNum
def get_url(year, week, index):
return TEMPLATE.format(
that_strange_code(year, week),
str(year),
int_to_week_code(week),
file_type(index),
index)
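# For reference, get_url(2017, 1, 'VCI') yields:
# ftp://ftp.star.nesdis.noaa.gov/pub/corp/scsb/wguo/data/VHP_4km/geo_TIFF/VHP.G04.C07.NP.P2017001.VH.VCI.tif
# ('NP' is the 2011-2017 year code, the week is zero-padded to three digits,
# and the file type is 'VH' because VCI is not a soil-moisture index).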
def download_from_to(url, path):
if not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as exc:
# Guard against race condition
if exc.errno != errno.EEXIST:
raise
print('\nCollecting', url)
try:
wget.download(url, out=path)
except:
print('File does not exist')
for year in YEARS:
for week in WEEKS:
for index in INDICES:
download_from_to(
get_url(year, week, index),
'{}/{}/{}/'.format(OUTPUT_DIRECTORY, year, index))
|
11537493
|
from des.models import DynamicEmailConfiguration
from django.contrib.auth.models import User, Group
from django.test import TestCase
from django.core import mail
from django.conf import settings
from django_datajsonar.models import Node
from series_tiempo_ar_api.libs.indexing.report.node_admins import GlobalAdmins, NodeAdmins
from series_tiempo_ar_api.libs.indexing.report.report_mail_sender import ReportMailSender
class ReportMailSenderTests(TestCase):
def setUp(self):
self.subject = 'test_subject'
self.body = 'test_body'
self.user = User.objects.create(username='test_user',
password='<PASSWORD>',
email='<EMAIL>')
self.user.groups.add(Group.objects.get(name=settings.READ_DATAJSON_RECIPIENT_GROUP))
self.admins = GlobalAdmins()
self.sender = ReportMailSender(admins=self.admins, subject=self.subject, body=self.body)
def test_send_mail(self):
self.sender.send()
self.assertEqual(len(mail.outbox), 1)
def test_mail_sent_is_to_all_datajson_recipients_users(self):
self.sender.send()
self.assertIn(self.user.email, mail.outbox[0].recipients())
def test_if_no_recipients_mail_is_not_sent(self):
self.user.groups.clear()
self.sender.send()
self.assertEqual(len(mail.outbox), 0)
def test_mail_send_with_attachment(self):
file_name, body = 'test.csv', 'body'
self.sender.add_csv_attachment('test.csv', 'body')
self.sender.send()
attachment_file_name, attachment_body, _ = mail.outbox[0].attachments[0]
self.assertEqual(file_name, attachment_file_name)
self.assertEqual(body, attachment_body)
def test_subject_and_body(self):
self.sender.send()
self.assertEqual(mail.outbox[0].subject, self.subject)
self.assertEqual(mail.outbox[0].body, self.body)
def test_individual_node_report_it_sent_only_to_node_admins(self):
node = Node.objects.create(indexable=True, catalog_id='catalog_id', catalog_url='http://catalog_url.com')
email = '<EMAIL>'
node.admins.add(User.objects.create(username='other_user', password='<PASSWORD>', email=email))
ReportMailSender(admins=NodeAdmins(node), subject=self.subject, body=self.body).send()
self.assertIn(email, mail.outbox[0].recipients())
def test_from_email_is_read_from_des(self):
email = '<EMAIL>'
config = DynamicEmailConfiguration.get_solo()
config.from_email = email
config.save()
self.sender.send()
self.assertEqual(mail.outbox[0].from_email, config.from_email)
def test_add_plaintext_attachment(self):
file_name, body = 'plain.txt', 'body'
self.sender.add_plaintext_attachment(file_name, body)
self.sender.send()
attachment_file_name, attachment_body, _ = mail.outbox[0].attachments[0]
self.assertEqual(file_name, attachment_file_name)
self.assertEqual(body, attachment_body)
def test_sender_is_in_bcc(self):
email = '<EMAIL>'
config = DynamicEmailConfiguration.get_solo()
config.from_email = email
config.save()
self.sender.send()
self.assertIn(config.from_email, mail.outbox[0].bcc)
|
11537518
|
from uuid import uuid4
import pdb
import os
from operator import itemgetter
from itertools import chain
from copy import deepcopy
import numpy as np
import pickle
import random
import logging
from collections import OrderedDict
from functools import reduce
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, \
GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.feature_selection import SelectFromModel, VarianceThreshold, chi2, SelectPercentile, SelectKBest
from sklearn.pipeline import Pipeline
from scipy.sparse import vstack, csr_matrix, hstack, issparse, coo_matrix, \
lil_matrix
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.problem_transform import LabelPowerset, ClassifierChain, \
BinaryRelevance
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import entropy as get_entropy
from .time_series_to_ir import TimeSeriesToIR
from .base_scrabble import BaseScrabble
from .common import *
from .hcc import StructuredClassifierChain
from .brick_parser2 import get_subclasses, get_subclasses_dict, get_tagset_tree
#from .brick_parser import tagsetTree as tagset_tree
from .dann import DANN
from keras.layers import Input, Dense, Dropout
from keras.models import Sequential
from keras.constraints import max_norm
from keras import regularizers
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
def gen_uuid():
return str(uuid4())
def tree_flatter(tree, init_flag=True):
branches_list = list(tree.values())
d_list = list(tree.keys())
for branches in branches_list:
for branch in branches:
added_d_list = tree_flatter(branch)
d_list = [d for d in d_list if d not in added_d_list]\
+ added_d_list
return d_list
def extend_tree(tree, k, d):
for curr_head, branches in tree.items():
if k==curr_head:
branches.append(d)
for branch in branches:
extend_tree(branch, k, d)
def calc_leaves_depth(tree, d=dict(), depth=0):
curr_depth = depth + 1
for tagset, branches in tree.items():
if d.get(tagset):
d[tagset] = max(d[tagset], curr_depth)
else:
d[tagset] = curr_depth
for branch in branches:
new_d = calc_leaves_depth(branch, d, curr_depth)
for k, v in new_d.items():
if d.get(k):
d[k] = max(d[k], v)
else:
d[k] = v
return d
def augment_tagset_tree(tagsets, subclass_dict, tagset_tree):
for tagset in set(tagsets):
if '-' in tagset:
classname = tagset.split('-')[0]
extend_tree(tagset_tree, classname, {tagset:[]})
try:
subclass_dict[classname].append(tagset)
except:
pdb.set_trace()
subclass_dict[tagset] = []
else:
if tagset not in subclass_dict.keys():
classname = tagset.split('_')[-1]
try:
subclass_dict[classname].append(tagset)
except:
pdb.set_trace()
subclass_dict[tagset] = []
extend_tree(tagset_tree, classname, {tagset:[]})
class Ir2Tagsets(BaseScrabble):
"""docstring for Ir2Tagsets"""
def __init__(self,
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
building_tagsets_dict,
source_buildings=[],
source_sample_num_list=[],
learning_srcids=[],
known_tags_dict={},
config={}):
super(Ir2Tagsets, self).__init__(
target_building,
target_srcids,
building_label_dict,
building_sentence_dict,
building_tagsets_dict,
source_buildings,
source_sample_num_list,
learning_srcids,
config)
self.ts2ir = None
self.ts_feature_filename = 'temp/features.pkl'
self.known_tags_dict = known_tags_dict
if 'use_cluster_flag' in config:
self.use_cluster_flag = config['use_cluster_flag']
else:
self.use_cluster_flag = True
if 'eda_flag' in config:
self.eda_flag = config['eda_flag']
else:
self.eda_flag = False
if 'use_brick_flag' in config:
self.use_brick_flag = config['use_brick_flag']
else:
self.use_brick_flag = True
if 'n_jobs' in config:
self.n_jobs = config['n_jobs']
else:
#self.n_jobs = 1
self.n_jobs = 6
if 'ts_flag' in config:
self.ts_flag = config['ts_flag']
else:
self.ts_flag = False
if 'negative_flag' in config:
self.negative_flag = config['negative_flag']
else:
self.negative_flag = True
if 'emptydoc_flag' in config:
self.emptydoc_flag = config['emptydoc_flag']
else:
self.emptydoc_flag = True
if 'tagset_classifier_type' in config:
self.tagset_classifier_type = config['tagset_classifier_type']
else:
self.tagset_classifier_type = 'MLP'
if 'n_estimators' in config:
self.n_estimators = config['n_estimators']
else:
self.n_estimators = 10 # TODO: Find the proper value
if 'vectorizer_type' in config:
self.vectorizer_type = config['vectorizer_type']
else:
#self.vectorizer_type = 'count'
self.vectorizer_type = 'tfidf'
if 'entqs' in config:
self.query_strategy = config['entqs']
else:
self.query_strategy = 'entropy'
if 'use_known_tags' in config:
self.use_known_tags = config['use_known_tags']
else:
self.use_known_tags = False
if 'expand_tagsets_by_hierarchy_flag' in config:
self.expand_tagsets_by_hierarchy_flag = config['expand_tagsets_by_hierarchy_flag']
else:
self.expand_tagsets_by_hierarchy_flag = True
self.epochs = config.get('ir2tagsets.epochs', 400)
self.nb_empty_docs = 50
self._init_brick()
self._init_data(learning_srcids)
def _init_brick(self):
self.brick_srcids = []
version = '1.0.3' #TODO: Parameterize it from the module
self.tagset_list = get_subclasses(version, 'bf:TagSet')
self.point_tagsets = get_subclasses(version, 'brick:Point')
self.tagset_list.append('networkadapter')
self.subclass_dict = get_subclasses_dict(version, 'bf:TagSet')
self.subclass_dict['networkadapter'] = list()
self.subclass_dict['unknown'] = list()
self.subclass_dict['none'] = list()
#self.tagset_tree = deepcopy(tagset_tree)
self.tagset_tree = get_tagset_tree(version)
def get_srcid_domain(self, srcid):
# try get building name
splitted = srcid.split(';')
if len(splitted) == 1:
orig_srcid = srcid
else:
orig_srcid, srcid_postfix = splitted
if orig_srcid == 'brick':
domain = 'brick'
else:
domain = None
for building, sentence_dict in self.building_sentence_dict.items():
if orig_srcid in sentence_dict:
domain = building
assert domain
return domain
def expand_tagsets_by_hierarchy(self):
for srcid, tagsets in self.tagsets_dict.items():
expanded = set(tagsets)
for tagset in tagsets:
for superclass, subclasses in self.subclass_dict.items():
if tagset in subclasses:
expanded.add(superclass)
self.tagsets_dict[srcid] = list(expanded)
def _init_data(self, learning_srcids=[]):
self.sentence_dict = {}
self.label_dict = {}
self.tagsets_dict = {}
self.phrase_dict = {}
self.point_dict = {}
self.building_cluster_dict = {}
for building, source_sample_num in zip(self.source_buildings,
self.source_sample_num_list):
self.sentence_dict.update(self.building_sentence_dict[building])
one_label_dict = self.building_label_dict[building]
self.label_dict.update(one_label_dict)
if learning_srcids:
self.learning_srcids = learning_srcids
else:
sample_srcid_list = select_random_samples(
building = building,
srcids = one_label_dict.keys(),
n = source_sample_num,
use_cluster_flag = self.use_cluster_flag,
sentence_dict = self.building_sentence_dict[building],
shuffle_flag = False
)
self.learning_srcids += sample_srcid_list
one_tagsets_dict = self.building_tagsets_dict[building]
self.tagsets_dict.update(one_tagsets_dict)
for srcid, tagsets in one_tagsets_dict.items():
point_tagset = 'none'
for tagset in tagsets:
if tagset in self.point_tagsets:
point_tagset = tagset
break
self.point_dict[srcid] = point_tagset
if building not in self.building_cluster_dict:
self.building_cluster_dict[building] = get_word_clusters(
self.building_sentence_dict[building])
self.phrase_dict = make_phrase_dict(self.sentence_dict,
self.label_dict)
# validation
for srcid in self.target_srcids:
assert srcid in self.tagsets_dict
if self.expand_tagsets_by_hierarchy_flag:
self.expand_tagsets_by_hierarchy()
def _extend_tagset_list(self, new_tagsets):
self.tagset_list += new_tagsets
self.tagset_list = list(set(self.tagset_list))
def update_model(self, srcids):
self.learning_srcids += list(srcids) * 2
self.target_srcids = [srcid for srcid in self.target_srcids
if srcid not in self.learning_srcids]
invalid_num = sum([srcid not in self.tagsets_dict for srcid in
self.learning_srcids + self.target_srcids]) #debug
self._extend_tagset_list(reduce(adder, [self.tagsets_dict[srcid]
for srcid in self.learning_srcids + self.target_srcids]))
#augment_tagset_tree(self.tagset_list, self.subclass_dict, self.tagset_tree)
self._build_tagset_classifier(self.learning_srcids,
self.target_srcids,
validation_srcids=[])
def _determine_used_phrases(self, phrases, tagsets):
phrases_usages = list()
pred_tags = reduce(adder, [tagset.split('_') for tagset in tagsets], [])
used_cnt = 0.0
unused_cnt = 0.0
for phrase in phrases:
phrase_tags = phrase.split('_')
for tag in phrase_tags:
if tag in ['leftidentifier', 'rightidentifier']:
continue
if tag in pred_tags:
used_cnt += 1 / len(phrase_tags)
else:
unused_cnt += 1 / len(phrase_tags)
if used_cnt == 0:
score = 0
else:
score = used_cnt / (used_cnt + unused_cnt)
return score
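# Worked example of the score above (illustrative values): with
# phrases = ['supply_air_temperature', 'setpoint'] and
# tagsets = ('supply_air_temperature_sensor',), pred_tags contains
# {'supply', 'air', 'temperature', 'sensor'}; the first phrase adds
# 3 * (1/3) = 1.0 to used_cnt, 'setpoint' adds 1.0 to unused_cnt,
# so the returned score is 1.0 / (1.0 + 1.0) = 0.5.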
def ir2tagset_al_query_samples_phrase_util(self,
test_srcids,
building,
pred_tagsets_dict,
inc_num):
phrase_usage_dict = {}
for srcid in test_srcids:
pred_tagsets = pred_tagsets_dict[srcid]
phrase_usage_dict[srcid] = self._determine_used_phrases(
self.phrase_dict[srcid],
pred_tagsets)
phrase_usages = list(phrase_usage_dict.values())
mean_usage_rate = np.mean(phrase_usages)
std_usage_rate = np.std(phrase_usages)
# Select underexploited sentences.
threshold = mean_usage_rate - std_usage_rate
todo_sentence_dict = dict(
(srcid, alpha_tokenizer(''.join(self.sentence_dict[srcid])))
for srcid, usage_rate
in phrase_usage_dict.items()
if usage_rate < threshold and srcid in test_srcids)
cluster_dict = self.building_cluster_dict[building]
todo_srcids = select_random_samples(
building = building,
srcids = list(todo_sentence_dict.keys()),
n = min(inc_num, len(todo_sentence_dict)),
use_cluster_flag = True,
cluster_dict = cluster_dict,
shuffle_flag = False,
)
# if there are not enough samples, select more at random:
if len(todo_srcids) < inc_num:
more_num = inc_num - len(todo_srcids)
todo_sentence_dict = dict(
(srcid, alpha_tokenizer(''.join(self.sentence_dict[srcid])))
for srcid, usage_rate
in phrase_usage_dict.items()
if srcid in test_srcids)
cluster_dict = self.building_cluster_dict[building]
todo_srcids += select_random_samples(
building = building,
srcids = list(todo_sentence_dict.keys()),
n = min(more_num, len(todo_sentence_dict)),
use_cluster_flag = True,
cluster_dict = cluster_dict,
shuffle_flag = True
)
return todo_srcids
def ir2tagset_al_query_entropy(self,
target_prob_mat,
#target_prob,
target_srcids,
learning_srcids,
target_building,
inc_num
):
assert len(target_srcids) == target_prob_mat.shape[0]
entropies = get_entropy(target_prob_mat.T)
sorted_entropies = sorted([(srcid, ent) for srcid, ent
in zip(target_srcids, entropies)],
key=itemgetter(1))
cluster_dict = self.building_cluster_dict[target_building]
added_cids = []
todo_srcids = []
new_srcid_cnt = 0
for srcid, ent in sorted_entropies:
if srcid in learning_srcids:
continue
the_cid = None
for cid, cluster in cluster_dict.items():
if srcid in cluster:
the_cid = cid
break
if the_cid in added_cids:
continue
added_cids.append(the_cid)
todo_srcids.append(srcid)
new_srcid_cnt += 1
if new_srcid_cnt == inc_num:
break
return todo_srcids
def select_informative_samples(self, sample_num):
if self.query_strategy == 'phrase_util':
pred = self.predict(self.target_srcids)
new_srcids = self.ir2tagset_al_query_samples_phrase_util(
self.target_srcids,
self.target_building,
pred,
sample_num)
elif self.query_strategy == 'entropy':
_, _, prob_mat = self._predict_and_proba(self.target_srcids, True)
#proba = self.predict_proba(self.target_srcids)
new_srcids = self.ir2tagset_al_query_entropy(
prob_mat,
self.target_srcids,
self.learning_srcids,
self.target_building,
sample_num)
else:
raise ValueError('Query Strategy Wrong: {0}'.format(self.query_strategy))
return new_srcids
# ESSENTIAL
def learn_auto(self, iter_num=1):
"""Learn from the scratch to the end.
"""
pass
def _augment_phrases_with_ts(self, phrase_dict, srcids, ts2ir):
with open(self.ts_feature_filename, 'rb') as fp:
ts_features = pickle.load(fp, encoding='bytes')
ts_tags_pred = ts2ir.predict(ts_features, srcids)
tag_binarizer = ts2ir.get_binarizer()
pred_tags_list = tag_binarizer.inverse_transform(ts_tags_pred)
for srcid, pred_tags in zip(srcids, pred_tags_list):
phrase_dict[srcid] += list(pred_tags)
return phrase_dict
def _predict_and_proba(self, target_srcids, full_prob=False):
if not target_srcids:
return {}, {}
phrase_dict = {srcid: self.phrase_dict[srcid]
for srcid in target_srcids}
if self.ts_flag:
phrase_dict = self._augment_phrases_with_ts(phrase_dict, target_srcids, self.ts2ir)
if self.use_known_tags:
doc = [' '.join(phrase_dict[srcid] + self.known_tags_dict[srcid])
for srcid in target_srcids]
else:
doc = [' '.join(phrase_dict[srcid]) for srcid in target_srcids]
vect_doc = self.tagset_vectorizer.transform(doc) # should this be fit_transform?
certainty_dict = dict()
tagsets_dict = dict()
if self.tagset_classifier_type in ['MLP', 'DANN']:
pred_mat = self.tagset_classifier.predict(vect_doc)
prob_mat = deepcopy(pred_mat)
pred_mat[pred_mat >= 0.5] = 1
pred_mat[pred_mat < 0.5] = 0
else:
pred_mat = self.tagset_classifier.predict(vect_doc)
prob_mat = self.tagset_classifier.predict_proba(vect_doc)
if not isinstance(pred_mat, np.ndarray):
try:
pred_mat = pred_mat.toarray()
except:
pred_mat = np.asarray(pred_mat)
pred_tagsets_dict = dict()
pred_certainty_dict = dict()
pred_point_dict = dict()
for i, (srcid, pred, prob) in enumerate(zip(target_srcids,
pred_mat,
prob_mat)):
pred_tagsets = self.tagset_binarizer.inverse_transform(np.asarray([pred]))[0]
#pred_tagsets_dict[srcid] = self.tagset_binarizer.inverse_transform(\
# np.asarray([pred]))[0]
if self.expand_tagsets_by_hierarchy_flag:
filtered = deepcopy(list(pred_tagsets))
for curr_tagset in pred_tagsets:
for other_tagset in filtered:
if other_tagset in self.subclass_dict[curr_tagset]:
filtered.remove(curr_tagset)
break
pred_tagsets = tuple(filtered)
max_prob = max(prob) #TODO: implement this for filtered ones
else:
max_prob = max(prob)
pred_tagsets_dict[srcid] = pred_tagsets
pred_certainty_dict[srcid] = max_prob
#pred_certainty_dict[srcid] = 0
pred_certainty_dict = OrderedDict(sorted(pred_certainty_dict.items(), \
key=itemgetter(1), reverse=True))
logging.info('Finished prediction')
if full_prob:
return pred_tagsets_dict, pred_certainty_dict, prob_mat
else:
return pred_tagsets_dict, pred_certainty_dict
def predict(self, target_srcids=None):
if not target_srcids:
target_srcids = self.target_srcids
pred, _ = self._predict_and_proba(target_srcids)
return pred
def predict_proba(self, target_srcids=None):
if not target_srcids:
target_srcids = self.target_srcids
_, proba = self._predict_and_proba(target_srcids)
return proba
def _build_point_classifier(self):
# TODO: Implement this later if needed.
# Currently, this is just collected garbage.
self.point_classifier = RandomForestClassifier(
n_estimators=self.n_estimators,
n_jobs=n_jobs)
# Dataset only for points. Just for testing.
learning_point_dict = dict()
for srcid, tagsets in chain(learning_truths_dict.items(),
validation_truths_dict.items()):
point_tagset = 'none'
for tagset in tagsets:
if tagset in point_tagsets:
point_tagset = tagset
break
learning_point_dict[srcid] = point_tagset
learning_point_dict['dummy'] = 'unknown'
point_truths_dict = dict()
point_srcids = list()
for srcid in learning_srcids:
truths = learning_truths_dict[srcid]
point_tagset = None
for tagset in truths:
if tagset in point_tagsets:
point_tagset = tagset
break
if point_tagset:
point_truths_dict[srcid] = point_tagset
point_srcids.append(srcid)
try:
point_truth_mat = [point_tagsets.index(point_truths_dict[srcid]) \
for srcid in point_srcids]
point_vect_doc = np.vstack([learning_vect_doc[learning_srcids.index(srcid)]
for srcid in point_srcids])
except:
pdb.set_trace()
def _augment_with_ts(self, test_phrases_dict):
# TODO: Implement below
ts_learning_srcids = list()
learning_tags_dict = {srcid: splitter(self.point_dict[srcid])
for srcid in self.learning_srcids}
tag_binarizer = MultiLabelBinarizer()
tag_binarizer.fit(map(splitter, self.point_dict.values()))
with open(self.ts_feature_filename, 'rb') as fp:
ts_features = pickle.load(fp, encoding='bytes')
new_ts_features = list()
for ts_feature in ts_features:
feats = ts_feature[0]
srcid = ts_feature[2]
if srcid in self.learning_srcids + self.validation_srcids:
point_tagset = self.point_dict[srcid]
point_tags = point_tagset.split('_')
point_vec = tag_binarizer.transform([point_tags])
new_feature = [feats, point_vec, srcid]
new_ts_features.append(new_feature)
elif srcid in self.target_srcids:
new_ts_features.append(ts_feature)
ts_features = new_ts_features
self.ts2ir = TimeSeriesToIR(mlb=tag_binarizer)
self.ts2ir.fit(ts_features, self.learning_srcids, self.validation_srcids, learning_tags_dict)
learning_ts_tags_pred = self.ts2ir.predict(ts_features, self.learning_srcids)
for srcid, ts_tags in zip(self.learning_srcids, \
tag_binarizer.inverse_transform(
learning_ts_tags_pred)):
#learning_phrase_dict[srcid] += list(ts_tags)
ts_srcid = srcid + '_ts'
learning_phrase_dict[ts_srcid] = learning_phrase_dict[srcid]\
+ list(ts_tags)
ts_learning_srcids.append(ts_srcid)
learning_truths_dict[ts_srcid] = learning_truths_dict[srcid]
test_ts_tags_pred = self.ts2ir.predict(ts_features, test_srcids)
for srcid, ts_tags in zip(test_srcids, \
tag_binarizer.inverse_transform(
test_ts_tags_pred)):
#ts_srcid = srcid + '_ts'
#test_phrase_dict[ts_srcid] = test_phrase_dict[srcid] + list(ts_tags)
#test_srcids .append(ts_srcid) # TODO: Validate if this works.
test_phrase_dict[srcid] += list(ts_tags)
def _augment_negative_examples(self, doc, srcids):
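# For every (learning srcid, true tagset) pair this synthesizes a negative
# example: the tags of that tagset -- and, transitively, of any other true
# tagset sharing one of the removed tags -- are dropped from the phrase
# sentence, and the surviving tagsets become the label of the new sample.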
negative_doc = []
negative_truths_dict = {}
negative_srcids = []
for srcid in self.learning_srcids:
true_tagsets = list(set(self.tagsets_dict[srcid]))
sentence = self.phrase_dict[srcid]
for tagset in true_tagsets:
negative_srcid = srcid + ';' + gen_uuid()
removing_tagsets = set()
new_removing_tagsets = set([tagset])
removing_tags = []
negative_tagsets = list(filter(tagset.__ne__, true_tagsets))
i = 0
while len(new_removing_tagsets) != len(removing_tagsets):
i += 1
if i>5:
pdb.set_trace()
removing_tagsets = deepcopy(new_removing_tagsets)
for removing_tagset in removing_tagsets:
removing_tags += removing_tagset.split('_')
for negative_tagset in negative_tagsets:
for tag in removing_tags:
if tag in negative_tagset.split('_'):
new_removing_tagsets.add(negative_tagset)
negative_sentence = [tag for tag in sentence if\
tag not in removing_tags]
for tagset in removing_tagsets:
negative_tagsets = list(filter(tagset.__ne__,
negative_tagsets))
# negative_sentence = [word for word in sentence \
# if word not in tagset.split('_')]
negative_doc.append(' '.join(negative_sentence))
negative_truths_dict[negative_srcid] = negative_tagsets
negative_srcids.append(negative_srcid)
"""
for i in range(0, self.nb_empty_docs):
# Add empty examples
negative_srcid = gen_uuid()
negative_doc.append('')
negative_srcids.append(negative_srcid)
negative_truths_dict[negative_srcid] = []
"""
doc += negative_doc
srcids += negative_srcids
self.tagsets_dict.update(negative_truths_dict)
return doc, srcids
def _augment_brick_samples(self, doc, srcids):
brick_truths_dict = dict()
self.brick_srcids = []
brick_doc = []
logging.info('Start adding Brick samples')
brick_copy_num = 6
self.brick_tagsets_dict = dict()
self.brick_doc = list()
for tagset in self.tagset_list:
for j in range(0, brick_copy_num):
#multiplier = random.randint(2, 6)
srcid = 'brick;' + gen_uuid()
self.brick_srcids.append(srcid)
self.brick_tagsets_dict[srcid] = [tagset]
tagset_doc = list()
for tag in tagset.split('_'):
tagset_doc += [tag] * random.randint(1,2)
brick_doc.append(' '.join(tagset_doc))
doc += brick_doc
self.tagsets_dict.update(self.brick_tagsets_dict)
srcids += self.brick_srcids
return doc, srcids
def _augment_eda(self):
if eda_flag:
unlabeled_phrase_dict = make_phrase_dict(\
test_sentence_dict, \
test_token_label_dict, \
{target_building:test_srcids},\
False)
prefixer = build_prefixer(target_building)
unlabeled_target_doc = [' '.join(\
map(prefixer, unlabeled_phrase_dict[srcid]))\
for srcid in test_srcids]
# unlabeled_vect_doc = - tagset_vectorizer\
# .transform(unlabeled_target_doc)
unlabeled_vect_doc = np.zeros((len(test_srcids), \
len(tagset_vectorizer.vocabulary_)))
target_doc = [' '.join(unlabeled_phrase_dict[srcid])\
for srcid in test_srcids]
test_vect_doc = tagset_vectorizer.transform(target_doc).toarray()
for building in source_target_buildings:
if building == target_building:
added_test_vect_doc = - test_vect_doc
else:
added_test_vect_doc = test_vect_doc
unlabeled_vect_doc = np.hstack([unlabeled_vect_doc,\
added_test_vect_doc])
if eda_flag:
learning_vect_doc = tagset_vectorizer.transform(learning_doc +
negative_doc).todense()
learning_srcids += negative_srcids
new_learning_vect_doc = deepcopy(learning_vect_doc)
for building in source_target_buildings:
building_mask = np.array([1 if find_key(srcid.split(';')[0],\
total_srcid_dict,\
check_in) == building
else 0 for srcid in learning_srcids])
new_learning_vect_doc = np.hstack([new_learning_vect_doc] \
+ [np.asmatrix(building_mask \
* np.asarray(learning_vect)[0]).T \
for learning_vect \
in learning_vect_doc.T])
learning_vect_doc = new_learning_vect_doc
if use_brick_flag:
new_brick_srcids = list()
new_brick_vect_doc = np.array([])\
.reshape((0, len(tagset_vectorizer.vocabulary) \
* (len(source_target_buildings)+1)))
brick_vect_doc = tagset_vectorizer.transform(brick_doc).todense()
for building in source_target_buildings:
prefixer = lambda srcid: building + '-' + srcid
one_brick_srcids = list(map(prefixer, brick_srcids))
for new_brick_srcid, brick_srcid\
in zip(one_brick_srcids, brick_srcids):
brick_truths_dict[new_brick_srcid] = \
brick_truths_dict[brick_srcid]
one_brick_vect_doc = deepcopy(brick_vect_doc)
for b in source_target_buildings:
if b != building:
one_brick_vect_doc = np.hstack([
one_brick_vect_doc,
np.zeros((len(brick_srcids),
len(tagset_vectorizer.vocabulary)))])
else:
one_brick_vect_doc = np.hstack([
one_brick_vect_doc, brick_vect_doc])
new_brick_vect_doc = np.vstack([new_brick_vect_doc,
one_brick_vect_doc])
new_brick_srcids += one_brick_srcids
learning_vect_doc = np.vstack([learning_vect_doc,
new_brick_vect_doc])
brick_srcids = new_brick_srcids
learning_srcids += brick_srcids
def _build_tagset_classifier(self,
learning_srcids,
target_srcids,
validation_srcids):
learning_srcids = deepcopy(learning_srcids)
# Update TagSet pool to include TagSets not in Brick.
#orig_sample_num = len(learning_srcids)
#new_tagset_list = tree_flatter(self.tagset_tree, [])
#new_tagset_list = [tagset for tagset in new_tagset_list
# if tagset not in ['location', 'equipment']]
#TODO: fix tagset_tree instead of using the above temp fix.
#new_tagset_list = new_tagset_list + [ts for ts in self.tagset_list \
# if ts not in new_tagset_list]
#self.tagset_list = new_tagset_list
self.tagset_binarizer = MultiLabelBinarizer(self.tagset_list)
self.tagset_binarizer.fit([self.tagset_list])
assert self.tagset_list == self.tagset_binarizer.classes_.tolist()
#self.tagsets_dict = {srcid: self.tagsets_dict[srcid]
# for srcid in learning_srcids}
## Init brick tag_list
# TODO: Maybe this should be done in initialization stage.
self.tag_list = list(set(reduce(adder, map(splitter,
self.tagset_list))))
# All possible vocabularies.
vocab_dict = dict([(tag, i) for i, tag in enumerate(self.tag_list)])
# Define Vectorizer
tokenizer = lambda x: x.split()
# TODO: We could use word embedding like word2vec here instead.
if self.vectorizer_type == 'tfidf':
self.tagset_vectorizer = TfidfVectorizer(tokenizer=tokenizer, # TODO: This should be renamed as tags_vectorizer
vocabulary=vocab_dict)
elif self.vectorizer_type == 'meanbembedding':
self.tagset_vectorizer = MeanEmbeddingVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict)
elif self.vectorizer_type == 'count':
self.tagset_vectorizer = CountVectorizer(tokenizer=tokenizer,
vocabulary=vocab_dict)
else:
raise Exception('Wrong vectorizer type: {0}'
.format(self.vectorizer_type))
if self.ts_flag:
pdb.set_trace()
pass
#TODO: Run self._augment_with_ts()
self._augment_with_ts()
## Transform learning samples
        if self.use_known_tags: # TODO: Remove this if it is not necessary.
learning_doc = [' '.join(self.phrase_dict[srcid] +
self.known_tags_dict[srcid])
for srcid in learning_srcids]
target_doc = [' '.join(self.phrase_dict[srcid] +
self.known_tags_dict[srcid])
for srcid in target_srcids]
learning_doc += [' '.join(self.phrase_dict[srcid])
for srcid in learning_srcids]
target_doc += [' '.join(self.phrase_dict[srcid])
for srcid in target_srcids]
learning_srcids *= 2
else:
learning_doc = [' '.join(self.phrase_dict[srcid]) for srcid in learning_srcids]
target_doc = [' '.join(self.phrase_dict[srcid]) for srcid in target_srcids]
## Augment with negative examples.
if self.negative_flag:
learning_doc, learning_srcids = self._augment_negative_examples(learning_doc,
learning_srcids)
## Init Brick samples.
if self.use_brick_flag:
learning_doc, learning_srcids = \
self._augment_brick_samples(learning_doc,
learning_srcids)
# Init domain vector of source
learning_domain_doc = [self.get_srcid_domain(srcid) for srcid in learning_srcids]
# Add empty examples to each domain
if self.emptydoc_flag:
domain_types = set(learning_domain_doc)
for domain_type in domain_types:
for i in range(0, int(self.nb_empty_docs / len(domain_types))):
empty_srcid = gen_uuid()
learning_srcids.append(empty_srcid)
learning_domain_doc.append(domain_type)
learning_doc.append('')
self.tagsets_dict[empty_srcid] = []
# Init domain vector of target
target_domain_doc = [self.get_srcid_domain(srcid) for srcid in target_srcids]
self.domain_vectorizer = CountVectorizer()
self.domain_vectorizer.fit(learning_domain_doc + target_domain_doc)
learning_domain_vect_doc = self.domain_vectorizer.transform(learning_domain_doc).todense()
target_domain_vect_doc = self.domain_vectorizer.transform(target_domain_doc).todense()
self.tagset_vectorizer.fit(learning_doc + target_doc)# + brick_doc)
# Apply Easy-Domain-Adaptation mechanism. Not useful.
if self.eda_flag:
raise Exception('Not implemented')
# TODO: self._augment_eda()
else:
# Make TagSet vectors.
learning_vect_doc = self.tagset_vectorizer.transform(learning_doc).todense()
target_vect_doc = self.tagset_vectorizer.transform(target_doc).todense()
truth_mat = csr_matrix([self.tagset_binarizer.transform(
[self.tagsets_dict[srcid]])[0]
for srcid in learning_srcids])
if self.eda_flag:
raise Exception('Not implemented')
zero_vectors = self.tagset_binarizer.transform(\
[[] for i in range(0, unlabeled_vect_doc.shape[0])])
truth_mat = vstack([truth_mat, zero_vectors])
learning_vect_doc = np.vstack([learning_vect_doc, unlabeled_vect_doc])
logging.info('Start learning multi-label classifier')
## Learn the classifier. StructuredCC is the default model.
if self.tagset_classifier_type == 'RandomForest':
def meta_rf(**kwargs):
#return RandomForestClassifier(**kwargs)
return RandomForestClassifier(n_jobs=self.n_jobs, n_estimators=150)
meta_classifier = meta_rf
params_list_dict = {}
elif self.tagset_classifier_type == 'StructuredCC_BACKUP':
#feature_selector = SelectFromModel(LinearSVC(C=0.001))
feature_selector = SelectFromModel(LinearSVC(C=0.01, penalty='l1', dual=False))
base_base_classifier = PassiveAggressiveClassifier(loss='squared_hinge', C=0.1)
#base_base_classifier = GradientBoostingClassifier()
#base_base_classifier = RandomForestClassifier()
base_classifier = Pipeline([('feature_selection',
feature_selector),
('classification',
base_base_classifier)
])
            tagset_classifier = StructuredClassifierChain(
                base_classifier,
                self.tagset_binarizer,
                self.subclass_dict,
                self.tagset_vectorizer.vocabulary,
                self.n_jobs,
                self.use_brick_flag)
elif self.tagset_classifier_type == 'Project':
def meta_proj(**kwargs):
#base_classifier = LinearSVC(C=20, penalty='l1', dual=False)
base_classifier = SVC(kernel='rbf', C=10, class_weight='balanced')
#base_classifier = GaussianProcessClassifier()
                tagset_classifier = ProjectClassifier(base_classifier,
                                                      self.tagset_binarizer,
                                                      self.tagset_vectorizer,
                                                      self.subclass_dict,
                                                      self.n_jobs)
return tagset_classifier
meta_classifier = meta_proj
params_list_dict = {}
elif self.tagset_classifier_type == 'CC':
def meta_cc(**kwargs):
feature_selector = SelectFromModel(LinearSVC(C=1))
#feature_selector = SelectFromModel(LinearSVC(C=0.01, penalty='l1', dual=False))
base_base_classifier = GradientBoostingClassifier(**kwargs)
#base_base_classifier = SGDClassifier(loss='modified_huber', penalty='elasticnet')
#base_base_classifier = PassiveAggressiveClassifier(loss='squared_hinge', C=0.1)
#base_base_classifier = LogisticRegression()
#base_base_classifier = RandomForestClassifier(**kwargs)
base_classifier = Pipeline([('feature_selection',
feature_selector),
('classification',
base_base_classifier)
])
tagset_classifier = ClassifierChain(classifier=base_classifier)
return tagset_classifier
meta_classifier = meta_cc
params_list_dict = {}
elif self.tagset_classifier_type == 'StructuredCC_autoencoder':
def meta_scc(**kwargs):
feature_selector = SelectFromModel(LinearSVC(C=5))
#feature_selector = SelectFromModel(LinearSVC(C=0.01, penalty='l1', dual=False))
base_base_classifier = GradientBoostingClassifier(**kwargs)
#base_base_classifier = SGDClassifier(loss='modified_huber', penalty='elasticnet')
#base_base_classifier = PassiveAggressiveClassifier(loss='squared_hinge', C=0.1)
#base_base_classifier = LogisticRegression()
#base_base_classifier = RandomForestClassifier(**kwargs)
base_classifier = Pipeline([('feature_selection',
feature_selector),
('classification',
base_base_classifier)
])
tagset_classifier = StructuredClassifierChain(
base_classifier,
self.tagset_binarizer,
self.subclass_dict,
self.tagset_vectorizer.vocabulary,
self.n_jobs,
self.use_brick_flag,
self.tagset_vectorizer)
return tagset_classifier
meta_classifier = meta_scc
rf_params_list_dict = {
'n_estimators': [10, 50, 100],
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto'],
'max_depth': [1, 5, 10, 50],
'min_samples_leaf': [2,4,8],
'min_samples_split': [2,4,8]
}
gb_params_list_dict = {
'loss': ['deviance', 'exponential'],
'learning_rate': [0.1, 0.01, 1, 2],
'criterion': ['friedman_mse', 'mse'],
'max_features': [None, 'sqrt'],
'max_depth': [1, 3, 5, 10],
'min_samples_leaf': [1,2,4,8],
'min_samples_split': [2,4,8]
}
params_list_dict = gb_params_list_dict
elif self.tagset_classifier_type == 'StructuredCC':
def meta_scc(**kwargs):
#feature_selector = SelectFromModel(LinearSVC(C=5))
#feature_selector = SelectFromModel(LinearSVC(C=1))
feature_selector = SelectFromModel(LinearSVC(C=1))
#feature_selector = SelectFromModel(LinearSVC(C=0.01, penalty='l1', dual=False))
base_base_classifier = GradientBoostingClassifier(**kwargs)
#base_base_classifier = SGDClassifier(loss='modified_huber', penalty='elasticnet')
#base_base_classifier = PassiveAggressiveClassifier(loss='squared_hinge', C=0.1)
#base_base_classifier = LogisticRegression()
#base_base_classifier = RandomForestClassifier(**kwargs)
base_classifier = Pipeline([('feature_selection',
feature_selector),
('classification',
base_base_classifier)
])
tagset_classifier = StructuredClassifierChain(
base_classifier,
self.tagset_binarizer,
self.subclass_dict,
self.tagset_vectorizer.vocabulary,
self.n_jobs,
self.use_brick_flag,
self.tagset_vectorizer)
return tagset_classifier
meta_classifier = meta_scc
rf_params_list_dict = {
'n_estimators': [10, 50, 100],
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto'],
'max_depth': [1, 5, 10, 50],
'min_samples_leaf': [2,4,8],
'min_samples_split': [2,4,8]
}
gb_params_list_dict = {
'loss': ['deviance', 'exponential'],
'learning_rate': [0.1, 0.01, 1, 2],
'criterion': ['friedman_mse', 'mse'],
'max_features': [None, 'sqrt'],
'max_depth': [1, 3, 5, 10],
'min_samples_leaf': [1,2,4,8],
'min_samples_split': [2,4,8]
}
params_list_dict = gb_params_list_dict
elif self.tagset_classifier_type == 'StructuredCC_RF':
base_classifier = RandomForestClassifier()
            tagset_classifier = StructuredClassifierChain(base_classifier,
                                                          self.tagset_binarizer,
                                                          self.subclass_dict,
                                                          self.tagset_vectorizer.vocabulary,
                                                          self.n_jobs)
elif self.tagset_classifier_type == 'StructuredCC_LinearSVC':
def meta_scc_svc(**kwargs):
base_classifier = LinearSVC(loss='hinge', tol=1e-5,\
max_iter=2000, C=2,
fit_intercept=False,
class_weight='balanced')
                tagset_classifier = StructuredClassifierChain(base_classifier,
                                                              self.tagset_binarizer,
                                                              self.subclass_dict,
                                                              self.tagset_vectorizer.vocabulary,
                                                              self.n_jobs)
return tagset_classifier
params_list_dict = {}
meta_classifier = meta_scc_svc
elif self.tagset_classifier_type == 'OneVsRest':
base_classifier = LinearSVC(loss='hinge', tol=1e-5,\
max_iter=2000, C=2,
fit_intercept=False,
class_weight='balanced')
tagset_classifier = OneVsRestClassifier(base_classifier)
elif self.tagset_classifier_type == 'Voting':
def meta_voting(**kwargs):
return VotingClassifier(self.tagset_binarizer, self.tagset_vectorizer,
self.tagset_tree, self.tagset_list)
meta_classifier = meta_voting
params_list_dict = {}
elif self.tagset_classifier_type == 'MLP':
# Def model
data_dim = learning_vect_doc.shape[1]
output_classes = truth_mat.shape[1]
model = self.get_mlp_model(data_dim, output_classes)
elif self.tagset_classifier_type == 'DANN':
data_dim = learning_vect_doc.shape[1]
output_classes = truth_mat.shape[1]
nb_domains = learning_domain_vect_doc.shape[-1]
dann = DANN(data_dim, output_classes, nb_domains,
batch_size=128,
)
else:
raise Exception('Wrong tagset classifier type: {0}'
.format(self.tagset_classifier_type))
if not isinstance(truth_mat, csr_matrix):
truth_mat = csr_matrix(truth_mat)
# TODO: Hyper-parameter optimization. (But expect it'd be slow.)
if self.tagset_classifier_type == 'MLP':
self.tagset_classifier = model
elif self.tagset_classifier_type == 'DANN':
self.tagset_classifier = dann
else:
best_params = {'learning_rate':0.1,
'subsample':0.25,
'n_estimators': 200}
self.tagset_classifier = meta_classifier(**best_params)
# add an empty doc.
#empty_doc_num = min(5, int(learning_vect_doc.shape[0]*0.02))
#learning_vect_doc = np.hstack([learning_vect_doc,
# np.zeros((empty_doc_num, learning_vect_doc.shape[1]))
# ])
# Actual fitting.
if isinstance(self.tagset_classifier, StructuredClassifierChain):
self.tagset_classifier.fit(learning_vect_doc, truth_mat.toarray(), \
orig_sample_num=len(learning_vect_doc)
- len(self.brick_srcids))
elif self.tagset_classifier_type == 'MLP':
self.tagset_classifier.fit(learning_vect_doc,
truth_mat,
batch_size=128,
epochs=self.epochs,
verbose=True)
elif self.tagset_classifier_type == 'DANN':
truth_mat = truth_mat.todense()
target_domain_index = self.domain_vectorizer.vocabulary_[self.target_building]
self.tagset_classifier.fit(learning_vect_doc, truth_mat, learning_domain_vect_doc,
target_vect_doc, target_domain_vect_doc, target_domain_index,
nb_epochs=1500,
)
else:
self.tagset_classifier.fit(learning_vect_doc, truth_mat.toarray())
#self.point_classifier.fit(point_vect_doc, point_truth_mat)
logging.info('Finished learning multi-label classifier')
def get_mlp_model(self, data_dim, output_classes):
model = Sequential()
model.add(Dense(64,
input_shape=(data_dim,),
#bias_regularizer=regularizers.l1(0.0001),
#kernel_regularizer=regularizers.l1(0.001),
#activity_regularizer=regularizers.l1(0.001),
#kernel_constraint=max_norm(3),
activation='relu'))
"""
model.add(Dropout(0.1))
model.add(Dense(64,
input_shape=(data_dim,),
#bias_regularizer=regularizers.l1(0.0001),
#kernel_regularizer=regularizers.l1(0.001),
#activity_regularizer=regularizers.l1(0.001),
kernel_constraint=max_norm(3),
activation='relu'))
"""
model.add(Dropout(0.1))
model.add(Dense(output_classes,
#bias_regularizer=regularizers.l1(0.0001),
#kernel_regularizer=regularizers.l1(0.0001),
#activity_regularizer=regularizers.l2(0.01),
#kernel_constraint=max_norm(3),
activation='sigmoid'))
#model.compile(optimizer='sgd',
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
)
return model
    def _parameter_validation(self, vect_doc, truth_mat, srcids, params_list_dict,
                              meta_classifier, vectorizer, binarizer,
                              source_target_buildings, eda_flag):
# TODO: This is not effective for now. Do I need one?
#best_params = {'n_estimators': 50, 'criterion': 'entropy', 'max_features': 'auto', 'max_depth': 5, 'min_samples_leaf': 2, 'min_samples_split': 2}
#best_params = {'criterion': 'entropy'}
#best_params = {'loss': 'exponential', 'learning_rate': 0.01, 'criterion': 'friedman_mse', 'max_features': None, 'max_depth': 10, 'min_samples_leaf': 4, 'min_samples_split': 2}
#tagset_classifier = RandomForestClassifier(n_estimators=100,
# random_state=0,\
# n_jobs=n_jobs)
best_params = {'learning_rate':0.1, 'subsample':0.25}
#best_params = {'C':0.4, 'solver': 'liblinear'}
return meta_classifier(**best_params) # Pre defined setup.
#best_params = {'n_estimators': 120, 'n_jobs':7}
#return meta_classifier(**best_params)
token_type = 'justseparate'
results_dict = dict()
for key, values in params_list_dict.items():
results_dict[key] = {'ha': [0]*len(values),
'a': [0]*len(values),
'mf1': [0]*len(values)}
avg_num = 3
for i in range(0,avg_num):
learning_indices = random.sample(range(0, len(srcids)),
int(len(srcids)/2))
validation_indices = [i for i in range(0, len(srcids))
if i not in learning_indices]
learning_srcids = [srcids[i] for i
in learning_indices]
validation_srcids = [srcids[i] for i
in validation_indices]
for key, values in params_list_dict.items():
for j, value in enumerate(values):
params = {key: value}
classifier = meta_classifier(**params)
classifier.fit(vect_doc[learning_indices], \
truth_mat[learning_indices].toarray())
validation_sentence_dict, \
validation_token_label_dict, \
validation_truths_dict, \
validation_phrase_dict = self.get_multi_buildings_data(\
source_target_buildings, validation_srcids, \
eda_flag, token_type)
validation_pred_tagsets_dict, \
validation_pred_certainty_dict, \
_ = tagsets_prediction(classifier, vectorizer, binarizer, \
validation_phrase_dict, validation_srcids, \
source_target_buildings, eda_flag, None,
ts2ir=None)
validation_result = tagsets_evaluation(validation_truths_dict, \
validation_pred_tagsets_dict, \
validation_pred_certainty_dict,\
validation_srcids, \
None, \
validation_phrase_dict, \
debug_flag=False,
classifier=classifier, \
vectorizer=vectorizer)
results_dict[key]['ha'][j] += validation_result['hierarchy_accuracy']
results_dict[key]['a'][j] += validation_result['accuracy']
                    results_dict[key]['mf1'][j] += validation_result['macro_f1']
best_params = dict()
for key, results in results_dict.items():
metrics = results_dict[key]['mf1']
best_params[key] = params_list_dict[key][metrics.index(max(metrics))]
classifier = meta_classifier(**best_params)
classifier.fit(vect_doc[learning_indices], \
truth_mat[learning_indices].toarray())
validation_sentence_dict, \
validation_token_label_dict, \
validation_truths_dict, \
validation_phrase_dict = self.get_multi_buildings_data(\
source_target_buildings, validation_srcids, \
eda_flag, token_type)
validation_pred_tagsets_dict, \
validation_pred_certainty_dict, \
_ = tagsets_prediction(classifier, vectorizer, binarizer, \
validation_phrase_dict, validation_srcids, \
source_target_buildings, eda_flag, None,
ts2ir=None)
validation_result = tagsets_evaluation(validation_truths_dict, \
validation_pred_tagsets_dict, \
validation_pred_certainty_dict,\
validation_srcids, \
None, \
validation_phrase_dict, \
debug_flag=False,
classifier=classifier, \
vectorizer=vectorizer)
best_ha = validation_result['hierarchy_accuracy']
best_a = validation_result['accuracy']
best_mf1 = validation_result['macro_f1']
return meta_classifier(**best_params)
def update_phrases(self, phrases):
self.phrase_dict.update(phrases)
|
11537577
|
from django import template
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
from urlparse import urljoin
from cabot.cabotapp.defs import TIMESTAMP_FORMAT
register = template.Library()
@register.simple_tag
def jenkins_human_url(jobname):
return urljoin(settings.JENKINS_API, 'job/{}/'.format(jobname))
@register.filter(name='format_timedelta')
def format_timedelta(delta):
# Getting rid of microseconds.
return str(timedelta(days=delta.days, seconds=delta.seconds))
@register.filter(name='format_timestamp')
def format_timestamp(ts):
# need to wrap this with timezone.localtime, otherwise strftime skips locale conversion
return timezone.localtime(ts).strftime(TIMESTAMP_FORMAT)
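
# Template usage sketch (the `{% load %}` name depends on how this tag library is
# registered in the app; `jenkins_tags` and the variables below are assumptions,
# shown only to illustrate how the tags and filters above are invoked):
#
#   {% load jenkins_tags %}
#   {% jenkins_human_url check.name %}
#   {{ service.last_alert_sent|format_timedelta }}
#   {{ check.last_run|format_timestamp }}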
|
11537582
|
from ..service import Operation
from ..utils.exceptions import UknownValueError
class ConvertId(Operation):
"""ConvertId EWS Operation converts item and folder
identifiers between formats.
"""
RESULTS_KEY = '@Id'
ID_FORMATS = [
'EntryId',
'EwsId',
'EwsLegacyId',
'HexEntryId',
'OwaId',
'StoreId'
]
def __init__(self, user, item_id, id_type, convert_to):
"""Takes a specific user, item_id, id_type, and the
        desired format to convert to as inputs.
Args:
user (str): The mailbox that the ID is associated with
item_id (str): The item ID
id_type (str): The Item ID type
            convert_to (str): The format to convert the Item ID to
Raises:
UknownValueError: One or more provided values is unknown
"""
self.user = user
self.item_id = item_id
        if id_type not in self.ID_FORMATS:
            raise UknownValueError(provided_value=id_type, known_values=self.ID_FORMATS)
        if convert_to not in self.ID_FORMATS:
            raise UknownValueError(provided_value=convert_to, known_values=self.ID_FORMATS)
self.id_type = id_type
self.convert_to = convert_to
def soap(self):
return self.M_NAMESPACE.ConvertId(
self.M_NAMESPACE.SourceIds(
self.T_NAMESPACE.AlternateId(
Format=self.id_type, Id=self.item_id, Mailbox=self.user
)
),
DestinationFormat=self.convert_to
)
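
# Usage sketch (illustrative only): the mailbox and item id below are placeholders,
# and the surrounding EWS client wiring is not part of this module.
#
#   op = ConvertId(user='user@example.com',
#                  item_id='AAMkAD...',
#                  id_type='EwsId',
#                  convert_to='EntryId')
#   envelope = op.soap()  # SOAP body element to embed in the ConvertId request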
|
11537608
|
import autokeras as ak
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
(x_train, y_train), (x_test, y_test) = mnist.load_data()
cls = ak.ImageClassifier()
cls.fit(x_train, y_train)
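# Optional follow-up (a sketch; `evaluate` and `export_model` are assumed to be
# available on the installed AutoKeras version):
print(cls.evaluate(x_test, y_test))
model = cls.export_model()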
|
11537640
|
def mosh_frames(frames):
for frame in frames:
if not frame:
continue
for row in frame:
for col in row:
# col contains the horizontal and vertical components of the vector
col[0] = 0
return frames
|
11537650
|
import logging
from typing import List
import datetime
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from django.utils.datastructures import MultiValueDictKeyError
from django.db import transaction
from django_q.tasks import async_task
from rest_framework.decorators import api_view
from django.db.models import Max, Q
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from .utils import _get_nearby_factories, _get_client_ip
from ..models import Factory, Image, ReportRecord
from ..serializers import FactorySerializer
LOGGER = logging.getLogger("django")
FactoryDoesNotExist = Factory.DoesNotExist
def _in_taiwan(lat, lng):
return (
settings.TAIWAN_MIN_LATITUDE <= lat <= settings.TAIWAN_MAX_LATITUDE
and settings.TAIWAN_MIN_LONGITUDE <= lng <= settings.TAIWAN_MAX_LONGITUDE
)
def _in_reasonable_radius_range(radius):
# NOTE: need discussion about it
return 0.01 <= radius <= 100
def _all_image_id_exist(image_ids: List[str]) -> bool:
images = Image.objects.only("id").filter(id__in=image_ids)
return len(images) == len(image_ids)
def _handle_get_factories(request):
try:
        latitude = request.GET["lat"]  # latitude: y
        longitude = request.GET["lng"]  # longitude: x
radius = request.GET["range"] # km
except MultiValueDictKeyError:
missing_params = [p for p in ("lat", "lng", "range") if p not in request.GET]
missing_params = ", ".join(missing_params)
return HttpResponse(
f"Missing query parameter: {missing_params}.",
status=400,
)
latitude, longitude = float(latitude), float(longitude)
if not _in_taiwan(latitude, longitude):
return HttpResponse(
"The query position is not in the range of Taiwan."
"Valid query parameters should be: "
f"{settings.TAIWAN_MIN_LONGITUDE} < lng < {settings.TAIWAN_MAX_LONGITUDE}, "
f"{settings.TAIWAN_MIN_LATITUDE} < lat < {settings.TAIWAN_MAX_LATITUDE}.",
status=400,
)
radius = float(radius)
if not _in_reasonable_radius_range(radius):
return HttpResponse(
f"`range` should be within 0.01 to 100 km, but got {radius}",
status=400,
)
nearby_factories = _get_nearby_factories(
latitude=latitude,
longitude=longitude,
radius=radius,
)
serializer = FactorySerializer(nearby_factories, many=True)
return JsonResponse(serializer.data, safe=False)
def _handle_create_factory(request):
post_body = request.data
user_ip = _get_client_ip(request)
LOGGER.debug(f"Received request body: {post_body} to create factory")
serializer = FactorySerializer(data=post_body)
if not serializer.is_valid():
LOGGER.warning(f"{user_ip} : <serializer errors> ")
return JsonResponse(
serializer.errors,
status=400,
)
image_ids = post_body.get("images", [])
if not _all_image_id_exist(image_ids):
LOGGER.warning(f"{user_ip} : <please check if every image id exist> ")
return HttpResponse(
"please check if every image id exist",
status=400,
)
num = Factory.raw_objects.aggregate(Max("display_number"))
new_factory_field = {
"name": post_body["name"],
"lat": post_body["lat"],
"lng": post_body["lng"],
"factory_type": post_body.get("type"),
"status_time": datetime.datetime.now(),
"display_number": num["display_number__max"] + 1,
}
new_report_record_field = {
"action_type": "POST",
"action_body": post_body,
"nickname": post_body.get("nickname"),
"contact": post_body.get("contact"),
"others": post_body.get("others", ""),
}
with transaction.atomic():
new_factory = Factory.objects.create(**new_factory_field)
report_record = ReportRecord.objects.create(
factory=new_factory,
**new_report_record_field,
)
Image.objects.filter(id__in=image_ids).update(
factory=new_factory, report_record=report_record
)
serializer = FactorySerializer(new_factory)
LOGGER.info(
f"{user_ip}: <Create new factory> at {(post_body['lng'], post_body['lat'])} "
f"id:{new_factory.id} {new_factory_field['name']} {new_factory_field['factory_type']}",
)
async_task("api.tasks.update_landcode", new_factory.id)
return JsonResponse(serializer.data, safe=False)
@swagger_auto_schema(
method="get",
operation_summary="得到中心座標往外指定範圍的已有工廠資料",
responses={200: openapi.Response("工廠資料", FactorySerializer), 400: "request failed"},
manual_parameters=[
openapi.Parameter(
name="lng",
in_=openapi.IN_QUERY,
description=f"{settings.TAIWAN_MIN_LONGITUDE} < lng < {settings.TAIWAN_MAX_LONGITUDE}",
type=openapi.TYPE_NUMBER,
required=True,
example="Custom Example Data",
),
openapi.Parameter(
name="lat",
in_=openapi.IN_QUERY,
description=f"{settings.TAIWAN_MIN_LATITUDE} < lat < {settings.TAIWAN_MAX_LATITUDE}",
type=openapi.TYPE_NUMBER,
required=True,
),
openapi.Parameter(
name="range",
in_=openapi.IN_QUERY,
description="km",
type=openapi.TYPE_NUMBER,
required=True,
),
],
)
@swagger_auto_schema(
method="post",
operation_summary="新增指定 id 的工廠欄位資料",
request_body=FactorySerializer,
responses={200: openapi.Response("新增的工廠資料", FactorySerializer), 400: "request failed"},
auto_schema=None,
)
@api_view(["GET", "POST"])
def get_nearby_or_create_factories(request):
if request.method == "GET":
return _handle_get_factories(request)
elif request.method == "POST":
return _handle_create_factory(request)
@swagger_auto_schema(
method="get",
operation_summary="使用地段號取得工廠資料",
responses={200: openapi.Response("工廠資料", FactorySerializer), 400: "request failed"},
manual_parameters=[
openapi.Parameter(
name="sectcode",
in_=openapi.IN_QUERY,
description="地號可以到 https://easymap.land.moi.gov.tw/ 查詢, 例如新莊區海山頭段石龜小段的段號就是 0308",
type=openapi.TYPE_NUMBER,
required=True,
example="0308",
),
openapi.Parameter(
name="landcode",
in_=openapi.IN_QUERY,
description="段號, 目前只接受八碼的格式, 例如 82號之18 (82-18) 請使用 00820018 來搜尋",
type=openapi.TYPE_NUMBER,
required=True,
example="00820018"
)
],
)
@api_view(["GET"])
def get_factory_by_sectcode(request):
try:
sectcode:str = request.GET["sectcode"]
landcode:str = request.GET["landcode"]
except MultiValueDictKeyError:
missing_params = [p for p in ("sectcode", "landcode") if p not in request.GET]
missing_params = ", ".join(missing_params)
return HttpResponse(
f"Missing query parameter: {missing_params}.",
status=400,
)
    # landcode must be an 8-digit number
    if len(landcode) != 8 or not landcode.isnumeric():
        return HttpResponse(
            "The landcode should be a number of length 8 (e.g. landcode 82-18 should be 00820018)",
            status=400
        )
    # The database stores landcode in several formats (e.g. 82-18 and 00820018),
    # so convert the landcode into both formats before searching.
landcode_1 = f"{int(landcode[:4])}-{int(landcode[4:])}"
try:
factory = Factory.objects.filter(Q(sectcode=sectcode), Q(landcode=landcode) | Q(landcode=landcode_1)).get()
serializer = FactorySerializer(factory)
return JsonResponse(serializer.data, safe=False)
except FactoryDoesNotExist as e:
return HttpResponse(
f"Does not exist",
status=404
)
|
11537679
|
import unittest
from dojo import remove_white_spaces, get_square_root_of_length, get_matrix
class DojoTest(unittest.TestCase):
def test_remove_white_space_1(self):
self.assertEqual(remove_white_spaces(
'have a nice day'), "haveaniceday")
def test_remove_white_space_2(self):
self.assertEqual(remove_white_spaces(
'have a good day'), "haveagoodday")
def test_remove_white_space_3(self):
self.assertEqual(remove_white_spaces(
'have a wonderful day'), "haveawonderfulday")
def test_get_square_root_of_sentence_1(self):
self.assertEqual(get_square_root_of_length(
'haveawonderfulday'), [4, 5])
def test_get_square_root_of_sentence_2(self):
self.assertEqual(get_square_root_of_length('haveagoodday'), [3, 4])
def test_get_square_root_of_sentence_3(self):
self.assertEqual(get_square_root_of_length(
'havewonderfulnicegoodokayday'), [5, 6])
def test_get_matrix_1(self):
self.assertEqual(get_matrix('havewonderfulnicegoodokayday', [5, 6]),
['havewo', 'nderfu', 'lniceg', 'oodoka', 'yday'])
def test_get_matrix_2(self):
self.assertEqual(get_matrix('haveawonderfulday', [4, 5]),
['havea', 'wonde', 'rfuld', 'ay'])
def test_get_matrix_3(self):
self.assertEqual(get_matrix('aaabbbcccbbb', [3, 4]), [
'aaab', 'bbcc', 'cbbb'])
if __name__ == '__main__':
unittest.main()
# Ingrid - Icaro - Sami - Juan
|
11537746
|
from lyrebird import application
from lyrebird.log import get_logger
from pathlib import Path
from copy import deepcopy
import imp
import traceback
import datetime
logger = get_logger()
class Reporter:
def __init__(self):
self.scripts = []
workspace = application.config.get('reporter.workspace')
if not workspace:
logger.debug(f'reporter.workspace not set.')
else:
self._read_reporter(workspace)
logger.debug(f'Load statistics scripts {self.scripts}')
def _read_reporter(self, workspace):
target_dir = Path(workspace)
        if not target_dir.exists():
            logger.error('Reporter workspace not found')
            return
for report_script_file in target_dir.iterdir():
if report_script_file.name.startswith('_'):
continue
if not report_script_file.is_file():
logger.warning(f'Skip report script: is not a file, {report_script_file}')
continue
if report_script_file.suffix != '.py':
logger.warning(f'Skip report script: is not a python file, {report_script_file}')
continue
try:
_script_module = imp.load_source('reporter_script', str(report_script_file))
except Exception:
logger.warning(
f'Skip report script: load script failed, {report_script_file}\n{traceback.format_exc()}')
continue
if not hasattr(_script_module, 'report'):
logger.warning(f'Skip report script: not found a report method in script, {report_script_file}')
continue
if not callable(_script_module.report):
logger.warning(f'Skip report script: report method not callable, {report_script_file}')
continue
self.scripts.append(_script_module.report)
def report(self, data):
task_manager = application.server.get('task')
def send_report():
new_data = deepcopy(data)
for script in self.scripts:
try:
script(new_data)
except Exception:
logger.error(f'Send report failed:\n{traceback.format_exc()}')
task_manager.add_task('send-report', send_report)
last_page = None
last_page_in_time = None
lyrebird_start_time = None
def _page_out():
global last_page
global last_page_in_time
if last_page and last_page_in_time:
duration = datetime.datetime.now() - last_page_in_time
application.reporter.report({
'action': 'page.out',
'page': last_page,
'duration': duration.total_seconds()
})
def page_in(name):
_page_out()
global last_page
global last_page_in_time
application.reporter.report({
'action': 'page.in',
'page': name
})
last_page = name
last_page_in_time = datetime.datetime.now()
def start():
global lyrebird_start_time
lyrebird_start_time = datetime.datetime.now()
application.reporter.report({
'action': 'start'
})
def stop():
_page_out()
application.reporter.report({
'action': 'stop',
'duration': (datetime.datetime.now() - lyrebird_start_time).total_seconds()
})
|
11537771
|
import pytest
from _pytest.outcomes import Skipped
from ..helpers import importorskip
def test_importorskip_local(monkeypatch):
"""Test ``importorskip`` run on local machine with non-existent module, which should skip."""
monkeypatch.delenv("ARVIZ_CI_MACHINE", raising=False)
with pytest.raises(Skipped):
importorskip("non-existent-function")
def test_importorskip_ci(monkeypatch):
"""Test ``importorskip`` run on CI machine with non-existent module, which should fail."""
monkeypatch.setenv("ARVIZ_CI_MACHINE", 1)
with pytest.raises(ModuleNotFoundError):
importorskip("non-existent-function")
|
11537775
|
from django.contrib.auth.password_validation import MinimumLengthValidator
class StaffMinimumLengthValidator(MinimumLengthValidator):
def __init__(self, staff_min_length=14,
other_min_length=9):
self.staff_min_length = staff_min_length
self.other_min_length = other_min_length
super().__init__(self.staff_min_length)
def validate(self, password, user=None):
if user and user.is_staff is True:
self.min_length = self.staff_min_length
else:
self.min_length = self.other_min_length
super().validate(password, user)
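
# Settings sketch (goes in the project's settings.py; the dotted path below assumes
# this module lives at e.g. accounts/validators.py, adjust to the real location):
#
# AUTH_PASSWORD_VALIDATORS = [
#     {
#         "NAME": "accounts.validators.StaffMinimumLengthValidator",
#         "OPTIONS": {"staff_min_length": 14, "other_min_length": 9},
#     },
# ]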
|
11537781
|
import logging
from datetime import datetime, timedelta
from typing import Tuple
import airflow
from airflow import DAG
from airflow.utils.dates import days_ago
from dbnd import parameter, pipeline, task
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": days_ago(2),
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
class TestDocDecoratedDags:
def test_code_example(self):
#### DOC START
# support airflow 1.10.0
from airflow.operators.python_operator import PythonOperator
if airflow.version.version == "1.10.0":
class PythonOperator_airflow_1_10_0(PythonOperator):
template_fields = ("templates_dict", "op_kwargs")
PythonOperator = PythonOperator_airflow_1_10_0
@task
def calculate_alpha(alpha: int):
logging.info("I am running")
return alpha
@task
def prepare_data(data: str) -> Tuple[str, str]:
return data, data
def read_and_write(input_path, output_path):
logging.error("I am running")
input_value = open(input_path, "r").read()
with open(output_path, "w") as fp:
fp.write(input_value)
fp.write("\n\n")
fp.write(str(datetime.now().strftime("%Y-%m-%dT%H:%M:%S")))
return "success"
with DAG(dag_id="dbnd_operators", default_args=default_args) as dag_operators:
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = calculate_alpha(2)
t2, t3 = prepare_data(t1)
tp = PythonOperator(
task_id="some_python_function",
python_callable=read_and_write,
op_kwargs={"input_path": t3, "output_path": "/tmp/output.txt"},
)
tp.set_upstream(t3.op)
t1_op = t1.op
airflow_op_kwargs = {"priority_weight": 50}
# Define DAG context
with DAG(
dag_id="dbnd_operators", default_args=default_args
) as dag_operators:
t1 = calculate_alpha(2, task_airflow_op_kwargs=airflow_op_kwargs)
#### DOC END
def test_prepare_data_jinja_templating(self):
#### DOC START
@pipeline
def current_date(p_date=None):
return p_date
with DAG(dag_id=f"current_date_dag", default_args=default_args) as dag:
current_date(p_date="{{ ts }}")
#### DOC END
def test_prepare_data_no_jinja_templating(self):
#### DOC START
@pipeline
def current_date(p_date=parameter[str].disable_jinja_templating):
return p_date
with DAG(dag_id=f"current_date_dag", default_args=default_args) as dag:
current_date(p_date="{{ ts }}")
#### DOC END
|
11537788
|
from subprocess import call
import sys
from os import listdir
from os.path import isfile, join
# Schedule multiple conversions for the strecha-mvs dataset
#vers_to_run = [14,15,16,17,18,19,20]
#in_vers = [ 1, 1, 1, 1, 1, 1, 1]
vers_to_run = [21,22,23,24,25,26,27]
in_vers = [ 2, 2, 2, 2, 2, 2, 2]
for index in range(0,len(vers_to_run)):
call('python strecha-mvs-converter.py '+str(in_vers[index])+' '+str(vers_to_run[index]), shell=True)
|
11537796
|
from collections import defaultdict
import json
from delorean import Delorean
from gryphon.lib.models.exchange import Balance
from gryphon.lib.money import Money
from gryphon.lib.models.event import Event
def get_balance_time_series_from_audits(audits):
fiat_balances = []
btc_balances = []
for audit in audits:
timestamp = int(Delorean(audit.time_created, "UTC").epoch) * 1000
# load the fiat and btc balances from the audit data
data = json.loads(audit.data)
try:
# Old data format from before Jan 28, 2015
raw_fiat = data['exchange_balance']['fiat_available']
raw_btc = data['exchange_balance']['btc_available']
fiat = Money.loads(raw_fiat).to("USD")
btc = Money.loads(raw_btc)
except KeyError:
# New data format from after Jan 28, 2015
try:
balance_data = data['balance_data']
except KeyError:
continue
# convert to Money objects
for currency, balance_str in balance_data.iteritems():
balance_data[currency] = Money.loads(balance_str)
balance = Balance(balance_data)
fiat = balance.fiat().to('USD')
btc = balance['BTC']
fiat_datapoint = [
timestamp,
str(fiat.amount),
]
fiat_balances.append(fiat_datapoint)
btc_datapoint = [
timestamp,
str(btc.amount),
]
btc_balances.append(btc_datapoint)
return fiat_balances, btc_balances
def get_audits_for_exchange(db, exchange_name, start_time=None, end_time=None, data_filter=None):
audit_query = db\
.query(Event)\
.filter(Event.event_type == 'AUDIT')\
.filter(Event.exchange_name == exchange_name)\
.filter(Event.time_created > start_time)\
.filter(Event.time_created < end_time)
if data_filter:
audit_query = audit_query.filter(Event.data.contains(data_filter))
audits = audit_query.all()
return audits
def get_all_audits_in_period(db, start_time, end_time, data_filter=None):
audit_query = db\
.query(Event)\
.filter(Event.event_type == 'AUDIT')\
.filter(Event.time_created > start_time)\
.filter(Event.time_created < end_time)
if data_filter:
audit_query = audit_query.filter(Event.data.contains(data_filter))
audits = audit_query.all()
return audits
def get_balance_time_series_for_exchange(db, exchange_name, start_time, end_time):
audits = get_audits_for_exchange(db, exchange_name, start_time, end_time)
series = get_balance_time_series_from_audits(audits)
return series
def get_drift_from_audits(audits):
drift_by_currency = Balance()
for audit in audits:
if 'drift' in audit.data:
data = json.loads(audit.data)
for currency, str_amount in data['drift'].iteritems():
drift_by_currency += Money.loads(str_amount)
return drift_by_currency
def get_total_drift_in_period(db, start_time, end_time):
audits = get_all_audits_in_period(db, start_time, end_time, data_filter='drift')
return get_drift_from_audits(audits)
def get_drift_for_exchange_in_period(db, exchange_name, start_time, end_time):
audits = get_audits_for_exchange(db, exchange_name, start_time, end_time, data_filter='drift')
return get_drift_from_audits(audits)
|
11537812
|
import unit_test_framework
class APIUnitTestUser(unit_test_framework.APIUnitTest):
uri = "/api/v1/user"
get_tests = [{"name": "Read local users"}]
post_tests = [
{
"name": "Create RSA internal CA",
"uri": "/api/v1/system/ca",
"payload": {
"method": "internal",
"descr": "INTERNAL_CA_RSA",
"trust": True,
"keytype": "RSA",
"keylen": 2048,
"digest_alg": "sha256",
"lifetime": 3650,
"dn_commonname": "internal-ca-unit-test.example.com"
},
},
{
"name": "Create user certificate with RSA key",
"uri": "/api/v1/system/certificate",
"caref": True, # Locator for tests that need a caref dynamically added in post_post()
"payload": {
"method": "internal",
"descr": "USER_CERT",
"keytype": "RSA",
"keylen": 2048,
"digest_alg": "sha256",
"lifetime": 3650,
"dn_commonname": "new_user",
"type": "user"
}
},
{
"name": "Create server certificate with RSA key",
"uri": "/api/v1/system/certificate",
"caref": True, # Locator for tests that need a caref dynamically added in post_post()
"payload": {
"method": "internal",
"descr": "SERVER_CERT",
"keytype": "RSA",
"keylen": 2048,
"digest_alg": "sha256",
"lifetime": 3650,
"dn_commonname": "internal-cert-unit-test.example.com",
"type": "server"
}
},
{
"name": "Create local user",
"resp_time": 2, # Allow a couple seconds for user database to be updated
"user_cert": True, # Locator for tests that need a user cert ref ID dynamically added by post_post()
"payload": {
"disabled": False,
"username": "new_user",
"password": "<PASSWORD>",
"descr": "NEW USER",
"authorizedkeys": "test auth key",
"ipsecpsk": "test psk",
"expires": "11/22/2050",
"priv": ["page-system-usermanager"]
},
},
{
"name": "Create a disabled local user using the previously created local user",
"resp_time": 2, # Allow a couple seconds for user database to be updated
"auth_payload": {"client-id": "new_user", "client-token": "<PASSWORD>"},
"payload": {
"disabled": True,
"username": "disabled_user",
"password": "<PASSWORD>",
},
},
{
"name": "Check disabled user's inability to authenticate",
"status": 401,
"return": 3,
"auth_payload": {"client-id": "disabled_user", "client-token": "<PASSWORD>"},
},
{
"name": "Check username requirement and login using created user",
"status": 400,
"return": 5000,
},
{
"name": "Check username unique constraint",
"status": 400,
"return": 5002,
"payload": {
"username": "new_user"
}
},
{
"name": "Check username character constraint",
"status": 400,
"return": 5036,
"payload": {
"username": "!@#"
}
},
{
"name": "Check reserved username constraint",
"status": 400,
"return": 5037,
"payload": {
"username": "root"
}
},
{
"name": "Check username length constraint",
"status": 400,
"return": 5038,
"payload": {
"username": "THIS_USERNAME_IS_TOO_LONG_FOR_PFSENSE_TO_HANDLE"
}
},
{
"name": "Check password requirement",
"status": 400,
"return": 5003,
"payload": {
"username": "another_user"
}
},
{
"name": "Check privilege validation",
"status": 400,
"return": 5006,
"payload": {
"username": "another_user",
"password": "<PASSWORD>",
"priv": ["INVALID"]
}
},
{
"name": "Check user expiration date validation",
"status": 400,
"return": 5040,
"payload": {
"username": "another_user",
"password": "<PASSWORD>",
"expires": "INVALID"
}
},
{
"name": "Check user certificate exists constraint",
"status": 400,
"return": 5041,
"payload": {
"username": "another_user",
"password": "<PASSWORD>",
"cert": "INVALID"
}
}
]
put_tests = [
{
"name": "Update local user",
"payload": {
"disabled": False,
"username": "new_user",
"password": "<PASSWORD>",
"descr": "UPDATED NEW USER",
"authorizedkeys": "updated test auth key",
"ipsecpsk": "updated test psk",
"expires": "11/22/2051",
"cert": []
},
"resp_time": 2 # Allow a couple seconds for user database to be updated
},
{
"name": "Check privilege validation",
"status": 400,
"return": 5006,
"payload": {
"username": "new_user",
"password": "<PASSWORD>",
"priv": ["INVALID"]
}
},
{
"name": "Check user expiration date validation",
"status": 400,
"return": 5040,
"payload": {
"username": "new_user",
"password": "<PASSWORD>",
"expires": "INVALID"
}
},
{
"name": "Check user certificate exists constraint",
"status": 400,
"return": 5041,
"payload": {
"username": "new_user",
"password": "<PASSWORD>",
"cert": "INVALID"
}
},
{
"name": "Check ability to add server certificate as a user certificate",
"server_cert": True,
"payload": {
"username": "new_user",
"password": "<PASSWORD>"
}
},
{
"name": "Update local user to re-add user certificate",
"user_cert": True,
"payload": {
"disabled": False,
"username": "new_user",
"password": "<PASSWORD>",
"descr": "UPDATED NEW USER",
"authorizedkeys": "updated test auth key",
"ipsecpsk": "updated test psk",
"expires": "11/22/2051"
},
"resp_time": 2 # Allow a couple seconds for user database to be updated
},
]
delete_tests = [
{
"name": "Check inability to delete user certificate while in use",
"uri": "/api/v1/system/certificate",
"status": 400,
"return": 1005,
"payload": {"descr": "USER_CERT"}
},
{
"name": "Delete local user",
"payload": {"username": "new_user"}
},
{
"name": "Delete disabled user",
"payload": {"username": "disabled_user"}
},
{
"name": "Check deletion of non-existing user",
"status": 400,
"return": 5001,
"payload": {"username": "INVALID"}
},
{
"name": "Check deletion of system users",
"status": 400,
"return": 5005,
"payload": {"username": "admin"}
},
{
"name": "Check ability to delete user certificate after user was deleted",
"uri": "/api/v1/system/certificate",
"payload": {"descr": "USER_CERT"}
},
{
"name": "Delete server certificate used for testing",
"uri": "/api/v1/system/certificate",
"payload": {"descr": "SERVER_CERT"}
},
{
"name": "Delete CA used for testing",
"uri": "/api/v1/system/ca",
"payload": {"descr": "INTERNAL_CA_RSA"}
}
]
def post_post(self):
# Check our first POST response for the created CA's refid
if len(self.post_responses) == 1:
# Variables
counter = 0
# Loop through all tests and auto-add the caref ID to tests that have the caref key set
for test in self.post_tests:
if "payload" in test.keys() and "caref" in test.keys():
self.post_tests[counter]["payload"]["caref"] = self.post_responses[0]["data"]["refid"]
counter = counter + 1
# Check the second and third POST responses for the user and server certificates
if len(self.post_responses) == 3:
# Variables
post_counter = 0
put_counter = 0
# Loop through all tests and auto-add the refid to payloads that have the user_cert or server_cert set
for test in self.post_tests:
if "payload" in test.keys() and "user_cert" in test.keys():
self.post_tests[post_counter]["payload"]["cert"] = [self.post_responses[1]["data"]["refid"]]
if "payload" in test.keys() and "server_cert" in test.keys():
self.post_tests[post_counter]["payload"]["cert"] = [self.post_responses[2]["data"]["refid"]]
post_counter = post_counter + 1
# Do the same for PUT tests
for test in self.put_tests:
if "payload" in test.keys() and "user_cert" in test.keys():
self.put_tests[put_counter]["payload"]["cert"] = [self.post_responses[1]["data"]["refid"]]
if "payload" in test.keys() and "server_cert" in test.keys():
self.put_tests[put_counter]["payload"]["cert"] = [self.post_responses[2]["data"]["refid"]]
put_counter = put_counter + 1
APIUnitTestUser()
|
11537863
|
from injector import inject
from domain.connection.GetConnectionList.GetConnectionListMapping import GetConnectionListMapping
from domain.connection.GetConnectionList.GetConnectionListQuery import GetConnectionListQuery
from domain.connection.GetConnectionList.GetConnectionListResponse import GetConnectionListResponse
from domain.connection.GetConnectionList.GetConnectionListSpecifications import GetConnectionListSpecifications
from infrastructure.cqrs.IQueryHandler import IQueryHandler
from infrastructure.dependency.scopes import IScoped
class GetConnectionListQueryHandler(IQueryHandler[GetConnectionListQuery], IScoped):
@inject
def __init__(self,
specifications: GetConnectionListSpecifications):
self.specifications = specifications
def handle(self, query: GetConnectionListQuery) -> GetConnectionListResponse:
result = GetConnectionListResponse()
result.Count = self.specifications.count(query=query)
result.PageNumber = query.request.PageNumber
result.PageSize = query.request.PageSize
data_query = self.specifications.specify(query=query)
result.Data = GetConnectionListMapping.to_dtos(data_query)
return result
|
11537895
|
from config.extensions import db
from sqlalchemy.dialects.postgresql import JSONB
class Setting(db.Model):
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.Text(), nullable=False)
value = db.Column(JSONB, nullable=False)
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def delete(self, commit=True):
db.session.delete(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
# Exception block is just for rolling back the transaction
# So re raise it.
raise
def __repr__(self):
return '<Setting {}>'.format(self.id)
|
11537939
|
import librosa
import numpy as np
from specAugment import spec_augment_tensorflow
from matplotlib import pyplot as plt
import random
import math
from scipy.fftpack import fft
## ok
def ArgumentAudio(y, sr, n_steps=3, rate=1.2):
    """Augment an audio signal by pitch shifting (and time stretching).
    :param y: np.ndarray [shape=(n,)], real-valued input signal (audio time series)
    :param sr: sample rate of 'y'
    :param n_steps: number of half-steps to shift the pitch
    :param rate: time-stretch factor (the stretched signal is currently unused)
    :return: the pitch-shifted audio signal
    """
    y_ps = librosa.effects.pitch_shift(y, sr, n_steps=n_steps)  # shift the pitch up or down
    y_ts = librosa.effects.time_stretch(y, rate=rate)  # change speed without changing pitch (unused)
    return y_ps
def ArgumentMel(mel_spectrogram):
    """
    Augment a mel spectrogram with SpecAugment (time warping plus frequency/time masking).
    :param mel_spectrogram: np.ndarray, mel spectrogram to augment
    :return: warped and masked mel spectrogram
    """
warped_masked_spectrogram = spec_augment_tensorflow.spec_augment(mel_spectrogram=mel_spectrogram)
#warped_masked_spectrogram = spec_augment_pytorch.spec_augment(mel_spectrogram=mel_spectrogram)
#plt.figure(figsize=(10, 4))
#librosa.display.specshow(librosa.power_to_db(warped_masked_spectrogram, ref=np.max), y_axis='mel', fmax=8000, x_axis='time')
#plt.title("Augmented Spectrogram")
#plt.tight_layout()
#plt.show()
    #plt.savefig('bbb')
return warped_masked_spectrogram
def ArgumentNoise(signal, snr_low=15, snr_high=30, nb_augmented=2):
'''
    Add white noise to a signal at a randomly chosen Signal-to-Noise Ratio (SNR)
'''
# Signal length
signal_len = len(signal)
# Generate White noise
noise = np.random.normal(size=(nb_augmented, signal_len))
# Compute signal and noise power
s_power = np.sum((signal / (2.0 ** 15)) ** 2) / signal_len
n_power = np.sum((noise / (2.0 ** 15)) ** 2, axis=1) / signal_len
# Random SNR: Uniform [15, 30]
snr = np.random.randint(snr_low, snr_high)
# Compute K coeff for each noise
K = np.sqrt((s_power / n_power) * 10 ** (- snr / 10))
K = np.ones((signal_len, nb_augmented)) * K
# Generate noisy signal
return signal + K.T * noise
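
# Minimal usage sketch of the helpers above. 'sample.wav' is a placeholder path
# and the parameter values are illustrative only.
if __name__ == '__main__':
    y, sr = librosa.load('sample.wav', sr=None)
    y_shifted = ArgumentAudio(y, sr, n_steps=2, rate=1.1)
    noisy = ArgumentNoise(y, snr_low=15, snr_high=30, nb_augmented=2)
    mel = librosa.feature.melspectrogram(y=y, sr=sr)
    augmented_mel = ArgumentMel(mel)
    print(y_shifted.shape, noisy.shape, augmented_mel.shape)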
|
11537959
|
import os
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import HashingTF, RegexTokenizer, StopWordsRemover, IDF
from pyspark.sql import SparkSession
from pysparkling import *
from pysparkling.ml import ColumnPruner, H2OGBM, H2ODeepLearning, H2OAutoML, H2OXGBoost
# Initiate SparkSession
spark = SparkSession.builder.appName("App name").getOrCreate()
hc = H2OContext.getOrCreate()
## This method loads the data, perform some basic filtering and create Spark's dataframe
def load():
dataPath = "file://" + os.path.abspath("../examples/smalldata/smsData.txt")
row_rdd = spark.sparkContext.textFile(dataPath).map(lambda x: x.split("\t", 1)).filter(lambda r: r[0].strip())
return spark.createDataFrame(row_rdd, ["label", "text"])
##
## Define the pipeline stages
##
## Tokenize the messages
tokenizer = RegexTokenizer(inputCol="text",
outputCol="words",
minTokenLength=3,
gaps=False,
pattern="[a-zA-Z]+")
## Remove ignored words
stopWordsRemover = StopWordsRemover(inputCol=tokenizer.getOutputCol(),
outputCol="filtered",
stopWords=["the", "a", "", "in", "on", "at", "as", "not", "for"],
caseSensitive=False)
## Hash the words
hashingTF = HashingTF(inputCol=stopWordsRemover.getOutputCol(),
outputCol="wordToIndex",
numFeatures=1 << 10)
## Create inverse document frequencies model
idf = IDF(inputCol=hashingTF.getOutputCol(),
outputCol="tf_idf",
minDocFreq=4)
gbm = H2OGBM(splitRatio=0.8,
seed=1,
featuresCols=[idf.getOutputCol()],
labelCol="label")
dl = H2ODeepLearning(epochs=10,
seed=1,
l1=0.001,
l2=0.0,
hidden=[200, 200],
featuresCols=[idf.getOutputCol()],
labelCol="label")
automl = H2OAutoML(convertUnknownCategoricalLevelsToNa=True,
maxRuntimeSecs=60*100, # 100 minutes
maxModels=10,
seed=1,
labelCol="label")
xgboost = H2OXGBoost(convertUnknownCategoricalLevelsToNa=True,
featuresCols=[idf.getOutputCol()],
labelCol="label")
data = load()
def trainPipelineModel(idf, hashingTF, stopWordsRemover, tokenizer, algoStage, data):
## Remove all temporary columns before algorithm stage
colPruner = ColumnPruner(columns=[hashingTF.getOutputCol(), stopWordsRemover.getOutputCol(), tokenizer.getOutputCol()])
## Remove temporary column produced by the IDF stage
idfColPruner = ColumnPruner(columns=[idf.getOutputCol()])
## Create the pipeline by defining all the stages
pipeline = Pipeline(stages=[tokenizer, stopWordsRemover, hashingTF, idf, colPruner, algoStage, idfColPruner])
    ## Test exporting and importing the pipeline. On systems where HDFS & Hadoop are not available, this call stores the pipeline
    ## to a local file in the current directory. When HDFS & Hadoop are available, it stores the pipeline to the HDFS home
    ## directory of the current user. Absolute paths can be used as well. The same holds for the model import/export below.
pipelinePath = "file://" + os.path.abspath("../build/pipeline")
pipeline.write().overwrite().save(pipelinePath)
loaded_pipeline = Pipeline.load(pipelinePath)
## Train the pipeline model
modelPath = "file://" + os.path.abspath("../build/model")
model = loaded_pipeline.fit(data)
model.write().overwrite().save(modelPath)
return PipelineModel.load(modelPath)
def isSpam(smsText, model):
smsTextDF = spark.createDataFrame([(smsText,)], ["text"]) # create one element tuple
prediction = model.transform(smsTextDF)
return prediction.select("prediction").first()["prediction"] == "spam"
def assertPredictions(model):
isSpamMsg = isSpam("Michal, h2oworld party tonight in MV?", model)
assert not isSpamMsg
print(isSpamMsg)
isSpamMsg = isSpam("We tried to contact you re your reply to our offer of a Video Handset? 750 anytime any networks mins? UNLIMITED TEXT?", model)
assert isSpamMsg
print(isSpamMsg)
estimators = [gbm, dl, automl, xgboost]
for estimator in estimators:
model = trainPipelineModel(idf, hashingTF, stopWordsRemover, tokenizer, estimator, data)
assertPredictions(model)
|
11538012
|
from dorna import Dorna
import json
import time
# create a Dorna object and connect to the robot
robot = Dorna()
robot.connect()
# home all the joints
robot.home(["j0", "j1", "j2", "j3"])
"""
move to j0 = 0, ..., j4 = 0
wait for the motion to be done, timeout = 1000 seconds
"""
result = robot.play({"command": "move", "prm":{"path": "joint", "movement":0, "joint":[0, 0, 0, 0, 0]}})
result = json.loads(result)
wait = robot._wait_for_command(result, time.time()+1000)
# move in cartesian space, -10 inches toward X direction
robot.play({"command": "move", "prm":{"path": "line", "movement":1, "x":-10}})
|
11538027
|
from ._unselected import Unselected
from plotly.graph_objs.scattergl import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scattergl import selected
from ._marker import Marker
from plotly.graph_objs.scattergl import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scattergl import hoverlabel
from ._error_y import ErrorY
from ._error_x import ErrorX
|
11538034
|
class Solution:
def numMusicPlaylists(self, N: int, L: int, K: int) -> int:
dp = [[0]*(1 + N) for _ in range(1 + L)]
dp[0][0] = 1
MOD = 10 ** 9 + 7
for i in range(1, 1 + L):
for j in range(1, 1 + N):
# new song
dp[i][j] = dp[i - 1][j - 1] * (N - j + 1) % MOD
if j > K:
# old song
dp[i][j] = (dp[i][j] + dp[i - 1][j] * (j - K)) % MOD
return dp[L][N]
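
# Quick sanity check (a sketch): with N = L = 3 and K = 1, every ordering of the
# three songs is a valid playlist, so the expected count is 3! = 6.
if __name__ == '__main__':
    assert Solution().numMusicPlaylists(3, 3, 1) == 6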
|
11538039
|
from os import listdir
import os
import subprocess
import shutil
import sys
import tensorflow as tf
import pathlib
path = 'data'
config_name = 'data.yml'
with open(config_name, 'r') as file:
config = file.read()
run_dir = 'run'
from config import MODELS_DIR
model_dir = MODELS_DIR
pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)
# train models given as args (separated by commas) or all predicates if none given
predicates = sys.argv[1].split(",") if len(sys.argv) > 1 else listdir(path)
def train_text(predicate_id):
# copy predicate specific config
data_dir = os.path.join(path, predicate_id)
replace = {'data_dir':data_dir}
pred_config = config.format(**replace)
with open(os.path.join(data_dir,config_name), 'w') as f:
f.write(pred_config)
# train model
should_stop = False
try:
ecode = subprocess.run(['onmt-main', 'train_and_eval', '--model', 'lstm.py', '--config', os.path.join(os.path.join(data_dir,config_name))])
print(ecode)
except KeyboardInterrupt:
should_stop = True
return should_stop
for predicate_id in predicates:
for _ in range(1): # try again if training fails
print(predicate_id)
# delete run folder if exists
if os.path.exists(run_dir) and os.path.isdir(run_dir):
shutil.rmtree(run_dir)
should_stop = train_text(predicate_id)
# Move best model
best_dir = os.path.join(run_dir,'export','best')
if not os.path.exists(best_dir):
print("NO MODEL FOR",predicate_id)
if should_stop:
break
continue
model_paths = [os.path.join(best_dir, basename) for basename in listdir(best_dir)]
latest_file = max(model_paths, key=os.path.getctime)
model_save_location = os.path.join(model_dir, predicate_id)
if os.path.exists(model_save_location) and os.path.isdir(model_save_location):
shutil.rmtree(model_save_location)
shutil.move(latest_file, model_save_location)
shutil.copyfile(os.path.join(path, predicate_id, "header.txt") ,os.path.join(model_save_location, "header.txt"))
print("Saved model for", predicate_id)
break
if should_stop:
break
|
11538054
|
from typing import *
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from .math import normpdf, normcdf
def keep_variance(x, min_variance):
return x + min_variance
class AvgPool2d(nn.AvgPool2d):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
outputs_mean = F.avg_pool2d(inputs_mean, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
outputs_variance = F.avg_pool2d(inputs_variance, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
outputs_variance = outputs_variance / (inputs_mean.size(2) * inputs_mean.size(3))
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
# TODO: avg pooling means that every neuron is multiplied by the same
# weight, that is 1/number of neurons in the channel
# outputs_variance*1/(H*W) should be enough already
return [outputs_mean, outputs_variance]
class Softmax(nn.Softmax):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, features_mean, features_variance, eps=1e-5) -> list:
"""Softmax function applied to a multivariate Gaussian distribution.
It works under the assumption that features_mean and features_variance
are the parameters of a the indepent gaussians that contribute to the
multivariate gaussian.
Mean and variance of the log-normal distribution are computed following
https://en.wikipedia.org/wiki/Log-normal_distribution."""
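        # A worked note on the moments used below: for X ~ N(mu, sigma^2), exp(X)
        # is log-normal with
        #   E[exp(X)]   = exp(mu + sigma^2 / 2)
        #   Var[exp(X)] = (exp(sigma^2) - 1) * exp(2 * mu + sigma^2)
        # The next lines evaluate exactly these two moments element-wise and then
        # normalize by the (approximate) sum over the softmax dimension.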
log_gaussian_mean = features_mean + 0.5 * features_variance
log_gaussian_variance = 2 * log_gaussian_mean
log_gaussian_mean = torch.exp(log_gaussian_mean)
log_gaussian_variance = torch.exp(log_gaussian_variance)
log_gaussian_variance = log_gaussian_variance * (torch.exp(features_variance) - 1)
constant = torch.sum(log_gaussian_mean, dim=self.dim) + eps
constant = constant.unsqueeze(self.dim)
outputs_mean = log_gaussian_mean / constant
outputs_variance = log_gaussian_variance / (constant ** 2)
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class ReLU(nn.ReLU):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, features_mean, features_variance) -> list:
features_stddev = torch.sqrt(features_variance)
div = features_mean / features_stddev
pdf = normpdf(div)
cdf = normcdf(div)
outputs_mean = features_mean * cdf + features_stddev * pdf
outputs_variance = (features_mean ** 2 + features_variance) * cdf \
+ features_mean * features_stddev * pdf - outputs_mean ** 2
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class LeakyReLU(nn.LeakyReLU):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, features_mean, features_variance) -> list:
features_stddev = torch.sqrt(features_variance)
div = features_mean / features_stddev
pdf = normpdf(div)
cdf = normcdf(div)
negative_cdf = 1.0 - cdf
mu_cdf = features_mean * cdf
stddev_pdf = features_stddev * pdf
squared_mean_variance = features_mean ** 2 + features_variance
mean_stddev_pdf = features_mean * stddev_pdf
mean_r = mu_cdf + stddev_pdf
variance_r = squared_mean_variance * cdf + mean_stddev_pdf - mean_r ** 2
mean_n = - features_mean * negative_cdf + stddev_pdf
variance_n = squared_mean_variance * negative_cdf - mean_stddev_pdf - mean_n ** 2
covxy = - mean_r * mean_n
        outputs_mean = mean_r - self.negative_slope * mean_n
        outputs_variance = variance_r \
            + self.negative_slope * self.negative_slope * variance_n \
            - 2.0 * self.negative_slope * covxy
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class Dropout2d(nn.Dropout2d):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
if self.training:
binary_mask = torch.ones_like(inputs_mean)
binary_mask = F.dropout2d(binary_mask, self.p, self.training, self.inplace)
outputs_mean = inputs_mean * binary_mask
outputs_variance = inputs_variance * binary_mask ** 2
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return outputs_mean, outputs_variance
outputs_variance = inputs_variance
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [inputs_mean, outputs_variance]
class MaxPool2d(nn.MaxPool2d):
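    # Propagates the mean/variance pair through a fixed 2x2, stride-2 max pool by composing
    # a 1x2 and a 2x1 pairwise Gaussian max approximation; any kernel_size/stride passed via
    # kwargs is stored by nn.MaxPool2d but not used by forward().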
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def _max_pool_internal(self, mu_a, mu_b, var_a, var_b):
stddev = torch.sqrt(var_a + var_b)
ab = mu_a - mu_b
alpha = ab / stddev
pdf = normpdf(alpha)
cdf = normcdf(alpha)
z_mu = stddev * pdf + ab * cdf + mu_b
z_var = ((mu_a + mu_b) * stddev * pdf +
(mu_a ** 2 + var_a) * cdf +
(mu_b ** 2 + var_b) * (1.0 - cdf) - z_mu ** 2)
if self._keep_variance_fn is not None:
z_var = self._keep_variance_fn(z_var)
return z_mu, z_var
def _max_pool_1x2(self, inputs_mean, inputs_variance):
mu_a = inputs_mean[:, :, :, 0::2]
mu_b = inputs_mean[:, :, :, 1::2]
var_a = inputs_variance[:, :, :, 0::2]
var_b = inputs_variance[:, :, :, 1::2]
outputs_mean, outputs_variance = self._max_pool_internal(
mu_a, mu_b, var_a, var_b)
return outputs_mean, outputs_variance
def _max_pool_2x1(self, inputs_mean, inputs_variance):
        mu_a = inputs_mean[:, :, 0::2, :]
        mu_b = inputs_mean[:, :, 1::2, :]
        var_a = inputs_variance[:, :, 0::2, :]
        var_b = inputs_variance[:, :, 1::2, :]
outputs_mean, outputs_variance = self._max_pool_internal(
mu_a, mu_b, var_a, var_b)
return outputs_mean, outputs_variance
def forward(self, inputs_mean, inputs_variance) -> list:
z_mean, z_variance = self._max_pool_1x2(inputs_mean, inputs_variance)
outputs_mean, outputs_variance = self._max_pool_2x1(z_mean, z_variance)
return [outputs_mean, outputs_variance]
class Linear(nn.Linear):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
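        # The mean passes through the affine map as usual; the variance is propagated with
        # squared weights, which assumes the input activations are independent
        # (cross-covariances are neglected).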
outputs_mean = F.linear(inputs_mean, self.weight, self.bias)
outputs_variance = F.linear(inputs_variance, self.weight ** 2, None)
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class BatchNorm2d(nn.BatchNorm2d):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
self._check_input_dim(inputs_mean)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
outputs_mean = F.batch_norm(
inputs_mean,
self.running_mean if not self.training or self.track_running_stats else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight, self.bias, bn_training, exponential_average_factor, self.eps)
outputs_variance = inputs_variance
weight = ((self.weight.unsqueeze(0)).unsqueeze(2)).unsqueeze(3)
outputs_variance = outputs_variance * weight ** 2
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class Conv2d(nn.Conv2d):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
if self.padding_mode == 'zeros':
outputs_mean = F.conv2d(
inputs_mean, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
outputs_variance = F.conv2d(
inputs_variance, self.weight ** 2, None, self.stride, self.padding, self.dilation, self.groups)
else:
            outputs_mean = F.conv2d(F.pad(inputs_mean, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                                    self.weight, self.bias, self.stride, _pair(0), self.dilation, self.groups)
            outputs_variance = F.conv2d(F.pad(inputs_variance, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                                        self.weight ** 2, None, self.stride, _pair(0), self.dilation, self.groups)
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class ConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance, output_size: Optional[List[int]] = None) -> list:
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')
output_padding = self._output_padding(inputs_mean, output_size,
self.stride, self.padding, self.kernel_size, self.dilation)
outputs_mean = F.conv_transpose2d(inputs_mean, self.weight, self.bias, self.stride, self.padding,
output_padding, self.groups, self.dilation)
outputs_variance = F.conv_transpose2d(inputs_variance, self.weight ** 2, None, self.stride, self.padding,
output_padding, self.groups, self.dilation)
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class Upsample(nn.Upsample):
def __init__(self, keep_variance_fn=None, **kwargs):
super().__init__(**kwargs)
self._keep_variance_fn = keep_variance_fn
def forward(self, inputs_mean, inputs_variance) -> list:
outputs_mean = super().forward(inputs_mean)
outputs_variance = super().forward(inputs_variance)
if self._keep_variance_fn is not None:
outputs_variance = self._keep_variance_fn(outputs_variance)
return [outputs_mean, outputs_variance]
class Sequential(nn.Sequential):
# def forward(self, inputs, inputs_variance):
# for module in self._modules.values():
# inputs, inputs_variance = module(inputs, inputs_variance)
#
# return inputs, inputs_variance
def forward(self, *input):
for module in self:
input = module(*input)
return input
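# Minimal usage sketch: every ADF layer above consumes and returns a (mean, variance) pair,
# so layers chain through the Sequential defined above. The network, input size and
# min_variance value below are illustrative assumptions, not part of the library.
if __name__ == "__main__":
    _kv = lambda x: keep_variance(x, min_variance=1e-3)
    _net = Sequential(
        Conv2d(keep_variance_fn=_kv, in_channels=3, out_channels=8, kernel_size=3, padding=1),
        ReLU(keep_variance_fn=_kv),
        AvgPool2d(keep_variance_fn=_kv, kernel_size=2),
    )
    _mean = torch.randn(1, 3, 32, 32)
    _variance = torch.full_like(_mean, 1e-3)
    _out_mean, _out_variance = _net(_mean, _variance)
    print(_out_mean.shape, _out_variance.shape)  # torch.Size([1, 8, 16, 16]) for both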
|
11538073
|
import torch
from torch import nn
from lib.nn import ResidualBlock, ResidualGatedBlock
from lib.stochastic import NormalStochasticBlock2d
class TopDownLayer(nn.Module):
"""
Top-down layer, including stochastic sampling, KL computation, and small
deterministic ResNet with upsampling.
The architecture when doing inference is roughly as follows:
p_params = output of top-down layer above
bu = inferred bottom-up value at this layer
q_params = merge(bu, p_params)
z = stochastic_layer(q_params)
possibly get skip connection from previous top-down layer
top-down deterministic ResNet
When doing generation only, the value bu is not available, the
merge layer is not used, and z is sampled directly from p_params.
If this is the top layer, at inference time, the uppermost bottom-up value
is used directly as q_params, and p_params are defined in this layer
(while they are usually taken from the previous layer), and can be learned.
"""
def __init__(self,
z_dim,
n_res_blocks,
n_filters,
is_top_layer=False,
downsampling_steps=None,
nonlin=None,
merge_type=None,
batchnorm=True,
dropout=None,
stochastic_skip=False,
res_block_type=None,
gated=None,
learn_top_prior=False,
top_prior_param_shape=None,
analytical_kl=False):
super().__init__()
self.is_top_layer = is_top_layer
self.z_dim = z_dim
self.stochastic_skip = stochastic_skip
self.learn_top_prior = learn_top_prior
self.analytical_kl = analytical_kl
# Define top layer prior parameters, possibly learnable
if is_top_layer:
self.top_prior_params = nn.Parameter(
torch.zeros(top_prior_param_shape),
requires_grad=learn_top_prior)
# Downsampling steps left to do in this layer
dws_left = downsampling_steps
# Define deterministic top-down block: sequence of deterministic
# residual blocks with downsampling when needed.
block_list = []
for _ in range(n_res_blocks):
do_resample = False
if dws_left > 0:
do_resample = True
dws_left -= 1
block_list.append(
TopDownDeterministicResBlock(
n_filters,
n_filters,
nonlin,
upsample=do_resample,
batchnorm=batchnorm,
dropout=dropout,
res_block_type=res_block_type,
gated=gated,
))
self.deterministic_block = nn.Sequential(*block_list)
# Define stochastic block with 2d convolutions
self.stochastic = NormalStochasticBlock2d(
c_in=n_filters,
c_vars=z_dim,
c_out=n_filters,
transform_p_params=(not is_top_layer),
)
if not is_top_layer:
# Merge layer, combine bottom-up inference with top-down
# generative to give posterior parameters
self.merge = MergeLayer(
channels=n_filters,
merge_type=merge_type,
nonlin=nonlin,
batchnorm=batchnorm,
dropout=dropout,
res_block_type=res_block_type,
)
# Skip connection that goes around the stochastic top-down layer
if stochastic_skip:
self.skip_connection_merger = SkipConnectionMerger(
channels=n_filters,
nonlin=nonlin,
batchnorm=batchnorm,
dropout=dropout,
res_block_type=res_block_type,
)
def forward(self,
input_=None,
skip_connection_input=None,
inference_mode=False,
bu_value=None,
n_img_prior=None,
forced_latent=None,
use_mode=False,
force_constant_output=False,
mode_pred=False,
use_uncond_mode=False):
# Check consistency of arguments
inputs_none = input_ is None and skip_connection_input is None
if self.is_top_layer and not inputs_none:
raise ValueError("In top layer, inputs should be None")
# If top layer, define parameters of prior p(z_L)
if self.is_top_layer:
p_params = self.top_prior_params
# Sample specific number of images by expanding the prior
if n_img_prior is not None:
p_params = p_params.expand(n_img_prior, -1, -1, -1)
# Else the input from the layer above is the prior parameters
else:
p_params = input_
# In inference mode, get parameters of q from inference path,
# merging with top-down path if it's not the top layer
if inference_mode:
if self.is_top_layer:
q_params = bu_value
else:
if use_uncond_mode:
q_params = p_params
else:
q_params = self.merge(bu_value, p_params)
# In generative mode, q is not used
else:
q_params = None
# Sample from either q(z_i | z_{i+1}, x) or p(z_i | z_{i+1})
# depending on whether q_params is None
x, data_stoch = self.stochastic(
p_params=p_params,
q_params=q_params,
forced_latent=forced_latent,
use_mode=use_mode,
force_constant_output=force_constant_output,
analytical_kl=self.analytical_kl,
mode_pred=mode_pred,
use_uncond_mode=use_uncond_mode
)
# Skip connection from previous layer
if self.stochastic_skip and not self.is_top_layer:
x = self.skip_connection_merger(x, skip_connection_input)
# Save activation before residual block: could be the skip
# connection input in the next layer
x_pre_residual = x
# Last top-down block (sequence of residual blocks)
x = self.deterministic_block(x)
keys = ['z', 'kl_samplewise', 'kl_spatial', 'logprob_p', 'logprob_q']
data = {k: data_stoch[k] for k in keys}
return x, x_pre_residual, data
class BottomUpLayer(nn.Module):
"""
Bottom-up deterministic layer for inference, roughly the same as the
small deterministic Resnet in top-down layers. Consists of a sequence of
bottom-up deterministic residual blocks with downsampling.
"""
def __init__(self,
n_res_blocks,
n_filters,
downsampling_steps=0,
nonlin=None,
batchnorm=True,
dropout=None,
res_block_type=None,
gated=None):
super().__init__()
bu_blocks = []
for _ in range(n_res_blocks):
do_resample = False
if downsampling_steps > 0:
do_resample = True
downsampling_steps -= 1
bu_blocks.append(
BottomUpDeterministicResBlock(
c_in=n_filters,
c_out=n_filters,
nonlin=nonlin,
downsample=do_resample,
batchnorm=batchnorm,
dropout=dropout,
res_block_type=res_block_type,
gated=gated,
))
self.net = nn.Sequential(*bu_blocks)
def forward(self, x):
return self.net(x)
class ResBlockWithResampling(nn.Module):
"""
Residual block that takes care of resampling steps (each by a factor of 2).
The mode can be top-down or bottom-up, and the block does up- and
down-sampling by a factor of 2, respectively. Resampling is performed at
the beginning of the block, through strided convolution.
The number of channels is adjusted at the beginning and end of the block,
through convolutional layers with kernel size 1. The number of internal
channels is by default the same as the number of output channels, but
min_inner_channels overrides this behaviour.
Other parameters: kernel size, nonlinearity, and groups of the internal
residual block; whether batch normalization and dropout are performed;
whether the residual path has a gate layer at the end. There are a few
residual block structures to choose from.
"""
def __init__(self,
mode,
c_in,
c_out,
nonlin=nn.LeakyReLU,
resample=False,
res_block_kernel=None,
groups=1,
batchnorm=True,
res_block_type=None,
dropout=None,
min_inner_channels=None,
gated=None):
super().__init__()
assert mode in ['top-down', 'bottom-up']
if min_inner_channels is None:
min_inner_channels = 0
inner_filters = max(c_out, min_inner_channels)
# Define first conv layer to change channels and/or up/downsample
if resample:
if mode == 'bottom-up': # downsample
self.pre_conv = nn.Conv2d(in_channels=c_in,
out_channels=inner_filters,
kernel_size=3,
padding=1,
stride=2,
groups=groups)
elif mode == 'top-down': # upsample
self.pre_conv = nn.ConvTranspose2d(in_channels=c_in,
out_channels=inner_filters,
kernel_size=3,
padding=1,
stride=2,
groups=groups,
output_padding=1)
elif c_in != inner_filters:
self.pre_conv = nn.Conv2d(c_in, inner_filters, 1, groups=groups)
else:
self.pre_conv = None
# Residual block
self.res = ResidualBlock(
channels=inner_filters,
nonlin=nonlin,
kernel=res_block_kernel,
groups=groups,
batchnorm=batchnorm,
dropout=dropout,
gated=gated,
block_type=res_block_type,
)
# Define last conv layer to get correct num output channels
if inner_filters != c_out:
self.post_conv = nn.Conv2d(inner_filters, c_out, 1, groups=groups)
else:
self.post_conv = None
def forward(self, x):
if self.pre_conv is not None:
x = self.pre_conv(x)
x = self.res(x)
if self.post_conv is not None:
x = self.post_conv(x)
return x
class TopDownDeterministicResBlock(ResBlockWithResampling):
def __init__(self, *args, upsample=False, **kwargs):
kwargs['resample'] = upsample
super().__init__('top-down', *args, **kwargs)
class BottomUpDeterministicResBlock(ResBlockWithResampling):
def __init__(self, *args, downsample=False, **kwargs):
kwargs['resample'] = downsample
super().__init__('bottom-up', *args, **kwargs)
class MergeLayer(nn.Module):
"""
Merge two 4D input tensors by concatenating along dim=1 and passing the
result through 1) a convolutional 1x1 layer, or 2) a residual block
"""
def __init__(self,
channels,
merge_type,
nonlin=nn.LeakyReLU,
batchnorm=True,
dropout=None,
res_block_type=None):
super().__init__()
try:
iter(channels)
except TypeError: # it is not iterable
channels = [channels] * 3
else: # it is iterable
if len(channels) == 1:
channels = [channels[0]] * 3
assert len(channels) == 3
if merge_type == 'linear':
self.layer = nn.Conv2d(channels[0] + channels[1], channels[2], 1)
elif merge_type == 'residual':
self.layer = nn.Sequential(
nn.Conv2d(channels[0] + channels[1], channels[2], 1, padding=0),
ResidualGatedBlock(channels[2],
nonlin,
batchnorm=batchnorm,
dropout=dropout,
block_type=res_block_type),
)
def forward(self, x, y):
x = torch.cat((x, y), dim=1)
return self.layer(x)
class SkipConnectionMerger(MergeLayer):
"""
By default for now simply a merge layer.
"""
merge_type = 'residual'
def __init__(self, channels, nonlin, batchnorm, dropout, res_block_type):
super().__init__(channels,
self.merge_type,
nonlin,
batchnorm,
dropout=dropout,
res_block_type=res_block_type)
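# Minimal sketch of the 'linear' merge path used during inference: bottom-up and top-down
# feature maps are concatenated along the channel dimension and projected back with a 1x1
# convolution. The shapes below are illustrative assumptions.
if __name__ == "__main__":
    _merge = MergeLayer(channels=32, merge_type='linear')
    _bu_value = torch.randn(2, 32, 16, 16)     # inferred bottom-up features
    _p_params = torch.randn(2, 32, 16, 16)     # top-down (prior) parameters
    print(_merge(_bu_value, _p_params).shape)  # torch.Size([2, 32, 16, 16])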
|
11538075
|
class TestDirectory:
params = {
'test_create': [{
'data': [{
'name': 'directory',
'type': 'directory',
'config': {
'conf.spoolDir': '/home/test-directory-collector',
'conf.filePattern': '*.csv',
'conf.dataFormat': 'DELIMITED',
'conf.dataFormatConfig.csvFileFormat': 'CUSTOM',
'conf.dataFormatConfig.csvCustomDelimiter': '|',
}
}],
'er': [{"config": {"conf.dataFormat": "DELIMITED", "conf.dataFormatConfig.csvCustomDelimiter": "|",
"conf.dataFormatConfig.csvFileFormat": "CUSTOM", "conf.filePattern": "*.csv",
"conf.spoolDir": "/home/test-directory-collector"}, "name": "directory",
"type": "directory"}]
}],
'test_edit': [{
'data': [{
'name': 'directory',
'type': 'directory',
'config': {
'conf.spoolDir': '/home/test-directory-collector',
'conf.filePattern': '*1.csv',
'conf.dataFormat': 'DELIMITED',
'conf.dataFormatConfig.csvFileFormat': 'CUSTOM',
'conf.dataFormatConfig.csvCustomDelimiter': '~',
}
}],
'er': [{"config": {"conf.dataFormat": "DELIMITED", "conf.dataFormatConfig.csvCustomDelimiter": "~",
"conf.dataFormatConfig.csvFileFormat": "CUSTOM", "conf.filePattern": "*1.csv",
"conf.spoolDir": "/home/test-directory-collector"}, "name": "directory",
"type": "directory"}]
}]
}
def test_create(self, api_client, data, er):
result = api_client.post('/sources', json=list(data))
assert result.json == er
def test_edit(self, api_client, data, er):
result = api_client.put('/sources', json=list(data))
assert result.json == er
def test_get(self, api_client):
result = api_client.get('/sources')
assert result.json == ["directory"]
def test_delete(self, api_client):
api_client.delete('sources/directory')
assert api_client.get('/sources').json == []
|
11538077
|
from __future__ import absolute_import, division, print_function
import sys
import random
from cctbx.development import random_structure
from cctbx.development import debug_utils
from cctbx import adptbx
from cctbx.array_family import flex
from iotbx import csv_utils
import libtbx
import libtbx.utils
from libtbx.test_utils import approx_equal
import scitbx.random
from scitbx.random import variate, normal_distribution, gamma_distribution
from scitbx.math import distributions
from smtbx import absolute_structure
try:
distributions.students_t_distribution(1)
except RuntimeError as e:
# XXX Student's t distribution is not supported with GCC 3.2 builds
if str(e).startswith("Implementation not available in this build."):
students_t_available = False
print("Skipping exercise_hooft_analysis() with Student's t distribution.")
else:
raise RuntimeError(e)
else:
students_t_available = True
class test_case(object):
d_min=1
use_students_t_errors=False
elements = ("N", "C", "C", "S") * 5
def __init__(self, space_group_info, **kwds):
libtbx.adopt_optional_init_args(self, kwds)
self.space_group_info = space_group_info
self.structure = random_structure.xray_structure(
space_group_info,
elements=self.elements,
volume_per_atom=20.,
min_distance=1.5,
general_positions_only=True,
use_u_aniso=False,
u_iso=adptbx.b_as_u(10),
)
self.structure.set_inelastic_form_factors(1.54, "sasaki")
self.scale_factor = 0.05 + 10 * flex.random_double()
fc = self.structure.structure_factors(
anomalous_flag=True, d_min=self.d_min, algorithm="direct").f_calc()
fo = fc.as_amplitude_array()
fo.set_observation_type_xray_amplitude()
if self.use_students_t_errors:
nu = random.uniform(1, 10)
normal_g = variate(normal_distribution())
gamma_g = variate(gamma_distribution(0.5*nu, 2))
errors = normal_g(fc.size())/flex.sqrt(2*gamma_g(fc.size()))
else:
# use gaussian errors
g = variate(normal_distribution())
errors = g(fc.size())
fo2 = fo.as_intensity_array()
self.fo2 = fo2.customized_copy(
data=(fo2.data()+errors)*self.scale_factor,
sigmas=flex.double(fc.size(), 1),
)
self.fc = fc
xs_i = self.structure.inverse_hand()
self.fc_i = xs_i.structure_factors(
anomalous_flag=True, d_min=self.d_min, algorithm="direct").f_calc()
fo2_twin = self.fc.customized_copy(
data=self.fc.data()+self.fc_i.data()).as_intensity_array()
self.fo2_twin = fo2_twin.customized_copy(
data=(errors + fo2_twin.data()) * self.scale_factor,
sigmas=self.fo2.sigmas())
class hooft_analysis_test_case(test_case):
def exercise(self, debug=False):
if debug:
distribution = distributions.normal_distribution()
observed_deviations = (
self.fo2.data() - self.scale_factor*self.fc.as_intensity_array().data())
observed_deviations = observed_deviations.select(
flex.sort_permutation(observed_deviations))
expected_deviations = distribution.quantiles(observed_deviations.size())
csv_utils.writer(
open('delta_F_npp.csv', 'wb'), (expected_deviations, observed_deviations))
# first with the correct absolute structure
gaussian = absolute_structure.hooft_analysis(self.fo2, self.fc)
analyses = [gaussian]
NPP = absolute_structure.bijvoet_differences_probability_plot(gaussian)
if self.use_students_t_errors:
nu_calc = absolute_structure.maximise_students_t_correlation_coefficient(
NPP.y, min_nu=1, max_nu=200)
t_analysis = absolute_structure.students_t_hooft_analysis(
self.fo2, self.fc, nu_calc, probability_plot_slope=NPP.fit.slope())
            analyses.append(t_analysis)
tPP = absolute_structure.bijvoet_differences_probability_plot(
t_analysis, use_students_t_distribution=True, students_t_nu=nu_calc)
if tPP.distribution.degrees_of_freedom() < 100:
assert tPP.correlation.coefficient() > NPP.correlation.coefficient()
else:
assert approx_equal(NPP.correlation.coefficient(), 1, 0.005)
for analysis in analyses:
assert approx_equal(analysis.hooft_y, 0, 1e-2)
assert approx_equal(analysis.p2_true, 1)
assert approx_equal(analysis.p2_false, 0)
assert approx_equal(analysis.p3_true, 1)
assert approx_equal(analysis.p3_false, 0)
assert approx_equal(analysis.p3_racemic_twin, 0)
if debug:
csv_utils.writer(open('npp.csv', 'wb'), (NPP.x,NPP.y))
if self.use_students_t_errors:
csv_utils.writer(open('tpp.csv', 'wb'), (tPP.x,tPP.y))
assert approx_equal(NPP.fit.y_intercept(), 0)
# and now with the wrong absolute structure
gaussian = absolute_structure.hooft_analysis(self.fo2, self.fc_i)
analyses = [gaussian]
NPP = absolute_structure.bijvoet_differences_probability_plot(gaussian)
if self.use_students_t_errors:
nu_calc = absolute_structure.maximise_students_t_correlation_coefficient(
NPP.y, min_nu=1, max_nu=200)
t_analysis = absolute_structure.students_t_hooft_analysis(
self.fo2, self.fc_i, nu_calc, probability_plot_slope=NPP.fit.slope())
            analyses.append(t_analysis)
tPP = absolute_structure.bijvoet_differences_probability_plot(
t_analysis, use_students_t_distribution=True)
if tPP.distribution.degrees_of_freedom() < 100:
assert tPP.correlation.coefficient() > NPP.correlation.coefficient()
else:
assert approx_equal(NPP.correlation.coefficient(), 1, 0.002)
assert approx_equal(NPP.fit.y_intercept(), 0)
for analysis in analyses:
assert approx_equal(analysis.hooft_y, 1, 1e-2)
assert approx_equal(analysis.p2_true, 0)
assert approx_equal(analysis.p2_false, 1)
assert approx_equal(analysis.p3_true, 0)
assert approx_equal(analysis.p3_false, 1)
assert approx_equal(analysis.p3_racemic_twin, 0)
# test for the case of a racemic twin
gaussian = absolute_structure.hooft_analysis(self.fo2_twin, self.fc)
analyses = [gaussian]
NPP = absolute_structure.bijvoet_differences_probability_plot(gaussian)
if self.use_students_t_errors:
nu_calc = absolute_structure.maximise_students_t_correlation_coefficient(
NPP.y, min_nu=1, max_nu=200)
t_analysis = absolute_structure.students_t_hooft_analysis(
self.fo2_twin, self.fc, nu_calc, probability_plot_slope=NPP.fit.slope())
tPP = absolute_structure.bijvoet_differences_probability_plot(
t_analysis, use_students_t_distribution=True)
if tPP.distribution.degrees_of_freedom() < 100:
assert tPP.correlation.coefficient() > NPP.correlation.coefficient()
else:
assert approx_equal(NPP.correlation.coefficient(), 1, 0.002)
assert approx_equal(NPP.fit.y_intercept(), 0)
for analysis in analyses:
assert approx_equal(analysis.hooft_y, 0.5, 1e-2)
assert approx_equal(analysis.p3_true, 0)
assert approx_equal(analysis.p3_false, 0)
assert approx_equal(analysis.p3_racemic_twin, 1)
def run_call_back(flags, space_group_info):
if not space_group_info.group().is_centric():
if flags.fix_random_seeds:
random.seed(1)
flex.set_random_seed(1)
scitbx.random.set_random_seed(1)
hooft_analysis_test_case(
space_group_info,use_students_t_errors=False).exercise(debug=flags.Debug)
if students_t_available:
hooft_analysis_test_case(
space_group_info,use_students_t_errors=True).exercise(debug=flags.Debug)
def run():
debug_utils.parse_options_loop_space_groups(
sys.argv[1:],
run_call_back,
keywords=("fix_random_seeds",),
)
if __name__ == '__main__':
libtbx.utils.show_times_at_exit()
run()
|
11538088
|
import unittest
from pathlib import Path
from unittest.mock import patch
from dbt_invoke import properties
from dbt_invoke.internal import _utils
from test import TestDbtInvoke
PARENT_DIR = Path(__file__).parent
DESCRIPTION = 'A fake test description.'
COL_TESTS = ['not_null']
class TestProperties(TestDbtInvoke):
def test_create_update_delete_property_files(self):
"""
Test the create -> update -> delete cycle of property files
:return: None
"""
# Create property files
with patch('builtins.input', return_value='y'):
properties.update(
self.ctx,
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
log_level='DEBUG',
)
# Check that the property files contain the expected contents
all_files_actual_properties = dict()
for file_location, exp_props in self.expected_properties.items():
full_file_path = Path(self.project_dir, file_location)
actual_props = _utils.parse_yaml(full_file_path)
self.assertEqual(exp_props, actual_props)
# Simulate a manual update of the property files
for section in actual_props:
if section.lower() != 'version':
actual_props[section][0]['description'] = DESCRIPTION
actual_props[section][0]['columns'][0]['tests'] = COL_TESTS
all_files_actual_properties[full_file_path] = actual_props
_utils.write_yaml(full_file_path, actual_props)
# Automatically update property files, using threads
properties.update(
self.ctx,
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
threads=2,
log_level='DEBUG',
)
# Check that the automatic update did not overwrite the
# previous manual update
for full_file_path, exp_props in all_files_actual_properties.items():
actual_props = _utils.parse_yaml(full_file_path)
self.assertEqual(exp_props, actual_props)
# Initiate then abort deletion of property files
with patch('builtins.input', return_value='n'):
properties.delete(
self.ctx,
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
log_level='DEBUG',
)
# Check that the property files still exist
for full_file_path in all_files_actual_properties:
self.assertTrue(full_file_path.exists())
# Delete property files
with patch('builtins.input', return_value='y'):
properties.delete(
self.ctx,
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
log_level='DEBUG',
)
# Check that the property files no longer exist
for full_file_path in all_files_actual_properties:
self.assertFalse(full_file_path.exists())
if __name__ == '__main__':
unittest.main()
|
11538111
|
import re
import base64
from ..ismitem import ISMItem
class ProtectionHeader(ISMItem):
def __init__(self, name: str):
super(ProtectionHeader, self).__init__(name)
self.SystemID = None # type: str
self.kid = bytes([0] * 16) # type: bytes
def generate(self):
'''
get kid from innertext
'''
# <KEY>AG0ALgBjAG8AbQAvAGwAaQBjAGUAbgBzAGUALwBwAGwAYQB5AHIAZQBhAGQAeQA8AC8ATABBAF8AVQBSAEwAPgA8AC8ARABBAFQAQQA+ADwALwBXAFIATQBIAEUAQQBEAEUAUgA+AA==
try:
data = base64.b64decode(self.innertext).replace(b'\x00', b'')
b64_kid = re.findall(b'<KID>(.+?)</KID>', data)[0].decode('utf-8')
_kid = base64.b64decode(b64_kid)
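            # PlayReady stores the first three GUID fields of the KID little-endian, so the
            # first eight bytes are reordered below to obtain the big-endian (CENC) form.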
self.kid = bytes([_kid[3], _kid[2], _kid[1], _kid[0], _kid[5], _kid[4], _kid[7], _kid[6], *list(_kid[8:])])
except Exception as e:
print(f'ProtectionHeader generate failed, reason:{e}')
|
11538126
|
import asyncio
import betterproto
from betterproto.grpc.util.async_channel import AsyncChannel
from dataclasses import dataclass
import pytest
from typing import AsyncIterator
@dataclass
class Message(betterproto.Message):
body: str = betterproto.string_field(1)
@pytest.fixture
def expected_responses():
return [Message("Hello world 1"), Message("Hello world 2"), Message("Done")]
class ClientStub:
async def connect(self, requests: AsyncIterator):
await asyncio.sleep(0.1)
async for request in requests:
await asyncio.sleep(0.1)
yield request
await asyncio.sleep(0.1)
yield Message("Done")
async def to_list(generator: AsyncIterator):
return [value async for value in generator]
@pytest.fixture
def client():
# channel = Channel(host='127.0.0.1', port=50051)
# return ClientStub(channel)
return ClientStub()
@pytest.mark.asyncio
async def test_send_from_before_connect_and_close_automatically(
client, expected_responses
):
requests = AsyncChannel()
await requests.send_from(
[Message(body="Hello world 1"), Message(body="Hello world 2")], close=True
)
responses = client.connect(requests)
assert await to_list(responses) == expected_responses
@pytest.mark.asyncio
async def test_send_from_after_connect_and_close_automatically(
client, expected_responses
):
requests = AsyncChannel()
responses = client.connect(requests)
await requests.send_from(
[Message(body="Hello world 1"), Message(body="Hello world 2")], close=True
)
assert await to_list(responses) == expected_responses
@pytest.mark.asyncio
async def test_send_from_close_manually_immediately(client, expected_responses):
requests = AsyncChannel()
responses = client.connect(requests)
await requests.send_from(
[Message(body="Hello world 1"), Message(body="Hello world 2")], close=False
)
requests.close()
assert await to_list(responses) == expected_responses
@pytest.mark.asyncio
async def test_send_individually_and_close_before_connect(client, expected_responses):
requests = AsyncChannel()
await requests.send(Message(body="Hello world 1"))
await requests.send(Message(body="Hello world 2"))
requests.close()
responses = client.connect(requests)
assert await to_list(responses) == expected_responses
@pytest.mark.asyncio
async def test_send_individually_and_close_after_connect(client, expected_responses):
requests = AsyncChannel()
await requests.send(Message(body="Hello world 1"))
await requests.send(Message(body="Hello world 2"))
responses = client.connect(requests)
requests.close()
assert await to_list(responses) == expected_responses
|
11538138
|
from django.test import TestCase, Client, override_settings, TransactionTestCase
from django.conf import settings
from django.urls import reverse
from io import BytesIO
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django_q.models import OrmQ
import rest_framework.response
import xlrd
from infrastructure.models import FinancialYear, QuarterlySpendFile, AnnualSpendFile, Expenditure, Project, BudgetPhase
from infrastructure.tests import utils
from infrastructure.utils import load_excel
from infrastructure.upload import process_annual_document
from scorecard.models import Geography
def mock_project_row():
"""This is the project data that mock_existing_project_row will update"""
mock_data = { 'Function': 'Administrative and Corporate Support',
'Project Description': 'P-CNIN FURN & OFF EQUIP',
'Project Number': 'PC002003005_00002',
'Type': 'New',
'MTSF Service Outcome': 'An efficient, effective and development-oriented public service',
'IUDF': 'Growth',
'Own Strategic Objectives': 'OWN MUNICIPAL STRATEGIC OBJECTIVE',
'Asset Class': 'Furniture and Office Equipment',
'Asset Sub-Class': '',
'Ward Location': 'Administrative or Head Office',
'GPS Longitude': '0', 'GPS Latitude': '0',
'Audited Outcome 2017/18': 2000.0,
'Full Year Forecast 2018/19': 3000.0,
'Budget year 2019/20': 4000.0,
'Budget year 2020/21': 5000.0,
'Budget year 2021/22': 6000.0
}
yield mock_data
def mock_data_existing_project_row():
"""This should update the project from mock_project_row()"""
mock_data = { 'Function': 'Administrative and Corporate Support',
'Project Description': 'P-CNIN FURN & OFF EQUIP',
'Project Number': 'PC002003005_00002',
'Type': 'Renewal',
'MTSF Service Outcome': 'A project update',
'IUDF': 'Governance',
'Own Strategic Objectives': 'TO BE CORRECTED',
'Asset Class': 'Furniture and Office',
'Asset Sub-Class': 'Equipment',
'Ward Location': 'Head Office',
'GPS Longitude': '1', 'GPS Latitude': '2',
'Audited Outcome 2018/19': 2100.0,
'Full Year Forecast 2019/20': 3100.0,
'Budget year 2020/21': 4000.0,
'Budget year 2021/22': 5000.0,
'Budget year 2022/23': 6100.0
}
yield mock_data
def mock_new_project_row():
"""Composite key fields should not match mock_project_row() so that this creates a new project"""
mock_data = { 'Function': 'Administrative and Corporate Support',
'Project Description': 'P-CNIN FURN & OFF EQUIP - NEW DESCRIPTION',
'Project Number': 'PC002003005_00002',
'Type': 'Renewal',
'MTSF Service Outcome': 'A project update',
'IUDF': 'Governance',
'Own Strategic Objectives': 'TO BE CORRECTED',
'Asset Class': 'Furniture and Office',
'Asset Sub-Class': 'Equipment',
'Ward Location': 'Head Office',
'GPS Longitude': '1', 'GPS Latitude': '2',
'Audited Outcome 2018/19': 2100.0,
'Full Year Forecast 2019/20': 3100.0,
'Budget year 2020/21': 4000.0,
'Budget year 2021/22': 5000.0,
'Budget year 2022/23': 6100.0
}
yield mock_data
def verify_expenditure(this, amount, budget_phase, year):
expenditure = Expenditure.objects.get(amount=amount)
this.assertEquals(expenditure.budget_phase.name, budget_phase)
this.assertEquals(expenditure.financial_year.budget_year, year)
class FileTest(TransactionTestCase):
fixtures = ["seeddata"]
def setUp(self):
self.client = Client()
self.username = 'admin'
self.email = '<EMAIL>'
self.password = 'password'
self.user = User.objects.create_superuser(self.username, self.email, self.password)
FileTest.geography = Geography.objects.create(
geo_level="municipality",
geo_code="BUF",
province_name="Eastern Cape",
province_code="EC",
category="A",
)
def test_file_upload(self):
"""Scope of Test: Testing the file upload in Django Admin to processing file and add to Django_Q"""
self.client.login(username=self.username, password=self.password)
fy = FinancialYear.objects.get(budget_year="2019/2020")
self.assertEquals(AnnualSpendFile.objects.all().count(), 0)
self.assertEqual(OrmQ.objects.count(), 0)
# the app name, the name of the model and the name of the view
upload_url = reverse('admin:infrastructure_annualspendfile_add')
with open('infrastructure/tests/test_files/test.xlsx', 'rb', ) as f:
resp = self.client.post(upload_url, {'financial_year': fy.pk, 'document': f}, follow=True)
self.assertContains(resp, "Dataset is currently being processed.", status_code=200)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.PROGRESS)
self.assertEqual(OrmQ.objects.count(), 1)
task = OrmQ.objects.first()
task_file_id = task.task()["args"][0]
task_method = task.func()
self.assertEqual(task_method, 'infrastructure.upload.process_annual_document')
self.assertEqual(task_file_id, spend_file.id)
# run the code
process_annual_document(task_file_id)
self.assertEquals(AnnualSpendFile.objects.count(), 1)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.SUCCESS)
self.assertEquals(Project.objects.count(), 2)
response = self.client.get("/api/v1/infrastructure/search/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'PC002003005_00002')
def test_file_upload_fail(self):
"""Scope of Test: Testing if the file upload fail in Django Admin to processing file and add to Django_Q"""
        self.client.login(username=self.username, password=self.password)
fy = FinancialYear.objects.get(budget_year="2019/2020")
self.assertEquals(FinancialYear.objects.all().count(), 1)
self.assertEquals(AnnualSpendFile.objects.all().count(), 0)
url = reverse('admin:infrastructure_annualspendfile_add')
with open('infrastructure/tests/test_files/failtest.xlsx', 'rb', ) as f:
resp = self.client.post(url, {'financial_year': fy.pk, 'document': f}, follow=True)
self.assertContains(resp, "Dataset is currently being processed.", status_code=200)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.PROGRESS)
self.assertEqual(OrmQ.objects.count(), 1)
task = OrmQ.objects.first()
task_file_id = task.task()["args"][0]
task_method = task.func()
self.assertEqual(task_method, 'infrastructure.upload.process_annual_document')
self.assertEqual(task_file_id, spend_file.id)
self.assertRaises(ValueError, process_annual_document, task_file_id)
self.assertEquals(AnnualSpendFile.objects.all().count(), 1)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.ERROR)
def test_year_error(self):
"""Scope of Test: Test if a file uploaded in Django Admin with incorrect year selection and content returns an error to Django_Q"""
self.client.login(username=self.username, password=self.password)
fy = FinancialYear.objects.get(budget_year="2019/2020")
self.assertEquals(FinancialYear.objects.all().count(), 1)
self.assertEquals(AnnualSpendFile.objects.all().count(), 0)
url = reverse('admin:infrastructure_annualspendfile_add')
with open('infrastructure/tests/test_files/failyear.xlsx', 'rb', ) as f:
resp = self.client.post(url, {'financial_year': fy.pk, 'document': f}, follow=True)
self.assertContains(resp, "Dataset is currently being processed.", status_code=200)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.PROGRESS)
self.assertEqual(OrmQ.objects.count(), 1)
task = OrmQ.objects.first()
task_file_id = task.task()["args"][0]
task_method = task.func()
self.assertEqual(task_method, 'infrastructure.upload.process_annual_document')
self.assertEqual(task_file_id, spend_file.id)
self.assertRaises(ValueError, process_annual_document, task_file_id)
self.assertEquals(AnnualSpendFile.objects.all().count(), 1)
spend_file = AnnualSpendFile.objects.first()
self.assertEquals(spend_file.status, AnnualSpendFile.ERROR)
def test_upload_project(self):
"""Scope of Test: With no existing projects run an upload and check that the correct fields are populated"""
self.assertEquals(BudgetPhase.objects.all().count(), 5)
geography = Geography.objects.get(geo_code="BUF")
fy = FinancialYear.objects.get(budget_year="2019/2020")
utils.load_file(geography, mock_project_row(), fy)
project = Project.objects.get(function="Administrative and Corporate Support")
self.assertEquals(project.project_description, "P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_number, "PC002003005_00002")
self.assertEquals(project.project_type, "New")
self.assertEquals(project.mtsf_service_outcome, "An efficient, effective and development-oriented public service")
self.assertEquals(project.iudf, "Growth")
self.assertEquals(project.own_strategic_objectives, "OWN MUNICIPAL STRATEGIC OBJECTIVE")
self.assertEquals(project.asset_class, "Furniture and Office Equipment")
self.assertEquals(project.asset_subclass, "")
self.assertEquals(project.ward_location, "Administrative or Head Office")
self.assertEquals(project.longitude, 0.0)
self.assertEquals(project.latitude, 0.0)
self.assertEquals(Expenditure.objects.all().count(), 5)
verify_expenditure(self, 2000.00, "Audited Outcome", "2017/2018")
verify_expenditure(self, 3000.00, "Full Year Forecast", "2018/2019")
verify_expenditure(self, 4000.00, "Budget year", "2019/2020")
verify_expenditure(self, 5000.00, "Budget year", "2020/2021")
verify_expenditure(self, 6000.00, "Budget year", "2021/2022")
def test_update_project(self):
"""Scope of Test: With an existing project import a new project with the same composite key and check that non-key fields are updated"""
geography = Geography.objects.get(geo_code="BUF")
utils.load_file(geography, mock_project_row(), "2019/2020")
project = Project.objects.get(project_description="P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_description, "P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_number, "PC002003005_00002")
self.assertEquals(project.project_type, "New")
self.assertEquals(project.mtsf_service_outcome, "An efficient, effective and development-oriented public service")
self.assertEquals(project.iudf, "Growth")
self.assertEquals(project.own_strategic_objectives, "OWN MUNICIPAL STRATEGIC OBJECTIVE")
self.assertEquals(project.asset_class, "Furniture and Office Equipment")
self.assertEquals(project.asset_subclass, "")
self.assertEquals(project.ward_location, "Administrative or Head Office")
self.assertEquals(project.longitude, 0.0)
self.assertEquals(project.latitude, 0.0)
utils.load_file(geography, mock_data_existing_project_row(), "2020/2021")
self.assertEquals(Project.objects.all().count(), 1)
project = Project.objects.get(project_description="P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_type, "Renewal")
self.assertEquals(project.mtsf_service_outcome, "A project update")
self.assertEquals(project.iudf, "Governance")
self.assertEquals(project.own_strategic_objectives, "TO BE CORRECTED")
self.assertEquals(project.asset_class, "Furniture and Office")
self.assertEquals(project.asset_subclass, "Equipment")
self.assertEquals(project.ward_location, "Head Office")
self.assertEquals(project.longitude, 1.0)
self.assertEquals(project.latitude, 2.0)
self.assertEquals(Expenditure.objects.all().count(), 8)
verify_expenditure(self, 2100.00, "Audited Outcome", "2018/2019")
verify_expenditure(self, 3100.00, "Full Year Forecast", "2019/2020")
verify_expenditure(self, 6100.00, "Budget year", "2022/2023")
def test_new_project(self):
"""Scope of Test: With an existing project import a new project with a new composite key and check that a new project is created"""
geography = Geography.objects.get(geo_code="BUF")
utils.load_file(geography, mock_project_row(), "2019/2020")
project = Project.objects.get(project_description="P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_description, "P-CNIN FURN & OFF EQUIP")
self.assertEquals(project.project_number, "PC002003005_00002")
self.assertEquals(project.project_type, "New")
self.assertEquals(project.mtsf_service_outcome, "An efficient, effective and development-oriented public service")
self.assertEquals(project.iudf, "Growth")
self.assertEquals(project.own_strategic_objectives, "OWN MUNICIPAL STRATEGIC OBJECTIVE")
self.assertEquals(project.asset_class, "Furniture and Office Equipment")
self.assertEquals(project.asset_subclass, "")
self.assertEquals(project.ward_location, "Administrative or Head Office")
self.assertEquals(project.longitude, 0.0)
self.assertEquals(project.latitude, 0.0)
utils.load_file(geography, mock_new_project_row(), "2020/2021")
self.assertEquals(Project.objects.all().count(), 2)
project = Project.objects.get(project_description="P-CNIN FURN & OFF EQUIP - NEW DESCRIPTION")
self.assertEquals(project.project_type, "Renewal")
self.assertEquals(project.mtsf_service_outcome, "A project update")
self.assertEquals(project.iudf, "Governance")
self.assertEquals(project.own_strategic_objectives, "TO BE CORRECTED")
self.assertEquals(project.asset_class, "Furniture and Office")
self.assertEquals(project.asset_subclass, "Equipment")
self.assertEquals(project.ward_location, "Head Office")
self.assertEquals(project.longitude, 1.0)
self.assertEquals(project.latitude, 2.0)
self.assertEquals(Expenditure.objects.all().count(), 10)
expenditure_id = Expenditure.objects.get(amount=2100).project_id
new_expenditures = Expenditure.objects.filter(project_id=expenditure_id)
self.assertEquals(new_expenditures.count(), 5)
verify_expenditure(self, 2100.00, "Audited Outcome", "2018/2019")
verify_expenditure(self, 3100.00, "Full Year Forecast", "2019/2020")
verify_expenditure(self, 6100.00, "Budget year", "2022/2023")
|
11538151
|
from setuptools import setup, find_packages
from fliscopt import __version__
with open("README.md", 'r') as f:
long_description = f.read()
setup(
name='fliscopt',
version=__version__,
description='Flight scheduling optimization using Genetic Algorithm variants and other algorithms. ',
license="MIT",
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
packages=find_packages(),
platforms=['linux','macos','unix'],
install_requires=[
'matplotlib','rich'
],
python_requires='>=3.7.10')
|
11538158
|
from flask_unchained.bundles.security.services import SecurityService as BaseSecurityService
class SecurityService(BaseSecurityService):
pass
|
11538197
|
from .connection import SimulaQronConnection
from .socket import Socket
from .broadcast_channel import BroadcastChannel
|
11538203
|
from ctflib import *
from pwn import *
io = remote('pwn.kosenctf.com', '8002')
io.recvline() # I encrypted my secret!!!
c = int(io.recvline())
io.recvline() # I encrypt your message ;)
pt1 = int('1'+'0'*382, 2)
io.sendline(str(pt1))
io.recvline()
c1 = int(io.recvline())
pt2 = pt1+1
io.sendline(str(pt2))
io.recvline()
c2 = int(io.recvline())
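# The service appears to use textbook Paillier with g = 1 + n and a trivial random mask,
# so c = (1+n)^m ≡ 1 + m*n (mod n^2). Encrypting two consecutive plaintexts then gives
# c2 - c1 ≡ n (mod n^2); the assertions below sanity-check the recovered modulus, and the
# secret follows from m = (c - 1) // n.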
n = (c2-c1)
n %= n*n
assert(pow(1+n, pt1, n*n) == c1)
assert(pow(1+n, pt2, n*n) == c2)
print(n)
key = (c-1)//n
print(its(key))
io.interactive()
|
11538215
|
import csv
import html
import sys
import wordfreq
if len(sys.argv) != 3:
print('Usage: python3 sort.py target-lang pairs.csv')
sys.exit(1)
targetLang = sys.argv[1]
pairsPath = sys.argv[2]
pairs = {}
with open(pairsPath, 'r', encoding='utf-8') as pairsFile:
reader = csv.reader(pairsFile, delimiter='\t')
for row in reader:
words = wordfreq.tokenize(html.unescape(row[0]), targetLang)
freqs = [wordfreq.zipf_frequency(word, targetLang, wordlist='combined')
for word in words]
minfreq = min(freqs)
avgfreq = sum(freqs) / float(len(freqs))
pairs[row[0]] = (minfreq, avgfreq, row[1])
pairList = list(pairs.items())
pairList.sort(reverse = True, key=lambda i: i[1])
for pair in pairList:
sys.stdout.buffer.write((pair[0] + '\t' + pair[1][2] + '\n').encode('utf-8'))
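# Example invocation (file names illustrative): `python3 sort.py de pairs.csv` prints the
# tab-separated pairs sorted so that sentences whose rarest word is most frequent come
# first, with ties broken by the average word frequency.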
|
11538325
|
import six
from py2swagger.plugins import Py2SwaggerPlugin, Py2SwaggerPluginException
from py2swagger.introspector import BaseDocstringIntrospector
from py2swagger.utils import OrderedDict, load_class
class SimplePlugin(Py2SwaggerPlugin):
summary = 'Plugin for all applications'
description = 'Parse schemas from configuration file. Add to your config:\n'\
'PLUGIN_SETTINGS[\'endpoints\'] = [(path, method, callback), ...]'
def __init__(self):
super(SimplePlugin, self).__init__()
self._paths = {}
self._security_definitions = {}
@staticmethod
def _operation(introspector):
operation = OrderedDict(
tags=introspector.tags,
summary=introspector.parser.get_summary(),
description=introspector.parser.get_description(),
parameters=introspector.parameters,
responses=introspector.responses,
security=introspector.security
)
# Remove empty keys
for key, value in list(operation.items()):
if not value:
operation.pop(key)
return operation
def _add_operation(self, path, method, introspector):
if path not in self._paths:
self._paths[path] = {}
self._paths[path][method] = self._operation(introspector)
def _add_security_definitions(self, introspector):
self._security_definitions.update(introspector.security_definitions)
def _introspect(self, path, method, callback):
introspector = BaseDocstringIntrospector(callback)
self._add_operation(path, method, introspector)
self._add_security_definitions(introspector)
def run(self, arguments, endpoints=None, *args, **kwargs):
"""
Return part of swagger object. This part contains "paths", "definitions" and "securityDefinitions"
:return: dict
"""
if endpoints is None:
raise Py2SwaggerPluginException('Configuration is missed. Please add PLUGIN_SETTINGS[\'endpoints\'] to your '
'configuration file.')
for path, method, callback in endpoints:
if isinstance(callback, six.string_types):
callback = load_class(callback)
self._introspect(path, method, callback)
return {
'paths': self._paths,
'securityDefinitions': self._security_definitions
}
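# Example of the configuration this plugin consumes; the paths and dotted callback names
# below are illustrative assumptions:
#
# PLUGIN_SETTINGS['endpoints'] = [
#     ('/pets/', 'get', 'myapp.views.list_pets'),
#     ('/pets/{id}/', 'delete', 'myapp.views.delete_pet'),
# ]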
|
11538347
|
import ztom
import csv
import json
import datetime
"""
This will save markets data and tickers and to tickers.csv and markets.json in
current folder. These files could be used as an offline data sources for ztom.
"""
# parameters
number_of_fetches = 10
exchange_id = "binance"
append_tickers = False
symbols_to_save = [] # or ["ETH/BTC", "BNB/ETH", "BNB/BTC"] to save these particular symbols in markets and tickers
"""
/// start
"""
print("Started")
storage = ztom.DataStorage(".")
storage.register("tickers", ["fetch_id", "timestamp", "symbol", "ask", "bid", "askVolume", "bidVolume"],
overwrite=not append_tickers)
last_fetch_id = 0
# getting last fetch_id in csv file
if append_tickers:
with open(storage.entities["tickers"]["full_path"], "r") as csvfile:
        last_row = None
        for row in csv.reader(csvfile):
            last_row = row
if last_row is not None and last_row[0] != storage.entities["tickers"]["headers"][0]:
last_fetch_id = int(last_row[0]) + 1
print("Last fetch_id: {}".format(last_fetch_id))
ex = ztom.ccxtExchangeWrapper.load_from_id(exchange_id) # type: ztom.ccxtExchangeWrapper
ex.enable_requests_throttle()
ex.load_markets()
markets_to_save = dict()
if len(symbols_to_save) > 0:
markets_to_save = {k: v for k, v in ex.markets.items() if k in symbols_to_save}
else:
markets_to_save = ex.markets
with open('markets.json', 'w') as outfile:
json.dump(markets_to_save, outfile)
print("Init exchange")
for i in range(0, number_of_fetches):
if i > 0:
sleep_time = ex.requests_throttle.sleep_time()
print("Request in current period {}/{} sleeping for {} ".format(
ex.requests_throttle.total_requests_current_period,
ex.requests_throttle.requests_per_period,
sleep_time))
print("Fetching tickers {}/{}...".format(i + 1, number_of_fetches))
tickers = ex.fetch_tickers() # type: dict
print("... done")
tickers_to_save = list()
time_stamp = datetime.datetime.now().timestamp()
for symbol, ticker in tickers.items():
if (len(symbols_to_save) > 0 and symbol in symbols_to_save) or len(symbols_to_save) == 0:
tickers_to_save.append({"fetch_id": i+last_fetch_id, "timestamp": time_stamp, "symbol": symbol,
"ask": ticker["ask"],
"bid": ticker["bid"],
"askVolume": ticker["askVolume"],
"bidVolume": ticker["bidVolume"]})
storage.save_dict_all("tickers", tickers_to_save)
print("OK")
|
11538355
|
import os
from src.domain.ErrorTypes import ErrorTypes
from src.utils.code_generation import SpecialRequirementHandlerForParameters
__types_requiring_special_handling={"object", "dict", "regex", "template", "code", "ALL"}
def handle_arguments(parameters, args):
parameter_string_code=[]
for param in parameters:
param_string = handle_parameter(parameters[param], args)
parameter_string_code.extend([param + "=" + param_string, ", "])
if (len(parameters) > 0):
parameter_string_code.pop()
return parameter_string_code
def handle_instantination_or_call(parameters, initial_code, args):
parameter_string_code = handle_arguments(parameters, args)
result=[initial_code]
result.extend(parameter_string_code)
result.extend([")", os.linesep])
return result
def handle_parameter(parameter, args):
type_info = __get_type_info(parameter["type"])
# In the future, we may specialize here for objects and dicts. For now, we assume that families handle any object/dict if given.
    # We handle them as a whole where needed (e.g. schema).
if(type_info["type"] == "array"):
value = ["["]
special_reqs_to_child={}
if ("special_requirements" in parameter and "special_requirements" not in parameter["value"]):
special_reqs_to_child=parameter["special_requirements"]
for val in parameter["value"]:
value.extend([handle_parameter({"value": val, "type": type_info["array_of"], "special_requirements": special_reqs_to_child}, args), ", "])
if (len(parameter["value"]) > 0):
value.pop()
value.append("]")
value = ''.join(value)
return value
else:
if ("special_requirements" in parameter and parameter["type"] in parameter["special_requirements"]):
return SpecialRequirementHandlerForParameters.handle_parameter(parameter, parameter["special_requirements"][parameter["type"]], args)
else:
if(parameter["type"] in __types_requiring_special_handling):
args["errors"].append(ErrorTypes.SPECIAL_REQUIREMENT_NOT_SPECIFIED_ERROR)
return None
return handle_primitive(parameter["value"])
def handle_primitive(value):
if (isinstance(value, str)):
value = __add_quotes_around_val(value)
else:
value = str(value)
return value
def __add_quotes_around_val(val):
return '"'+str(val)+'"'
def __get_type_info(raw_type):
result={}
if(raw_type.startswith("array")):
result["type"]="array"
result["array_of"]=raw_type[6:-1]
else:
result["type"]=raw_type
return result
def merge_with_additional_code(gen_code, additional_local_code):
final_code=[]
final_code.extend(additional_local_code)
final_code.append(os.linesep)
final_code.extend(gen_code)
return final_code
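# For example (illustrative inputs): handle_arguments({"name": {"value": "abc", "type": "str"}},
# {"errors": []}) yields ['name="abc"'], and a parameter of type "array(int)" with value
# [1, 2, 3] is rendered as the literal "[1, 2, 3]".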
|
11538385
|
import sys
import logging
import copy
from datetime import datetime
from dateutil import parser
from croniter import croniter
from reconcile import queries
from reconcile.utils.ocm import OCMMap
from reconcile.utils.state import State
from reconcile.utils.data_structures import get_or_init
from reconcile.utils.semver_helper import sort_versions
QONTRACT_INTEGRATION = 'ocm-upgrade-scheduler'
def fetch_current_state(clusters):
settings = queries.get_app_interface_settings()
ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
settings=settings)
current_state = []
for cluster in clusters:
cluster_name = cluster['name']
ocm = ocm_map.get(cluster_name)
upgrade_policies = \
ocm.get_upgrade_policies(cluster_name)
for upgrade_policy in upgrade_policies:
upgrade_policy['cluster'] = cluster_name
current_state.append(upgrade_policy)
return ocm_map, current_state
def fetch_desired_state(clusters):
desired_state = []
for cluster in clusters:
cluster_name = cluster['name']
upgrade_policy = cluster['upgradePolicy']
upgrade_policy['cluster'] = cluster_name
upgrade_policy['current_version'] = cluster['spec']['version']
upgrade_policy['channel'] = cluster['spec']['channel']
desired_state.append(upgrade_policy)
return desired_state
def update_history(history, upgrade_policies):
"""Update history with information from clusters
with upgrade policies.
Args:
history (dict): history in the following format:
{
"check_in": "2021-08-29 18:01:27.730441",
"versions": {
"version1": {
"workloads": {
"workload1": {
"soak_days": 21,
"reporting": [
"cluster1",
"cluster2"
]
},
"workload2": {
"soak_days": 6,
"reporting": [
"cluster3"
]
}
}
}
}
}
upgrade_policies (list): query results of clusters upgrade policies
"""
default_workload_history = {
'soak_days': 0.0,
'reporting': [],
}
now = datetime.utcnow()
check_in = parser.parse(get_or_init(history, 'check_in', str(now)))
versions = get_or_init(history, 'versions', {})
# we iterate over clusters upgrade policies and update the version history
for item in upgrade_policies:
current_version = item['current_version']
version_history = get_or_init(versions, current_version, {})
version_workloads = get_or_init(version_history, 'workloads', {})
cluster = item['cluster']
workloads = item['workloads']
# we keep the version history per workload
for w in workloads:
workload_history = get_or_init(
version_workloads, w,
copy.deepcopy(default_workload_history))
reporting = workload_history['reporting']
# if the cluster is already reporting - accumulate it.
# if not - add it to the reporting list (first report)
if cluster in reporting:
workload_history['soak_days'] += \
(now - check_in).total_seconds() / 86400 # seconds in day
else:
workload_history['reporting'].append(cluster)
history['check_in'] = str(now)
def get_version_history(dry_run, upgrade_policies, ocm_map):
"""Get a summary of versions history per OCM instance
Args:
dry_run (bool): save updated history to remote state
upgrade_policies (list): query results of clusters upgrade policies
ocm_map (OCMMap): OCM clients per OCM instance
Returns:
dict: version history per OCM instance
"""
settings = queries.get_app_interface_settings()
accounts = queries.get_aws_accounts()
state = State(
integration=QONTRACT_INTEGRATION,
accounts=accounts,
settings=settings
)
results = {}
# we keep a remote state per OCM instance
for ocm_name in ocm_map.instances():
history = state.get(ocm_name, {})
update_history(history, upgrade_policies)
results[ocm_name] = history
if not dry_run:
state.add(ocm_name, history, force=True)
return results
def version_conditions_met(version, history, ocm_name,
workloads, upgrade_conditions):
"""Check that upgrade conditions are met for a version
Args:
version (string): version to check
history (dict): history of versions per OCM instance
ocm_name (string): name of OCM instance
upgrade_conditions (dict): query results of upgrade conditions
workloads (list): strings representing types of workloads
Returns:
bool: are version upgrade conditions met
"""
conditions_met = True
# check soak days condition is met for this version
soak_days = upgrade_conditions.get('soakDays', None)
if soak_days is not None:
ocm_history = history[ocm_name]
version_history = ocm_history['versions'].get(version, {})
for w in workloads:
workload_history = version_history.get('workloads', {}).get(w, {})
if soak_days > workload_history.get('soak_days', 0.0):
conditions_met = False
return conditions_met
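# Illustrative call for the check above (all values here are assumptions, not taken
# from app-interface): with {'soakDays': 7} as the condition, the function returns
# True only once every listed workload has accumulated at least 7 soak days on that
# version in the given OCM instance's history, e.g.
#   version_conditions_met('4.9.0', history, 'ocm-production',
#                          ['workload1'], {'soakDays': 7})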
def calculate_diff(current_state, desired_state, ocm_map, version_history):
"""Check available upgrades for each cluster in the desired state
according to upgrade conditions
Args:
current_state (list): current state of upgrade policies
desired_state (list): desired state of upgrade policies
ocm_map (OCMMap): OCM clients per OCM instance
version_history (dict): version history per OCM instance
Returns:
list: upgrade policies to be applied
"""
diffs = []
now = datetime.utcnow()
for d in desired_state:
# ignore clusters with an existing upgrade policy
cluster = d['cluster']
ocm = ocm_map.get(cluster)
c = [c for c in current_state if c['cluster'] == cluster]
if c:
# there can only be one upgrade policy per cluster
if len(c) != 1:
raise ValueError(
f'[{cluster}] expected only one upgrade policy')
[c] = c
version = c.get('version') # may not exist in automatic upgrades
if version and ocm.version_blocked(version):
logging.debug(
f'[{cluster}] found existing upgrade policy ' +
f'with blocked version {version}')
item = {
'action': 'delete',
'cluster': cluster,
'version': version,
'id': c['id'],
}
diffs.append(item)
else:
logging.debug(
f'[{cluster}] skipping cluster with ' +
'existing upgrade policy')
continue
# ignore clusters with an upgrade schedule not within the next 2 hours
schedule = d['schedule']
next_schedule = croniter(schedule).get_next(datetime)
next_schedule_in_hours = \
(next_schedule - now).total_seconds() / 3600 # seconds in hour
if next_schedule_in_hours > 2:
logging.debug(
f'[{cluster}] skipping cluster with no upcoming upgrade')
continue
# choose version that meets the conditions and add it to the diffs
available_upgrades = \
ocm.get_available_upgrades(d['current_version'], d['channel'])
for version in reversed(sort_versions(available_upgrades)):
logging.debug(
f'[{cluster}] checking conditions for version {version}')
if ocm.version_blocked(version):
logging.debug(
f'[{cluster}] version {version} is blocked')
continue
if version_conditions_met(version, version_history, ocm.name,
d['workloads'], d['conditions']):
logging.debug(
f'[{cluster}] conditions met for version {version}')
item = {
'action': 'create',
'cluster': cluster,
'version': version,
'schedule_type': 'manual',
'next_run': next_schedule.strftime('%Y-%m-%dT%H:%M:%SZ'),
}
diffs.append(item)
break
return diffs
def sort_diffs(diff):
if diff['action'] == 'delete':
return 1
else:
return 2
def act(dry_run, diffs, ocm_map):
diffs.sort(key=sort_diffs)
for diff in diffs:
action = diff.pop('action')
cluster = diff.pop('cluster')
ocm = ocm_map.get(cluster)
if action == 'create':
logging.info([action, cluster, diff['version'], diff['next_run']])
if not dry_run:
ocm.create_upgrade_policy(cluster, diff)
elif action == 'delete':
logging.info([action, cluster, diff['version']])
if not dry_run:
ocm.delete_upgrade_policy(cluster, diff)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
clusters = queries.get_clusters()
clusters = [c for c in clusters if c.get('upgradePolicy') is not None]
if not clusters:
logging.debug("No upgradePolicy definitions found in app-interface")
sys.exit(0)
ocm_map, current_state = fetch_current_state(clusters)
desired_state = fetch_desired_state(clusters)
version_history = get_version_history(dry_run, desired_state, ocm_map)
diffs = calculate_diff(
current_state, desired_state, ocm_map, version_history)
act(dry_run, diffs, ocm_map)
|
11538390
|
import pytest
from tenable_io.api.networks import NetworkRequest, BulkAssignRequest
from tenable_io.api.models import Network, NetworkList, ScannerList
@pytest.mark.vcr()
def test_networks_list(client):
networks = client.networks_api.list()
assert isinstance(networks, NetworkList), u'The `list` method did not return type `NetworkList`.'
@pytest.mark.vcr()
def test_networks_details(client):
network = client.networks_api.details('00000000-0000-0000-0000-000000000000') # default network uuid
assert isinstance(network, Network), u'The `details` method did not return type `Network`.'
@pytest.mark.vcr()
def test_networks_create(client):
network = client.networks_api.create(NetworkRequest(name="test_network", description="automated_test"))
assert isinstance(network, Network), u'The `create` method did not return type `Network`.'
@pytest.mark.vcr()
def test_networks_update(client):
network = client.networks_api.create(NetworkRequest(name="test_network2", description="automated_test"))
network = client.networks_api.update(network.uuid, NetworkRequest(name="updated_network_name"))
assert isinstance(network, Network), u'The `update` method did not return type `Network`.'
assert network.name == 'updated_network_name', u'Expected the network name to be updated.'
@pytest.mark.vcr()
def test_networks_delete(client):
network = client.networks_api.create(NetworkRequest(name="test_network3", description="automated_test"))
assert client.networks_api.delete(network.uuid)
@pytest.mark.vcr()
def test_networks_list_scanners(client):
networks = client.networks_api.list()
scanners = client.networks_api.list_scanners(networks.networks[0].uuid)
assert isinstance(scanners, ScannerList), u'The `list_scanners` method did not return type `ScannerList`.'
@pytest.mark.vcr()
def test_networks_list_assignable_scanners(client):
networks = client.networks_api.list()
scanners = client.networks_api.list_assignable_scanners(networks.networks[1].uuid)
assert isinstance(scanners, ScannerList), u'The `list_scanners` method did not return type `ScannerList`.'
assert len(scanners.scanners) > 0, u'Expected at least 1 assignable scanner.'
@pytest.mark.vcr()
def test_networks_assign_scanner(client):
networks = client.networks_api.list()
scanners = client.networks_api.list_assignable_scanners(networks.networks[1].uuid)
assert client.networks_api.assign_scanner(network_id=networks.networks[1].uuid,
scanner_uuid=scanners.scanners[0].uuid)
@pytest.mark.vcr()
def test_networks_bulk_assign_scanners(client):
networks = client.networks_api.list()
scanners = client.networks_api.list_assignable_scanners(networks.networks[1].uuid)
assert client.networks_api.bulk_assign_scanners(network_id=networks.networks[1].uuid,
bulk_assign=BulkAssignRequest(
scanner_uuids=[scanners.scanners[0].uuid]
))
|
11538401
|
import os
import random
import shlex
import sys
import traceback
from os.path import abspath, dirname, exists as pexists
from pathlib import Path
import numpy as np
sys.dont_write_bytecode = True
if __name__ == '__main__' and __package__ is None:
parent_dir = dirname(dirname(abspath(__file__)))
sys.path.append(parent_dir)
from neuropredict.regress import cli
from neuropredict import config as cfg
from neuropredict.tests._test_utils import remove_neuropredict_results
from pyradigm import RegressionDataset
from pyradigm.utils import (make_random_RegrDataset,
dataset_with_new_features_same_everything_else)
feat_generator = np.random.randn
random.seed(42)
test_dir = Path(__file__).resolve().parent
out_dir = test_dir.joinpath('..', 'tests', 'scratch_regress')
if not pexists(out_dir):
os.makedirs(out_dir)
min_size = 300
max_size = 500
max_dim = 100
min_num_modalities = 3
max_num_modalities = 10
train_perc = 0.5
num_rep_cv = 45
num_procs = 3
covar_list = ('age', 'gender', 'dummy')
covar_types = ('age', 'gender', 'float')
covar_arg = ','.join(['age', ])
deconf_method = 'residualize'
red_dim = 'sqrt'
estimator = 'randomforestregressor'
dr_method = 'variancethreshold' # 'selectkbest_f_classif'
gs_level = 'none' # 'light'
out_path1 = os.path.join(out_dir, 'random_regr_ds1.pkl')
out_path2 = os.path.join(out_dir, 'random_regr_ds2.pkl')
if pexists(out_path1) and pexists(out_path2):
ds_one = RegressionDataset(dataset_path=out_path1)
ds_two = RegressionDataset(dataset_path=out_path2)
else:
ds_one = make_random_RegrDataset(min_size=min_size, max_size=max_size,
max_dim=max_dim,
attr_names=covar_list,
attr_types=covar_types
)
ds_one.description = 'ds_one'
ds_one.save(out_path1)
ds_two = dataset_with_new_features_same_everything_else(ds_one, max_dim)
ds_two.description = 'ds_two'
ds_two.save(out_path2)
def test_basic_run():
remove_neuropredict_results(out_dir)
sys.argv = shlex.split('np_regress -y {} {} -t {} -n {} -c {} -g {} -o {} '
'-e {} -dr {} '
''.format(out_path1, out_path2, train_perc, num_rep_cv,
num_procs, gs_level, out_dir, estimator,
dr_method))
cli()
def test_each_combination_works():
"""Ensures each of combination of dim. reduction and regressor works"""
nrep = 10
nproc = 1
gsl = 'none' # to speed up the process
failed_combos = list()
for clf_name in cfg.regressor_choices:
for fs_name in cfg.all_dim_red_methods:
# skipping the test for LLE* to avoid numerical issues
if fs_name.startswith('lle'):
continue
# ensure a fresh start
remove_neuropredict_results(out_dir)
try:
                cli_str = 'np_regress -y {} -t {} -n {} -c {} -g {} -o {} ' \
                          '-e {} -dr {} ' \
                          ''.format(out_path1, train_perc, nrep, nproc,
                                    gsl, out_dir, clf_name, fs_name)
sys.argv = shlex.split(cli_str)
cli()
except:
failed_combos.append('{:35} {:35}'.format(clf_name, fs_name))
traceback.print_exc()
print('\nCombinations failed:\n{}'.format('\n'.join(failed_combos)))
if len(failed_combos) > 4:
print('5 or more combinations of DR and REGR failed! Fix them')
test_each_combination_works()
|
11538437
|
import os
import threading
import requests
import sqlite3
from datetime import datetime, timedelta
from threading import Lock, Timer
from plugin import *
class spacex_launches(plugin):
def __init__(self, bot):
super().__init__(bot)
self.db_name = self.bot.get_server_name()
os.makedirs(os.path.dirname(os.path.realpath(self.config['db_location'])), exist_ok=True)
self.db_connection = sqlite3.connect(self.config['db_location'], check_same_thread=False)
self.db_cursor = self.db_connection.cursor()
self.db_cursor.execute(f"CREATE TABLE IF NOT EXISTS '{self.db_name}' (nickname TEXT primary key not null)")
self.db_mutex = Lock()
self.upcoming_launches_timers = {} # {flight_id -> upcoming_launch_info}
self.check_upcoming_launches_timer = utils.repeated_timer(timedelta(minutes=15).total_seconds(), self.check_upcoming_launches)
self.check_upcoming_launches_timer.start()
class upcoming_launch_info:
def __init__(self, launch_datetime, timers):
self.launch_datetime = launch_datetime
self.timers = timers
def unload_plugin(self):
self.check_upcoming_launches_timer.cancel()
for info in self.upcoming_launches_timers.values():
for timer in info.timers:
timer.cancel()
@utils.timed_lru_cache(expiration=timedelta(minutes=3))
def get_upcoming_launches(self):
return requests.get(r'https://api.spacexdata.com/v2/launches/upcoming').json()
@utils.timed_lru_cache(expiration=timedelta(minutes=3), typed=True)
def get_launch_by_id(self, flight_id):
return requests.get(r'https://api.spacexdata.com/v2/launches/all?flight_number=%s' % flight_id).json()[0]
@utils.timed_lru_cache(expiration=timedelta(minutes=3))
def get_latest_launch(self):
return requests.get(r'https://api.spacexdata.com/v2/launches/latest').json()
def check_upcoming_launches(self):
self.logger.debug('checking upcoming launches...')
next_launches = self.get_upcoming_launches()
for next_launch in next_launches:
self.handle_upcoming_launch(next_launch)
def handle_upcoming_launch(self, next_launch):
now = datetime.now()
flight_id = next_launch['flight_number']
next_launch_time = datetime.fromtimestamp(next_launch['launch_date_unix']) if next_launch['launch_date_unix'] else None
if flight_id in self.upcoming_launches_timers:
if self.upcoming_launches_timers[flight_id].launch_datetime != next_launch_time:
old_launch_time = self.upcoming_launches_timers[flight_id].launch_datetime
self.logger.info(f'launch {flight_id} was just rescheduled: {old_launch_time} -> {next_launch_time}')
if old_launch_time - timedelta(days=self.config['ignore_rescheduled_launches_further_than_days']) < now:
self.inform_rescheduled_launch(next_launch, old_launch_time)
self.logger.debug(f'canceling timers for {flight_id}')
for timer in self.upcoming_launches_timers[flight_id].timers: timer.cancel()
del self.upcoming_launches_timers[flight_id]
else:
self.logger.debug(f'timers for {flight_id} already set')
return
self.logger.debug(f'setting launch time for flight {flight_id}: {next_launch_time}')
self.upcoming_launches_timers[flight_id] = self.upcoming_launch_info(next_launch_time, [])
if next_launch_time:
self.add_reminder_at(next_launch_time - timedelta(hours=12), flight_id)
self.add_reminder_at(next_launch_time - timedelta(minutes=30), flight_id)
def add_reminder_at(self, time, flight_id):
now = datetime.now()
if time < now: return
total_seconds = (time - now).total_seconds()
if total_seconds > threading.TIMEOUT_MAX:
self.logger.debug(f'{flight_id} flight reminder not set, timeout value is too large')
else:
timer = Timer(total_seconds, self.remind_upcoming_launch, kwargs={'flight_id': flight_id})
self.upcoming_launches_timers[flight_id].timers.append(timer)
timer.start()
self.logger.debug(f'reminder at {time} set for upcoming launch: {flight_id}')
def inform_rescheduled_launch(self, launch, old_launch_time):
new_launch_time = datetime.fromtimestamp(launch['launch_date_unix']) if launch['launch_date_unix'] else None
assert new_launch_time != old_launch_time
users_to_call = self.get_users_to_call()
if not self.config['inform_about_rescheduled_launches'] or not users_to_call: return
flight_id = launch['flight_number']
old_time_str = color.green(old_launch_time.strftime('%Y-%m-%d %H:%M')) + utils.get_str_utc_offset() if old_launch_time else '<unknown>'
new_time_str = color.green(new_launch_time.strftime('%Y-%m-%d %H:%M')) + utils.get_str_utc_offset() if new_launch_time else '<unknown>'
rocket_name = color.cyan(launch['rocket']['rocket_name'])
flight_id_str = color.orange(flight_id)
if self.config['call_users_for_rescheduled_launches']: prefix = ', '.join(users_to_call)
else: prefix = ''
suffix = f'{rocket_name} launch {flight_id_str} was just rescheduled: {old_time_str} -> {new_time_str}'
if prefix:
if self.bot.is_msg_too_long(f'{prefix}: {suffix}'):
self.bot.say(f'{prefix}')
self.bot.say(f'{suffix}')
else:
self.bot.say(f'{prefix}: {suffix}')
else:
self.bot.say(f'{suffix}')
def remind_upcoming_launch(self, flight_id):
self.logger.info(f'reminding about next upcoming launch: {flight_id}')
users_to_call = self.get_users_to_call()
if not users_to_call: return
launch = self.get_launch_by_id(flight_id)
launch_time = datetime.fromtimestamp(launch['launch_date_unix']) if launch['launch_date_unix'] else None
if launch['launch_success'] is not None \
or not launch['launch_date_unix'] \
or (launch_time < datetime.now()) \
or flight_id not in self.upcoming_launches_timers \
or launch_time != self.upcoming_launches_timers[flight_id].launch_datetime:
self.logger.warning(f'launch {flight_id} probably canceled / rescheduled, skipping...')
return
self.bot.say(', '.join(users_to_call))
self.bot.say(self.get_launch_info_str(launch))
@command
@doc('get upcoming SpaceX launches info')
def spacex_next(self, sender_nick, **kwargs):
self.logger.info(f'{sender_nick} wants spacex upcoming launch')
launches = self.get_upcoming_launches()[0:self.config['next_launches']]
if not launches:
self.bot.say('no scheduled launches')
return
for launch in launches:
self.bot.say(self.get_launch_info_str(launch))
def get_launch_info_str(self, launch):
past = not launch['upcoming'] if launch['launch_date_unix'] else False
time = self.get_launch_time_str(launch, color.green)
time = f' {time}' if time else ''
flight_id = color.orange(f'[{launch["flight_number"]}]')
rocket_name = color.cyan(launch['rocket']['rocket_name'])
reused = launch['reuse']['core'] or launch['reuse']['side_core1'] or launch['reuse']['side_core2']
reused = 'Reused' if reused else 'Unused'
launch_site = launch['launch_site']['site_name']
uri = self.get_video_uri(launch)
uri = f': {uri}' if uri else ''
try:
payload_weight = sum([payload['payload_mass_kg'] for payload in launch['rocket']['second_stage']['payloads']])
payload_weight = f' with {payload_weight}kg payload'
except (KeyError, TypeError): payload_weight = ''
try:
orbits = [payload['orbit'] for payload in launch['rocket']['second_stage']['payloads']]
orbits = ', '.join(list(set(orbits)))
orbits = f' to {orbits}'
except (KeyError, TypeError): orbits = ''
payload_info = f'{payload_weight}{orbits}'
result = f'{flight_id} ' if self.config['include_flight_id'] else ''
result += f'{reused} {rocket_name} {"launched" if past else "launches"}{time} from {launch_site}{payload_info}{uri}'
return result
def get_launch_time_str(self, launch, colorize_func=lambda x: x):
if not launch['launch_date_unix']:
return ''
result = datetime.fromtimestamp(launch['launch_date_unix'])
if not launch['is_tentative']:
return colorize_func(result.strftime('%Y-%m-%d')) + ' ' + colorize_func(result.strftime('%H:%M')) + utils.get_str_utc_offset()
if launch['tentative_max_precision'] == 'year':
return colorize_func(result.strftime('%Y'))
if launch['tentative_max_precision'] == 'month':
return colorize_func(result.strftime('%Y-%m'))
if launch['tentative_max_precision'] == 'day':
return colorize_func(result.strftime('%Y-%m-%d'))
return colorize_func(result.strftime('%Y-%m-%d')) + ' ' + colorize_func(result.strftime('%H:%M')) + utils.get_str_utc_offset()
def get_video_uri(self, launch):
if launch['links']['video_link']: return launch['links']['video_link']
if launch['upcoming'] and \
(datetime.fromtimestamp(launch['launch_date_unix']) < (datetime.now() + timedelta(hours=2))) and \
launch['tentative_max_precision'] not in ['month', 'year']:
return r'http://www.spacex.com/webcast'
return ''
@command
@command_alias('spacex_prev')
@doc('get last SpaceX launch info')
def spacex_last(self, sender_nick, **kwargs):
self.logger.info(f'{sender_nick} wants spacex latest launch')
latest_launch = self.get_latest_launch()
prefix = '[LAUNCH SUCCESS] ' if latest_launch['launch_success'] else '[LAUNCH FAIL] '
land_success = [c['land_success'] for c in latest_launch['rocket']['first_stage']['cores']]
if land_success.count(None) == len(land_success):
prefix += '[NO LANDING ATTEMPT]'
else:
land_success = list(filter(lambda l: l is not None, land_success))
if True in land_success and False in land_success: prefix += '[LANDING PARTIALLY SUCCESS]'
elif True in land_success: prefix += '[LANDING SUCCESS]'
else: prefix += '[LANDING FAIL]'
prefix = color.orange(prefix)
if self.config['include_details'] and latest_launch['details']:
self.bot.say(self.get_launch_info_str(latest_launch))
self.bot.say(f'{prefix} {latest_launch["details"]}')
else:
self.bot.say(f'{prefix} {self.get_launch_info_str(latest_launch)}')
def get_users_to_call(self):
with self.db_mutex:
self.db_cursor.execute(f"SELECT nickname FROM '{self.db_name}'")
db_result = self.db_cursor.fetchall()
db_result = [irc_nickname(n[0]) for n in db_result]
on_channel = self.bot.get_usernames_on_channel()
result = []
for saved in db_result:
for present in on_channel:
if saved in present:
result.append(present)
break
return result
@command
@doc('will keep you updated on all upcoming SpaceX launches')
def spacex_remind(self, sender_nick, **kwargs):
with self.db_mutex:
self.db_cursor.execute(f"INSERT OR REPLACE INTO '{self.db_name}' VALUES (?)", (sender_nick.casefold(),))
self.db_connection.commit()
self.bot.say_ok()
@command
@doc('stop informing me about upcoming SpaceX launches')
def spacex_rm_remind(self, sender_nick, **kwargs):
with self.db_mutex:
self.db_cursor.execute(f"DELETE FROM '{self.db_name}' WHERE nickname = ? COLLATE NOCASE", (sender_nick.casefold(),))
self.db_connection.commit()
self.bot.say_ok()
|
11538445
|
from functools import wraps
import json
import rpyc
from rpyc.core.protocol import Connection
class RpycUtil(object):
"""RpycUtil-Rpyc客户端工具"""
def __init__(self, host, port):
super(RpycUtil, self).__init__()
self.host = host
self.port = port
self.conn = None
def connect(self):
self.disconnect()
client_config={
"allow_all_attrs": True
}
self.conn = rpyc.connect(self.host, self.port, config=client_config)
def disconnect(self):
if isinstance(self.conn, Connection):
if not self.conn.closed:
self.conn.close()
self.conn = None
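    # Decorator: make sure the connection is open before the wrapped call and,
    # if the server dropped the connection (EOFError), reconnect and retry once.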
def ensure_connected(func):
@wraps(func)
def func_wrapper(*args):
self = args[0]
if not isinstance(self.conn, Connection) or self.conn.closed:
self.connect()
print('running {0}()'.format(func.__name__))
            # if disconnected from the rpyc server, reconnect automatically and retry
try:
return func(*args)
except Exception as e:
if isinstance(e, EOFError):
self.connect()
return func(*args)
else:
raise e
return func_wrapper
@ensure_connected
    def add_job_json(self, params=None):
        # serialize the dict to a JSON string so rpyc does not pass a netref object,
        # which could not be serialized when used directly on the server side
        job_params = json.dumps(params or {}, ensure_ascii=False)
return self.conn.root.add_job_json(job_params)
@ensure_connected
def get_jobs_json(self):
ret = self.conn.root.get_jobs_json()
return ret
@ensure_connected
def remove_job(self, job_id):
ret = self.conn.root.remove_job(job_id)
return ret
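# Minimal usage sketch (host, port and the job fields below are assumptions):
#   util = RpycUtil('127.0.0.1', 18861)
#   util.add_job_json({'id': 'job1', 'trigger': 'interval', 'seconds': 30})
#   print(util.get_jobs_json())
#   util.remove_job('job1')
#   util.disconnect()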
|
11538455
|
import textwrap
from storybro.play.settings import PlayerSettings
from storybro.stories.block import Block
from storybro.stories.story import Story
from storybro.story.utils import cut_trailing_sentence
class BlockFormatter:
def __init__(self, settings: PlayerSettings):
self.settings: PlayerSettings = settings
# before saving to disk
def filter_block(self, block: Block) -> Block:
if block.attrs.get('type') == 'input':
return self.filter_input(block)
else:
return self.filter_output(block)
def filter_input(self, block: Block) -> Block:
return Block(block.text.strip(' \t\n\r'), attrs=block.attrs.copy())
def filter_output(self, block: Block) -> Block:
text = cut_trailing_sentence(block.text)
if len(text) == 0:
text = ""
else:
text = text.replace('."', '".')
text = text.replace("#", "")
text = text.replace("*", "")
text = text.replace("\n\n", "\n")
text = text.strip(' \t\n\r')
if not text.endswith("."):
text = text + "."
return Block(text, attrs=block.attrs.copy())
# before sending to generator
def process_block(self, block: Block) -> Block:
if block.attrs.get('type') == 'input':
return self.process_input(block)
else:
return self.process_output(block)
def process_input(self, block: Block) -> Block:
return Block("\n> " + block.text + "\n", block.attrs.copy())
def process_output(self, block: Block) -> Block:
return Block(block.text, block.attrs.copy())
def process_story(self, story: Story) -> str:
return "".join([self.process_block(b).text for b in story.blocks])
# before sending to user
def render_block(self, block: Block) -> Block:
if block.attrs.get('type') == 'input':
return self.render_input(block)
else:
return self.render_output(block)
def fill_text(self, text, is_input, is_pinned):
input_icon = self.settings.icon_for_input if is_input else self.settings.icon_for_output
pin_icon = self.settings.icon_for_pins if is_pinned else " "
initial_indent = input_icon + pin_icon + " "
return textwrap.fill(
text,
width=self.settings.fill_width,
replace_whitespace=True,
drop_whitespace=True,
initial_indent=initial_indent,
subsequent_indent=" ",
fix_sentence_endings=True)
def render_input(self, block: Block) -> Block:
rendered_input = block.text
is_input = block.attrs.get('type') == 'input'
is_pinned = block.attrs.get('pinned')
if self.settings.fill_width:
rendered_input = "\n".join([self.fill_text(l, is_input, is_pinned) for l in rendered_input.splitlines()])
return Block(rendered_input, block.attrs.copy())
def render_output(self, block: Block) -> Block:
rendered_output = block.text
is_input = block.attrs.get('type') == 'input'
is_pinned = block.attrs.get('pinned')
if self.settings.fill_width:
rendered_output = "\n".join([self.fill_text(l, is_input, is_pinned) for l in rendered_output.splitlines()])
rendered_output = f"{self.settings.top_separator}\n{rendered_output}\n{self.settings.bottom_separator}"
return Block(rendered_output, block.attrs.copy())
def render_story(self, story: Story, last_n: int = 0) -> str:
blocks = story.blocks
if last_n:
blocks = blocks[-last_n:]
return "\n".join([self.render_block(b).text for b in blocks])
|
11538459
|
from django.urls import path
from newsletter_subscription.ajax_views import ajax_subscribe
from newsletter_subscription.views import form, resubscribe, subscribe
def newsletter_subscriptions_urlpatterns(**kwargs):
return [
path(
"",
form,
kwargs,
name="newsletter_subscription_form",
),
path(
"s/<str:code>/",
subscribe,
kwargs,
name="newsletter_subscription_subscribe",
),
path(
"r/<str:code>/",
resubscribe,
kwargs,
name="newsletter_subscription_resubscribe",
),
path(
"ajax_subscribe/",
ajax_subscribe,
kwargs,
name="newsletter_subscription_ajax_subscribe",
),
]
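# Hedged usage sketch for a project urls.py (the URL prefix is an assumption):
#   from django.urls import include, path
#   urlpatterns = [
#       path("newsletter/", include(newsletter_subscriptions_urlpatterns())),
#   ]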
|
11538463
|
from datetime import date, datetime
from dateutil.tz import tzoffset
from flask import request
from flask.json import JSONEncoder
def test_view_arg(api):
'''Should successfully serialize and parametrize path args
'''
@api.route('/<str_arg>/<int_arg>/<float_arg>/<bool_arg>/<date_arg>/<datetime_arg>')
def _(str_arg: str, int_arg: int, float_arg: float,
bool_arg: bool, date_arg: date, datetime_arg: datetime):
# asserts everything was deserialized properly
assert str_arg == 'test'
assert int_arg == 1
assert float_arg == 1.5
assert bool_arg is True
assert date_arg == date(2019, 9, 8)
assert datetime_arg == datetime(2019, 7, 6, 5, 4, 3, 0, tzoffset(None, -3600))
return {
'str_arg': str_arg,
'int_arg': int_arg,
'float_arg': float_arg,
'bool_arg': bool_arg,
'date_arg': date_arg,
'datetime_arg': datetime_arg
}
with api.flask_app.test_client() as client:
response = client.get(
'/test/1/1.5/true/2019-09-08/2019-07-06T05:04:03-01:00')
# asserts everything was serialized properly
assert response.get_json().get('str_arg') == 'test'
assert response.get_json().get('int_arg') == 1
assert response.get_json().get('float_arg') == 1.5
assert response.get_json().get('bool_arg') is True
assert response.get_json().get('date_arg') == '2019-09-08'
assert response.get_json().get('datetime_arg') == '2019-07-06T05:04:03-01:00'
def test_query_args(api):
'''Should successfully serialize and parametrize query args
'''
@api.route('/')
def _(str_arg: str, int_arg: int, float_arg: float,
bool_arg: bool, list_arg: list, date_arg: date, datetime_arg: datetime):
# asserts everything was deserialized properly
assert str_arg == 'test'
assert int_arg == 1
assert float_arg == 1.5
assert bool_arg is True
assert list_arg == ['item1', 'item2']
assert date_arg == date(2019, 9, 8)
assert datetime_arg == datetime(2019, 7, 6, 5, 4, 3, 0, tzoffset(None, -3600))
return {
'str_arg': str_arg,
'int_arg': int_arg,
'float_arg': float_arg,
'bool_arg': bool_arg,
'list_arg': list_arg,
'date_arg': date_arg,
'datetime_arg': datetime_arg
}
with api.flask_app.test_client() as client:
response = client.get(
'/?str_arg=test' +
'&int_arg=1' +
'&float_arg=1.5' +
'&bool_arg=true' +
'&list_arg=item1' +
'&list_arg=item2' +
'&date_arg=2019-09-08' +
'&datetime_arg=2019-07-06T05:04:03-01:00')
# asserts everything was serialized properly
assert response.get_json().get('str_arg') == 'test'
assert response.get_json().get('int_arg') == 1
assert response.get_json().get('float_arg') == 1.5
assert response.get_json().get('bool_arg') is True
assert response.get_json().get('list_arg') == ['item1', 'item2']
assert response.get_json().get('date_arg') == '2019-09-08'
assert response.get_json().get('datetime_arg') == '2019-07-06T05:04:03-01:00'
def test_extra_query_args(api):
'''Should not serialize and parametrize args that are not expected by the view func
'''
@api.route('/')
def _(arg: str):
assert arg == 'test_one'
assert request.args['extra_arg'] == 'test_extra'
with api.flask_app.test_client() as client:
client.get('/?arg=test_one&extra_arg=test_extra')
def test_kwargs(api):
'''Should send all extra args to **kwargs
'''
@api.route('/')
def _(arg, **kwargs):
kwargs['arg'] = arg
assert kwargs['arg'] == 'arg'
assert kwargs['arg_kw1'] == 'kw1'
assert kwargs['arg_kw2'] == 'kw2'
return kwargs
with api.flask_app.test_client() as client:
response = client.get('/?arg=arg&arg_kw1=kw1&arg_kw2=kw2')
assert response.get_json().get('arg') == 'arg'
assert response.get_json().get('arg_kw1') == 'kw1'
assert response.get_json().get('arg_kw2') == 'kw2'
|
11538469
|
from cv2blur import CV2Blur
from cv2nothing import CV2Nothing
from cv2add import CV2Add
from cv2multiply import CV2Multiply
from cv2gausblur import CV2GausBlur
from cv2copypaste import CV2CopyPaste
from cv2zero import CV2Zero
class CV2Selector(object):
def __init__(self):
self.selections = {"nothing": CV2Nothing(),
"blur": CV2Blur(),
"add": CV2Add(),
"multiply": CV2Multiply(),
"gausblur": CV2GausBlur(),
"copypaste" : CV2CopyPaste(),
"zero" : CV2Zero()}
self.selection = None
def select(self, name):
assert name in self.selections.keys()
self.selection = self.selections[name]
def apply(self, image):
return self.selection.apply(image)
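# Minimal usage sketch (the image path is an assumption):
#   import cv2
#   selector = CV2Selector()
#   selector.select("gausblur")
#   result = selector.apply(cv2.imread("example.jpg"))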
|
11538508
|
import asyncio
from panini.app import App
from panini.middleware.reader_emulator_middleware import ReaderEmulatorMiddleware
from panini.middleware.writer_emulator_middleware import WriterEmulatorMiddleware
from panini.middleware.nats_timeout import NATSTimeoutMiddleware
app = App(service_name="publisher", host="127.0.0.1", port=4222)
@app.task()
async def request_task():
for i in range(10):
try:
print("request listener.store.request")
response = await app.request(
"listener.store.request", {"data": f"request.data.{i}"}
)
print("response", response)
await asyncio.sleep(1.5)
except Exception as ex:
app.logger.exception(ex)
@app.task()
async def publish_task():
for i in range(10):
try:
print("publish listener.store.listen")
await app.publish("listener.store.listen", {"data": f"publish.data.{i}"})
await asyncio.sleep(1)
except Exception as ex:
app.logger.exception(ex)
if __name__ == "__main__":
# folder = "resources"
# filename = "events.publisher.2021-03-19-16-21-30.jsonl"
# app.add_middleware(ReaderEmulatorMiddleware, folder=folder, filename=f"{folder}/{filename}", compare_output=True)
# app.add_middleware(ReaderEmulatorMiddleware, prefix='prefix', run_emulator=False)
app.start()
|
11538541
|
from bases import *
from taco_scrapi import *
from baseladvd import *
from usuarios_api import *
def main():
    urltacos = 'http://taco-randomizer.herokuapp.com/'
    urlUsers = 'https://randomuser.me/api/'
    t = AppTaco()
    u = AppUser()
    db = basedatos()
    for i in range(1, 51):
        taquitos = t.get_taco(urltacos)
        db.insertarTaco_db(i, taquitos)
    for i in range(1, 26):
        usuarios = u.get_usuario(urlUsers)
        db.insertarCliente_db(i, usuarios)
if __name__ == '__main__':
main()
|
11538568
|
PHENOTYPE_GROUPS = {
"HP:0001298": {"name": "Encephalopathy", "abbr": "ENC"},
"HP:0012759": {"name": "Neurodevelopmental abnormality", "abbr": "NDEV"},
"HP:0001250": {"name": "Seizures", "abbr": "EP"},
"HP:0100022": {"name": "Abnormality of movement", "abbr": "MOVE"},
"HP:0000707": {"name": "Neurology, other", "abbr": "NEUR"},
"HP:0003011": {"name": "Abnormality of the musculature", "abbr": "MUSC"},
"HP:0001638": {"name": "Cardiomyopathy", "abbr": "CARD"},
"HP:0001507": {"name": "Growth abnormality", "abbr": "GROW"},
"HP:0001392": {"name": "Abnormality of the liver", "abbr": "LIV"},
"HP:0011458": {"name": "Abdominal symptom", "abbr": "GI"},
"HP:0012373": {"name": "Abnormal eye physiology", "abbr": "EYE"},
"HP:0000077": {"name": "Abnormality of the kidney", "abbr": "KIDN"},
"HP:0000951": {"name": "Abnormality of the skin", "abbr": "SKIN"},
"HP:0001939": {"name": "Abnormality of metabolism/homeostasis", "abbr": "METAB"},
"HP:0000118": {"name": "As yet undefined/to be added", "abbr": "UND"},
"HP:0002011": {"name": "Abnormal CNS morphology", "abbr": "MRI"},
}
COHORT_TAGS = ["endo", "mito", "ketogenic diet", "pedhep", "other"]
# Downloaded resources can be real downloaded files or demo files (located in scout/scout/demo/resources)
UPDATE_DISEASES_RESOURCES = {
"genemap_lines": ["genemap2.txt", "genemap2_reduced.txt"],
"hpo_gene_lines": ["phenotype_to_genes.txt", "phenotype_to_genes_reduced.txt"],
}
|
11538570
|
import setup_django
from django.db import models
from api.models import WhiteListItem, BlackListItem
"""
Remove duplicates, keeping the one with greatest id
"""
def remove_duplicates(model, unique_fields):
duplicates = (model.objects.values(*unique_fields)
.annotate(max_id=models.Max('id'),
count_id=models.Count('id'))
.filter(count_id__gt=1)
.order_by())
    print(duplicates)
for duplicate in duplicates:
dup = {}
for x in unique_fields:
dup[x] = duplicate[x]
(model.objects.filter(**dup)
.exclude(id=duplicate['max_id'])
.delete())
if __name__ == '__main__':
remove_duplicates(WhiteListItem, ['user', 'url'])
remove_duplicates(BlackListItem, ['user', 'url'])
|
11538579
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
class SoftmaxModel(nn.Module):
def __init__(self, D_in, D_out):
super(SoftmaxModel, self).__init__()
self.linear = nn.Linear(D_in, D_out)
self.D_in = D_in
self.D_out = D_out
def forward(self, x):
# TODO: fix hardcoded
        # flatten with a torch op so autograd tracking is preserved
        x = x.reshape(x.shape[0], self.D_in)
return self.linear(x)
# Unflattens flattened gradient
def reshape(self, flat_gradient):
layers = []
layers.append( torch.from_numpy(np.reshape(flat_gradient[0:self.D_in*self.D_out], (self.D_out, self.D_in))).type(torch.FloatTensor) )
layers.append( torch.from_numpy(flat_gradient[self.D_in*self.D_out:self.D_in*self.D_out + self.D_out]).type(torch.FloatTensor) )
return layers
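# Small self-check of the flatten/unflatten round trip; the dimensions below are
# illustrative assumptions, not values used elsewhere in this project.
if __name__ == '__main__':
    model = SoftmaxModel(D_in=4, D_out=3)
    flat = np.zeros(4 * 3 + 3, dtype=np.float32)  # weight entries followed by bias entries
    weight, bias = model.reshape(flat)
    assert weight.shape == (3, 4) and bias.shape == (3,)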
|
11538589
|
from dfs import dfs
# Smaller graph used for dfs
start = {
"value": 0, "children": [
{"value": 1, "children": [
{"value": 3, "children": []},
{"value": 4, "children": []}
]}, {
"value": 2, "children": [
{"value": 5, "children": []},
{"value": 6, "children": []}
]
}
]
}
# Bigger graph specifically for this algorithm
# TODO
# Should be 6
print(dfs(start, 6)["value"])
|
11538608
|
import base64
import getopt
import glob
import io
import math
import os
import random
import re
import shutil
import sys
import tempfile
import time
import urllib
import zipfile
# import cupy
import cv2
import h5py
import moviepy
import moviepy.editor
import numpy
import scipy
import scipy.io
import torch
import torchvision
class Basic(torch.nn.Module):
def __init__(self, strType, intChannels):
super(Basic, self).__init__()
if strType == 'relu-conv-relu-conv':
self.netMain = torch.nn.Sequential(
torch.nn.PReLU(num_parameters=intChannels[0], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[0], out_channels=intChannels[1], kernel_size=3, stride=1, padding=1),
torch.nn.PReLU(num_parameters=intChannels[1], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[1], out_channels=intChannels[2], kernel_size=3, stride=1, padding=1)
)
elif strType == 'conv-relu-conv':
self.netMain = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intChannels[0], out_channels=intChannels[1], kernel_size=3, stride=1, padding=1),
torch.nn.PReLU(num_parameters=intChannels[1], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[1], out_channels=intChannels[2], kernel_size=3, stride=1, padding=1)
)
# end
if intChannels[0] == intChannels[2]:
self.netShortcut = None
elif intChannels[0] != intChannels[2]:
self.netShortcut = torch.nn.Conv2d(in_channels=intChannels[0], out_channels=intChannels[2], kernel_size=1, stride=1, padding=0)
# end
# end
def forward(self, tenInput):
if self.netShortcut is None:
return self.netMain(tenInput) + tenInput
elif self.netShortcut is not None:
return self.netMain(tenInput) + self.netShortcut(tenInput)
# end
# end
# end
class Downsample(torch.nn.Module):
def __init__(self, intChannels):
super(Downsample, self).__init__()
self.netMain = torch.nn.Sequential(
torch.nn.PReLU(num_parameters=intChannels[0], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[0], out_channels=intChannels[1], kernel_size=3, stride=2, padding=1),
torch.nn.PReLU(num_parameters=intChannels[1], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[1], out_channels=intChannels[2], kernel_size=3, stride=1, padding=1)
)
# end
def forward(self, tenInput):
return self.netMain(tenInput)
# end
# end
class Upsample(torch.nn.Module):
def __init__(self, intChannels):
super(Upsample, self).__init__()
self.netMain = torch.nn.Sequential(
torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
torch.nn.PReLU(num_parameters=intChannels[0], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[0], out_channels=intChannels[1], kernel_size=3, stride=1, padding=1),
torch.nn.PReLU(num_parameters=intChannels[1], init=0.25),
torch.nn.Conv2d(in_channels=intChannels[1], out_channels=intChannels[2], kernel_size=3, stride=1, padding=1)
)
# end
def forward(self, tenInput):
return self.netMain(tenInput)
# end
# end
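# spatial_filter applies a simple per-channel filter: 'laplacian' convolves each
# channel with a 3x3 four-neighbour Laplacian kernel (useful for finding depth
# discontinuities), while 'median-3' / 'median-5' take a 3x3 or 5x5 median over a
# reflect-padded window.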
def spatial_filter(tenInput, strType):
tenOutput = None
if strType == 'laplacian':
tenLaplacian = tenInput.new_zeros(tenInput.shape[1], tenInput.shape[1], 3, 3)
for intKernel in range(tenInput.shape[1]):
            tenLaplacian[intKernel, intKernel, 0, 1] = -1.0
            tenLaplacian[intKernel, intKernel, 1, 0] = -1.0
            tenLaplacian[intKernel, intKernel, 1, 1] = 4.0
            tenLaplacian[intKernel, intKernel, 1, 2] = -1.0
            tenLaplacian[intKernel, intKernel, 2, 1] = -1.0
# end
tenOutput = torch.nn.functional.pad(input=tenInput, pad=[ 1, 1, 1, 1 ], mode='replicate')
tenOutput = torch.nn.functional.conv2d(input=tenOutput, weight=tenLaplacian)
elif strType == 'median-3':
tenOutput = torch.nn.functional.pad(input=tenInput, pad=[ 1, 1, 1, 1 ], mode='reflect')
tenOutput = tenOutput.unfold(2, 3, 1).unfold(3, 3, 1)
tenOutput = tenOutput.contiguous().view(tenOutput.shape[0], tenOutput.shape[1], tenOutput.shape[2], tenOutput.shape[3], 3 * 3)
tenOutput = tenOutput.median(-1, False)[0]
elif strType == 'median-5':
tenOutput = torch.nn.functional.pad(input=tenInput, pad=[ 2, 2, 2, 2 ], mode='reflect')
tenOutput = tenOutput.unfold(2, 5, 1).unfold(3, 5, 1)
tenOutput = tenOutput.contiguous().view(tenOutput.shape[0], tenOutput.shape[1], tenOutput.shape[2], tenOutput.shape[3], 5 * 5)
tenOutput = tenOutput.median(-1, False)[0]
# end
return tenOutput
# end
class Inpaint(torch.nn.Module):
def __init__(self):
super(Inpaint, self).__init__()
self.netContext = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=4, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True),
torch.nn.PReLU(num_parameters=64, init=0.25),
torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True),
torch.nn.PReLU(num_parameters=64, init=0.25)
)
self.netInput = Basic('conv-relu-conv', [ 3 + 1 + 64 + 1, 32, 32 ])
for intRow, intFeatures in [ (0, 32), (1, 64), (2, 128), (3, 256) ]:
self.add_module(str(intRow) + 'x0' + ' - ' + str(intRow) + 'x1', Basic('relu-conv-relu-conv', [ intFeatures, intFeatures, intFeatures ]))
self.add_module(str(intRow) + 'x1' + ' - ' + str(intRow) + 'x2', Basic('relu-conv-relu-conv', [ intFeatures, intFeatures, intFeatures ]))
self.add_module(str(intRow) + 'x2' + ' - ' + str(intRow) + 'x3', Basic('relu-conv-relu-conv', [ intFeatures, intFeatures, intFeatures ]))
# end
for intCol in [ 0, 1 ]:
self.add_module('0x' + str(intCol) + ' - ' + '1x' + str(intCol), Downsample([ 32, 64, 64 ]))
self.add_module('1x' + str(intCol) + ' - ' + '2x' + str(intCol), Downsample([ 64, 128, 128 ]))
self.add_module('2x' + str(intCol) + ' - ' + '3x' + str(intCol), Downsample([ 128, 256, 256 ]))
# end
for intCol in [ 2, 3 ]:
self.add_module('3x' + str(intCol) + ' - ' + '2x' + str(intCol), Upsample([ 256, 128, 128 ]))
self.add_module('2x' + str(intCol) + ' - ' + '1x' + str(intCol), Upsample([ 128, 64, 64 ]))
self.add_module('1x' + str(intCol) + ' - ' + '0x' + str(intCol), Upsample([ 64, 32, 32 ]))
# end
self.netImage = Basic('conv-relu-conv', [ 32, 32, 3 ])
self.netDisparity = Basic('conv-relu-conv', [ 32, 32, 1 ])
# end
def forward(self, tenImage, tenDisparity, tenExisting):
# tenDepth = (objCommon['fltFocal'] * objCommon['fltBaseline']) / (tenDisparity + 0.0000001)
# tenValid = (spatial_filter(tenDisparity / tenDisparity.max(), 'laplacian').abs() < 0.03).float()
# tenPoints = depth_to_points(tenDepth * tenValid, objCommon['fltFocal'])
# tenPoints = tenPoints.view(1, 3, -1)
tenMean = [ tenImage.view(tenImage.shape[0], -1).mean(1, True).view(tenImage.shape[0], 1, 1, 1), tenDisparity.view(tenDisparity.shape[0], -1).mean(1, True).view(tenDisparity.shape[0], 1, 1, 1) ]
tenStd = [ tenImage.view(tenImage.shape[0], -1).std(1, True).view(tenImage.shape[0], 1, 1, 1), tenDisparity.view(tenDisparity.shape[0], -1).std(1, True).view(tenDisparity.shape[0], 1, 1, 1) ]
tenImage = tenImage.clone()
tenImage -= tenMean[0]
tenImage /= tenStd[0] + 0.0000001
tenDisparity = tenDisparity.clone()
tenDisparity -= tenMean[1]
tenDisparity /= tenStd[1] + 0.0000001
tenContext = self.netContext(torch.cat([ tenImage, tenDisparity ], 1))
# tenRender, tenExisting = render_pointcloud(tenPoints + tenShift, torch.cat([ tenImage, tenDisparity, tenContext ], 1).view(1, 68, -1), objCommon['intWidth'], objCommon['intHeight'], objCommon['fltFocal'], objCommon['fltBaseline'])
# tenExisting = (tenExisting > 0.0).float()
# tenExisting = tenExisting * spatial_filter(tenExisting, 'median-5')
tenRender = torch.cat([tenImage, tenDisparity, tenContext], 1)
tenExisting = tenExisting * spatial_filter(tenExisting, 'median-5')
tenRender = tenRender * tenExisting.clone().detach()
tenColumn = [ None, None, None, None ]
tenColumn[0] = self.netInput(torch.cat([ tenRender, tenExisting ], 1))
tenColumn[1] = self._modules['0x0 - 1x0'](tenColumn[0])
tenColumn[2] = self._modules['1x0 - 2x0'](tenColumn[1])
tenColumn[3] = self._modules['2x0 - 3x0'](tenColumn[2])
intColumn = 1
for intRow in range(len(tenColumn)):
tenColumn[intRow] = self._modules[str(intRow) + 'x' + str(intColumn - 1) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow])
if intRow != 0:
tenColumn[intRow] += self._modules[str(intRow - 1) + 'x' + str(intColumn) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow - 1])
# end
# end
intColumn = 2
for intRow in range(len(tenColumn) -1, -1, -1):
tenColumn[intRow] = self._modules[str(intRow) + 'x' + str(intColumn - 1) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow])
if intRow != len(tenColumn) - 1:
tenUp = self._modules[str(intRow + 1) + 'x' + str(intColumn) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow + 1])
if tenUp.shape[2] != tenColumn[intRow].shape[2]: tenUp = torch.nn.functional.pad(input=tenUp, pad=[ 0, 0, 0, -1 ], mode='constant', value=0.0)
if tenUp.shape[3] != tenColumn[intRow].shape[3]: tenUp = torch.nn.functional.pad(input=tenUp, pad=[ 0, -1, 0, 0 ], mode='constant', value=0.0)
tenColumn[intRow] += tenUp
# end
# end
intColumn = 3
for intRow in range(len(tenColumn) -1, -1, -1):
tenColumn[intRow] = self._modules[str(intRow) + 'x' + str(intColumn - 1) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow])
if intRow != len(tenColumn) - 1:
tenUp = self._modules[str(intRow + 1) + 'x' + str(intColumn) + ' - ' + str(intRow) + 'x' + str(intColumn)](tenColumn[intRow + 1])
if tenUp.shape[2] != tenColumn[intRow].shape[2]: tenUp = torch.nn.functional.pad(input=tenUp, pad=[ 0, 0, 0, -1 ], mode='constant', value=0.0)
if tenUp.shape[3] != tenColumn[intRow].shape[3]: tenUp = torch.nn.functional.pad(input=tenUp, pad=[ 0, -1, 0, 0 ], mode='constant', value=0.0)
tenColumn[intRow] += tenUp
# end
# end
tenImage = self.netImage(tenColumn[0])
tenImage *= tenStd[0] + 0.0000001
tenImage += tenMean[0]
tenDisparity = self.netDisparity(tenColumn[0])
tenDisparity *= tenStd[1] + 0.0000001
tenDisparity += tenMean[1]
return {
'tenExisting': tenExisting,
'tenImage': tenImage.clamp(0.0, 1.0) if self.training == False else tenImage,
'tenDisparity': torch.nn.functional.threshold(input=tenDisparity, threshold=0.0, value=0.0)
}
# end
# end
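# Load the pretrained inpainting weights, renaming the 'module*' keys of the
# published checkpoint to the 'net*' attribute names used by this class.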
netInpaint = Inpaint().cuda().eval()
netInpaint.load_state_dict({ strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.hub.load_state_dict_from_url(url='http://content.sniklaus.com/kenburns/network-inpainting.pytorch', file_name='kenburns-inpainting').items() })
def pointcloud_inpainting(tenImage, tenDisparity, tenExisting):
return netInpaint(tenImage, tenDisparity, tenExisting)
# end
|
11538708
|
import tensorflow as tf
import reader
import numpy as np
class_dim = 10
EPOCHS = 100
BATCH_SIZE = 32
init_model = None
model = tf.keras.models.Sequential([
tf.keras.applications.ResNet50V2(include_top=False, weights=None, input_shape=(128, None, 1)),
tf.keras.layers.ActivityRegularization(l2=0.5),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.GlobalMaxPooling2D(),
tf.keras.layers.Dense(units=class_dim, activation=tf.nn.softmax)
])
model.summary()
# define the optimization method
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
train_dataset = reader.train_reader_tfrecord('dataset/train.tfrecord', EPOCHS, batch_size=BATCH_SIZE)
test_dataset = reader.test_reader_tfrecord('dataset/test.tfrecord', batch_size=BATCH_SIZE)
if init_model:
model.load_weights(init_model)
for batch_id, data in enumerate(train_dataset):
    # [parameter may need adjusting] shape of the configured mel spectrogram
sounds = data['data'].numpy().reshape((-1, 128, 128, 1))
labels = data['label']
    # run the training step
with tf.GradientTape() as tape:
predictions = model(sounds)
        # compute the loss
train_loss = tf.keras.losses.sparse_categorical_crossentropy(labels, predictions)
train_loss = tf.reduce_mean(train_loss)
        # compute the accuracy
train_accuracy = tf.keras.metrics.sparse_categorical_accuracy(labels, predictions)
train_accuracy = np.sum(train_accuracy.numpy()) / len(train_accuracy.numpy())
    # update the gradients
gradients = tape.gradient(train_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
if batch_id % 20 == 0:
print("Batch %d, Loss %f, Accuracy %f" % (batch_id, train_loss.numpy(), train_accuracy))
if batch_id % 200 == 0 and batch_id != 0:
test_losses = list()
test_accuracies = list()
for d in test_dataset:
            # [parameter may need adjusting] shape of the configured mel spectrogram
test_sounds = d['data'].numpy().reshape((-1, 128, 128, 1))
test_labels = d['label']
test_result = model(test_sounds)
            # compute the loss
test_loss = tf.keras.losses.sparse_categorical_crossentropy(test_labels, test_result)
test_loss = tf.reduce_mean(test_loss)
test_losses.append(test_loss)
            # compute the accuracy
test_accuracy = tf.keras.metrics.sparse_categorical_accuracy(test_labels, test_result)
test_accuracy = np.sum(test_accuracy.numpy()) / len(test_accuracy.numpy())
test_accuracies.append(test_accuracy)
print('=================================================')
print("Test, Loss %f, Accuracy %f" % (
sum(test_losses) / len(test_losses), sum(test_accuracies) / len(test_accuracies)))
print('=================================================')
        # save the model
model.save(filepath='models/resnet50.h5')
model.save_weights(filepath='models/model_weights.h5')
|
11538733
|
import unittest
from datahub.utilities.urns.data_flow_urn import DataFlowUrn
from datahub.utilities.urns.data_job_urn import DataJobUrn
from datahub.utilities.urns.error import InvalidUrnError
class TestDataJobUrn(unittest.TestCase):
def test_parse_urn(self) -> None:
data_job_urn_str = (
"urn:li:dataJob:(urn:li:dataFlow:(airflow,flow_id,prod),job_id)"
)
data_job_urn = DataJobUrn.create_from_string(data_job_urn_str)
assert data_job_urn.get_data_flow_urn() == DataFlowUrn.create_from_string(
"urn:li:dataFlow:(airflow,flow_id,prod)"
)
assert data_job_urn.get_job_id() == "job_id"
assert data_job_urn.__str__() == data_job_urn_str
assert data_job_urn == DataJobUrn(
"dataJob", ["urn:li:dataFlow:(airflow,flow_id,prod)", "job_id"]
)
def test_invalid_urn(self) -> None:
with self.assertRaises(InvalidUrnError):
DataJobUrn.create_from_string(
"urn:li:abc:(urn:li:dataFlow:(airflow,flow_id,prod),job_id)"
)
with self.assertRaises(InvalidUrnError):
DataJobUrn.create_from_string("urn:li:dataJob:(urn:li:user:abc,job_id)")
with self.assertRaises(InvalidUrnError):
DataJobUrn.create_from_string(
"urn:li:dataJob:(urn:li:dataFlow:(airflow,flow_id,prod))"
)
|
11538751
|
from quarkchain.utils import sha3_256
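# Accumulator keeps a rolling list of partial Merkle roots, one slot per tree
# level (a binary-counter / Merkle-mountain-range style structure). Appending a
# leaf hashes it upward, folding filled slots together, so hash() over the slot
# list matches calc_list_hash() over the full item list.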
class Accumulator:
def __init__(self):
self.hlist = []
def append(self, data):
h = sha3_256(data)
for i in reversed(range(len(self.hlist))):
if self.hlist[i] is None:
self.hlist[i] = h
return
else:
h = sha3_256(self.hlist[i] + h)
self.hlist[i] = None
self.hlist.insert(0, h)
def hash(self):
d = b""
for v in self.hlist:
if v is None:
d += bytes(32)
else:
d += v
return sha3_256(d)
def calc_list_hash(l):
hl = [sha3_256(d) for d in l]
hlist = []
while len(hl) != 0:
nhl = []
for i in range(len(hl) // 2):
nhl.append(sha3_256(hl[i * 2] + hl[i * 2 + 1]))
if len(hl) % 2 != 0:
hlist.append(hl[-1])
else:
hlist.append(None)
hl = nhl
d = b""
for v in reversed(hlist):
if v is None:
d += bytes(32)
else:
d += v
return sha3_256(d)
def main():
ll = []
n = 10
a = Accumulator()
assert calc_list_hash(ll) == a.hash()
for i in range(n):
ll.append(i.to_bytes(4, byteorder="little"))
a.append(i.to_bytes(4, byteorder="little"))
assert calc_list_hash(ll) == a.hash()
print("passed")
if __name__ == "__main__":
main()
|
11538768
|
import eosapi
def dlbw(_from, _to, net, cpu):
args = {'from':_from,
'receiver':_to,
'stake_net_quantity':'%.4f EOS'%(net,),
'stake_cpu_quantity':'%.4f EOS'%(cpu,),
'transfer':False
}
eosapi.push_action('eosio', 'delegatebw', args, {_from:'active'})
def t1():
while True:
import time
import traceback
try:
ct1.test3(500)
time.sleep(1.0)
except Exception as e:
traceback.print_exc()
time.sleep(1.0)
def t2():
while True:
import time
try:
ht.test3()
time.sleep(1.0)
except Exception as e:
print(e)
time.sleep(1.0)
def t3():
while True:
import time
try:
it.test3()
time.sleep(1.0)
except Exception as e:
print(e)
time.sleep(1.0)
|
11538770
|
from django.db import models
# This table is responsible for mapping between group types for a Team/Project and event columns
# to add group keys
class GroupTypeMapping(models.Model):
class Meta:
constraints = [
models.UniqueConstraint(fields=["team", "group_type"], name="unique group types for team"),
models.UniqueConstraint(fields=["team", "group_type_index"], name="unique event column indexes for team"),
models.CheckConstraint(
check=models.Q(group_type_index__lte=5), name="group_type_index is less than or equal 5"
),
]
team: models.ForeignKey = models.ForeignKey("Team", on_delete=models.CASCADE)
group_type: models.CharField = models.CharField(max_length=400, null=False, blank=False)
group_type_index: models.IntegerField = models.IntegerField(null=False, blank=False)
|
11538825
|
import os
# Pre any tests discovery, windows support for running unit tests in WCA.
# We do not want to run our code in Windows environment but we want be able to run tests on NT.
if os.name == 'nt':
import sys
from unittest import mock
# For some tests we require USER env set
os.environ['USER'] = 'root'
# Mock all CDLL lib calls
mock.patch('ctypes.CDLL').__enter__()
# Mock all unavailable packages for Windows
sys.modules['kazoo'] = mock.MagicMock()
sys.modules['kazoo.client'] = mock.MagicMock()
sys.modules['kazoo.client'].KazooClient = mock.MagicMock(
'windowspath_mock_kazoo', return_value=mock.Mock(get=mock.Mock(return_value=[b'value'])))
sys.modules['kazoo.handlers'] = mock.MagicMock()
sys.modules['kazoo.exceptions'] = mock.MagicMock()
sys.modules['kazoo.handlers.threading'] = mock.MagicMock()
sys.modules['kazoo.handlers.utils'] = mock.MagicMock()
sys.modules['resource'] = mock.MagicMock()
# Those functions are not available on windows
os.geteuid = mock.MagicMock()
os.getuid = mock.MagicMock()
# For internal WCA we need always to join path using posix style.
import posixpath
import ntpath
ntpath.join = posixpath.join
|
11538867
|
import os
import json
import gensim
import datetime
import numpy as np
import pandas as pd
from collections import namedtuple
from collections import OrderedDict
from collections import defaultdict
from smart_open import smart_open
from gensim.models import doc2vec
from gensim.models.doc2vec import TaggedDocument
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
from src.utils.io import load_label
class Text2Vec():
"""
Text2Vec: Document embeddings based on transcripts
---
Attributes
-----------
model: doc2vec.Doc2Vec()
doc2vec instance to build on transcripts and corpus
model_name: str
name of doc2vec instance
all_docs: list()
all documents to train doc2vec model
save_dir: str
saving directory for doc2vec model
data_config: dict()
configuration file for data
model_config: dict()
configuration file for model
interview_transcript: namedtuple
data structure used for training
-----------------------------------------
Functions
-----------
prepare_data(corpus): public
prepare training data
load_config(): public
load configuration for model
build_model(): public
build doc2vec model
train_model(): public
train doc2vec model
infer_embedding(partition): public
infer embeddings given documents using trained model
load_embedding(partition): public
load embeddings inferred from trained model
evaluate_model(given_word): public
evaluate doc2vec model by finding similar words
save_model(): public
save doc2vec model
load_model(): public
load doc2vec model
"""
def __init__(self, build_on_corpus=False):
        # :param build_on_corpus: whether to also include the Turkish corpus in training
self.model = None
self.model_name = ''
self.fitted = False
self.all_docs = []
self.save_dir = ''
self.data_config = json.load(open('./config/data.json', 'r'))
self.model_config = json.load(open('./config/model.json', 'r'))
self.interview_transcript = namedtuple('Interview_Transcript', 'words tags sentiment')
self.prepare_data(build_on_corpus)
self.load_config()
def prepare_data(self, corpus):
"""prepared training data
"""
labels = dict()
_, _, level_dev, level_train = load_label()
labels['train'] = level_train
labels['dev'] = level_dev
# evaluate without test partition as there is no label
for partition in ['train', 'dev']:
with smart_open(self.data_config['transcript_preproc'][partition], 'rb', encoding='utf-8') as all_data:
for line_no, line in enumerate(all_data):
tokens = gensim.utils.to_unicode(line).split()
words = tokens
tags = [line_no]
sentiment = [labels[partition][line_no]] if partition != 'test' else [None]
self.all_docs.append(self.interview_transcript(words, tags, sentiment))
        # use the additional Turkish corpus for a performance boost
if corpus:
with smart_open(self.data_config['turkish_corpus_proc'], 'rb', encoding='utf-8') as all_data:
for line_no, line in enumerate(all_data):
tokens = gensim.utils.to_unicode(line).split()
words = tokens
tags = [line_no]
sentiment = [None]
self.all_docs.append(self.interview_transcript(words, tags, sentiment))
def load_config(self):
"""load configuration for model
"""
self.dm = self.model_config['doc2vec']['dm']
self.vector_size = self.model_config['doc2vec']['vector_size']
self.window_size = self.model_config['doc2vec']['window_size']
self.negative = self.model_config['doc2vec']['negative']
self.hs = self.model_config['doc2vec']['hs']
self.min_count = self.model_config['doc2vec']['min_count']
self.sample = self.model_config['doc2vec']['sample']
self.epochs = self.model_config['doc2vec']['epochs']
def build_model(self):
"""build doc2vec model
"""
self.model = doc2vec.Doc2Vec(dm=self.dm,
vector_size=self.vector_size,
window=self.window_size,
negative=self.negative,
hs=self.hs,
min_count=self.min_count,
sample=self.sample,
epochs=self.epochs,
workers=8)
self.model_name = str(self.model).replace('/','-')
self.save_dir = os.path.join(self.model_config['doc2vec']['save_dir'], self.model_name)
print("\ndoc2vec %s model initialized." % self.model_name)
if not os.path.isdir(self.save_dir):
self.fitted = False
else:
self.fitted = True
print("\nbuilding vocabulary for doc2vec model ...")
self.model.build_vocab(self.all_docs)
print("\nvocabulary scanned & built.")
def train_model(self):
"""train doc2vec model
"""
if self.fitted:
print("\nmodel already trained ---", self.model_name)
self.load_model()
return
print("\ntraining doc2vec %s model (with 8 threads) ..." % self.model_name)
self.model.train(self.all_docs,
total_examples=len(self.all_docs),
epochs=self.model.epochs)
print("\ntraining completed.")
self.save_model()
def infer_embedding(self, partition):
"""infer embeddings given documents using trained model
"""
infer_docs = []
labels = dict()
_, _, level_dev, level_train = load_label()
labels['train'] = level_train
labels['dev'] = level_dev
with smart_open(self.data_config['transcript_preproc'][partition], 'rb', encoding='utf-8') as all_data:
for line_no, line in enumerate(all_data):
tokens = gensim.utils.to_unicode(line).split()
words = tokens
tags = [line_no]
sentiment = [labels[partition][line_no]]
infer_docs.append(self.interview_transcript(words, tags, sentiment))
infer_vecs = [self.model.infer_vector(doc.words, alpha=.1) for doc in infer_docs]
infer_labels = [doc.sentiment for doc in infer_docs]
# save inferred vectors and labels
print("\nsaving inferred vectors and labels to file")
if os.path.isdir(self.save_dir):
np.save(os.path.join(self.save_dir, 'vectors_%s' % partition), infer_vecs)
np.save(os.path.join(self.save_dir, 'labels_%s' % partition), infer_labels)
def load_embedding(self, partition):
"""load embeddings inferred from trained model
"""
if os.path.isdir(self.save_dir):
infer_vecs = np.load(os.path.join(self.save_dir, 'vectors_%s.npy' % partition))
infer_labels = np.load(os.path.join(self.save_dir, 'labels_%s.npy' % partition))
else:
infer_vecs, infer_labels = [], []
return infer_vecs, infer_labels
def evaluate_model(self):
"""evaluate doc2vec model by finding similar words
"""
given_word = 'iyi'
given_doc_id = 9
output = smart_open(os.path.join(self.save_dir, 'evaluation.txt'), 'w', encoding='utf-8')
similar_words = self.model.wv.most_similar(given_word, topn=20)
print("--" * 20)
print("\nmost similar words to given word %s for doc2vec %s model are as follows" % (given_word, self.model_name))
output.write("--\n")
output.write("\nmost similar words to given word %s for doc2vec %s model are as follows" % (given_word, self.model_name))
for idx, word in enumerate(similar_words):
print(idx, word)
output.write("%d %s\n" % (idx, word))
output.write("--\n")
inferred_doc_vec = self.model.infer_vector(self.all_docs[given_doc_id].words)
print("--" * 20)
print("\nmost similar transcripts in document embedding space:\n%s:\n%s" % (self.model, self.model.docvecs.most_similar([inferred_doc_vec], topn=3, clip_start=0, clip_end=len(self.all_docs))))
output.write("\nmost similar transcripts in document embedding space:\n%s:\n%s" % (self.model, self.model.docvecs.most_similar([inferred_doc_vec], topn=3, clip_start=0, clip_end=len(self.all_docs))))
output.write("--\n")
sims = self.model.docvecs.most_similar(given_doc_id, topn=len(self.all_docs), clip_start=0, clip_end=len(self.all_docs))
print("--" * 20)
print("\nTarget: (%d): <<%s>>\n" % (given_doc_id, ' '.join(self.all_docs[given_doc_id].words)))
output.write("\nTarget: (%d): <<%s>>\n" % (given_doc_id, ' '.join(self.all_docs[given_doc_id].words)))
output.write("--\n")
for label, index in [('MOST',0), ('MEDIAN',len(sims)//2), ('LEAST',len(sims)-1)]:
print("\nall the cosine similarity distance\n%s %s: <<%s>>\n" % (label, sims[index], ' '.join(self.all_docs[sims[index][0]].words)))
output.write("\nall the cosine similarity distance\n%s %s: <<%s>>\n" % (label, sims[index], ' '.join(self.all_docs[sims[index][0]].words)))
output.write("--\n")
print("--" * 20)
output.close()
def save_model(self):
"""save doc2vec model
"""
print("\nsaving doc2vec %s model to file" % self.model_name)
os.mkdir(self.save_dir)
self.model.save(os.path.join(self.save_dir, 'doc2vec.model'))
readme_notes = np.array(["This %s model is trained on %s" % (self.model_name, str(datetime.datetime.now()))])
np.savetxt(os.path.join(self.save_dir, 'readme.txt'), readme_notes, fmt="%s")
def load_model(self):
"""load doc2vec model
"""
if os.path.isdir(self.save_dir):
print("\nloading doc2vec %s model from file" % self.model_name)
self.model = doc2vec.Doc2Vec.load(os.path.join(self.save_dir, 'doc2vec.model'))
else:
print("\n%s model does not exist" % self.model_name)
def process_metadata_tensorboard(self):
infer_vector_train, infer_label_train = self.load_embedding('train')
infer_vector_dev, infer_label_dev = self.load_embedding('dev')
length_train, length_dev = len(infer_label_train), len(infer_label_dev)
print("\nsaving embeddings to metadata file for tensorboard projector visualization")
with smart_open(os.path.join(self.save_dir, 'label.tsv'), 'wb', encoding='utf-8') as label_f:
label_f.write("Index\tLabel\n")
for i in range(length_train):
label_f.write("train_%d\t%d\n" % (i+1, infer_label_train[i]))
for j in range(length_dev):
label_f.write("dev_%d\t%d\n" % (j+1, infer_label_dev[j]))
with smart_open(os.path.join(self.save_dir, 'metadata.tsv'), 'wb', encoding='utf-8') as data_f:
for a in range(len(infer_vector_train)):
for b in range(len(infer_vector_train[a])):
data_f.write("%f\t" % infer_vector_train[a][b])
data_f.write("\n")
for c in range(len(infer_vector_dev)):
for d in range(len(infer_vector_dev[c])):
data_f.write("%f\t" % infer_vector_dev[c][d])
data_f.write("\n")
print("\nmetadata processing done\nplease upload the .tsv files onto projector.tensorflow.org to visualize embeddings")
|
11538902
|
import unittest
from organizer.repository import Repository
class TestRepository(unittest.TestCase):
def testQueryListReturnsNoneForNoMatches(self):
repository = Repository("fakedir")
theList = ["foo", "bar", "biz", "bat"]
        self.assertEqual(None, repository._queryList("bilbo", theList))
def testQueryListReturnsBestMatch(self):
repository = Repository("fakedir")
theList = ["bil", "billbo", "BILbiz", "BILBO"]
self.assertEquals("BILBO",repository._queryList("bilbo", theList))
def testScoreNameIncreasesByTenForExactMatches(self):
repository = Repository("fakedir")
masterList = [[0,"bil"], [1,"billbo"], [2,"BILbiz"], [3,"BILBO"]]
repository._scoreName("bilbo", masterList)
        self.assertEqual([13, "BILBO"], masterList[3])
def testScoreNameIncreasesByOneForPartialMatches(self):
repository = Repository("fakedir")
masterList = [[0,"bil"], [1,"billbo"], [2,"BILbiz"], [3,"BILBO"]]
repository._scoreName("bi",masterList)
        self.assertEqual([4, "BILBO"], masterList[3])
def testScoreNameNotIncreaseOnNonMatch(self):
repository = Repository("fakedir")
masterList = [[0,"bil"], [1,"billbo"], [2,"BILbiz"], [3,"BILBO"]]
repository._scoreName("xx",masterList)
        self.assertEqual([3, "BILBO"], masterList[3])
def testScoreNameNotIncreaseOnBlankMatch(self):
repository = Repository("fakedir")
masterList = [[0,"bil"], [1,"billbo"], [2,"BILbiz"], [3,"BILBO"]]
repository._scoreName("",masterList)
        self.assertEqual([3, "BILBO"], masterList[3])
|
11538980
|
import pyemma.msm as msm
import pyemma.plots as mplt
import helper
import numpy as np
def selectTPTSets(MSM_object, indexA, indexB):
""" Extract from the sets of the PCCA clustering the sets that will serve
as the extrems of the TPT
"""
PCCAsets = MSM_object.metastable_sets
SetA = PCCAsets[indexA]
SetB = PCCAsets[indexB]
return SetA, SetB
def createTPT(MSM_object, A, B):
""" Calculate the reactive flux between sets A and B.
Return a ReactiveFlux object"""
return msm.tpt(MSM_object, A, B)
def coarseTPT(TPT_object, MSM_object):
"""Coarse the TPT object into one with as many states as the PCCA to ease
visualization """
(foo, coarseTPT) = TPT_object.coarse_grain(MSM_object.metastable_sets)
return coarseTPT
def plotTPT(TPT_object, state_labels='auto', outfile=None):
""" Plot the flux network from a Reactive Flux object"""
flux_figure = mplt.plot_flux(TPT_object,state_labels=state_labels)
    if outfile is not None:
flux_figure.savefig(outfile)
return flux_figure
def writeTPTOutput(TPT_object):
""" Write the main properties of the TPT calculated"""
helper.makeFolder("tpt")
with open("tpt/sets.dat","w") as f:
f.write("TPT created with PCCA sets: %d and %d"%(TPT_object.A[0],TPT_object.B[0]))
np.savetxt("tpt/backward_committor.dat", TPT_object.backward_committor)
np.savetxt("tpt/forward_committor.dat", TPT_object.forward_committor)
np.savetxt("tpt/gross_flux.dat", TPT_object.gross_flux)
np.savetxt("tpt/net_flux.dat", TPT_object.net_flux)
|
11538999
|
expected_output = {
"03AA.BB00.0000.0200.0001": {
"interface": "Port-channel1",
"redundancy_mode": "all-active",
"df_wait_time": 3,
"split_horizon_label": 16,
"state": "Ready",
"encap_type": "mpls",
"ordinal": 1,
"core_isolation": "No",
"rd": {
"4.4.4.3:1" : {
"export_rt": ["100:2"],
}
},
"forwarder_list": [
"3.3.3.3",
"4.4.4.3"
],
},
}
|
11539046
|
import torch
from torch.autograd import Variable, grad, gradcheck
from qmctorch.wavefunction.jastrows.distance import ElectronElectronDistance
import unittest
torch.set_default_tensor_type(torch.DoubleTensor)
def hess(out, pos):
# compute the jacobian
z = Variable(torch.ones(out.shape))
jacob = grad(out, pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
# compute the diagonal element of the Hessian
z = Variable(torch.ones(jacob.shape[0]))
hess = torch.zeros(jacob.shape)
for idim in range(jacob.shape[1]):
tmp = grad(jacob[:, idim], pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
hess[:, idim] = tmp[:, idim]
return hess
class TestElecElecDistance(unittest.TestCase):
def setUp(self):
self.nup, self.ndown = 1, 1
self.nelec = self.nup + self.ndown
self.nbatch = 5
self.pos = torch.rand(self.nbatch, self.nelec * 3)
self.pos.requires_grad = True
self.edist = ElectronElectronDistance(self.nelec)
def test_grad_distance(self):
"""test computation the gradient of the distance.
Note that the edist function does not compute all the derivative terms
Instead when calling dr = edis(pos,1) -> Nbatch x 3 x Nelec x Nelec
with : dr(:,0,i,j) = d/d x_i r_{ij}
dr(:,1,i,j) = d/d y_i r_{ij}
dr(:,2,i,j) = d/d z_i r_{ij}
Autograd will compute drr_grad[i] = d/d_xi (sum_{a,b} r_{ab})
        Therefore the edist(pos,1) misses (on purpose) the terms d/dx_i r_{ki}
However we always have
d r_{ij} / d x_i = d r_{ij} / d x_j (= - d r_{ji} / d x_i)
"""
# elec-elec distance
r = self.edist(self.pos)
# derivative of r wrt the first elec d/dx_i r_{ij}
di_r = self.edist(self.pos, derivative=1)
# derivative of r wrt the second elec d/dx_j r_{ij} (see notes)
dj_r = di_r
# sum
dr = di_r + dj_r
# compute the der with autograd
dr_grad = grad(r, self.pos,
grad_outputs=torch.ones_like(r))[0]
# check sum
assert(torch.allclose(dr.sum(), dr_grad.sum(), atol=1E-5))
# see the notes for the explanation of the factor 2
        dr = dr.sum(-1).permute(0, 2, 1).reshape(self.nbatch, -1)
assert(torch.allclose(dr, dr_grad))
if __name__ == "__main__":
# unittest.main()
t = TestElecElecDistance()
t.setUp()
t.test_grad_distance()
|
11539081
|
SECRET_KEY = "fake-secret-key"
INSTALLED_APPS = [
"tests",
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
MIDDLEWARE_CLASSES = []
KAWASEMI = {
"CHANNELS": {
"hipchat": {
"_backend": "kawasemi.backends.hipchat.HipChatChannel",
# Required
"api_id": "api_id",
"token": "token",
# Optional
"base_url": "base_url",
"color": "red",
"notify": True
},
"slack": {
"_backend": "kawasemi.backends.slack.SlackChannel",
# Required
"url": "url",
# Optional
"username": "username",
"icon_emoji": ":smile:",
"channel": "channel"
},
"twitter": {
"_backend": "kawasemi.backends.twitter.TwitterChannel",
# Required
"api_key": "api_key",
"api_secret": "api_secret",
"access_token": "access_token",
"access_token_secret": "access_token_secret"
},
"yo": {
"_backend": "kawasemi.backends.yo.YoChannel",
# Required
"api_token": "api_token",
# Optional
"username": "username"
}
}
}
|
11539082
|
import mechanize
url = "http://www.webscantest.com/business/access.php?serviceid="
attackNumber = 1
for i in range(5):
res = mechanize.urlopen(url+str(i))
content = res.read()
# check if the content is accessible
if content.find("You service") > 0:
print "Possible Direct Object Reference"
output = open('response/'+str(attackNumber)+'.txt', 'w')
output.write(content)
output.close()
print attackNumber
attackNumber += 1
|
11539096
|
from django.conf import settings # noqa
from appconf import AppConf
class SymposionAppConf(AppConf):
VOTE_THRESHOLD = 3
|
11539119
|
from .abstract import Module, MultiSingleInputModule, MultiMultiInputModule
from .. import utils
__all__ = ['Activation', 'Sum', 'SequentialSum', 'ConcurrentSum']
@utils.add_simple_repr
@utils.no_dim_change_op
class Activation(Module):
"""
Applies a non-linear function to the incoming input.
Parameters
----------
activation
non-linear function to activate the linear result.
It accepts any callable function
as well as a recognizable ``str``.
A list of possible ``str`` is in :func:`~neuralnet_pytorch.utils.function`.
input_shape
shape of the input tensor. Can be ``None``.
kwargs
extra keyword arguments to pass to activation.
"""
def __init__(self, activation='relu', input_shape=None, **kwargs):
super().__init__(input_shape)
self.activation = utils.function(activation, **kwargs)
def forward(self, input, *args, **kwargs):
return self.activation(input)
def extra_repr(self):
s = 'activation={}'.format(self.activation.__name__)
return s
class Sum(MultiSingleInputModule):
"""
Sums the outputs of multiple modules given an input tensor.
A subclass of :class:`~neuralnet_pytorch.layers.MultiSingleInputModule`.
See Also
--------
:class:`~neuralnet_pytorch.layers.MultiSingleInputModule`
:class:`~neuralnet_pytorch.layers.MultiMultiInputModule`
:class:`~neuralnet_pytorch.layers.SequentialSum`
:class:`~neuralnet_pytorch.layers.ConcurrentSum`
:class:`~neuralnet_pytorch.resizing.Cat`
:class:`~neuralnet_pytorch.resizing.SequentialCat`
:class:`~neuralnet_pytorch.resizing.ConcurrentCat`
"""
def __init__(self, *modules_or_tensors):
super().__init__(*modules_or_tensors)
def forward(self, input, *args, **kwargs):
outputs = super().forward(input)
return sum(outputs)
@property
@utils.validate
def output_shape(self):
if None in self.input_shape:
return None
shapes_transposed = [item for item in zip(*self.input_shape)]
input_shape_none_filtered = list(map(utils.get_non_none, shapes_transposed))
return tuple(input_shape_none_filtered)
class SequentialSum(Sum):
"""
Sums the intermediate outputs of multiple sequential modules given an input tensor.
A subclass of :class:`~neuralnet_pytorch.layers.Sum`.
See Also
--------
:class:`~neuralnet_pytorch.layers.MultiSingleInputModule`
:class:`~neuralnet_pytorch.layers.MultiMultiInputModule`
:class:`~neuralnet_pytorch.layers.Sum`
:class:`~neuralnet_pytorch.layers.ConcurrentSum`
:class:`~neuralnet_pytorch.resizing.Cat`
:class:`~neuralnet_pytorch.resizing.SequentialCat`
:class:`~neuralnet_pytorch.resizing.ConcurrentCat`
"""
def __init__(self, *modules):
super().__init__(*modules)
def forward(self, input, *args, **kwargs):
outputs = []
output = input
for name, module in self.named_children():
if name.startswith('tensor'):
outputs.append(module())
else:
output = module(output)
outputs.append(output)
return sum(outputs)
class ConcurrentSum(MultiMultiInputModule):
"""
Sums the outputs of multiple modules given input tensors.
A subclass of :class:`~neuralnet_pytorch.layers.MultiMultiInputModule`.
See Also
--------
:class:`~neuralnet_pytorch.layers.MultiSingleInputModule`
:class:`~neuralnet_pytorch.layers.MultiMultiInputModule`
:class:`~neuralnet_pytorch.layers.Sum`
:class:`~neuralnet_pytorch.layers.SequentialSum`
:class:`~neuralnet_pytorch.resizing.Cat`
:class:`~neuralnet_pytorch.resizing.SequentialCat`
:class:`~neuralnet_pytorch.resizing.ConcurrentCat`
"""
def __init__(self, *modules_or_tensors):
super().__init__(*modules_or_tensors)
def forward(self, *input, **kwargs):
outputs = super().forward(*input)
return sum(outputs)
@property
@utils.validate
def output_shape(self):
if None in self.input_shape:
return None
shapes_transposed = [item for item in zip(*self.input_shape)]
input_shape_none_filtered = list(map(utils.get_non_none, shapes_transposed))
return tuple(input_shape_none_filtered)
|
11539131
|
from HTMLParser import HTMLParser
from flask.ext.restful import fields
class HTMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = HTMLStripper()
s.feed(html)
return s.get_data()
class HTMLField(fields.Raw):
def format(self, value):
return strip_tags(str(value))
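if __name__ == '__main__':
    # Minimal usage sketch: strip_tags drops the markup and keeps only the text nodes.
    print strip_tags('<p>Hello <b>world</b>!</p>')  # -> Hello world!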
|
11539132
|
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.api import VAR
import statsmodels.api as sm
import copy
import numpy as np
import pandas as pd
import pingouin
from scipy.stats import pearsonr
p_val = .05
def adfuller_df(df, maxlag, regression = "c"):
dct = {}
for key, val in df.items():
        dct[key] = adfuller(val, maxlag=maxlag, autolag="aic", regression=regression)[1]  # index [1] is the p-value
return dct
def cointegration_df(df):
dct = {}
for key1 in df:
dct[key1] = {}
for key2 in df:
if key1 == key2:
dct[key1][key2] = np.nan
else:
dct[key1][key2] = coint_johansen(df[[key1,key2]], det_order=0, k_ar_diff=1).lr1
return pd.DataFrame(dct)
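if __name__ == "__main__":
    # Minimal usage sketch on synthetic data: two near-random walks, one of which is a noisy
    # copy of the other, so the Johansen trace statistic for that pair should be comparatively large.
    rng = np.random.default_rng(0)
    x = np.cumsum(rng.normal(size=500))
    df = pd.DataFrame({
        "x": x,
        "y": x + rng.normal(scale=0.1, size=500),
        "z": np.cumsum(rng.normal(size=500)),
    })
    print(adfuller_df(df, maxlag=10))
    print(cointegration_df(df))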
|
11539141
|
from typing import List


class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
if not matrix:
return False
m, n = len(matrix), len(matrix[0])
i, j = 0, n - 1
while -1 < i < m and -1 < j < n:
tmp = matrix[i][j]
if tmp == target:
return True
elif tmp < target:
i += 1
else:
j -= 1
return False
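if __name__ == "__main__":
    # Minimal usage sketch: rows and columns are sorted in ascending order, which is the
    # precondition the staircase search above relies on (start at top-right, O(m + n) time).
    grid = [[1, 4, 7, 11], [2, 5, 8, 12], [3, 6, 9, 16], [10, 13, 14, 17]]
    print(Solution().searchMatrix(grid, 5))   # True
    print(Solution().searchMatrix(grid, 15))  # False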
|
11539181
|
from nanopore.analyses.abstractAnalysis import AbstractAnalysis
from jobTree.src.bioio import fastqRead, fastaRead, system
from nanopore.analyses.utils import samIterator, getFastaDictionary, UniqueList
import pysam, os, itertools
from collections import Counter
from math import log
class IndelKmerAnalysis(AbstractAnalysis):
"""Runs kmer analysis"""
def indelKmerFinder(self, aligned):
r = UniqueList(); s = self.kmerSize+1
for i in xrange(len(aligned)):
r.add(aligned[i])
if r[0] == None or (len(r) == s and r[self.kmerSize] == None) or (None not in r and len(r) == s):
r.remove(last=False)
elif None in r and len(r) == s:
yield (r[0],r[self.kmerSize])
r.remove(last=False)
def countIndelKmers(self):
sam = pysam.Samfile(self.samFile)
refKmers, readKmers = Counter(), Counter()
refDict = getFastaDictionary(self.referenceFastaFile)
for x in refDict:
refDict[x] = tuple(refDict[x])
for record in samIterator(sam):
refSeq = refDict[sam.getrname(record.rname)]
readSeq = tuple(record.query)
readAligned, refAligned = zip(*record.aligned_pairs)
            for start, end in self.indelKmerFinder(readAligned):
                s = readSeq[start:end+1]
                readKmers[s] += 1
                readKmers[s[::-1]] += 1  # also count the reversed kmer, mirroring the reference loop below
for start, end in self.indelKmerFinder(refAligned):
s = refSeq[start:end+1]
refKmers[s] += 1
refKmers[s[::-1]] += 1
return (refKmers, readKmers)
def analyzeCounts(self, refKmers, readKmers, name):
refSize, readSize = sum(refKmers.values()), sum(readKmers.values())
outf = open(os.path.join(self.outputDir, name + "kmer_counts.txt"), "w")
outf.write("kmer\trefCount\trefFraction\treadCount\treadFraction\tlogFoldChange\n")
if refSize > 0 and readSize > 0:
for kmer in itertools.product("ATGC", repeat=5):
refFraction, readFraction = 1.0 * refKmers[kmer] / refSize, 1.0 * readKmers[kmer] / readSize
if refFraction == 0:
foldChange = "-Inf"
elif readFraction == 0:
foldChange = "Inf"
else:
foldChange = -log(readFraction / refFraction)
outf.write("\t".join(map(str,["".join(kmer), refKmers[kmer], refFraction, readKmers[kmer], readFraction, foldChange]))+"\n")
outf.close()
system("Rscript nanopore/analyses/kmer_analysis.R {} {} {} {} {}".format(os.path.join(self.outputDir, name + "kmer_counts.txt"), os.path.join(self.outputDir, name + "pval_kmer_counts.txt"), os.path.join(self.outputDir, name + "top_bot_sigkmer_counts.txt"), os.path.join(self.outputDir, name + "volcano_plot.pdf"), "Indel_Kmer"))
def run(self, kmerSize=5):
AbstractAnalysis.run(self)
self.kmerSize = kmerSize
#analyze kmers around the boundaries of indels
indelRefKmers, indelReadKmers = self.countIndelKmers()
if len(indelRefKmers) > 0 and len(indelReadKmers) > 0:
self.analyzeCounts(indelRefKmers, indelReadKmers, "indel_bases_")
self.finish()
|
11539200
|
from asyncpraw.util import camel_to_snake, snake_case_keys
from .. import UnitTest
class TestSnake(UnitTest):
def test_camel_to_snake(self):
test_strings = (
("camelCase", "camel_case"),
("PascalCase", "pascal_case"),
("camelCasePlace", "camel_case_place"),
("Pascal8Camel8Snake", "pascal8_camel8_snake"),
("HTTPResponseCode", "http_response_code"),
("ResponseHTTP", "response_http"),
("ResponseHTTP200", "response_http200"),
("getHTTPResponseCode", "get_http_response_code"),
("get200HTTPResponseCode", "get200_http_response_code"),
("getHTTP200ResponseCode", "get_http200_response_code"),
("12PolarBears", "12_polar_bears"),
("11buzzingBees", "11buzzing_bees"),
("TacocaT", "tacoca_t"),
("fooBARbaz", "foo_ba_rbaz"),
("foo_BAR_baz", "foo_bar_baz"),
("Base_BASE", "base_base"),
("Case_Case", "case_case"),
("FACE_Face", "face_face"),
)
for camel, snake in test_strings:
assert camel_to_snake(camel) == snake
def test_camel_to_snake_dict(self):
test_strings = {
"camelCase": "camel_case",
"PascalCase": "pascal_case",
"camelCasePlace": "camel_case_place",
"Pascal8Camel8Snake": "pascal8_camel8_snake",
"HTTPResponseCode": "http_response_code",
"ResponseHTTP": "response_http",
"ResponseHTTP200": "response_http200",
"getHTTPResponseCode": "get_http_response_code",
"get200HTTPResponseCode": "get200_http_response_code",
"getHTTP200ResponseCode": "get_http200_response_code",
"12PolarBears": "12_polar_bears",
"11buzzingBees": "11buzzing_bees",
"TacocaT": "tacoca_t",
"fooBARbaz": "foo_ba_rbaz",
"foo_BAR_baz": "foo_bar_baz",
"Base_BASE": "base_base",
"Case_Case": "case_case",
"FACE_Face": "face_face",
}
new_test = snake_case_keys(test_strings)
for key, item in new_test.items():
assert key == item
|
11539207
|
import tarfile
import pytest
from ..exception import StdErrException
from . import BaseTestCase
class TestCase(BaseTestCase):
def test_notarfile(self):
self.createfile('foo', size=100)
with pytest.raises(StdErrException):
self.runcommandline('tar -tf foo')
def test_tar(self):
self.setup_filesystem()
good = [
'dir1/',
'dir1/dir1-1/',
'dir1/dir1-1/dir1-1-1/',
'dir1/dir1-1/dir1-1-1/file1-1-1-1',
'dir1/dir1-1/dir1-1-2/',
'dir1/dir1-2/',
'dir1/dir1-2/dir1-2-1/',
'dir1/dir1-2/file1-2-1',
'dir1/dir1-2/file1-2-2',
'dir1/dir1-3/',
'dir1/file1-1',
'dir1/file1-2',
'dir2/',
'dir2/dir2-1/',
'dir2/dir2-2/',
'dir2/dir2-2/file2-2-1',
'file1',
]
for archive in ('foo.tar', 'foo.tar.bz2', 'foo.tar.gz'):
# Create an archive
res = self.runcommandline('tar -cf {0} dir1 dir2 file1'.format(archive))
assert res[0] == ''
list_ = []
for tarinfo in tarfile.open(archive):
name = tarinfo.name
if tarinfo.isdir():
name += '/'
list_.append(name)
list_.sort()
assert list_ == good
# List the archive
x = self.runcommandline('tar -tf {0}'.format(archive))[0].split()
x.sort()
assert x == good
# Extract the archive
"""
d = os.getcwd()
os.mkdir('extractdir')
os.chdir('extractdir')
x = self.runcommandline(
'tar -xf {0}'.format(os.path.join(d, archive)))[0].split()
assert x == good
"""
|
11539228
|
from setuptools import setup
from os import path
from pyungo import __version__
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.rst")) as f:
README = f.read()
setup(
name="pyungo",
version=__version__,
description="Function dependencies resolution and execution",
long_description=README,
url="https://github.com/cedricleroy/pyungo",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
    extras_require={"all": ["multiprocess", "pycontracts", "jsonschema"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="dag workflow function dependency",
packages=["pyungo"],
)
|
11539240
|
import logging
import time
from decimal import Decimal
from watchmen.common.utils.data_utils import get_id_name_by_datasource
from watchmen.database.datasource.container import data_source_container
from watchmen.pipeline.core.action.utils import update_retry_callback, update_recovery_callback
from watchmen.pipeline.core.by.parse_on_parameter import parse_parameter_joint
from watchmen.pipeline.core.context.action_context import ActionContext, get_variables
from watchmen.pipeline.core.monitor.model.pipeline_monitor import ActionStatus
from watchmen.pipeline.core.parameter.parse_parameter import parse_parameter
from watchmen.pipeline.core.parameter.utils import check_and_convert_value_by_factor
from watchmen.pipeline.core.retry.retry_template import retry_template, RetryPolicy
from watchmen.pipeline.storage.read_topic_data import query_topic_data
from watchmen.pipeline.storage.write_topic_data import update_topic_data_one
from watchmen.pipeline.utils.units_func import get_factor
from watchmen.topic.storage.topic_schema_storage import get_topic_by_id
log = logging.getLogger("app." + __name__)
def init(action_context: ActionContext):
def write_factor():
start = time.time()
# create action status monitor
status = ActionStatus()
status.type = "write-factor"
status.uid = action_context.get_pipeline_id()
previous_data = action_context.previousOfTriggerData
current_data = action_context.currentOfTriggerData
action = action_context.action
if action.topicId is not None:
pipeline_topic = action_context.get_pipeline_context().pipelineTopic
target_topic = get_topic_by_id(action.topicId)
variables = get_variables(action_context)
where_ = parse_parameter_joint(action.by, current_data, variables, pipeline_topic, target_topic)
status.by = where_
target_data = query_topic_data(where_,
target_topic, action_context.get_current_user())
target_factor = get_factor(action.factorId, target_topic)
source_ = action.source
arithmetic = action.arithmetic
result = None
current_value_ = check_and_convert_value_by_factor(target_factor,
parse_parameter(source_, current_data, variables))
if arithmetic is None or arithmetic == "none": # mean AS IS
result = {target_factor.name: current_value_}
elif arithmetic == "sum":
previous_value_ = check_and_convert_value_by_factor(target_factor,
parse_parameter(source_, previous_data, variables))
if previous_value_ is None:
previous_value_ = 0
value_ = Decimal(current_value_) - Decimal(previous_value_)
result = {target_factor.name: {"_sum": value_}}
elif arithmetic == "count":
if previous_data is None:
result = {target_factor.name: {"_count": 1}}
else:
result = {target_factor.name: {"_count": 0}}
elif arithmetic == "avg":
result = {target_factor.name: {"_avg": current_value_}}
updates_ = result
trigger_pipeline_data_list = []
if target_data is not None:
if target_topic.type == "aggregate":
args = [updates_, where_, target_topic, action_context.get_current_user()]
retry_callback = (update_retry_callback, args)
recovery_callback = (update_recovery_callback, args)
execute_ = retry_template(retry_callback, recovery_callback, RetryPolicy())
result = execute_()
trigger_pipeline_data_list.append(result)
else:
trigger_pipeline_data_list.append(update_topic_data_one(
updates_, target_data,
action_context.get_pipeline_id(),
target_data[get_id_name_by_datasource(
data_source_container.get_data_source_by_id(target_topic.dataSourceId))],
target_topic, action_context.get_current_user()))
else:
raise Exception("can't insert data in write factor action ")
status.updateCount = status.updateCount + 1
elapsed_time = time.time() - start
status.completeTime = elapsed_time
return status, trigger_pipeline_data_list
return write_factor
|
11539251
|
import tfnn
from tfnn.preprocessing.shuffle import shuffle
def train_test_split(data, train_rate=0.7, randomly=True):
data_copy = data.copy()
_n_train_samples = int(data.n_samples * train_rate)
if randomly:
data_copy = shuffle(data_copy)
train_data_data = data_copy.data[:_n_train_samples, :]
test_data_data = data_copy.data[_n_train_samples:, :]
train_data_data_xs = train_data_data[:, :data.n_xfeatures]
train_data_data_ys = train_data_data[:, data.n_xfeatures:]
test_data_data_xs = test_data_data[:, :data.n_xfeatures]
test_data_data_ys = test_data_data[:, data.n_xfeatures:]
t_data = tfnn.Data(train_data_data_xs, train_data_data_ys, name='train')
v_data = tfnn.Data(test_data_data_xs, test_data_data_ys, name='validate')
return [t_data, v_data]
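if __name__ == '__main__':
    # Minimal usage sketch, assuming tfnn.Data built from feature and label arrays exposes the
    # attributes used above (n_samples, n_xfeatures, data) and a copy() method.
    import numpy as np
    xs = np.random.rand(100, 4)
    ys = np.random.randint(0, 2, size=(100, 1))
    full_data = tfnn.Data(xs, ys)
    train_data, validate_data = train_test_split(full_data, train_rate=0.8)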
|
11539255
|
import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import classpath_jars
from sagemaker_pyspark.S3Resources import S3DataPath
@pytest.fixture(autouse=True)
def with_spark_context():
os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
conf = (SparkConf()
.set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
if SparkContext._active_spark_context is None:
SparkContext(conf=conf)
yield SparkContext._active_spark_context
# TearDown
SparkContext.stop(SparkContext._active_spark_context)
def test_s3_data_path():
bucket = "bucket"
prefix = "dir/file"
s3_obj = S3DataPath(bucket, prefix)
assert s3_obj.toS3UriString() == "s3://{}/{}".format(bucket, prefix)
|
11539267
|
from Common import psteff
import sys
import collections
import operator
import cPickle
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
def get_query_to_clst(cluster_map):
q_to_c = {}
# Here we assign a cluster
# to each query by a dictionary q_to_c
for cluster, data in cluster_map.items():
for query in data['queries']:
q_to_c[query] = cluster
return q_to_c
class CACBInfer(psteff.PSTInfer):
def __init__(self):
psteff.PSTInfer.__init__(self)
def _load_pickle(self, input_file):
f = open(input_file, 'r')
self.data = cPickle.load(f)
self.query_dict = cPickle.load(f)
self.cluster_map = cPickle.load(f)
self.inv_query_dict = sorted(self.query_dict.items(), key=operator.itemgetter(1))
f.close()
self.query_to_clst = get_query_to_clst(self.cluster_map)
def suggest(self, prefix):
# Convert to cluster ids
concepts = [self.query_to_clst.get(q, q) for q in prefix]
concepts = [c for n, c in enumerate(concepts) if n == 0 or concepts[n] != concepts[n-1]]
# Suggestion is a list of cluster ids
        suggestions = psteff.PSTInfer.suggest(self, concepts)  # call the base-class implementation
sugg = zip(suggestions['suggestions'], suggestions['scores'])
suggestions['suggestions'] = []
suggestions['scores'] = []
suggestions['clusters'] = []
for clst_sugg, prob in sugg:
if clst_sugg in self.cluster_map:
# Check the concept id and get the query with
# the most clicks
sugg = self.cluster_map[clst_sugg]['queries'][0]
score = self.cluster_map[clst_sugg]['ranks'][0]
else:
# The concept id is the query
sugg = clst_sugg
score = 0
suggestions['clusters'].append(clst_sugg)
suggestions['suggestions'].append(sugg)
suggestions['scores'].append(score)
return suggestions
class CACB(psteff.PST):
def __init__(self, D=4):
psteff.PST.__init__(self, D)
self.query_to_clst = {}
self.cluster_map = {}
def with_cluster(self, cluster_file):
self.cluster_map = cPickle.load(open(cluster_file, 'r'))
self.query_to_clst = get_query_to_clst(self.cluster_map)
logger.info('Loaded {} queries'.format(len(self.query_to_clst)))
logger.info('Loaded {} clusters'.format(len(self.cluster_map)))
def prune(self, K=5):
smoothing = 1.0/len(self.query_dict)
def _prune(node, parent):
if len(node.probs) > 0:
total_sequence_freq = sum(node.probs.values())
if total_sequence_freq < 5:
self.delete_children(parent, node)
else:
                for child in node.children.values():  # recurse into the children of internal nodes
_prune(child, node)
_prune(self.root, None)
self.num_nodes = self.get_count()
def save(self, output_path):
def _flatten(node):
sorted_probs = sorted(node.probs.items(), key=operator.itemgetter(1), reverse=True)[:10]
reprs = [(node.node_id, sorted_probs)]
list_child = node.children.values()
for child in list_child:
reprs.append(_flatten(child))
return reprs
reprs = _flatten(self.root)
logger.info('Saving CACB to {} / {} nodes.'.format(output_path, self.num_nodes))
f = open(output_path, 'w')
cPickle.dump(reprs, f)
cPickle.dump(self.query_dict, f)
cPickle.dump(self.cluster_map, f)
f.close()
def add_session(self, session):
prefix = session.strip().split('\t')
coverage = 0
concepts = []
for q in prefix:
if q in self.query_to_clst:
concepts.append(self.query_to_clst[q])
coverage += 1
else:
concepts.append(q)
concepts = [c for n, c in enumerate(concepts) if n == 0 or concepts[n] != concepts[n-1]]
psteff.PST.add_session(self, concepts)
return coverage
|
11539334
|
import torch
from torchvision import datasets
from torchvision import transforms
from typing import List, Tuple
from datasets import utils
# Transformations
RC = transforms.RandomCrop(32, padding=4)
RHF = transforms.RandomHorizontalFlip()
RVF = transforms.RandomVerticalFlip()
NRM = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
TT = transforms.ToTensor()
TPIL = transforms.ToPILImage()
# Transforms object for trainset with augmentation
transform_with_aug = transforms.Compose([RC, RHF, TT, NRM])
# Transforms object for testset with NO augmentation
transform_no_aug = transforms.Compose([TT, NRM])
DATASET_ROOT = './data/'
class CIFAR10TrainingSetWrapper(utils.DataSetWrapper):
def __init__(self, class_group: Tuple[int], negative_samples=False):
dataset = datasets.CIFAR10(root=DATASET_ROOT, train=True,
download=True, transform=transform_with_aug)
super().__init__(dataset, class_group, negative_samples)
class CIFAR10TestingSetWrapper(utils.DataSetWrapper):
def __init__(self, class_group: Tuple[int], negative_samples=False):
dataset = datasets.CIFAR10(root=DATASET_ROOT, train=False,
download=True, transform=transform_no_aug)
super().__init__(dataset, class_group, negative_samples)
class CIFAR100TrainingSetWrapper(utils.DataSetWrapper):
def __init__(self, class_group: Tuple[int], negative_samples=False):
dataset = datasets.CIFAR100(root=DATASET_ROOT, train=True,
download=True, transform=transform_with_aug)
super().__init__(dataset, class_group, negative_samples)
class CIFAR100TestingSetWrapper(utils.DataSetWrapper):
def __init__(self, class_group: Tuple[int], negative_samples=False):
dataset = datasets.CIFAR100(root=DATASET_ROOT, train=False,
download=True, transform=transform_no_aug)
super().__init__(dataset, class_group, negative_samples)
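if __name__ == '__main__':
    # Minimal usage sketch, assuming utils.DataSetWrapper behaves as a torch Dataset restricted
    # to the classes in `class_group`; CIFAR-10 is downloaded into ./data/ on first use.
    from torch.utils.data import DataLoader
    train_set = CIFAR10TrainingSetWrapper((0, 1), negative_samples=False)
    loader = DataLoader(train_set, batch_size=64, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)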
|
11539348
|
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import torchvision
""" Code for the components of the U-Net model is taken from:
https://github.com/milesial/Pytorch-UNet/blob/master/unet/unet_parts.py"""
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double or tiple conv"""
def __init__(self, in_channels, out_channels, bilinear=True, double=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels) if double else TripleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels, ks=1):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=ks)
def forward(self, x):
return self.conv(x)
""" The UNet network is based on code from: https://github.com/milesial/Pytorch-UNet/blob/master/unet/unet_model.py,
but includes changes/adaptions."""
class UNet(nn.Module):
"""
Unet network to compute keypoint detector values, descriptors, and scores.
"""
def __init__(self, n_channels, n_classes, layer_size):
"""
Initialize the network with one encoder and two decoders.
Args:
            n_channels (int): number of channels in the input image (we use 3 for one RGB image).
            n_classes (int): number of classes (output channels from the decoder), 1 in our case.
            layer_size (int): size of the first layer of the encoder. The sizes of the following layers are
                determined from this.
"""
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
bilinear = True
m = layer_size
self.inc = DoubleConv(n_channels, m) # 384 x 512 (height/width after layer given 384 x 512 input image)
self.down1 = Down(m, m * 2) # 192 x 256
self.down2 = Down(m * 2, m * 4) # 96 x 128
self.down3 = Down(m * 4, m * 8) # 48 x 64
self.down4 = Down(m * 8, m * 16) # 24 x 32
self.up1_pts = Up(m * 24, m * 8, bilinear)
self.up2_pts = Up(m * 12, m * 4, bilinear)
self.up3_pts = Up(m * 6, m * 2, bilinear)
self.up4_pts = Up(m * 3, m, bilinear)
self.outc_pts = OutConv(m, n_classes)
self.up1_score = Up(m * 24, m * 8, bilinear)
self.up2_score = Up(m * 12, m * 4, bilinear)
self.up3_score = Up(m * 6, m * 2, bilinear)
self.up4_score = Up(m * 3, m, bilinear)
self.outc_score = OutConv(m, n_classes)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
"""
        Forward pass of network to get keypoint detector values, descriptors, and scores.
Args:
x (torch.tensor, Bx3xHxW): RGB images to input to the network.
Returns:
logit_pts (torch.tensor, Bx1xHxW): detector values for each pixel, which will be used to compute the
final keypoint coordinates.
score (torch.tensor, Bx1xHxW): an importance score for each pixel.
descriptors (torch.tensor, BxCxHxW): descriptors for each pixel, C is length of descriptor.
"""
batch_size, _, height, width = x.size()
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x4_up_pts = self.up1_pts(x5, x4)
x3_up_pts = self.up2_pts(x4_up_pts, x3)
x2_up_pts = self.up3_pts(x3_up_pts, x2)
x1_up_pts = self.up4_pts(x2_up_pts, x1)
logits_pts = self.outc_pts(x1_up_pts)
x4_up_score = self.up1_score(x5, x4)
x3_up_score = self.up2_score(x4_up_score, x3)
x2_up_score = self.up3_score(x3_up_score, x2)
x1_up_score = self.up4_score(x2_up_score, x1)
score = self.outc_score(x1_up_score)
score = self.sigmoid(score)
# Resize outputs of each encoder layer to the size of the original image. Features are interpolated using
# bilinear interpolation to get gradients for back-prop. Concatenate along the feature channel to get
# pixel-wise descriptors of size BxCxHxW.
f1 = F.interpolate(x1, size=(height, width), mode='bilinear')
f2 = F.interpolate(x2, size=(height, width), mode='bilinear')
f3 = F.interpolate(x3, size=(height, width), mode='bilinear')
f4 = F.interpolate(x4, size=(height, width), mode='bilinear')
f5 = F.interpolate(x5, size=(height, width), mode='bilinear')
feature_list = [f1, f2, f3, f4, f5]
descriptors = torch.cat(feature_list, dim=1)
return logits_pts, score, descriptors
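if __name__ == '__main__':
    # Minimal usage sketch: a forward pass on a random batch. layer_size=8 is an arbitrary
    # choice for illustration; the spatial size only needs to be divisible by 16 because of
    # the four max-pooling stages in the encoder (the comments above assume 384 x 512 inputs).
    net = UNet(n_channels=3, n_classes=1, layer_size=8)
    dummy = torch.rand(1, 3, 192, 256)
    logits_pts, score, descriptors = net(dummy)
    print(logits_pts.shape, score.shape, descriptors.shape)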
|
11539356
|
from time import sleep
import boto3
import botocore
from botocore.client import Config
import click
def get_s3_client():
return boto3.client(
's3', 'us-east-1', config=Config(
s3={'addressing_style': 'path'}
)
)
def download_file_from_s3(key, bucket):
s3_client = get_s3_client()
data = s3_client.get_object(
Bucket=bucket,
Key=key
)
return data['Body'].read().decode("utf-8")
class AthenaWaiterException(Exception):
pass
class AthenaWaiter(object):
"""Not only can wait more than the AWS S3 waiter,
but it also checks if the query has failed
or was canceled and stops instead of waiting
until it times out.
"""
def __init__(self, max_tries=30, interval=5):
self.s3_client = get_s3_client()
self.athena_client = boto3.client(
'athena',
region_name='us-east-1'
)
self.max_tries = max_tries
self.interval = interval
def object_exists(self, bucket='', key=''):
exists = True
try:
self.s3_client.head_object(Bucket=bucket, Key=key)
except botocore.exceptions.ClientError as exc:
if exc.response['Error']['Code'] == '404':
exists = False
else:
raise
return exists
def check_status(self, query_id):
status = self.athena_client.get_query_execution(
QueryExecutionId=query_id
)['QueryExecution']['Status']
if status['State'] in ['FAILED', 'CANCELLED']:
raise AthenaWaiterException(
'Query Error: {0}'
.format(status['StateChangeReason'])
)
def wait(self, bucket='', key='', query_id=''):
click.echo(
'Waiting for file ({0}) in {1}'
.format(key, bucket),
nl=False
)
success = False
for _ in range(self.max_tries):
if self.object_exists(bucket=bucket, key=key):
success = True
click.echo('')
break
self.check_status(query_id)
click.echo('.', nl=False)
sleep(self.interval)
if not success:
raise AthenaWaiterException(
'Exceeded the maximum number of tries ({0})'
.format(self.max_tries)
)
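if __name__ == '__main__':
    # Minimal usage sketch, assuming a query has already been started with
    # athena.start_query_execution and that its results land under the given bucket/key.
    # The bucket, key, and query id below are illustrative placeholders, not real resources.
    waiter = AthenaWaiter(max_tries=60, interval=10)
    waiter.wait(
        bucket='my-athena-results-bucket',
        key='results/some-query-id.csv',
        query_id='some-query-id',
    )
    body = download_file_from_s3('results/some-query-id.csv', 'my-athena-results-bucket')
    print(body[:200])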
|
11539385
|
import theano.tensor as TT
import theano
#import util
import numpy as np
import pdb
#batch_size = 256
#leng = 10
#fea = 200
#v_fea = 1025
#nout = 512
#
#n_words = 4
#
#wt = theano.shared(util.random_matrix(fea, nout),name='wt')
#bt = theano.shared(util.random_vector(nout),name='bt')
#
#we = theano.shared(util.random_matrix(v_fea, nout),name='we')
#be = theano.shared(util.random_vector( nout),name='be')
#
#
#wt_w = theano.shared(util.random_matrix(fea, nout), name= 'wt_w')
#bt_w = theano.shared(util.random_vector(nout), name='bt_w')
#
#v_a = theano.shared(util.random_vector(nout), name='v_a')
#b_a = theano.shared(util.random_vector(n_words), name='b_a')
#v_t = theano.shared(util.random_matrix(nout, nout), name='v_t')
#b_vt = theano.shared(util.random_vector(nout), name = 'b_vt')
#
#
#x = TT.ftensor3('x')
v = TT.matrix('v')
def fun(x):
return TT.nonzero(x, True)
def step(x):
return TT.max(TT.nonzero(x,True))
def test(x3, idx, idx2):
return x3[idx, idx2,:]
def test2(idx_row, idxcol, x3):
return x3[idx_row, idxcol,:]
def test3(idxs, x3):
return x3[idxs[0,:], idxs[1,:], :]
def test4(x3):
return TT.nonzero_values(x3)
v3 = TT.ftensor3('v')
idx = TT.imatrix('i')
idx2 = TT.imatrix('i2')
output_test2 = test2(idx, idx2, v3)
output_test3 = test3(idx, v3)
output4 = test4(v3)
x_scan, updates = theano.scan(test2,
outputs_info=None,
sequences=[idx, idx2],
non_sequences = [v3]
)
func = theano.function(inputs = [idx,idx2, v3], outputs = x_scan)
func2 = theano.function(inputs = [idx, idx2, v3], outputs = output_test2)
#func3 = theano.function(inputs = [idx, v3], outputs = output_test3)
func4 = theano.function(inputs = [v3], outputs = output4)
x3 = np.random.rand(3,4,2).astype('float32')
idx = np.array([[0,1,2]],dtype = 'int32')
idx2 = np.array([[2,2,1]], dtype = 'int32')
x3[:] = 0
x3[1,1,:] = 1.0
x3[2,2,:] = 2
x3[0,1,:] = 3
print x3
print x3.shape
print func4(x3)
#words = TT.ftensor3('words')
#
#dim_words = words.dimshuffle((1,0,2)) # now x is batch * n_words * dict_len
#dim_x = x.dimshuffle((1,0,2))
#
#def step_(x_t, words_t, w_t, b_t, w_t_w, b_t_w, v_t, b_vt, v_a, b_a):
# TT_x = TT.dot(x_t, w_t) + b_t
# TT_w = TT.dot(words_t, w_t_w) + b_t_w # 10 * 512
# TT_w_dim = TT_w.dimshuffle(('x', 0, 1))
# TT_ws = TT.extra_ops.repeat(TT_w_dim, TT_x.shape[0], axis = 0) # sentenlen * n_words * 512
#
# TT_xv = TT.dot(TT_x, v_t) + b_vt # sentence_len * 512
# TT_xv_dim = TT_xv.dimshuffle(0,'x', 1)
# TT_xvs = TT.extra_ops.repeat(TT_xv_dim, TT_ws.shape[1], axis = 1)
# TT_act = TT.tanh( TT_ws + TT_xvs ) # sentence * n_words * 512
# beta = TT.dot(TT_act, v_a ) + b_a # this is broadcastable: sentence * n_words
# z = TT.exp(beta - beta.max(axis=-1, keepdims=True))
# alpha = z / z.sum(axis=-1, keepdims=True) # sentence * n_words.
# TT_att = TT.dot(alpha, TT_w) # now, good, sentence * 512
# return TT.concatenate((TT_x, TT_att), axis = 1)
# #return TT.concatenate((TT_x, TT.extra_ops.repeat(TT_att_f, TT_x.shape[0], axis = 0)), axis = 1)
#
#x_scan, updates = theano.scan(step_,
# outputs_info=None,
# sequences=[dim_x, dim_words],
# non_sequences=[wt,
# bt,
# wt_w,
# bt_w,
# v_t,
# b_vt,
# v_a,
# b_a
# ]
# )
#
#func = theano.function(inputs = [ x, words], outputs = [x_scan], updates = updates)
##x_scan_simple, updates_simple = theano.scan(step_simple, outputs_info=None, sequences=[x], non_sequences=[wt, bt] )
##x_e = x_scan.dimshuffle((1,0,2))
##x_e_simple = x_scan_simple.dimshuffle((1,0,2))
###x_e_simple = x_scan_simple
##func = theano.function(inputs = [ x, words], outputs = [x_e], updates = updates)
##func_simple = theano.function(inputs = [ x], outputs = [x_e_simple], updates = updates)
##
##def append_(x_t, word_t, w_t, b_t):
## TT_w = TT.dot(word_t, w_t) + b_t
## return TT.concatenate((x_t, TT.flatten(TT_w)))
##
##x_scan_0, updates_app = theano.scan(append_, outputs_info = None, sequences = [x_0, words], non_sequences = [ self.find('wt'), self.find('bt')])
##x_0 = x_scan_0.dimshuffle(('x',0,1))
#
#
#np_x = np.asarray(np.random.rand(leng, batch_size, fea), dtype='float32')
#np_v = np.asarray(np.random.rand(batch_size, fea), dtype='float32')
#np_words = np.asarray(np.random.rand(n_words,batch_size, fea), dtype='float32')
#
##np_x = np.asarray(np.random.rand(batch_size, leng, fea), dtype='float32')
##np_words = np.asarray(np.random.rand(batch_size, 4, fea), dtype='float32')
#print np_x.shape
#print np_words.shape
#test = func(np_x, np_words)
##test_simple = func_simple(np_x)
#print 'test',test[0].shape
#
#print test[0][0,:]
#print test[0][0,:].shape
#print test[0][0,:].sum(axis = 0)
#print test[0][0,:].sum(axis = 1)
#
#
##
### Now, we start the visual word.
##x_0 = TT.dot(v, self.find('we')) + self.find('be') # now batch_size * e_size
##def append_(x_t, word_t, w_t, b_t):
## TT_w = TT.dot(word_t, w_t) + b_t
## return TT.concatenate((x_t, TT.flatten(TT_w)))
##
##x_scan_0, updates_app = theano.scan(append_, outputs_info = None, sequences = [x_0, dim_words], non_sequences = [ we, be])
##
##x_0 = x_scan_0.dimshuffle(('x',0,1))
###
##y = TT.concatenate((x_0, x_e))
##func_x0 = theano.function(inputs = [ v, words ], outputs = [x_0], updates = updates_app)
##
##test_x0 = func_x0(np_v, np_words)
#
##print 'test_x0', test_x0[0].shape
|
11539414
|
from django.test import TestCase
class TestCertURL(TestCase):
def test_url_gives_200(self):
"""
The certificate view should receive a 200.
"""
response = self.client.get(
"/.well-known/pki-validation/BA7CD11C05866445FBFE053E2C1AAA8C.txt"
)
assert response.status_code == 200
def test_cert_view_has_correct_content(self):
"""
The certificate content should be correct.
"""
response = self.client.get(
"/.well-known/pki-validation/BA7CD11C05866445FBFE053E2C1AAA8C.txt"
)
cert_text = b"172DCB53812428BBE4077B6343FBD43719C8C85D599860CE17B66824DB9BDAFD comodoca.com 598504fd4f315"
assert response.content == cert_text
|