code stringlengths 281 23.7M |
|---|
class TestAviarySDK():
    """Smoke tests for the Aviary SDK client helpers.

    Each test receives the ``aviary_testing_model`` fixture (defined
    elsewhere), which names a model served by the backend under test.
    """
    def test_get_backend(self, aviary_testing_model):
        # Backend discovery must return a truthy handle.
        backend = sdk.get_aviary_backend()
        assert backend
    def test_get_aviary(self, aviary_testing_model):
        # Keyword-argument form of the completions call.
        completions = sdk.completions(model=aviary_testing_model, prompt='test')
        assert completions
    def test_list_models(self, aviary_testing_model):
        # The testing model must appear in the model listing.
        all_models = sdk.models()
        assert len(all_models)
        assert (aviary_testing_model in all_models)
    def test_metadata(self, aviary_testing_model):
        # Metadata responses are expected to carry an 'aviary_metadata' key.
        result = sdk.metadata(aviary_testing_model)
        assert ('aviary_metadata' in result.keys())
    def test_completions(self, aviary_testing_model):
        # Positional-argument form of the completions call.
        prompt = 'test query'
        result = sdk.completions(aviary_testing_model, prompt)
        assert result
    def test_query(self, aviary_testing_model):
        # Chat-style query: response follows the OpenAI-like schema.
        prompt = 'test query'
        result = sdk.query(aviary_testing_model, prompt)
        assert result['choices'][0]['message']['content']
        assert result['usage']
    def test_stream(self, aviary_testing_model):
        # Streaming: every chunk carries either a delta or a finish_reason,
        # and a usage field.
        prompt = 'test query'
        for chunk in sdk.stream(aviary_testing_model, prompt):
            assert (chunk['choices'][0]['delta'] or chunk['choices'][0]['finish_reason'])
            assert chunk['usage']
_on_failure
.parametrize('number_of_nodes', [2])
.parametrize('deposit', [0])
.parametrize('enable_rest_api', [True])
def test_api_channel_set_reveal_timeout(api_server_test_instance: APIServer, raiden_network: List[RaidenService], token_addresses, settle_timeout):
    """PATCHing a channel's reveal_timeout rejects out-of-range values.

    reveal_timeout == 0 and reveal_timeout > settle_timeout must both yield
    409 CONFLICT; settle_timeout / 2 is accepted and persisted in the
    channel state.
    """
    (app0, app1) = raiden_network
    token_address = token_addresses[0]
    partner_address = app1.address
    # Too small: a zero reveal timeout is rejected.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(reveal_timeout=0))
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # Too large: a reveal timeout beyond the settle timeout is rejected.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(reveal_timeout=(settle_timeout + 1)))
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # Valid value: half the settle timeout is accepted...
    reveal_timeout = int((settle_timeout / 2))
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(reveal_timeout=reveal_timeout))
    response = request.send().response
    assert_response_with_code(response, HTTPStatus.OK)
    # ...and must be reflected in app0's view of the channel state.
    token_network_address = views.get_token_network_address_by_token_address(views.state_from_raiden(app0), app0.default_registry.address, token_address)
    assert token_network_address
    channel_state = views.get_channelstate_by_token_network_and_partner(chain_state=views.state_from_raiden(app0), token_network_address=token_network_address, partner_address=app1.address)
    assert channel_state
    assert (channel_state.reveal_timeout == reveal_timeout)
def _call_return(call: Dict[(str, Any)]) -> Callable[([Optional[Callable[(..., Any)]], Optional[Callable[(..., Any)]]], Any)]:
    """Build a retriever for the eventual result of an async JS call.

    The returned function either registers (callback, error_callback) for
    later dispatch, or — when no callback is given — busy-waits (1 ms steps,
    up to ``_js_result_timeout`` iterations) for the result to land in
    ``_call_return_values``.
    """
    global _js_result_timeout  # read-only here; module-level poll budget
    call_id = call['call']
    def return_func(callback: Optional[Callable[(..., Any)]]=None, error_callback: Optional[Callable[(..., Any)]]=None) -> Any:
        if (callback is not None):
            # Asynchronous mode: stash callbacks keyed by the call id.
            _call_return_callbacks[call_id] = (callback, error_callback)
        else:
            # Synchronous mode: poll for the result.
            # NOTE(review): if the timeout expires, this falls through and
            # implicitly returns None — confirm callers tolerate that.
            for w in range(_js_result_timeout):
                if (call_id in _call_return_values):
                    return _call_return_values.pop(call_id)
                sleep(0.001)
    return return_func
def match_qdmr_arg_to_groundings(arg, grnds):
    """Return the groundings in *grnds* whose last key textually matches *arg*.

    Only column/table/value groundings are supported; any other grounding
    type raises ValueError.
    """
    cleaned_arg = clean_qdmr_arg(arg)
    matched = []
    for grnd in grnds:
        # Guard clause: reject grounding kinds we do not know how to match.
        if not (grnd.iscol() or grnd.istbl() or grnd.isval()):
            raise ValueError(f'Do not know how to match to {grnd}')
        candidate_name = grnd.keys[-1]
        if is_text_match(cleaned_arg, candidate_name):
            matched.append(grnd)
    return matched
class ClipGradNorm(object):
    """Iteration-gated gradient-norm clipping.

    Each call advances an internal counter; gradients are clipped to
    ``max_norm`` once the counter reaches ``start_iteration``, or while it is
    below a positive ``end_iteration``.
    """

    def __init__(self, start_iteration=0, end_iteration=-1, max_norm=0.5):
        self.start_iteration = start_iteration
        self.end_iteration = end_iteration  # <= 0 means "no end bound"
        self.max_norm = max_norm
        self.last_epoch = -1

    def __call__(self, parameters):
        self.last_epoch += 1
        past_start = self.last_epoch >= self.start_iteration
        before_end = (self.end_iteration > 0) and (self.last_epoch < self.end_iteration)
        if past_start or before_end:
            clip_grad_norm_(parameters, max_norm=self.max_norm)

    def state_dict(self):
        """Snapshot all attributes for checkpointing."""
        return dict(self.__dict__)

    def load_state_dict(self, state_dict):
        """Restore attributes from a snapshot produced by ``state_dict``."""
        self.__dict__.update(state_dict)
_loss('label_smoothing_cross_entropy')
class LabelSmoothingCrossEntropyLoss(ClassyLoss):
    """Cross-entropy with label smoothing, delegating to a soft-target loss."""
    def __init__(self, ignore_index=(- 100), reduction='mean', smoothing_param=None):
        super().__init__()
        self._ignore_index = ignore_index
        self._reduction = reduction
        self._smoothing_param = smoothing_param
        # Targets are normalized manually in smooth_targets, so the wrapped
        # loss must not normalize again.
        self.loss_function = SoftTargetCrossEntropyLoss(self._ignore_index, self._reduction, normalize_targets=False)
        self._eps = np.finfo(np.float32).eps
    # NOTE(review): takes `cls` but has no visible @classmethod decorator —
    # likely stripped from this copy; confirm against upstream.
    def from_config(cls, config: Dict[(str, Any)]) -> 'LabelSmoothingCrossEntropyLoss':
        """Construct from a config dict; 'smoothing_param' is required."""
        assert ('smoothing_param' in config), 'Label Smoothing needs a smoothing parameter'
        return cls(ignore_index=config.get('ignore_index', (- 100)), reduction=config.get('reduction', 'mean'), smoothing_param=config.get('smoothing_param'))
    def compute_valid_targets(self, target, classes):
        """Convert class-index targets to one-hot, zeroing ignored entries."""
        target_shape_list = list(target.size())
        valid_mask = (target != self._ignore_index)
        valid_targets = (target.float() * valid_mask.float())
        # 1-D targets (or Nx1) are class indices: expand to one-hot.
        if ((len(target_shape_list) == 1) or ((len(target_shape_list) == 2) and (target_shape_list[1] == 1))):
            valid_targets = convert_to_one_hot(valid_targets.view((- 1), 1), classes)
            valid_targets = (valid_targets.float() * valid_mask.view((- 1), 1).float())
        return valid_targets
    def smooth_targets(self, valid_targets, classes):
        """Normalize targets, add uniform smoothing mass, and renormalize.

        NOTE(review): if classes <= 0 `smoothed_targets` is never assigned
        and this raises UnboundLocalError — presumably unreachable; confirm.
        """
        valid_targets /= (self._eps + valid_targets.sum(dim=1, keepdim=True))
        if (classes > 0):
            smoothed_targets = (valid_targets + (self._smoothing_param / classes))
        smoothed_targets /= (self._eps + smoothed_targets.sum(dim=1, keepdim=True))
        return smoothed_targets
    def forward(self, output, target):
        """Compute the smoothed cross-entropy of `output` against `target`."""
        valid_targets = self.compute_valid_targets(target=target, classes=output.shape[1])
        assert (valid_targets.shape == output.shape), 'LabelSmoothingCrossEntropyLoss requires output and target to be same size'
        smoothed_targets = self.smooth_targets(valid_targets=valid_targets, classes=output.shape[1])
        return self.loss_function(output, smoothed_targets)
class VersionChange(enum.Enum):
    """Kind of difference between two versions, declared least-to-most severe."""

    unknown = enum.auto()
    equal = enum.auto()
    downgrade = enum.auto()
    patch = enum.auto()
    minor = enum.auto()
    major = enum.auto()

    def matches_filter(self, filterstr: str) -> bool:
        """True when this change is at least as significant as *filterstr*.

        'never' matches nothing; unknown filter strings raise KeyError.
        """
        minimum = {
            'major': VersionChange.major,
            'minor': VersionChange.minor,
            'patch': VersionChange.patch,
            'never': None,
        }
        threshold = minimum[filterstr]
        if threshold is None:
            return False
        # auto() assigns ascending values, so severity compares numerically.
        return self.value >= threshold.value
def get_operator(values):
    """Build the Ising operator for the number-partitioning objective.

    For every qubit pair (i, j), i > j, adds a ZZ term with coefficient
    2 * values[i] * values[j]. Returns (operator, sum of squared values).
    """
    num_qubits = len(values)
    pauli_terms = []
    for i in range(num_qubits):
        for j in range(i):
            x_mask = np.zeros(num_qubits, dtype=bool)
            z_mask = np.zeros(num_qubits, dtype=bool)
            z_mask[i] = True
            z_mask[j] = True
            coefficient = 2.0 * values[i] * values[j]
            pauli_terms.append([coefficient, Pauli((z_mask, x_mask))])
    constant_shift = sum(values * values)
    return (WeightedPauliOperator(paulis=pauli_terms), constant_shift)
def test_get_expected_output_filenames_for_example():
    """Known demo repos map to their expected output files; '' falls back to plot.png."""
    from reana.reana_dev.run import get_expected_output_filenames_for_example
    for (example, output) in (('', ('plot.png',)), ('reana-demo-helloworld', ('greetings.txt',)), ('reana-demo-root6-roofit', ('plot.png',)), ('reana-demo-alice-lego-train-test-run', ('plot.pdf',))):
        assert (output == get_expected_output_filenames_for_example(example))
class ModelParallel(nn.Module):
    """Runs a pipeline of sub-modules ("chunks"), each pinned to its own device.

    Float tensors are re-typed between CPU and CUDA as they cross device
    boundaries; the last chunk consumes its input as-is.
    """

    def __init__(self, chunks, device_list):
        super(ModelParallel, self).__init__()
        self.chunks = chunks
        self.device_list = device_list

    def c(self, input, i):
        """Cast *input* so its float type matches device ``device_list[i]``."""
        tensor_type = input.type()
        if tensor_type == 'torch.FloatTensor' and 'cuda' in self.device_list[i]:
            return input.type('torch.cuda.FloatTensor')
        if tensor_type == 'torch.cuda.FloatTensor' and 'cpu' in self.device_list[i]:
            return input.type('torch.FloatTensor')
        return input

    def forward(self, input):
        last = len(self.chunks) - 1
        for i, chunk in enumerate(self.chunks):
            if i == last:
                # Final chunk: no further device hop.
                input = chunk(input)
            else:
                # Cast + move onto this chunk's device, run it, then cast +
                # move the result onto the next chunk's device.
                out = chunk(self.c(input, i).to(self.device_list[i]))
                input = self.c(out, i + 1).to(self.device_list[i + 1])
        return input
class RealGaborLayer(nn.Module):
    """Real Gabor wavelet layer: cos(omega0 * Wx) * exp(-(sigma0 * Vx)^2).

    Two parallel linear maps produce the oscillation frequencies and the
    Gaussian envelope widths.
    """

    def __init__(self, in_features, out_features, bias=True, is_first=False, omega0=10.0, sigma0=10.0, trainable=False):
        super().__init__()
        # NOTE(review): `trainable` is accepted but never used here — confirm intent.
        self.omega_0 = omega0
        self.scale_0 = sigma0
        self.is_first = is_first
        self.in_features = in_features
        self.freqs = nn.Linear(in_features, out_features, bias=bias)
        self.scale = nn.Linear(in_features, out_features, bias=bias)

    def forward(self, input):
        frequency = self.freqs(input) * self.omega_0
        envelope_arg = self.scale_0 * self.scale(input)
        return torch.cos(frequency) * torch.exp(-(envelope_arg ** 2))
(callback=triggered)
(user='darren', host='radiant', key='/home/darren/.ssh/id_rsa.pub', python='/home/darren/venv/bin/python')
def echo(e):
    """Log the current thread, persist the echoed message to /tmp/echo.out,
    and return it."""
    print('echo: {}'.format(threading.current_thread().name))
    message = 'Echo! {}'.format(e)
    with open('/tmp/echo.out', 'w') as out_file:
        out_file.write(message)
    return message
def test_map_iterator():
    """Exercise dict-like semantics of the bound StringMap type."""
    sm = m.StringMap({'hi': 'bye', 'black': 'white'})
    assert (sm['hi'] == 'bye')
    assert (len(sm) == 2)
    assert (sm['black'] == 'white')
    # Missing keys raise KeyError, like a Python dict.
    with pytest.raises(KeyError):
        assert sm['orange']
    sm['orange'] = 'banana'
    assert (sm['orange'] == 'banana')
    expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}
    # Both plain iteration (keys) and items() must agree with `expected`.
    for k in sm:
        assert (sm[k] == expected[k])
    for (k, v) in sm.items():
        assert (v == expected[k])
    # An exhausted iterator must keep raising StopIteration on every next().
    it = iter(m.StringMap({}))
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(it)
class AverageMeter(object):
    """Tracks the most recent value and a running (slightly damped) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        # The 0.0001 term guards against division by zero when count == 0;
        # it also biases the average slightly low (kept for compatibility).
        self.avg = self.sum / (self.count + 0.0001)

    def __str__(self):
        return str(self.val) if self.count == 0 else '%.4f (%.4f)' % (self.val, self.avg)
def test(models, dataloaders, mode='val'):
    """Evaluate the backbone on the given split and return accuracy in percent.

    `models` maps component names to modules; only 'backbone' is used.
    Requires CUDA (inputs/labels are moved to the GPU).
    """
    assert ((mode == 'val') or (mode == 'test'))
    models['backbone'].eval()
    total = 0
    correct = 0
    with torch.no_grad():
        for (inputs, labels) in dataloaders[mode]:
            inputs = inputs.cuda()
            labels = labels.cuda()
            # The backbone returns a tuple; logits are the first element.
            scores = models['backbone'](inputs)[0]
            (_, preds) = torch.max(scores.data, 1)
            total += labels.size(0)
            correct += (preds == labels).sum().item()
    return ((100 * correct) / total)
def test_follow_redirects_filtered_by_site_after_redirect():
    """follow_redirects must stop at the first URL whose host is not allowed.

    The server issues /resource -> /redirected -> off-site URL; since only
    the local hostname is allowed, the off-site URL is returned unfollowed.
    """
    link = '/resource'
    redirected = '/redirected'
    # FIXME(review): this literal was corrupted in the source ("filtered = '").
    # Restored as an absolute URL on a foreign host so the host filter below
    # applies — confirm against the upstream test.
    filtered = 'http://filtered.example.invalid/resource'
    with start_server(Response(link, 301, {'Location': redirected}), Response(redirected, 301, {'Location': filtered})) as url:
        hosts = [socket.gethostname().lower()]
        assert (filtered == follow_redirects(url(link), hosts))
.parametrize('prefix,path,expected', [('test', 'foo', 'test/foo'), ('test', 'bar', 'test/bar'), ('test', '/bar', 'test/bar'), ('test', '../foo', 'test/foo'), ('test', 'foo/bar/baz', 'test/baz'), ('test', 'foo/../baz', 'test/baz'), (None, 'foo', 'foo'), (None, 'foo/bar/baz', 'baz')])
def test_filepath(prefix, path, expected):
    """get_file_id_path must collapse directories/.. and apply the prefix."""
    userfiles = DelegateUserfiles(None, None, 'local_us', prefix)
    assert (userfiles.get_file_id_path(path) == expected)
.parametrize('method', ['advi', 'ADVI+adapt_diag', 'advi_map', 'jitter+adapt_diag', 'adapt_diag', 'map', 'adapt_full', 'jitter+adapt_full'])
def test_exec_nuts_init(method):
    """Each NUTS init method must run; *adapt_full variants warn (experimental)."""
    if method.endswith('adapt_full'):
        with pytest.warns(UserWarning, match='experimental feature'):
            check_exec_nuts_init(method)
    else:
        check_exec_nuts_init(method)
.parametrize('rank, attn_axes, expected', quadratic_attention)
def test_build_quadratic_attention(rank, attn_axes, expected):
    """The einsum equations and score rank must match the expected triple."""
    result = build_quadratic_attention_equation(rank, attn_axes)
    (dot_product_equation, combine_equation, attn_scores_rank) = result
    assert (dot_product_equation == expected[0])
    assert (combine_equation == expected[1])
    assert (attn_scores_rank == expected[2])
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops validators from the
    # DeletedMessage.embeds JSON array field. Do not edit by hand.
    dependencies = [('api', '0050_remove_infractions_active_default_value')]
    operations = [migrations.AlterField(model_name='deletedmessage', name='embeds', field=django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.jsonb.JSONField(validators=[]), blank=True, help_text='Embeds attached to this message.', size=None))]
class ScratchPadBaseConfic(Config):
    # Qtile test config with two ScratchPad groups (one single-window) plus
    # two normal groups. NOTE(review): class name looks like a typo for
    # "...Config" but is kept — external code may reference it.
    auto_fullscreen = True
    screens = []
    groups = [libqtile.config.ScratchPad('SCRATCHPAD', dropdowns=[libqtile.config.DropDown('dd-a', spawn_cmd('dd-a'), on_focus_lost_hide=False), libqtile.config.DropDown('dd-b', spawn_cmd('dd-b'), on_focus_lost_hide=False), libqtile.config.DropDown('dd-c', spawn_cmd('dd-c'), on_focus_lost_hide=True), libqtile.config.DropDown('dd-d', spawn_cmd('dd-d'), on_focus_lost_hide=True), libqtile.config.DropDown('dd-e', spawn_cmd('dd-e'), match=libqtile.config.Match(title='dd-e'), on_focus_lost_hide=False)]), libqtile.config.ScratchPad('SINGLE_SCRATCHPAD', dropdowns=[libqtile.config.DropDown('dd-e', spawn_cmd('dd-e'), on_focus_lost_hide=False), libqtile.config.DropDown('dd-f', spawn_cmd('dd-f'), on_focus_lost_hide=False)], single=True), libqtile.config.Group('a'), libqtile.config.Group('b')]
    layouts = [libqtile.layout.max.Max()]
    floating_layout = libqtile.resources.default_config.floating_layout
    keys = []
    mouse = []
class MinimumEigenOptimizationResult(OptimizationResult):
    """Optimization result that also carries the underlying eigensolver result
    and the raw (untranslated) samples."""
    def __init__(self, x: Union[(List[float], np.ndarray)], fval: float, variables: List[Variable], status: OptimizationResultStatus, samples: Optional[List[SolutionSample]]=None, min_eigen_solver_result: Optional[MinimumEigensolverResult]=None, raw_samples: Optional[List[SolutionSample]]=None) -> None:
        super().__init__(x=x, fval=fval, variables=variables, status=status, raw_results=None, samples=samples)
        self._min_eigen_solver_result = min_eigen_solver_result
        self._raw_samples = raw_samples
    # NOTE(review): upstream declares this as @property; the decorator looks
    # stripped from this copy — confirm before calling.
    def min_eigen_solver_result(self) -> MinimumEigensolverResult:
        return self._min_eigen_solver_result
    def get_correlations(self) -> np.ndarray:
        """Return the lower-triangular <Z_i Z_j> correlation matrix, estimated
        from the sampled bitstrings weighted by their probabilities."""
        states = [v.x for v in self.samples]
        probs = [v.probability for v in self.samples]
        n = len(states[0])
        correlations = np.zeros((n, n))
        for (k, prob) in enumerate(probs):
            b = states[k]
            for i in range(n):
                for j in range(i):
                    # Equal bits contribute +prob, differing bits -prob.
                    if (b[i] == b[j]):
                        correlations[(i, j)] += prob
                    else:
                        correlations[(i, j)] -= prob
        return correlations
    # NOTE(review): also likely a stripped @property — see above.
    def raw_samples(self) -> Optional[List[SolutionSample]]:
        return self._raw_samples
.functions
(df=df_strategy())
(deadline=None, max_examples=10)
def test_bin_numeric_expected_columns(df):
    """bin_numeric must append the new 'a_bin' column without dropping any."""
    df = df.bin_numeric(from_column_name='a', to_column_name='a_bin')
    expected_columns = ['a', 'Bell__Chart', 'decorated-elephant', '#$%^', 'cities', 'a_bin']
    assert (set(df.columns) == set(expected_columns))
def test_assert_key_type_value_no_value_raises():
    """A key that exists but has no value must raise with a descriptive message."""
    info = ContextItemInfo(key='key1', key_in_context=True, expected_type=str, is_expected_type=True, has_value=False)
    with pytest.raises(KeyInContextHasNoValueError) as err_info:
        Context().assert_key_type_value(info, 'mydesc')
    assert (str(err_info.value) == "mydesc found key1 in context but it doesn't have a value.")
def main(params):
    """Preprocess a captions dataset: build the vocab, encode captions to an
    HDF5 label file, and write an images/vocab JSON manifest.

    `params` keys used: input_json, output_h5, output_json, images_root.
    """
    imgs = json.load(open(params['input_json'], 'r'))
    imgs = imgs['images']
    seed(123)  # fixed seed so vocab/caption encoding is reproducible
    vocab = build_vocab(imgs, params)
    # Word indices are 1-based; 0 is reserved (padding/EOS by convention).
    itow = {(i + 1): w for (i, w) in enumerate(vocab)}
    wtoi = {w: (i + 1) for (i, w) in enumerate(vocab)}
    (L, label_start_ix, label_end_ix, label_length) = encode_captions(imgs, params, wtoi)
    N = len(imgs)
    # Write encoded labels and per-image index ranges to the HDF5 file.
    f_lb = h5py.File((params['output_h5'] + '_label.h5'), 'w')
    f_lb.create_dataset('labels', dtype='uint32', data=L)
    f_lb.create_dataset('label_start_ix', dtype='uint32', data=label_start_ix)
    f_lb.create_dataset('label_end_ix', dtype='uint32', data=label_end_ix)
    f_lb.create_dataset('label_length', dtype='uint32', data=label_length)
    f_lb.close()
    # Build the JSON manifest: vocab mapping plus per-image metadata.
    out = {}
    out['ix_to_word'] = itow
    out['images'] = []
    for (i, img) in enumerate(imgs):
        jimg = {}
        jimg['split'] = img['split']
        if ('filename' in img):
            jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename'])
        # Prefer the COCO id; fall back to a generic image id if present.
        if ('cocoid' in img):
            jimg['id'] = img['cocoid']
        elif ('imgid' in img):
            jimg['id'] = img['imgid']
        # Optionally record image dimensions (requires the actual files).
        if (params['images_root'] != ''):
            with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
                (jimg['width'], jimg['height']) = _img.size
        out['images'].append(jimg)
    json.dump(out, open(params['output_json'], 'w'))
    print('wrote ', params['output_json'])
class Critic(nn.Module):
    """Twin-Q critic (TD3-style): two independent MLP heads that each map a
    flattened state plus an action to a scalar Q-value."""

    def __init__(self, nb_states, nb_actions, hidden=256, init_w=0.3):
        super(Critic, self).__init__()
        # Q1 head.
        self.fc1 = nn.Linear(nb_states, hidden)
        self.fc2 = nn.Linear(hidden + nb_actions, hidden)
        self.fc3 = nn.Linear(hidden, hidden)
        self.fc4 = nn.Linear(hidden, hidden)
        self.fc5 = nn.Linear(hidden, 1)
        # Q2 head (independent parameters).
        self.fc11 = nn.Linear(nb_states, hidden)
        self.fc21 = nn.Linear(hidden + nb_actions, hidden)
        self.fc31 = nn.Linear(hidden, hidden)
        self.fc41 = nn.Linear(hidden, hidden)
        self.fc51 = nn.Linear(hidden, 1)
        self.relu = nn.ReLU()
        self.init_weights(init_w)

    def init_weights(self, init_w):
        """Kaiming-init the hidden layers; small-uniform-init the output layers."""
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            torch.nn.init.kaiming_uniform_(layer.weight.data, a=0, mode='fan_in', nonlinearity='leaky_relu')
        self.fc5.weight.data.uniform_(-init_w, init_w)
        for layer in (self.fc11, self.fc21, self.fc31, self.fc41):
            torch.nn.init.kaiming_uniform_(layer.weight.data, a=0, mode='fan_in', nonlinearity='leaky_relu')
        self.fc51.weight.data.uniform_(-init_w, init_w)

    def _run_head(self, x, a, layers):
        """Run one Q head: state trunk, concat action, MLP down to a scalar."""
        first, second, third, fourth, last = layers
        h = self.relu(first(x))
        h = self.relu(second(torch.cat([h, a], 1)))
        h = self.relu(third(h))
        h = self.relu(fourth(h))
        return last(h)

    def forward(self, inp):
        (x, a) = inp
        # NOTE(review): flattening hard-codes a 45x80 state image, i.e. it
        # assumes nb_states == 3600 — confirm against the caller.
        x = x.view(x.size(0), 45 * 80)
        q1 = self._run_head(x, a, (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5))
        q2 = self._run_head(x, a, (self.fc11, self.fc21, self.fc31, self.fc41, self.fc51))
        return (q1, q2)
class ProcessTest(unittest.TestCase):
    """End-to-end tests for the process-supervision CLI (`main`)."""
    def tearDown(self) -> None:
        # Every test must clean up its children; leftover processes are bugs.
        current_process = psutil.Process()
        self.assertEqual(len(current_process.children()), 0, 'zombie children processes!')
    def test_returncode(self) -> None:
        # main() exits via SystemExit with the child's return code.
        with self.assertRaisesRegex(SystemExit, '^0$'):
            main(['--timeout', '60', 'echo', 'hello'])
        with self.assertRaisesRegex(SystemExit, '^123$'):
            main(['--timeout', '60', '--', 'bash', '-c', 'exit 123'])
    def test_proccess_args(self) -> None:
        # Flags that main() does not recognize are passed to the child.
        with self.assertRaisesRegex(SystemExit, '^0$'):
            main(['echo', '--some', '-args'])
    # NOTE(review): the bare tuple below looks like a stripped
    # @patch('sys.stdout', new_callable=io.StringIO) decorator — confirm.
    ('sys.stdout', new_callable=io.StringIO)
    def test_timeout(self, stdout: MagicMock) -> None:
        # Exceeding --timeout terminates the child with SIGTERM (-15).
        with self.assertRaisesRegex(SystemExit, '^-15$'):
            main(['--timeout', '0.0001', '--poll_rate', '0.0001', 'sleep', '60'])
        self.assertIn('reached timeout, terminating...', stdout.getvalue())
    # NOTE(review): likely another stripped @patch decorator — see above.
    ('sys.stdout', new_callable=io.StringIO)
    def test_timeout_kill(self, stdout: MagicMock) -> None:
        # A child that traps SIGTERM is killed (-9) after --kill_timeout.
        with self.assertRaisesRegex(SystemExit, '^-9$'):
            main(['--timeout', '0.1', '--poll_rate', '0.1', '--kill_timeout', '0.1', '--', 'bash', '-c', "trap 'echo received term' TERM; sleep 10"])
        self.assertIn('reached safe termination timeout, killing...', stdout.getvalue())
    def test_start_on_file(self) -> None:
        # The child must not launch until --start_on_file exists.
        start_on_file = 'memory://start'
        (fs, path) = fsspec.core.url_to_fs(start_on_file)
        args = ['--timeout', '0.001', '--poll_rate', '0.001', '--start_on_file', start_on_file, '--', 'echo', 'banana']
        with patch('sys.stdout', new_callable=io.StringIO) as stdout:
            with self.assertRaisesRegex(SystemExit, f'^{TIMEOUT_EXIT_CODE}$'):
                main(args)
            self.assertIn('reached timeout before launching, terminating...', stdout.getvalue())
        fs.touch(path)
        with self.assertRaisesRegex(SystemExit, '^0$'):
            main(args)
    def test_exit_on_file(self) -> None:
        # The child is terminated as soon as --exit_on_file appears.
        start_on_file = 'memory://end'
        (fs, path) = fsspec.core.url_to_fs(start_on_file)
        args = ['--poll_rate', '0.001', '--exit_on_file', start_on_file, '--', 'sleep', '60']
        with patch('sys.stdout', new_callable=io.StringIO) as stdout:
            with self.assertRaisesRegex(SystemExit, '^-15$'):
                main((['--timeout', '0.001'] + args))
            self.assertIn('reached timeout, terminating', stdout.getvalue())
        fs.touch(path)
        with patch('sys.stdout', new_callable=io.StringIO) as stdout:
            with self.assertRaisesRegex(SystemExit, '^-15$'):
                main((['--timeout', '60'] + args))
            self.assertIn('exists, terminating', stdout.getvalue())
def mat2quat(mat):
    """Convert rotation matrix/matrices of shape (..., 3, 3) to unit
    quaternion(s) of shape (..., 4), ordered (w, x, y, z) with w >= 0.

    Uses Bar-Itzhack's method: the quaternion is the eigenvector of a 4x4
    symmetric matrix K (built from the rotation entries) associated with the
    largest eigenvalue, which is robust to slightly non-orthogonal input.
    """
    mat = np.asarray(mat, dtype=np.float64)
    assert mat.shape[-2:] == (3, 3), 'Invalid shape matrix {}'.format(mat)
    rxx, ryx, rzx = mat[..., 0, 0], mat[..., 0, 1], mat[..., 0, 2]
    rxy, ryy, rzy = mat[..., 1, 0], mat[..., 1, 1], mat[..., 1, 2]
    rxz, ryz, rzz = mat[..., 2, 0], mat[..., 2, 1], mat[..., 2, 2]
    K = np.zeros(mat.shape[:-2] + (4, 4), dtype=np.float64)
    # Only the lower triangle is filled; np.linalg.eigh reads just that part.
    K[..., 0, 0] = rxx - ryy - rzz
    K[..., 1, 0] = ryx + rxy
    K[..., 1, 1] = ryy - rxx - rzz
    K[..., 2, 0] = rzx + rxz
    K[..., 2, 1] = rzy + ryz
    K[..., 2, 2] = rzz - rxx - ryy
    K[..., 3, 0] = ryz - rzy
    K[..., 3, 1] = rzx - rxz
    K[..., 3, 2] = rxy - ryx
    K[..., 3, 3] = rxx + ryy + rzz
    K /= 3.0
    q = np.empty(K.shape[:-2] + (4,))
    it = np.nditer(q[..., 0], flags=['multi_index'])
    while not it.finished:
        vals, vecs = np.linalg.eigh(K[it.multi_index])
        # Take the eigenvector of the largest eigenvalue, reordering its
        # components from (x, y, z, w) to (w, x, y, z).
        q[it.multi_index] = vecs[[3, 0, 1, 2], np.argmax(vals)]
        # Canonicalize the sign: q and -q encode the same rotation.
        if q[it.multi_index][0] < 0:
            q[it.multi_index] *= -1
        it.iternext()
    return q
def update_shared_token_timestamp(message: Message, context: ContextTypes.DEFAULT_TYPE) -> str:
    """Record this message's timestamp in chat_data and report the gap (in
    days) since the previous one; the very first call returns an error text."""
    chat_data = cast(Dict, context.chat_data)
    key = 'shared_token_timestamp'
    previous = chat_data.get(key)
    # Store the new timestamp *before* the None check: the first call must
    # still seed the value even though it reports the error message.
    chat_data[key] = message.date
    if previous is None:
        return '... Error... No time found....\nOh my god. Where is the time. Has someone seen the time?'
    elapsed = message.date - previous
    return f'{elapsed.days}'
class EventsDialog(Factory.Popup):
    """Popup that registers (but does not handle) press/release events."""
    # Register the custom events with Kivy's event dispatcher.
    __events__ = ('on_release', 'on_press')
    def __init__(self, **kwargs):
        super(EventsDialog, self).__init__(**kwargs)
    def on_release(self, instance):
        # Default no-op handler; required so the event can be dispatched.
        pass
    def on_press(self, instance):
        # Default no-op handler; required so the event can be dispatched.
        pass
    def close(self):
        """Dismiss the popup."""
        self.dismiss()
class EvenniaTest(TestCase):
    """Base test case building a minimal Evennia world: two accounts, two
    characters, two rooms joined by an exit, two objects, a script, and a
    logged-in fake telnet session (self.session)."""
    # Typeclasses used for world creation; subclasses may override.
    account_typeclass = DefaultAccount
    object_typeclass = DefaultObject
    character_typeclass = DefaultCharacter
    exit_typeclass = DefaultExit
    room_typeclass = DefaultRoom
    script_typeclass = DefaultScript
    # NOTE(review): the bare tuple below looks like a stripped
    # @patch('evennia.scripts.taskhandler.deferLater', _mock_deferlater)
    # decorator — confirm against upstream.
    ('evennia.scripts.taskhandler.deferLater', _mock_deferlater)
    def setUp(self):
        # Back up globals mutated below so tearDown can restore them.
        self.backups = (SESSIONS.data_out, SESSIONS.disconnect, settings.DEFAULT_HOME, settings.PROTOTYPE_MODULES)
        SESSIONS.data_out = Mock()
        SESSIONS.disconnect = Mock()
        self.account = create.create_account('TestAccount', email='', password='testpassword', typeclass=self.account_typeclass)
        self.account2 = create.create_account('TestAccount2', email='', password='testpassword', typeclass=self.account_typeclass)
        # Room1 is created homeless, then becomes the global default home.
        self.room1 = create.create_object(self.room_typeclass, key='Room', nohome=True)
        self.room1.db.desc = 'room_desc'
        settings.DEFAULT_HOME = ('#%i' % self.room1.id)
        settings.PROTOTYPE_MODULES = 'evennia.utils.tests.data.prototypes_example'
        self.room2 = create.create_object(self.room_typeclass, key='Room2')
        self.exit = create.create_object(self.exit_typeclass, key='out', location=self.room1, destination=self.room2)
        self.obj1 = create.create_object(self.object_typeclass, key='Obj', location=self.room1, home=self.room1)
        self.obj2 = create.create_object(self.object_typeclass, key='Obj2', location=self.room1, home=self.room1)
        self.char1 = create.create_object(self.character_typeclass, key='Char', location=self.room1, home=self.room1)
        self.char1.permissions.add('Developer')
        self.char2 = create.create_object(self.character_typeclass, key='Char2', location=self.room1, home=self.room1)
        # Bind characters to their accounts (char1 <-> account, char2 <-> account2).
        self.char1.account = self.account
        self.account.db._last_puppet = self.char1
        self.char2.account = self.account2
        self.account2.db._last_puppet = self.char2
        self.script = create.create_script(self.script_typeclass, key='Script')
        self.account.permissions.add('Developer')
        # Fake a telnet session and log account 1 in through it.
        dummysession = ServerSession()
        dummysession.init_session('telnet', ('localhost', 'testmode'), SESSIONS)
        dummysession.sessid = 1
        SESSIONS.portal_connect(dummysession.get_sync_data())
        session = SESSIONS.session_from_sessid(1)
        SESSIONS.login(session, self.account, testmode=True)
        self.session = session
    def tearDown(self):
        # Restore patched globals, drop the session, and delete the accounts.
        flush_cache()
        SESSIONS.data_out = self.backups[0]
        SESSIONS.disconnect = self.backups[1]
        settings.DEFAULT_HOME = self.backups[2]
        settings.PROTOTYPE_MODULES = self.backups[3]
        del SESSIONS[self.session.sessid]
        self.account.delete()
        self.account2.delete()
        super().tearDown()
def test_get_news_articles_with_invalid_site(graphql_client):
    """Querying newsArticles with an unknown hostname must return a GraphQL error."""
    user = UserFactory()
    parent = GenericPageFactory()
    NewsArticleFactory(title='Article 1', parent=parent, owner=user, first_published_at=datetime.datetime(2010, 1, 1, 10, 0, 0))
    # A real site exists ('pycon2'), but the query uses a different hostname.
    SiteFactory(hostname='pycon2', root_page=parent)
    query = 'query NewsArticles($hostname: String!, $language: String!) {\n newsArticles(hostname: $hostname, language: $language) {\n id\n title\n }\n }'
    response = graphql_client.query(query, variables={'hostname': 'invalid', 'language': 'en'})
    assert (response['errors'][0]['message'] == 'Site invalid not found')
class IndexedDatasetBuilder(object):
    """Incrementally writes tensors to a flat binary data file and builds the
    bookkeeping needed to emit a TNTIDX index file on finalize()."""

    # Bytes per element for each supported dtype key. `np.float` (removed in
    # NumPy >= 1.20) was an alias of the builtin `float`; using `float` keeps
    # the class importable on modern NumPy with identical lookup behaviour.
    element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, float: 4, np.double: 8}

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # cumulative element offsets, one per item
        self.dim_offsets = [0]   # cumulative offsets into the flat sizes list
        self.sizes = []          # every dimension of every added tensor
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append *tensor* (with the reader's +1 value offset applied) to the
        data file and record its size/offset bookkeeping."""
        nbytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        # Floor division keeps offsets integral; the original "/" produced
        # floats, which break struct packing of the index in finalize().
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        """Append another indexed dataset's data and bookkeeping to this one."""
        index = IndexedDataset(another_file, read_data=False)
        assert index.dtype == self.dtype
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data file across in 1 KiB chunks.
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if not data:
                    break
                self.out_file.write(data)

    def finalize(self, index_file):
        """Close the data file and write the binary TNTIDX index."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))  # format version
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
class TestTaskNeedsYield(TestNameCheckVisitorBase):
    """Checks for the task_needs_yield error: async futures created but not
    yielded inside @asynq functions.

    NOTE(review): the bare `_fails(...)` and `()` lines below look like
    stripped `@assert_fails(...)` / `@asynq()` decorators — confirm against
    upstream pyanalyze before relying on this copy.
    """
    _fails(ErrorCode.task_needs_yield)
    def test_constfuture(self):
        # A ConstFuture returned without being yielded should be flagged.
        from asynq import asynq, ConstFuture
        ()
        def bad_async_fn():
            return ConstFuture(3)
    _fails(ErrorCode.task_needs_yield)
    def test_async(self):
        # Returning another async fn's task without yielding should be flagged.
        from asynq import asynq
        ()
        def async_fn():
            pass
        ()
        def bad_async_fn():
            return async_fn.asynq()
    _fails(ErrorCode.task_needs_yield)
    def test_not_yielded(self):
        from asynq import asynq
        from pyanalyze.tests import async_fn
        ()
        def capybara(oid):
            return async_fn.asynq(oid)
    def test_not_yielded_replacement(self):
        # The autofix should insert a `yield` before the un-yielded task.
        self.assert_is_changed('\n from asynq import asynq\n from pyanalyze.tests import async_fn\n\n ()\n def capybara(oid):\n async_fn.asynq(oid)\n ', '\n from asynq import asynq\n from pyanalyze.tests import async_fn\n\n ()\n def capybara(oid):\n yield async_fn.asynq(oid)\n ')
class Question(models.Model):
    """A poll question with its publication date."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.question_text
    def was_published_recently(self):
        """True if pub_date is within the last day (and not in the future)."""
        now = timezone.now()
        return ((now - datetime.timedelta(days=1)) <= self.pub_date <= now)
    # Admin display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
def _pbs_to_saga_jobstate(state):
    """Translate a PBS job-state letter into the corresponding SAGA state.

    Unrecognized letters map to api.UNKNOWN.
    """
    # NOTE(review): letter meanings follow the PBS qstat convention
    # (C/F done, H/Q/S/W pending, R/E/T running, X canceled) — confirm
    # against the PBS version actually targeted.
    mapping = {
        'C': api.DONE,
        'F': api.DONE,
        'H': api.PENDING,
        'Q': api.PENDING,
        'S': api.PENDING,
        'W': api.PENDING,
        'R': api.RUNNING,
        'E': api.RUNNING,
        'T': api.RUNNING,
        'X': api.CANCELED,
    }
    return mapping.get(state, api.UNKNOWN)
def test_transform_point_multi(runner):
    """The transform CLI must reproject each input point line independently."""
    result = runner.invoke(main_group, ['transform', '--dst-crs', 'EPSG:32618', '--precision', '2'], '[-78.0, 23.0]\n[-78.0, 23.0]', catch_exceptions=False)
    assert (result.exit_code == 0)
    assert (result.output.strip() == '[192457.13, 2546667.68]\n[192457.13, 2546667.68]')
(allow_output_mutation=True, show_spinner=False, hash_funcs=HASH_FUNCS)
_grad()
def flow_w_to_z(flow_model, w, attributes, lighting):
    """Map a W latent to flow (Z) space, conditioned on lighting + attributes.

    The conditioning tensor is lighting concatenated with the broadcastable
    attribute tensor along dim 1; the flow gets a zero (1, 18, 1) padding.
    Returns the first flow output, detached.
    """
    w_tensor = torch.Tensor(w)
    # Attributes become a (1, A, 1, 1) float tensor for concatenation.
    attr_tensor = torch.from_numpy(np.asarray(attributes)).float().unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
    light_tensor = torch.Tensor(lighting)
    conditions = torch.cat([light_tensor, attr_tensor], dim=1).clone().detach()
    padding = torch.zeros(1, 18, 1)
    return flow_model(w_tensor, conditions, padding)[0].clone().detach()
class BasePjitPartitioner(BasePartitioner):
    """Partitioner that wraps functions with jax pjit over a device mesh.

    NOTE(review): the bare `_property` lines below look like stripped
    `@property` (or `@cached_property`) decorators — confirm upstream.
    """
    _property
    def _local_chunker(self) -> LocalChunker:
        # Chunker scoped to this host's slice of the mesh.
        return LocalChunker(self.mesh)
    _property
    def mesh(self) -> Mesh:
        # Mesh derived from the configured partition counts and backend.
        return default_mesh(self._num_partitions, self._model_parallel_submesh, self._backend)
    def partition(self, fn: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[(int, Sequence[int])]=(), donate_argnums: Union[(int, Sequence[int])]=()) -> PjittedFnWithContext:
        """pjit-compile *fn* and bind it to this partitioner's mesh context."""
        pjitted = pjit(fn, in_axis_resources=in_axis_resources, out_axis_resources=out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums, backend=self._backend)
        return PjittedFnWithContext(pjitted, self.mesh)
    def compile(self, partitioned_fn: PjittedFnWithContext, *args) -> CompiledPartitionedCallable:
        """Ahead-of-time compile a partitioned function for concrete args."""
        return partitioned_fn.lower(*args).compile()
class GherkinTerminalReporter(TerminalReporter):
    """Terminal reporter that prints BDD scenarios in Gherkin style
    (Feature/Scenario lines, with per-step output at verbosity > 1)."""
    def __init__(self, config: Config) -> None:
        super().__init__(config)
    def pytest_runtest_logreport(self, report: TestReport) -> Any:
        rep = report
        res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
        (cat, letter, word) = res
        # No status to show (e.g. setup/teardown phases of passing tests).
        if ((not letter) and (not word)):
            return None
        # `word` may be (text, markup) or plain text keyed off the outcome.
        # NOTE(review): if word is plain text and rep is none of
        # passed/failed/skipped, word_markup stays unbound below — confirm
        # that cannot happen for reports reaching this point.
        if isinstance(word, tuple):
            (word, word_markup) = word
        elif rep.passed:
            word_markup = {'green': True}
        elif rep.failed:
            word_markup = {'red': True}
        elif rep.skipped:
            word_markup = {'yellow': True}
        feature_markup = {'blue': True}
        scenario_markup = word_markup
        # Non-BDD tests (no scenario attached) fall back to default reporting.
        if ((self.verbosity <= 0) or (not hasattr(report, 'scenario'))):
            return super().pytest_runtest_logreport(rep)
        if (self.verbosity == 1):
            # One line per scenario: Feature, Scenario, outcome word.
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write(' ')
            self._tw.write(word, **word_markup)
            self._tw.write('\n')
        elif (self.verbosity > 1):
            # Verbose: additionally print every step of the scenario.
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write('\n')
            for step in report.scenario['steps']:
                self._tw.write(f''' {step['keyword']} {step['name']}
''', **scenario_markup)
            self._tw.write(f' {word}', **word_markup)
            self._tw.write('\n\n')
        self.stats.setdefault(cat, []).append(rep)
        return None
class PulseAudioOperation(PulseAudioMainloopChild):
    """Wrapper around an asynchronous PulseAudio ``pa_operation`` handle.

    Owns one reference to the underlying ``pa_operation``; release it with
    :meth:`delete`.
    """

    # Human-readable names for the pa_operation states (debugging aid).
    _state_name = {pa.PA_OPERATION_RUNNING: 'Running', pa.PA_OPERATION_DONE: 'Done', pa.PA_OPERATION_CANCELLED: 'Cancelled'}

    def __init__(self, callback_lump, pa_operation: pa.pa_operation) -> None:
        context = callback_lump.context
        assert (context.mainloop is not None)
        assert (pa_operation is not None)
        context.check_ptr_not_null(pa_operation)
        super().__init__(context.mainloop)
        self.callback_lump = callback_lump
        self._pa_operation = pa_operation

    def _get_state(self) -> int:
        """Return the raw pa_operation state constant.

        NOTE(review): the return annotation was ``None`` although a value is
        clearly returned; corrected to ``int``.
        """
        assert (self._pa_operation is not None)
        return pa.pa_operation_get_state(self._pa_operation)

    def delete(self) -> None:
        """Drop the operation reference and break back-references."""
        if (self._pa_operation is not None):
            pa.pa_operation_unref(self._pa_operation)
            self._pa_operation = None
        self.callback_lump = None
        # NOTE(review): ``self.context`` is never assigned in __init__, so this
        # only matters if a subclass sets it; harmless otherwise.
        self.context = None

    def cancel(self):
        """Request cancellation of the operation; returns self for chaining."""
        assert (self._pa_operation is not None)
        pa.pa_operation_cancel(self._pa_operation)
        return self

    def wait(self):
        """Block on the mainloop until the operation is no longer RUNNING.

        Bug fix: the original tested ``while self.is_running:`` — the bound
        method object, which is always truthy — so the loop never observed
        completion. The method must be *called*.
        """
        while self.is_running():
            self.mainloop.wait()
        return self

    def is_running(self) -> bool:
        return (self._get_state() == pa.PA_OPERATION_RUNNING)

    def is_done(self) -> bool:
        return (self._get_state() == pa.PA_OPERATION_DONE)

    def is_cancelled(self) -> bool:
        return (self._get_state() == pa.PA_OPERATION_CANCELLED)
def unique_config_sections(config_file):
    """Return an in-memory copy of *config_file* with de-duplicated sections.

    Every section header ``[name]`` is rewritten to ``[name_N]`` where N is a
    per-name running counter, so configparser-style readers will not collapse
    duplicate sections. The returned StringIO is rewound to the start.
    """
    seen = defaultdict(int)
    rewritten = io.StringIO()
    with open(config_file) as handle:
        for raw_line in handle:
            if raw_line.startswith('['):
                name = raw_line.strip().strip('[]')
                unique_name = '{}_{}'.format(name, seen[name])
                seen[name] += 1
                raw_line = raw_line.replace(name, unique_name)
            rewritten.write(raw_line)
    rewritten.seek(0)
    return rewritten
class Normal(Distribution):
    """Normal (Gaussian) distribution.

    The mapping to/from standard-normal space is affine, so the Jacobian is
    the constant diagonal 1/stdv.
    """

    def __init__(self, name, mean, stdv, input_type=None, startpoint=None):
        # input_type is accepted for API symmetry with other distributions
        # but is not used here.
        super().__init__(name=name, mean=mean, stdv=stdv, startpoint=startpoint)
        self.dist_type = 'Normal'

    def pdf(self, x):
        """Probability density function at x."""
        z = (x - self.mean) / self.stdv
        return self.std_normal.pdf(z) / self.stdv

    def cdf(self, x):
        """Cumulative distribution function at x."""
        return self.std_normal.cdf((x - self.mean) / self.stdv)

    def ppf(self, p):
        """Inverse CDF (percent-point function)."""
        return self.stdv * self.std_normal.ppf(p) + self.mean

    def sample(self, n=1000):
        """Draw n variates via inverse-transform sampling."""
        return self.ppf(np.random.rand(n))

    def u_to_x(self, u):
        """Map standard-normal variates into this distribution's space."""
        return u * self.stdv + self.mean

    def x_to_u(self, x):
        """Map variates of this distribution into standard-normal space."""
        return (x - self.mean) / self.stdv

    def jacobian(self, u, x):
        """Constant diagonal Jacobian (1/stdv) of the affine transform."""
        return np.diag(np.repeat(1 / self.stdv, u.size))

    def set_location(self, loc=0):
        """Update the location (mean) parameter."""
        self.mean = loc

    def set_scale(self, scale=1):
        """Update the scale (standard deviation) parameter."""
        self.stdv = scale
class TABlock(nn.Module):
    """Temporal Adaptive Block: wraps a Bottleneck residual block and inserts
    a TAM (temporal adaptive module) after its first convolution.

    Args:
        block (nn.Module): residual block to wrap; must be a Bottleneck.
        num_segments (int): number of frame segments handled by the TAM.
        tam_cfg (dict | None): extra keyword arguments forwarded to ``TAM``.
    """

    def __init__(self, block, num_segments, tam_cfg=None):
        super().__init__()
        # Fail fast before building any submodules (the original validated
        # only after constructing the TAM).
        if not isinstance(block, Bottleneck):
            raise NotImplementedError('TA-Blocks have not been fully implemented except the pattern based on Bottleneck block.')
        # Avoid the mutable-default-argument pitfall (`tam_cfg=dict()`); copy
        # so later mutation of the caller's dict cannot affect this module.
        self.tam_cfg = deepcopy(tam_cfg) if tam_cfg is not None else dict()
        self.block = block
        self.num_segments = num_segments
        self.tam = TAM(in_channels=block.conv1.out_channels, num_segments=num_segments, **self.tam_cfg)

    def forward(self, x):
        assert isinstance(self.block, Bottleneck)

        def _inner_forward(x):
            # Standard bottleneck residual path with the TAM inserted
            # between conv1 and conv2.
            identity = x
            out = self.block.conv1(x)
            out = self.tam(out)
            out = self.block.conv2(out)
            out = self.block.conv3(out)
            if self.block.downsample is not None:
                identity = self.block.downsample(x)
            out = out + identity
            return out

        if self.block.with_cp and x.requires_grad:
            # Trade compute for memory via gradient checkpointing.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.block.relu(out)
        return out
class KinopoiskPage(object):
    """Base class for a scraped kinopoisk.ru page bound to a model instance.

    Subclasses supply an ``xpath`` mapping (field name -> xpath expression)
    and a ``parse()`` implementation that fills ``self.instance`` from the
    downloaded ``self.content``.

    NOTE(review): ``element`` and ``xpath`` are used as attributes throughout
    (``self.xpath[name]``, ``self.element.xpath(...)``) but were defined as
    plain methods — the ``@property`` decorators appear to have been lost in
    this copy and are restored here.
    """

    # Raw/parsed page content; set from the constructor or by get().
    content = None

    def __init__(self, source_name, instance, content=None, request=None):
        self.request = request or Request()
        self.source_name = source_name
        self.instance = instance
        if content is not None:
            self.content = content

    @property
    def element(self):
        """Root element that xpath queries run against; subclasses may override."""
        return self.content

    @property
    def xpath(self):
        """Mapping of extractable field name -> xpath string (subclass-provided)."""
        raise NotImplementedError()

    def extract(self, name, to_str=False, to_int=False, to_float=False):
        """Evaluate the configured xpath for *name*, optionally coercing the result."""
        if name in self.xpath:
            xpath = self.xpath[name]
            elements = self.element.xpath(xpath)
            if (xpath[-7:] == '/text()') or ('/' in xpath):
                # Text queries: join all fragments into one string.
                value = ' '.join(elements) if elements else ''
            elif re.search('\\[\\d+\\]$', xpath) and (len(elements) > 0):
                # Indexed query: single element expected.
                value = elements[0]
            else:
                value = elements
            if to_str:
                value = self.prepare_str(value)
            if to_int:
                value = self.prepare_int(value) if value else None
            if to_float and (value != ''):
                value = float(value) if value else None
            return value
        else:
            raise ValueError('Xpath element with name `{}` is not configured'.format(name))

    def prepare_str(self, value):
        """Normalize whitespace and special characters in a scraped string."""
        if six.PY2:
            value = re.compile('\xa0').sub(' ', value)
            value = re.compile('\x97').sub('', value)
            value = re.compile(', \\.\\.\\.').sub('', value)
        else:
            value = unicodedata.normalize('NFKC', value)
            value = restore_characters(value)
        return value.strip()

    def prepare_int(self, value):
        """Parse an integer out of a scraped string (spaces are thousands separators)."""
        value = self.prepare_str(value)
        value = value.replace(' ', '')
        value = int(value)
        return value

    def prepare_date(self, value):
        """Parse a free-form date string into a datetime.date, or None if empty."""
        value = self.prepare_str(value).strip()
        if not value:
            return None
        # Month-name -> month-number substitution. NOTE(review): the month
        # names were lost in this copy (all empty strings); an empty string is
        # a substring of everything, so guard against empty entries to avoid
        # mangling the value — restore the localized names upstream.
        months = ['', '', '', '', '', '', '', '', '', '', '', '']
        for i, month in enumerate(months, start=1):
            if month and (month in value):
                value = value.replace(month, '%02d' % i)
                break
        value = value.replace('\xa0', '-')
        from dateutil import parser
        return parser.parse(value, dayfirst=True).date()

    def prepare_profit(self, value):
        """Parse a money amount like '... = $12 345' into an int."""
        profit = value
        if '=' in profit:
            profit = profit[profit.index('=') + 1:]
        profit = ''.join(profit.split())
        # Strip the currency symbol.
        profit = profit[1:]
        return self.prepare_int(profit)

    def find_profit(self, td):
        """Search a table cell's <a>/<div> children for a dollar amount."""
        for tag in [td.find('a'), td.find('div')]:
            if tag:
                for value in tag.contents:
                    if '$' in value:
                        return self.prepare_profit(value)

    def cut_from_to(self, content, after, before):
        """Return the slice of *content* from *after* up to *before*; the full
        string if either marker is absent."""
        start = content.find(after)
        end = content.find(before)
        if (start != -1) and (end != -1):
            content = content[start:end]
        return content

    def extract_title(self):
        """Extract the page title with trailing year/parenthetical removed."""
        title = self.extract('title', to_str=True)
        title = re.sub('^(.+) \\(\\d{4}\\)$', '\\1', title)
        title = re.sub('^(.+) \\( * *\\)$', '\\1', title)
        return title.rstrip()

    def get(self):
        """Download the page content for this instance and parse it."""
        if self.instance.id:
            self.content = self.request.get_content(self.instance.get_url(self.source_name))
            self.parse()
            return
        raise NotImplementedError('This method must be implemented in subclass')

    def parse(self):
        raise NotImplementedError('You must implement KinopoiskPage.parse() method')

    def split_triple_dots(self, role):
        """Split 'name ... role' strings on the triple-dot separator."""
        role = role.strip().split(' ... ')
        if (len(role) == 1) and (role[0][:3] == '...'):
            role = role[0].strip().split('... ')
        return role
# NOTE(review): this line was a bare call whose returned decorator was
# discarded, so the class was never registered; restored as a decorator.
@_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """Conv2d with Adaptive Weight Standardization (AWS).

    The weight is standardized (zero mean, unit std per output channel) and
    then rescaled/shifted by the ``weight_gamma``/``weight_beta`` buffers so
    that pre-AWS checkpoints can still be loaded.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Per-output-channel affine applied after standardization; registered
        # as buffers so they are checkpointed but not trained.
        self.register_buffer('weight_gamma', torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta', torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        """Standardize *weight* per output channel, then apply gamma/beta."""
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-05).view(-1, 1, 1, 1)
        weight = (weight - mean) / std
        weight = self.weight_gamma * weight + self.weight_beta
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Load weights while tolerating checkpoints without the AWS buffers.

        ``weight_gamma`` is marked with -1 before loading; if it is still
        non-positive afterwards the checkpoint predates AWS, so gamma/beta are
        derived from the loaded convolution weight instead of being reported
        as missing keys.
        """
        self.weight_gamma.data.fill_(-1)
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, local_missing_keys, unexpected_keys, error_msgs)
        if self.weight_gamma.data.mean() > 0:
            # gamma was present in the checkpoint: forward missing keys as-is.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # Pre-AWS checkpoint: initialise beta/gamma from the weight statistics.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), -1)
        mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
        std = torch.sqrt(weight_flat.var(dim=1) + 1e-05).view(-1, 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # The gamma/beta buffers were legitimately absent — don't report them.
        missing_gamma_beta = [k for k in local_missing_keys if k.endswith('weight_gamma') or k.endswith('weight_beta')]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
def test_invalid_skip_keyword_parameter(pytester: Pytester) -> None:
    """An unknown keyword argument to pytest.skip() surfaces as a TypeError."""
    pytester.makepyfile('\n import pytest\n pytest.skip("skip_module_level", unknown=1)\n\n def test_func():\n assert 0\n ')
    outcome = pytester.runpytest()
    outcome.stdout.fnmatch_lines(["*TypeError:*['unknown']*"])
def _make_init(cls, attrs, pre_init, pre_init_has_args, post_init, frozen, slots, cache_hash, base_attr_map, is_exc, cls_on_setattr, attrs_init):
    """Generate the ``__init__`` (or ``__attrs_init__``) method for an attrs class.

    Builds a source script from the attribute definitions, executes it with a
    prepared globals dict, and returns the resulting function object.
    """
    has_cls_on_setattr = ((cls_on_setattr is not None) and (cls_on_setattr is not setters.NO_OP))
    if (frozen and has_cls_on_setattr):
        msg = "Frozen classes can't use on_setattr."
        raise ValueError(msg)
    # Frozen or hash-caching classes must assign via object.__setattr__.
    needs_cached_setattr = (cache_hash or frozen)
    filtered_attrs = []
    attr_dict = {}
    for a in attrs:
        # Attributes excluded from __init__ with no default need no code at all.
        if ((not a.init) and (a.default is NOTHING)):
            continue
        filtered_attrs.append(a)
        attr_dict[a.name] = a
        if (a.on_setattr is not None):
            if (frozen is True):
                msg = "Frozen classes can't use on_setattr."
                raise ValueError(msg)
            needs_cached_setattr = True
        elif (has_cls_on_setattr and (a.on_setattr is not setters.NO_OP)):
            # A per-attribute NO_OP opts out of the class-level hook.
            needs_cached_setattr = True
    # Unique pseudo-filename so tracebacks can display the generated source.
    unique_filename = _generate_unique_filename(cls, 'init')
    (script, globs, annotations) = _attrs_to_init_script(filtered_attrs, frozen, slots, pre_init, pre_init_has_args, post_init, cache_hash, base_attr_map, is_exc, needs_cached_setattr, has_cls_on_setattr, attrs_init)
    if (cls.__module__ in sys.modules):
        # Let the generated code resolve names from the class's module.
        globs.update(sys.modules[cls.__module__].__dict__)
    globs.update({'NOTHING': NOTHING, 'attr_dict': attr_dict})
    if needs_cached_setattr:
        # Expose a raw object.__setattr__ so frozen classes can still assign.
        globs['_cached_setattr_get'] = _obj_setattr.__get__
    init = _make_method(('__attrs_init__' if attrs_init else '__init__'), script, unique_filename, globs)
    init.__annotations__ = annotations
    return init
class HorizontalFlip(DualTransform):
    """Test-time augmentation transform: optional horizontal flip.

    The single parameter ``apply`` toggles between identity (False) and
    a horizontal flip (True); flipping is its own inverse.
    """

    identity_param = False

    def __init__(self):
        super().__init__('apply', [False, True])

    def apply_aug_image(self, image, apply=False, **kwargs):
        """Flip the input image when augmenting with apply=True."""
        return F.hflip(image) if apply else image

    def apply_deaug_mask(self, mask, apply=False, **kwargs):
        """Undo the flip on a predicted mask."""
        return F.hflip(mask) if apply else mask

    def apply_deaug_label(self, label, apply=False, **kwargs):
        # Labels are orientation-invariant; nothing to undo.
        return label

    def apply_deaug_keypoints(self, keypoints, apply=False, **kwargs):
        """Undo the flip on predicted keypoints."""
        return F.keypoints_hflip(keypoints) if apply else keypoints
class PreviewContractViewTests(TestCase):
    """Admin contract-preview view: renders PDF by default, DOCX on request.

    NOTE(review): both test methods receive a ``mocked_render`` argument, so
    they must be decorated with ``unittest.mock.patch``; the decorators were
    reduced to bare string statements in this copy and are restored here.
    """

    def setUp(self):
        # Superuser so the admin view is accessible.
        self.user = baker.make(settings.AUTH_USER_MODEL, is_staff=True, is_superuser=True)
        self.client.force_login(self.user)
        self.contract = baker.make_recipe('sponsors.tests.empty_contract', sponsorship__start_date=date.today())
        self.url = reverse('admin:sponsors_contract_preview', args=[self.contract.pk])

    @patch('sponsors.views_admin.render_contract_to_pdf_response')
    def test_render_pdf_by_default(self, mocked_render):
        response = HttpResponse()
        mocked_render.return_value = response
        r = self.client.get(self.url)
        self.assertEqual(r, response)
        # SAMEORIGIN so the preview can be embedded in the admin's iframe.
        self.assertEqual(r.get('X-Frame-Options'), 'SAMEORIGIN')
        self.assertEqual(mocked_render.call_count, 1)
        self.assertEqual(mocked_render.call_args[0][1], self.contract)
        self.assertIsInstance(mocked_render.call_args[0][0], WSGIRequest)

    @patch('sponsors.views_admin.render_contract_to_docx_response')
    def test_render_docx_if_specified_in_the_querystring(self, mocked_render):
        response = HttpResponse()
        mocked_render.return_value = response
        r = self.client.get(self.url + '?format=docx')
        self.assertEqual(r, response)
        self.assertEqual(r.get('X-Frame-Options'), 'SAMEORIGIN')
        self.assertEqual(mocked_render.call_count, 1)
        self.assertEqual(mocked_render.call_args[0][1], self.contract)
        self.assertIsInstance(mocked_render.call_args[0][0], WSGIRequest)
class MNIST(object):
    """MNIST wrapped as train/test DataLoaders over 3-channel 32x32 images."""

    def __init__(self, **options):
        transform = transforms.Compose([transforms.Resize(32), transforms.ToTensor()])
        batch_size = options['batch_size']
        data_root = os.path.join(options['dataroot'], 'mnist')
        # Pin host memory only when transferring to a GPU.
        pin_memory = bool(options['use_gpu'])
        train_set = MNISTRGB(root=data_root, train=True, download=True, transform=transform)
        test_set = MNISTRGB(root=data_root, train=False, download=True, transform=transform)
        self.trainloader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=options['workers'], pin_memory=pin_memory)
        self.testloader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=options['workers'], pin_memory=pin_memory)
        self.num_classes = 10
def test_config_settings(tmp_path):
    """config-settings from pyproject.toml are flattened into key="value" pairs,
    with list values repeated once per element."""
    config_file: Path = tmp_path / 'pyproject.toml'
    config_file.write_text('[tool.cibuildwheel.config-settings]\nexample = "one"\nother = ["two", "three"]\n')
    reader = OptionsReader(config_file_path=config_file, platform='linux', env={})
    rendered = reader.get('config-settings', table={'item': '{k}="{v}"', 'sep': ' '})
    assert rendered == 'example="one" other="two" other="three"'
def test_pretrainedinit():
    """PretrainedInit copies checkpoint weights into a model, optionally
    stripping a key prefix for partial loading.

    Bug fix: the original created a TemporaryDirectory but never used it,
    saving 'modelA.pth' into the CWD and leaking it after the test.
    """
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    with TemporaryDirectory() as tmpdir:
        # Save inside the temp dir so the checkpoint is cleaned up with it.
        checkpoint = f'{tmpdir}/modelA.pth'
        torch.save(modelA.state_dict(), checkpoint)
        funcB = PretrainedInit(checkpoint=checkpoint)
        funcC = PretrainedInit(checkpoint=checkpoint, prefix='linear.')
        funcB(modelB)
        assert torch.equal(modelB.linear.weight, torch.full(modelB.linear.weight.shape, 1.0))
        assert torch.equal(modelB.linear.bias, torch.full(modelB.linear.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d.weight, torch.full(modelB.conv2d.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d.bias, torch.full(modelB.conv2d.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d_2.weight, torch.full(modelB.conv2d_2.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d_2.bias, torch.full(modelB.conv2d_2.bias.shape, 2.0))
        # Prefix-filtered load: only the 'linear.' weights land in modelC.
        funcC(modelC)
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.0))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.0))
class TestDurations:
    """Behaviour of the --durations reporting of slowest test phases."""

    source = '\n from _pytest import timing\n def test_something():\n pass\n def test_2():\n timing.sleep(0.010)\n def test_1():\n timing.sleep(0.002)\n def test_3():\n timing.sleep(0.020)\n '

    @staticmethod
    def _assert_phase_listed(result, test_suffix, phase):
        """Fail unless some output line mentions both the test and the phase."""
        needle = 'test_%s' % test_suffix
        if not any(needle in line and phase in line for line in result.stdout.lines):
            raise AssertionError(f'not found {test_suffix} {phase}')

    def test_calls(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('--durations=10')
        assert result.ret == 0
        result.stdout.fnmatch_lines_random(['*durations*', '*call*test_3*', '*call*test_2*'])
        result.stdout.fnmatch_lines(['(8 durations < 0.005s hidden. Use -vv to show these durations.)'])

    def test_calls_show_2(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('--durations=2')
        assert result.ret == 0
        lines = result.stdout.get_lines_after('*slowest*durations*')
        assert '4 passed' in lines[2]

    def test_calls_showall(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('--durations=0')
        assert result.ret == 0
        # Only the slowest test is guaranteed visible without -vv.
        for suffix in '3':
            for phase in ('call',):
                self._assert_phase_listed(result, suffix, phase)

    def test_calls_showall_verbose(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('--durations=0', '-vv')
        assert result.ret == 0
        # -vv shows every duration, including the fast ones.
        for suffix in '123':
            for phase in ('call',):
                self._assert_phase_listed(result, suffix, phase)

    def test_with_deselected(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('--durations=2', '-k test_3')
        assert result.ret == 0
        result.stdout.fnmatch_lines(['*durations*', '*call*test_3*'])

    def test_with_failing_collection(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        pytester.makepyfile(test_collecterror='xyz')
        result = pytester.runpytest_inprocess('--durations=2', '-k test_1')
        assert result.ret == 2
        result.stdout.fnmatch_lines(['*Interrupted: 1 error during collection*'])
        # No durations section when collection fails.
        result.stdout.no_fnmatch_line('*duration*')

    def test_with_not(self, pytester: Pytester, mock_timing) -> None:
        pytester.makepyfile(self.source)
        result = pytester.runpytest_inprocess('-k not 1')
        assert result.ret == 0
class BinaryPrecision(MulticlassPrecision):
    """Precision metric for binary classification: predictions are thresholded
    and accumulated via the two-class multiclass machinery."""

    def __init__(self: TBinaryPrecision, *, threshold: float = 0.5, device: Optional[torch.device] = None) -> None:
        # Binary precision is the two-class special case of the multiclass metric.
        super().__init__(num_classes=2, device=device)
        self.threshold = threshold

    # NOTE(review): the decorator was truncated to "_mode()" in this copy;
    # restored as torch.inference_mode() — confirm against upstream.
    @torch.inference_mode()
    def update(self: TBinaryPrecision, input: torch.Tensor, target: torch.Tensor) -> TBinaryPrecision:
        """Accumulate true/false positives and label counts for one batch."""
        input = input.to(self.device)
        target = target.to(self.device)
        (num_tp, num_fp, num_label) = _binary_precision_update(input, target, self.threshold)
        self.num_tp += num_tp
        self.num_fp += num_fp
        self.num_label += num_label
        return self
class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensor dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence1'].numpy().decode('utf-8'),
            tensor_dict['sentence2'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Examples from train.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Examples from dev.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        """Examples from test.tsv (unlabeled)."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """Binary labels."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Convert TSV rows (header skipped) to InputExamples."""
        is_test = set_type == 'test'
        examples = []
        for row in lines[1:]:
            examples.append(InputExample(guid=f'{set_type}-{row[0]}', text_a=row[1], text_b=row[2], label=None if is_test else row[-1]))
        return examples
# NOTE(review): the decorator was stripped to a bare parenthesized expression
# (a syntax error); restored as unittest.mock.patch, which matches the
# injected mock_rewriter parameter.
@patch('pypyr.steps.dsl.fileinoutrewriter.StreamRewriter', spec=StreamRewriter)
def test_streamreplacepairsrewriterstep_run_step_substitutions(mock_rewriter):
    """Formatting expressions in the step config are substituted from context
    before the rewriter is constructed and run."""
    context = Context({'k1': 'b', 'k2': 'd', 'k3': '{k2}', 'root': {'in': 'inpathhere', 'out': 'outpathhere', 'replacePairs': {'a': '{k1}', 'c': '{k3}'}, 'encodingIn': 'encIn', 'encodingOut': 'encOut'}})
    obj = StreamReplacePairsRewriterStep('blah.name', 'root', context)
    assert obj.path_in == 'inpathhere'
    assert obj.path_out == 'outpathhere'
    assert obj.context == context
    assert obj.logger.name == 'blah.name'
    # Nested substitution: '{k3}' -> '{k2}' -> 'd'.
    assert obj.replace_pairs == {'a': 'b', 'c': 'd'}
    assert obj.encoding_in == 'encIn'
    assert obj.encoding_out == 'encOut'
    iter_replace_strings_target = 'pypyr.steps.dsl.fileinoutrewriter.StreamReplacePairsRewriterStep.iter_replace_strings'
    with patch(iter_replace_strings_target) as mock_iter:
        obj.run_step()
    mock_iter.assert_called_once_with({'a': 'b', 'c': 'd'})
    assert mock_rewriter.mock_calls[0] == call(mock_iter.return_value, encoding_in='encIn', encoding_out='encOut')
    mock_rewriter.return_value.files_in_to_out.assert_called_once_with(in_path='inpathhere', out_path='outpathhere')
class GetCrtcTransform(rq.ReplyRequest):
    """RandR GetCrtcTransform request: query a CRTC's pending and current
    transformation matrices together with their filter names/parameters."""

    # Request: RandR minor opcode 27, one Card32 CRTC id.
    _request = rq.Struct(rq.Card8('opcode'), rq.Opcode(27), rq.RequestLength(), rq.Card32('crtc'))
    # Reply: pending and current Render transforms, then variable-length
    # filter names and Card32 parameter lists (lengths declared up front).
    _reply = rq.Struct(rq.ReplyCode(), rq.Card8('status'), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Object('pending_transform', Render_Transform), rq.Bool('has_transforms'), rq.Pad(3), rq.Object('current_transform', Render_Transform), rq.Pad(4), rq.LengthOf('pending_filter_name', 2), rq.LengthOf('pending_filter_params', 2), rq.LengthOf('current_filter_name', 2), rq.LengthOf('current_filter_params', 2), rq.String8('pending_filter_name'), rq.List('pending_filter_params', rq.Card32Obj), rq.String8('current_filter_name'), rq.List('current_filter_params', rq.Card32Obj))
def _permissions_actions(caller, raw_inp, **kwargs):
    """Menu node callback: examine/remove a listed permission, or add a new one.

    Returns the next menu node (or a (node, kwargs) tuple for examine).
    """
    choices = kwargs.get('available_choices', [])
    perm, action = _default_parse(raw_inp, choices, ('examine', 'e'), ('remove', 'r', 'delete', 'd'))
    if not perm:
        # No existing permission matched: treat the raw input as a new perm to add.
        caller.msg(_add_perm(caller, raw_inp.strip()))
        return 'node_permissions'
    if action == 'examine':
        return ('node_examine_entity', {'text': _display_perm(caller, perm), 'back': 'permissions'})
    if action == 'remove':
        caller.msg(_add_perm(caller, perm, delete=True))
    return 'node_permissions'
class Citadel(Ship):
    """Fit host for items in the Structure category (citadels)."""

    def validate(self, item):
        """Reject any item that is not a Structure."""
        category = item.category.name
        if category == 'Structure':
            return
        pyfalog.error("Passed item '{0}' (category: {1}) is not under Structure category", item.name, category)
        raise ValueError('Passed item "%s" (category: (%s)) is not under Structure category' % (item.name, category))

    def __deepcopy__(self, memo):
        # Copies share the (immutable) item reference.
        return Citadel(self.item)

    def __repr__(self):
        return 'Citadel(ID={}, name={}) at {}'.format(self.item.ID, self.item.name, hex(id(self)))
class ConfirmButton(discord.ui.Button):
    """Green 'Confirm' button: flags the parent view to proceed, disables all
    of its components, and stops the view."""

    def __init__(self):
        super().__init__(style=discord.ButtonStyle.green, label='Confirm')

    async def callback(self, interaction: discord.Interaction):
        # Acknowledge the click first so Discord doesn't show it as failed.
        await interaction.response.defer()
        view = self.view
        view.proceed = True
        for component in view.children:
            component.disabled = True
        await interaction.edit_original_response(view=view)
        view.stop()
class ItemCondition(str, Enum):
    """Marketplace item-condition codes (string-valued so members can be used
    directly where the API expects the raw condition string)."""

    # New items
    NEW_ITEM = 'NewItem'
    NEW_WITH_WARRANTY = 'NewWithWarranty'
    NEW_OEM = 'NewOEM'
    NEW_OPEN_BOX = 'NewOpenBox'
    # Used items, in decreasing condition
    USED_LIKE_NEW = 'UsedLikeNew'
    USED_VERY_GOOD = 'UsedVeryGood'
    USED_GOOD = 'UsedGood'
    USED_ACCEPTABLE = 'UsedAcceptable'
    USED_POOR = 'UsedPoor'
    USED_REFURBISHED = 'UsedRefurbished'
    # Collectible items, in decreasing condition
    COLLECTIBLE_LIKE_NEW = 'CollectibleLikeNew'
    COLLECTIBLE_VERY_GOOD = 'CollectibleVeryGood'
    COLLECTIBLE_GOOD = 'CollectibleGood'
    COLLECTIBLE_ACCEPTABLE = 'CollectibleAcceptable'
    COLLECTIBLE_POOR = 'CollectiblePoor'
    # Refurbished and other
    REFURBISHED_WITH_WARRANTY = 'RefurbishedWithWarranty'
    REFURBISHED = 'Refurbished'
    CLUB = 'Club'
# NOTE(review): the decorator was truncated to ".wrap" in this copy; restored
# as torch.fx.wrap so the function is treated as a leaf during FX tracing —
# confirm against upstream.
@torch.fx.wrap
def apply_feature_processors_to_kjt(features: KeyedJaggedTensor, feature_processors: Dict[str, nn.Module]) -> KeyedJaggedTensor:
    """Apply per-key feature processors and return a re-weighted KJT.

    Keys with a processor get that processor's output weights; keys without
    one get neutral (all-ones) weights. Lengths/offsets/stride metadata are
    carried over unchanged from the input.
    """
    processed_weights = []
    features_dict = features.to_dict()
    for key in features.keys():
        jt = features_dict[key]
        if key in feature_processors:
            fp_jt = feature_processors[key](jt)
            processed_weights.append(fp_jt.weights())
        else:
            # No processor configured for this key: unit weights.
            processed_weights.append(torch.ones(jt.values().shape[0], device=jt.values().device))
    return KeyedJaggedTensor(
        keys=features.keys(),
        values=features.values(),
        weights=torch.cat(processed_weights) if processed_weights else features.weights_or_none(),
        lengths=features.lengths(),
        offsets=features._offsets,
        stride=features._stride,
        length_per_key=features._length_per_key,
        offset_per_key=features._offset_per_key,
        index_per_key=features._index_per_key,
    )
# NOTE(review): the decorator was truncated to "(name='a')" in this copy (a
# syntax error); restored as pytest.fixture, which matches the name= keyword
# and the fixture_-prefixed function name.
@pytest.fixture(name='a')
def fixture_a() -> FixtureA:
    """FSM over the alphabet {a, b, not-ab} that accepts exactly the string 'a'.

    State 1 is the only final state; any symbol from state 1, and anything but
    'a' from the start, leads to the dead state 2.
    """
    return Fsm(alphabet={Charclass('a'), Charclass('b'), (~ Charclass('ab'))}, states={0, 1, 2}, initial=0, finals={1}, map={0: {Charclass('a'): 1, Charclass('b'): 2, (~ Charclass('ab')): 2}, 1: {Charclass('a'): 2, Charclass('b'): 2, (~ Charclass('ab')): 2}, 2: {Charclass('a'): 2, Charclass('b'): 2, (~ Charclass('ab')): 2}})
def main(rag_example_args: 'RagExampleArguments', processing_args: 'ProcessingArguments', index_hnsw_args: 'IndexHnswArguments'):
    """End-to-end RAG demo: build a passage dataset from a TSV file, embed and
    FAISS-index it, then answer a single question with a RAG model."""
    logger.info('Step 1 - Create the dataset')
    # Input is a tab-separated file with (title, text) columns and no header.
    assert os.path.isfile(rag_example_args.csv_path), 'Please provide a valid path to a csv file'
    dataset = load_dataset('csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])
    # Split long documents into passages, then embed each passage with DPR.
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features({'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})
    dataset = dataset.map(partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features)
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
    logger.info('Step 2 - Index the dataset')
    # Approximate nearest-neighbour HNSW index over inner-product similarity.
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
    logger.info('Step 3 - Load RAG')
    # Wire the retriever to the freshly built custom index.
    retriever = RagRetriever.from_pretrained(rag_example_args.rag_model_name, index_name='custom', indexed_dataset=dataset)
    model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever)
    tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name)
    logger.info('Step 4 - Have fun')
    question = (rag_example_args.question or "What does Moses' rod turn into ?")
    input_ids = tokenizer.question_encoder(question, return_tensors='pt')['input_ids']
    generated = model.generate(input_ids)
    generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
    logger.info(('Q: ' + question))
    logger.info(('A: ' + generated_string))
def _load_header(fid, pointer):
if ((pointer != 0) and (pointer is not None)):
fid.seek(pointer)
temp = dict()
(temp['id'], reserved, temp['length'], temp['link_count']) = _HeaderStruct.unpack(fid.read(24))
temp['pointer'] = pointer
return temp
else:
return None |
def is_rectangle(face):
    """Return True if *face* is a rectangle.

    A face qualifies when exactly four of its corner deviations are right
    angles (within 1e-3 rad) and every remaining corner is straight
    (collinear with its neighbours).
    """
    corner_angles = [math.pi - loop.calc_angle() for loop in face.loops]
    half_pi = math.pi / 2
    n_right = sum(1 for a in corner_angles if half_pi - 0.001 < a < half_pi + 0.001)
    n_straight = sum(1 for a in corner_angles if -0.001 < a < 0.001)
    return n_right == 4 and n_straight == len(corner_angles) - 4
class MultinodeConstraintFcn(FcnEnum):
    """Selectable multinode constraint functions; each member wraps its
    implementation in a one-element tuple (FcnEnum convention)."""

    STATES_EQUALITY = (MultinodeConstraintFunctions.Functions.states_equality,)
    CONTROLS_EQUALITY = (MultinodeConstraintFunctions.Functions.controls_equality,)
    ALGEBRAIC_STATES_EQUALITY = (MultinodeConstraintFunctions.Functions.algebraic_states_equality,)
    CUSTOM = (MultinodeConstraintFunctions.Functions.custom,)
    COM_EQUALITY = (MultinodeConstraintFunctions.Functions.com_equality,)
    COM_VELOCITY_EQUALITY = (MultinodeConstraintFunctions.Functions.com_velocity_equality,)
    TIME_CONSTRAINT = (MultinodeConstraintFunctions.Functions.time_equality,)
    TRACK_TOTAL_TIME = (MultinodeConstraintFunctions.Functions.track_total_time,)
    STOCHASTIC_HELPER_MATRIX_EXPLICIT = (MultinodeConstraintFunctions.Functions.stochastic_helper_matrix_explicit,)
    STOCHASTIC_HELPER_MATRIX_IMPLICIT = (MultinodeConstraintFunctions.Functions.stochastic_helper_matrix_implicit,)
    STOCHASTIC_COVARIANCE_MATRIX_CONTINUITY_IMPLICIT = (MultinodeConstraintFunctions.Functions.stochastic_covariance_matrix_continuity_implicit,)
    STOCHASTIC_DF_DW_IMPLICIT = (MultinodeConstraintFunctions.Functions.stochastic_df_dw_implicit,)

    @staticmethod
    def get_type():
        """Return the container class holding the constraint implementations.

        NOTE(review): defined without ``self``, so it must be a staticmethod;
        the decorator appears to have been lost in this copy and is restored.
        """
        return MultinodeConstraintFunctions
def allocate_batch(indices, lengths, src_sizes, tgt_sizes, batch_size_words, batch_size_sents, batch_size_multiplier, max_src_len, max_tgt_len, min_src_len, min_tgt_len, cleaning=1):
    """Allocate mini-batches, preferring the Cython fast path.

    Falls back to the pure-Python implementation when pyximport is not
    installed or when either size array is missing.
    """
    try:
        import pyximport
        has_cython = True
    except ModuleNotFoundError:
        has_cython = False
    if not has_cython or src_sizes is None or tgt_sizes is None:
        return allocate_batch_slow(indices, lengths, src_sizes, tgt_sizes, batch_size_words, batch_size_sents, batch_size_multiplier, max_src_len, max_tgt_len, min_src_len, min_tgt_len, cleaning)
    # Compile the Cython extension on the fly with numpy headers available.
    pyximport.install(setup_args={'include_dirs': np.get_include()}, inplace=True)
    from .fast_extensions import fast_batch_allocate
    cleaning = int(cleaning)
    if isinstance(indices, list):
        indices = np.asarray(indices)
    return fast_batch_allocate(indices, lengths, src_sizes, tgt_sizes, batch_size_words, batch_size_sents, batch_size_multiplier, max_src_len, max_tgt_len, min_src_len, min_tgt_len, cleaning)
def from_pickle(data, db_obj=None):
    """Recursively restore a de-pickled structure into live objects.

    Packed dbobj/session markers are unpacked back into their objects. When
    *db_obj* is given, mutable containers are rebuilt as _Saver* wrappers
    that write back to the database attribute on mutation; nested wrappers
    keep parent links so deep changes propagate to the owning attribute.
    """
    def process_item(item):
        # Restore *item* into plain (non-database-tracking) containers.
        dtype = type(item)
        if (dtype in (str, int, float, bool, bytes, SafeString, SafeBytes)):
            # Immutable leaves pass through unchanged.
            return item
        elif _IS_PACKED_DBOBJ(item):
            return unpack_dbobj(item)
        elif _IS_PACKED_SESSION(item):
            return unpack_session(item)
        elif (dtype == tuple):
            return tuple((process_item(val) for val in item))
        elif (dtype == dict):
            # Keys may themselves be packed objects, so process both sides.
            return dict(((process_item(key), process_item(val)) for (key, val) in item.items()))
        elif (dtype == set):
            return set((process_item(val) for val in item))
        elif (dtype == OrderedDict):
            return OrderedDict(((process_item(key), process_item(val)) for (key, val) in item.items()))
        elif (dtype == deque):
            return deque((process_item(val) for val in item))
        elif hasattr(item, '__iter__'):
            # Unknown iterable: try to rebuild the same type, else a list.
            try:
                return item.__class__((process_item(val) for val in item))
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        return item
    def process_tree(item, parent):
        # Restore *item* into _Saver* tracking containers chained to *parent*.
        dtype = type(item)
        if (dtype in (str, int, float, bool, bytes, SafeString, SafeBytes)):
            return item
        elif _IS_PACKED_DBOBJ(item):
            return unpack_dbobj(item)
        elif (dtype == tuple):
            # Tuples are immutable and need no tracking wrapper.
            return tuple((process_tree(val, item) for val in item))
        elif (dtype == list):
            dat = _SaverList(_parent=parent)
            dat._data.extend((process_tree(val, dat) for val in item))
            return dat
        elif (dtype == dict):
            # Keys stay plain (process_item); only values get tracked.
            dat = _SaverDict(_parent=parent)
            dat._data.update(((process_item(key), process_tree(val, dat)) for (key, val) in item.items()))
            return dat
        elif (dtype == set):
            dat = _SaverSet(_parent=parent)
            dat._data.update(set((process_tree(val, dat) for val in item)))
            return dat
        elif (dtype == OrderedDict):
            dat = _SaverOrderedDict(_parent=parent)
            dat._data.update(((process_item(key), process_tree(val, dat)) for (key, val) in item.items()))
            return dat
        elif (dtype == deque):
            dat = _SaverDeque(_parent=parent)
            dat._data.extend((process_item(val) for val in item))
            return dat
        elif hasattr(item, '__iter__'):
            try:
                return item.__class__((process_tree(val, parent) for val in item))
            except (AttributeError, TypeError):
                # Fall back to a tracked list when the type can't be rebuilt.
                dat = _SaverList(_parent=parent)
                dat._data.extend((process_tree(val, dat) for val in item))
                return dat
        return item
    if db_obj:
        # Top-level container is bound directly to the database object; its
        # children are chained via _parent links.
        dtype = type(data)
        if (dtype == list):
            dat = _SaverList(_db_obj=db_obj)
            dat._data.extend((process_tree(val, dat) for val in data))
            return dat
        elif (dtype == dict):
            dat = _SaverDict(_db_obj=db_obj)
            dat._data.update(((process_item(key), process_tree(val, dat)) for (key, val) in data.items()))
            return dat
        elif (dtype == set):
            dat = _SaverSet(_db_obj=db_obj)
            dat._data.update((process_tree(val, dat) for val in data))
            return dat
        elif (dtype == OrderedDict):
            dat = _SaverOrderedDict(_db_obj=db_obj)
            dat._data.update(((process_item(key), process_tree(val, dat)) for (key, val) in data.items()))
            return dat
        elif (dtype == deque):
            dat = _SaverDeque(_db_obj=db_obj)
            dat._data.extend((process_item(val) for val in data))
            return dat
    # No db_obj (or untracked top-level type): plain restore.
    return process_item(data)
def build_python_from_data(datas, save_path):
    """Generate the pymiere objects module from parsed API data and write it
    to *save_path*. The '$' object is renamed 'Dollar' to stay a valid
    Python identifier."""
    parts = ['from pymiere.core import PymiereBaseObject, PymiereBaseCollection, Array, _format_object_to_py, _format_object_to_es\n']
    known_names = list(datas.keys()) + ['Array']
    for name, data in datas.items():
        print("Generating object '{}'".format(name))
        parts.append(generate_class(data, known_names))
    code = ''.join(parts)
    code = code.replace('class $(PymiereBaseObject):', 'class Dollar(PymiereBaseObject):')
    code = code.replace('super($, self).__init__(pymiere_id)', 'super(Dollar, self).__init__(pymiere_id)')
    with open(save_path, 'w') as out:
        out.write(code)
def _pest_control_score(x, seed=None):
    """Score a pest-control schedule; lower is better.

    x: integer array, one entry per stage; x[i] selects the control action at
       stage i (0 = no control; 1..4 index the per-action tables below).
    seed: optional int; when given, beta draws come from np.random.RandomState(seed).

    Returns the total price paid plus the accumulated per-stage mean fraction
    of simulations whose pest fraction exceeded the threshold U.
    """
    U = 0.1  # pest-fraction threshold counted as "above"
    n_stages = x.size
    n_simulations = 100
    # Beta-distribution parameters for initial infestation and natural spread.
    init_pest_frac_alpha = 1.0
    init_pest_frac_beta = 30.0
    spread_alpha = 1.0
    spread_beta = (17.0 / 3.0)
    control_alpha = 1.0
    # Per-action tables keyed by action id 1..4.
    control_price_max_discount = {1: 0.2, 2: 0.3, 3: 0.3, 4: 0.0}
    tolerance_develop_rate = {1: (1.0 / 7.0), 2: (2.5 / 7.0), 3: (2.0 / 7.0), 4: (0.5 / 7.0)}
    control_price = {1: 1.0, 2: 0.8, 3: 0.7, 4: 0.5}
    control_beta = {1: (2.0 / 7.0), 2: (3.0 / 7.0), 3: (3.0 / 7.0), 4: (5.0 / 7.0)}
    payed_price_sum = 0
    above_threshold = 0
    if (seed is not None):
        init_pest_frac = np.random.RandomState(seed).beta(init_pest_frac_alpha, init_pest_frac_beta, size=(n_simulations,))
    else:
        init_pest_frac = np.random.beta(init_pest_frac_alpha, init_pest_frac_beta, size=(n_simulations,))
    curr_pest_frac = init_pest_frac
    for i in range(n_stages):
        # NOTE(review): a fresh RandomState(seed) is constructed for every
        # draw, so the seeded path reuses the same underlying sample at each
        # stage -- confirm this repetition is intended.
        if (seed is not None):
            spread_rate = np.random.RandomState(seed).beta(spread_alpha, spread_beta, size=(n_simulations,))
        else:
            spread_rate = np.random.beta(spread_alpha, spread_beta, size=(n_simulations,))
        do_control = (x[i] > 0)
        if do_control:
            if (seed is not None):
                control_rate = np.random.RandomState(seed).beta(control_alpha, control_beta[x[i]], size=(n_simulations,))
            else:
                control_rate = np.random.beta(control_alpha, control_beta[x[i]], size=(n_simulations,))
            next_pest_frac = _pest_spread(curr_pest_frac, spread_rate, control_rate, True)
            # Pests develop tolerance: this action's beta parameter grows each use.
            control_beta[x[i]] += (tolerance_develop_rate[x[i]] / float(n_stages))
            # Price is discounted in proportion to how often this action appears in x.
            payed_price = (control_price[x[i]] * (1.0 - ((control_price_max_discount[x[i]] / float(n_stages)) * float(np.sum((x == x[i]))))))
        else:
            next_pest_frac = _pest_spread(curr_pest_frac, spread_rate, 0, False)
            payed_price = 0
        payed_price_sum += payed_price
        above_threshold += np.mean((curr_pest_frac > U))
        curr_pest_frac = next_pest_frac
    return (payed_price_sum + above_threshold)
def test_normalize_percent_characters():
    """All hex-case variants of the same percent-escapes normalize identically."""
    expected = '%3Athis_should_be_lowercase%DF%AB%4C'
    variants = (
        '%3athis_should_be_lowercase%DF%ab%4c',   # lowercase escapes
        '%3Athis_should_be_lowercase%DF%AB%4C',   # already normalized
        '%3Athis_should_be_lowercase%DF%aB%4C',   # mixed case
    )
    for variant in variants:
        assert normalize_percent_characters(variant) == expected
def test_unlock_account_with_passwordfile(keystore_mock):
    """Unlocking an account via a password file yields a truthy private key."""
    manager = AccountManager(keystore_mock)
    passwordfile_path = os.path.join(keystore_mock, 'passwordfile.txt')
    with open(passwordfile_path, 'r') as handle:
        privkey = unlock_account_with_passwordfile(
            account_manager=manager,
            address_hex='0x0d5a0e4FECE4b84365b9B8DbA6e6D41348C73645',
            password_file=handle,
        )
    assert privkey
class MPNetConfig(PretrainedConfig):
    """Configuration holding the hyperparameters of an MPNet model.

    Constructor arguments are mirrored one-to-one as instance attributes; the
    special token ids are forwarded to PretrainedConfig.
    """

    model_type = 'mpnet'

    def __init__(self, vocab_size=30527, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, relative_attention_num_buckets=32, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Vocabulary / embedding dimensions.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        # Transformer topology.
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularisation and numerics.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Relative position bias buckets (MPNet-specific).
        self.relative_attention_num_buckets = relative_attention_num_buckets
class PykickstartLintConfig(PocketLintConfig):
    """Pocketlint configuration for pykickstart's pylint run."""

    def __init__(self):
        PocketLintConfig.__init__(self)
        # pylint messages that are known noise for this code base.
        noise_patterns = (
            '^W1113.*: Keyword argument before variable positional arguments list in the definition of __init__ function$',
            'W0707.*raise-missing-from',
            'W1406.*redundant-u-string-prefix',
            'W1514.*unspecified-encoding',
        )
        self.falsePositives = [FalsePositive(pattern) for pattern in noise_patterns]

    def ignoreNames(self):
        """Names/directories that pylint should skip entirely."""
        return {'translation-canary', '.tox'}
# NOTE(review): the '.parametrize(...)' line below looks like a stripped
# '@pytest.mark.parametrize' decorator (extraction artifact) -- restore the
# '@pytest.mark.' prefix before running.
.parametrize('method', ['get_absolute_url', 'get_delete_url', 'get_down_vote_url', 'get_hashid', 'get_remove_vote_url', 'get_review_url', 'get_slug', 'get_up_vote_url', 'get_update_url', 'get_vote_url', '__str__'])
def test_proposal_model_method_works(db, method):
    """Each named Proposal method returns a truthy value on a factory-built instance."""
    proposal = f.ProposalFactory()
    assert getattr(proposal, method)()
def select_2(train_embs, test_embs, downstream_train_examples, downstream_test_examples, tag, phase2_selection):
    """Phase-2 prompt selection: for each test example pick training examples
    (by cosine similarity or at random), pack as many as fit the token budget,
    and dump one prompt JSON per test example under
    {args.output_dir}/{tag}/prompts/.
    """
    cos = nn.CosineSimilarity(dim=1, eps=1e-06)
    bar = tqdm(range(len(downstream_test_examples)), desc='phase 2 similar select')
    if (not os.path.isdir(f'{args.output_dir}/{tag}/prompts')):
        os.makedirs(f'{args.output_dir}/{tag}/prompts', exist_ok=True)
    for (test_id, one_test_instance) in enumerate(downstream_test_examples):
        prev_prompt_string_len = 0
        # Rank candidate training examples for this test instance.
        if (phase2_selection in ['similar']):
            test_e_reshape = test_embs[test_id].reshape(1, (- 1))
            scores = cos(test_e_reshape, train_embs).numpy()
            sorted_indices = np.argsort(scores)  # ascending: most similar last
        elif (phase2_selection in ['random']):
            sorted_indices = np.random.permutation(range(len(downstream_train_examples)))
        if (not args.in_domain):
            sorted_indices = sorted_indices[(- 2):]
        selected_indices = []
        num_indices = len(sorted_indices)
        # Walk from the best-ranked candidate downwards, keeping those that fit
        # the length budget.
        for idx in range((num_indices - 1), (- 1), (- 1)):
            cur_len = get_instance_length(sorted_indices[idx], downstream_train_examples)
            if (cur_len > 250):
                continue  # skip overly long training examples
            prev_prompt_string_len += cur_len
            cur_prompt_string_len = (prev_prompt_string_len + len(tokenizer(f'''Is the following sentence 'positive' or 'negative'?
{downstream_test_examples[test_id]['text']}.
answer:''')['input_ids']))
            if (cur_prompt_string_len > 1000):
                break  # prompt budget exhausted
            selected_indices.append(idx)
        # Re-sort the kept candidates by similarity to the test embedding.
        one_test_emb = test_embs[test_id]
        indices_scores = []
        for idx in selected_indices:
            indices_scores.append([idx, cos(train_embs[sorted_indices[idx]].reshape(1, (- 1)), one_test_emb.reshape(1, (- 1))).item()])
        indices_scores = sorted(indices_scores, key=(lambda x: x[1]), reverse=True)
        new_selected_indices = [x[0] for x in indices_scores]
        if (phase2_selection in ['similar']):
            # Sanity check: re-scoring must agree with the argsort order.
            assert (new_selected_indices == selected_indices), f'new_selected_indices={new_selected_indices}, selected_indices={selected_indices}'
        selected_indices = new_selected_indices
        select_num = len(selected_indices)
        second_phase_selected_indices = []
        cur_train_data = []
        # Emit least-similar first so the most similar example sits closest to
        # the final query in the prompt.
        for idx in range((select_num - 1), (- 1), (- 1)):
            cur_train_data.append({'input': f'''Is the following sentence 'positive' or 'negative'?
{downstream_train_examples[sorted_indices[selected_indices[idx]]]['text']}.
answer:''', 'output': label_map[downstream_train_examples[sorted_indices[selected_indices[idx]]]['label']]})
            second_phase_selected_indices.append([sorted_indices[selected_indices[idx]].item(), downstream_train_examples[sorted_indices[selected_indices[idx]]]['id']])
        with open(f"{args.output_dir}/{tag}/prompts/{downstream_test_examples[test_id]['id']}.json", 'w') as f:
            json.dump([[test_id, second_phase_selected_indices, downstream_test_examples[test_id]['label']], cur_train_data, downstream_test_examples[test_id]], f, indent=4)
        bar.update(1)
_torch
_vision
class MaskFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for MaskFormerFeatureExtractor: conversion of PIL/numpy/torch
    inputs to pixel tensors, padding/pixel-mask equivalence, size
    divisibility, annotation handling and the post-processing helpers."""

    feature_extraction_class = (MaskFormerFeatureExtractor if (is_vision_available() and is_torch_available()) else None)

    def setUp(self):
        self.feature_extract_tester = MaskFormerFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        # BUGFIX: consumed as an attribute throughout this class
        # (``**self.feat_extract_dict``); as a plain method the unpacking
        # would raise TypeError, so this must be a property.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        """The extractor exposes the expected configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'image_mean'))
        self.assertTrue(hasattr(feature_extractor, 'image_std'))
        self.assertTrue(hasattr(feature_extractor, 'do_normalize'))
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))
        self.assertTrue(hasattr(feature_extractor, 'max_size'))

    def test_batch_feature(self):
        # Intentionally empty: batching is covered by the test_call_* methods.
        pass

    def test_call_pil(self):
        """PIL images: single image and batch produce correctly shaped pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, expected_height, expected_width))
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, expected_height, expected_width))

    def test_call_numpy(self):
        """numpy arrays: single image and batch produce correctly shaped pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, expected_height, expected_width))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, expected_height, expected_width))

    def test_call_pytorch(self):
        """torch tensors: single image and batch produce correctly shaped pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, expected_height, expected_width))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        (expected_height, expected_width) = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, expected_height, expected_width))

    def test_equivalence_pad_and_create_pixel_mask(self):
        """encode_inputs and a bare extractor (no resize/normalize) agree on
        pixel_values and pixel_mask."""
        feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict)
        feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images_with_method = feature_extractor_1.encode_inputs(image_inputs, return_tensors='pt')
        encoded_images = feature_extractor_2(image_inputs, return_tensors='pt')
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=0.0001))
        self.assertTrue(torch.allclose(encoded_images_with_method['pixel_mask'], encoded_images['pixel_mask'], atol=0.0001))

    def comm_get_feature_extractor_inputs(self, with_annotations=False):
        """Build extractor inputs, optionally with random mask/label annotations."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        num_classes = 8
        batch_size = self.feature_extract_tester.batch_size
        annotations = None
        if with_annotations:
            annotations = [{'masks': np.random.rand(num_classes, 384, 384).astype(np.float32), 'labels': (np.random.rand(num_classes) > 0.5).astype(np.int64)} for _ in range(batch_size)]
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        inputs = feature_extractor(image_inputs, annotations, return_tensors='pt', pad_and_return_pixel_mask=True)
        return inputs

    def test_with_size_divisibility(self):
        """Output spatial dims are multiples of size_divisibility for odd input sizes."""
        size_divisibilities = [8, 16, 32]
        weird_input_sizes = [(407, 802), (582, 1094)]
        for size_divisibility in size_divisibilities:
            feat_extract_dict = {**self.feat_extract_dict, **{'size_divisibility': size_divisibility}}
            feature_extractor = self.feature_extraction_class(**feat_extract_dict)
            for weird_input_size in weird_input_sizes:
                inputs = feature_extractor([np.ones((3, *weird_input_size))], return_tensors='pt')
                pixel_values = inputs['pixel_values']
                self.assertTrue(((pixel_values.shape[(- 1)] % size_divisibility) == 0))
                self.assertTrue(((pixel_values.shape[(- 2)] % size_divisibility) == 0))

    def test_call_with_numpy_annotations(self):
        """Annotations produce mask_labels/class_labels aligned with pixel_values."""
        num_classes = 8
        batch_size = self.feature_extract_tester.batch_size
        inputs = self.comm_get_feature_extractor_inputs(with_annotations=True)
        for el in inputs.values():
            self.assertEqual(el.shape[0], batch_size)
        pixel_values = inputs['pixel_values']
        mask_labels = inputs['mask_labels']
        class_labels = inputs['class_labels']
        self.assertEqual(pixel_values.shape[(- 2)], mask_labels.shape[(- 2)])
        self.assertEqual(pixel_values.shape[(- 1)], mask_labels.shape[(- 1)])
        self.assertEqual(mask_labels.shape[1], class_labels.shape[1])
        self.assertEqual(mask_labels.shape[1], num_classes)

    def test_post_process_segmentation(self):
        """post_process_segmentation honours the default and explicit target sizes."""
        feature_extractor = self.feature_extraction_class()
        outputs = self.feature_extract_tester.get_fake_maskformer_outputs()
        segmentation = feature_extractor.post_process_segmentation(outputs)
        self.assertEqual(segmentation.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_classes, self.feature_extract_tester.height, self.feature_extract_tester.width))
        target_size = (1, 4)
        segmentation = feature_extractor.post_process_segmentation(outputs, target_size=target_size)
        self.assertEqual(segmentation.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_classes, *target_size))

    def test_post_process_semantic_segmentation(self):
        """Semantic post-processing collapses the class dimension."""
        feature_extractor = self.feature_extraction_class()
        outputs = self.feature_extract_tester.get_fake_maskformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
        self.assertEqual(segmentation.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.height, self.feature_extract_tester.width))
        target_size = (1, 4)
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_size=target_size)
        self.assertEqual(segmentation.shape, (self.feature_extract_tester.batch_size, *target_size))

    def test_post_process_panoptic_segmentation(self):
        """Panoptic post-processing yields one dict per batch item with
        'segmentation' and a list of 'segments'."""
        feature_extractor = self.feature_extraction_class()
        outputs = self.feature_extract_tester.get_fake_maskformer_outputs()
        segmentation = feature_extractor.post_process_panoptic_segmentation(outputs, object_mask_threshold=0)
        self.assertTrue((len(segmentation) == self.feature_extract_tester.batch_size))
        for el in segmentation:
            self.assertTrue(('segmentation' in el))
            self.assertTrue(('segments' in el))
            self.assertEqual(type(el['segments']), list)
            self.assertEqual(el['segmentation'].shape, (self.feature_extract_tester.height, self.feature_extract_tester.width))
class BenchmarkTestCase(unittest.TestCase):
def tearDown(self):
    """Remove moonshot pickle files a test run left behind in TMP_DIR."""
    for file in glob.glob('{0}/moonshot*.pkl'.format(TMP_DIR)):
        os.remove(file)
def test_complain_if_no_price_fields_for_benchmark(self):
    """Backtest raises MoonshotParameterError when the price data has no
    price-like field (Close/Open/Bid/Ask/High/Low) to extract BENCHMARK from."""
    class BuyAndHold(Moonshot):
        CODE = 'buy-and-hold'
        DB = 'sample-stk-1d'
        BENCHMARK = 'FI12345'

        def prices_to_signals(self, prices):
            signals = pd.DataFrame(1, index=prices.loc['Volume'].index, columns=prices.loc['Volume'].columns)
            return signals

        def positions_to_gross_returns(self, positions, prices):
            return pd.DataFrame(0, index=positions.index, columns=positions.columns)

    def mock_get_prices(*args, **kwargs):
        # Volume-only panel: deliberately no usable benchmark price field.
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
        fields = ['Volume']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [5000, 16000, 8800], 'FI23456': [15000, 14000, 28800]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotParameterError) as cm:
                BuyAndHold().backtest()

    self.assertIn('Cannot extract BENCHMARK FI12345 from sample-stk-1d data without one of Close, Open, Bid, Ask, High, Low', repr(cm.exception))
def test_complain_if_benchmark_sid_missing(self):
    """Backtest raises MoonshotError when BENCHMARK names a sid absent from
    the price data."""
    class BuyBelow10(Moonshot):
        CODE = 'buy-below-10'
        DB = 'sample-stk-1d'
        BENCHMARK = 99999  # deliberately not a sid in the mocked prices

        def prices_to_signals(self, prices):
            signals = (prices.loc['Close'] < 10)
            return signals.astype(int)

    def mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
        fields = ['Close', 'Volume']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotError) as cm:
                BuyBelow10().backtest()

    self.assertIn('BENCHMARK Sid 99999 is not in sample-stk-1d data', repr(cm.exception))
def test_benchmark_eod(self):
    """With an end-of-day BENCHMARK, results gain a 'Benchmark' field holding
    the benchmark sid's daily returns (NaN for the other sids)."""
    class BuyBelow10(Moonshot):
        CODE = 'buy-below-10'
        BENCHMARK = 'FI23456'

        def prices_to_signals(self, prices):
            signals = (prices.loc['Close'] < 10)
            return signals.astype(int)

    def mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
        fields = ['Close', 'Volume']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = BuyBelow10().backtest()

    self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight', 'Benchmark'})
    # Normalize NaNs to the string 'nan' so the dict comparison is exact.
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI23456'] = benchmarks['FI23456'].astype(float).round(7)
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 'nan', 'nan', 'nan'], 'FI23456': [0.0, 0.1122346, (- 0.2272727), 0.2352941]})
# NOTE(review): the two bare parenthesized strings below look like stripped
# '@patch(...)' decorators (extraction artifact); the mock_* parameters in the
# signature support that reading -- restore the '@patch' prefix before running.
('moonshot.strategies.base.get_prices')
('moonshot.strategies.base.download_master_file')
def test_request_benchmark_sid_if_universes_or_sids(self, mock_download_master_file, mock_get_prices):
    """When SIDS or UNIVERSES are set, the BENCHMARK sid is appended to the
    sids requested from get_prices."""
    class BuyBelow10(Moonshot):
        CODE = 'buy-below-10'
        BENCHMARK = 'FI34567'
        SIDS = ['FI12345', 'FI23456']

        def prices_to_signals(self, prices):
            signals = (prices.loc['Close'] < 10)
            return signals.astype(int)

    class BuyBelow10Universe(BuyBelow10):
        CODE = 'buy-below-10-universe'
        SIDS = None
        UNIVERSES = 'my-universe'

    def _mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
        fields = ['Close', 'Volume']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000], 'FI34567': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
        return prices

    def _mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        # NOTE(review): 'FI23456' appears twice in this dict literal (the
        # second entry wins); the second key was probably meant to be
        # 'FI34567' -- confirm upstream before changing.
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'XYZ', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    mock_download_master_file.side_effect = _mock_download_master_file
    mock_get_prices.return_value = _mock_get_prices()
    results = BuyBelow10().backtest()
    results = BuyBelow10Universe().backtest()
    # First call: explicit SIDS plus the benchmark sid, no universes.
    get_prices_call_1 = mock_get_prices.mock_calls[0]
    (_, args, kwargs) = get_prices_call_1
    self.assertEqual(kwargs['sids'], ['FI12345', 'FI23456', 'FI34567'])
    self.assertIsNone(kwargs['universes'])
    # Second call: universe-based strategy requests only the benchmark sid.
    get_prices_call_2 = mock_get_prices.mock_calls[1]
    (_, args, kwargs) = get_prices_call_2
    self.assertEqual(kwargs['sids'], ['FI34567'])
    self.assertEqual(kwargs['universes'], 'my-universe')
# NOTE(review): the two bare parenthesized strings below look like stripped
# '@patch(...)' decorators (extraction artifact); the mock_* parameters in the
# signature support that reading -- restore the '@patch' prefix before running.
('moonshot.strategies.base.get_prices')
('moonshot.strategies.base.download_master_file')
def test_dont_request_benchmark_sid_if_no_universes_or_sids(self, mock_download_master_file, mock_get_prices):
    """Without SIDS or UNIVERSES, the BENCHMARK sid is NOT added to the
    get_prices request."""
    class BuyBelow10(Moonshot):
        CODE = 'buy-below-10'
        BENCHMARK = 'FI34567'

        def prices_to_signals(self, prices):
            signals = (prices.loc['Close'] < 10)
            return signals.astype(int)

    def _mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
        fields = ['Close', 'Volume']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000], 'FI34567': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
        return prices

    def _mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        # NOTE(review): 'FI23456' appears twice in this dict literal (the
        # second entry wins); the second key was probably meant to be
        # 'FI34567' -- confirm upstream before changing.
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'XYZ', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    mock_download_master_file.side_effect = _mock_download_master_file
    mock_get_prices.return_value = _mock_get_prices()
    results = BuyBelow10().backtest()
    get_prices_call_1 = mock_get_prices.mock_calls[0]
    (_, args, kwargs) = get_prices_call_1
    self.assertEqual(kwargs['sids'], [])
    self.assertIsNone(kwargs['universes'])
def test_benchmark_eod_with_benchmark_db(self):
    """With BENCHMARK_DB set, benchmark returns come from the separate DB and
    are reported under the first sid column of the strategy's data."""
    class BuyBelow10(Moonshot):
        CODE = 'buy-below-10'
        DB = 'demo-stk-1d'
        BENCHMARK = 'FI34567'
        BENCHMARK_DB = 'etf-1d'

        def prices_to_signals(self, prices):
            signals = (prices.loc['Close'] < 10)
            return signals.astype(int)

    def mock_get_prices(codes, *args, **kwargs):
        # Serve the strategy DB and the benchmark DB with different panels.
        if (BuyBelow10.DB in codes):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
            fields = ['Close', 'Volume']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
            return prices
        else:
            # Benchmark DB: one fewer date than the strategy DB.
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI34567': [199.6, 210.45, 210.12]}, index=idx)
            return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = BuyBelow10().backtest()

    self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight', 'Benchmark'})
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI12345'] = benchmarks['FI12345'].astype(float).round(7)
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.0543587, (- 0.0015681), 0.0], 'FI23456': ['nan', 'nan', 'nan', 'nan']})
def test_complain_if_once_a_day_intraday_and_no_benchmark_time(self):
    """Intraday prices without BENCHMARK_TIME raise MoonshotParameterError."""
    class ShortAbove10Intraday(Moonshot):
        CODE = 'short-above-10'
        DB = 'sample-stk-15min'
        BENCHMARK = 'FI12345'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter at the 09:30 close, exit at the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
        fields = ['Close', 'Open']
        times = ['09:30:00', '15:30:00']
        idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
        prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotParameterError) as cm:
                ShortAbove10Intraday().backtest()

    self.assertIn('Cannot extract BENCHMARK FI12345 from sample-stk-15min data because prices contains intraday prices but no BENCHMARK_TIME specified', repr(cm.exception))
def test_complain_if_benchmark_time_not_in_data(self):
    """A BENCHMARK_TIME absent from the intraday data raises MoonshotError."""
    class ShortAbove10Intraday(Moonshot):
        CODE = 'short-above-10'
        DB = 'sample-stk-15min'
        BENCHMARK = 'FI12345'
        BENCHMARK_TIME = '15:45:00'  # deliberately not among the mocked times

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter at the 09:30 close, exit at the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
        fields = ['Close', 'Open']
        times = ['09:30:00', '15:30:00']
        idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
        prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotError) as cm:
                ShortAbove10Intraday().backtest()

    self.assertIn('BENCHMARK_TIME 15:45:00 is not in sample-stk-15min data', repr(cm.exception))
def test_complain_if_intraday_benchmark_db(self):
    """An intraday BENCHMARK_DB raises MoonshotError (only EOD DBs supported)."""
    class ShortAbove10Intraday(Moonshot):
        CODE = 'short-above-10'
        DB = 'sample-stk-15min'
        BENCHMARK = 'FI12345'
        BENCHMARK_DB = 'etf-15min'
        BENCHMARK_TIME = '15:45:00'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter at the 09:30 close, exit at the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(codes, *args, **kwargs):
        # Serve the strategy DB and the (intraday) benchmark DB separately.
        if (ShortAbove10Intraday.DB in codes):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close', 'Open']
            times = ['09:30:00', '15:30:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI34567': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
            return prices
        else:
            # Benchmark DB panel carries a Time level, i.e. it is intraday.
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close']
            times = ['09:30:00', '12:30:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3]}, index=idx)
            return prices

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotError) as cm:
                ShortAbove10Intraday().backtest()

    self.assertIn('only end-of-day databases are supported for BENCHMARK_DB but etf-15min is intraday', repr(cm.exception))
def test_complain_if_error_querying_benchmark_db(self):
    """A NoHistoricalData error from the BENCHMARK_DB query must surface
    as a MoonshotError naming the benchmark database.
    """
    class ShortAbove10Intraday(Moonshot):
        # Strategy under test: shorts sids whose 09:30 open is above 10
        # and references an external benchmark database.
        CODE = 'short-above-10'
        DB = 'sample-stk-15min'
        BENCHMARK = 'FI12345'
        BENCHMARK_DB = 'etf-15min'
        BENCHMARK_TIME = '15:45:00'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter on the 09:30 close, exit on the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(codes, *args, **kwargs):
        # Strategy DB query succeeds; the BENCHMARK_DB query raises, which
        # is the failure mode under test.
        if (ShortAbove10Intraday.DB in codes):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close', 'Open']
            times = ['09:30:00', '15:30:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI34567': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
            return prices
        else:
            raise NoHistoricalData(requests.HTTPError('No history matches the query parameters'))

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            with self.assertRaises(MoonshotError) as cm:
                ShortAbove10Intraday().backtest()
    self.assertIn('error querying BENCHMARK_DB etf-15min: NoHistoricalData', repr(cm.exception))
def test_benchmark_once_a_day_intraday(self):
    """With BENCHMARK_TIME set, an intraday strategy should produce one
    benchmark return per day, taken from the BENCHMARK sid's price at
    that time of day.
    """
    class ShortAbove10Intraday(Moonshot):
        # BENCHMARK sid comes from the strategy's own (intraday) DB;
        # BENCHMARK_TIME selects the 15:30 bar for the daily benchmark.
        CODE = 'short-above-10'
        BENCHMARK = 'FI12345'
        BENCHMARK_TIME = '15:30:00'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter on the 09:30 close, exit on the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(*args, **kwargs):
        # Three days of two intraday bars for two sids.
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
        fields = ['Close', 'Open']
        times = ['09:30:00', '15:30:00']
        idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
        prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = ShortAbove10Intraday().backtest()
    # Normalize NaNs to the string 'nan' so the frame can be compared as
    # plain dicts below.
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI12345'] = benchmarks['FI12345'].astype(float).round(7)
    # One benchmark return per day, computed from FI12345's 15:30 bars;
    # the non-benchmark column stays NaN.
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00'], 'FI12345': [0.0, 0.4784689, (- 0.2038835)], 'FI23456': ['nan', 'nan', 'nan']})
def test_benchmark_once_a_day_intraday_with_benchmark_db(self):
    """An intraday strategy with an end-of-day BENCHMARK_DB should produce
    one benchmark return per day from the external database's sid.
    """
    class ShortAbove10Intraday(Moonshot):
        # BENCHMARK sid FI34567 lives in the external end-of-day DB
        # 'etf-1d', not in the strategy's own intraday DB.
        CODE = 'short-above-10'
        DB = 'demo-stk-15min'
        BENCHMARK = 'FI34567'
        BENCHMARK_DB = 'etf-1d'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open'].xs('09:30:00', level='Time')
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # Enter on the 09:30 close, exit on the 15:30 close.
            closes = prices.loc['Close']
            entry_prices = closes.xs('09:30:00', level='Time')
            exit_prices = closes.xs('15:30:00', level='Time')
            pct_changes = ((exit_prices - entry_prices) / entry_prices)
            gross_returns = (pct_changes * positions)
            return gross_returns

    def mock_get_prices(codes, *args, **kwargs):
        # Strategy DB query -> intraday frame; BENCHMARK_DB query -> an
        # end-of-day (Date-only indexed) frame for the benchmark sid.
        if (ShortAbove10Intraday.DB in codes):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close', 'Open']
            times = ['09:30:00', '15:30:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3, 9.88, 10.34, 10.23, 16.45, 8.9, 11.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 14.5, 9.89, 11, 8.5, 10.5, 14.1, 15.6]}, index=idx)
            return prices
        else:
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
            fields = ['Close']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI34567': [199.6, 210.45, 210.12]}, index=idx)
            return prices

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = ShortAbove10Intraday().backtest()
    # Normalize NaNs to 'nan' strings so the frame can be dict-compared.
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI12345'] = benchmarks['FI12345'].astype(float).round(7)
    # NOTE(review): the external benchmark's daily returns (199.6 -> 210.45
    # -> 210.12) appear under the 'FI12345' column of the results frame —
    # presumably the benchmark series is placed in the first strategy
    # column; confirm against the results-layout code.
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00'], 'FI12345': [0.0, 0.0543587, (- 0.0015681)], 'FI23456': ['nan', 'nan', 'nan']})
@patch('moonshot.strategies.base.get_prices')
def test_pass_benchmark_db_args_correctly(self, mock_get_prices):
    """The BENCHMARK_DB query must pass the db code, benchmark sid, the
    'Close' field and the strategy's data frequency to get_prices.

    get_prices is patched as a mock (restored decorator: the bare
    ``('moonshot.strategies.base.get_prices')`` line was a garbled
    ``@patch``) so the second call — the benchmark query — can be
    inspected for its positional and keyword arguments.
    """
    class ShortAbove10Intraday(Moonshot):
        # Daily strategy with an external benchmark database.
        CODE = 'short-above-10'
        BENCHMARK = 'FI12345'
        BENCHMARK_DB = 'benchmark-db'
        DB_DATA_FREQUENCY = 'daily'

        def prices_to_signals(self, prices):
            morning_prices = prices.loc['Open']
            short_signals = (morning_prices > 10)
            return (- short_signals.astype(int))

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            closes = prices.loc['Close']
            gross_returns = (closes.pct_change() * positions.shift())
            return gross_returns

    def _mock_get_prices(*args, **kwargs):
        # End-of-day prices for both calls; this test only inspects how
        # get_prices is invoked, not the returned data.
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03'])
        fields = ['Close', 'Open']
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
        prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 9.88, 10.34, 10.23], 'FI23456': [10.56, 12.01, 10.5, 9.89, 11, 8.5]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    mock_get_prices.side_effect = _mock_get_prices
    with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
        results = ShortAbove10Intraday().backtest()
    # First call loads the strategy prices; the second queries BENCHMARK_DB.
    self.assertEqual(len(mock_get_prices.mock_calls), 2)
    benchmark_get_prices_call = mock_get_prices.mock_calls[1]
    (_, args, kwargs) = benchmark_get_prices_call
    self.assertEqual(args[0], 'benchmark-db')
    self.assertEqual(kwargs['sids'], 'FI12345')
    self.assertEqual(kwargs['fields'], 'Close')
    self.assertEqual(kwargs['data_frequency'], 'daily')
def test_benchmark_continuous_intraday(self):
    """Without BENCHMARK_TIME, a continuous intraday strategy should get
    one benchmark return per intraday bar, taken from the BENCHMARK sid.
    """
    class BuyBelow10ShortAbove10ContIntraday(Moonshot):
        # Long below 10, short above 10, evaluated on every bar.
        BENCHMARK = 'FI23456'

        def prices_to_signals(self, prices):
            long_signals = (prices.loc['Close'] <= 10)
            short_signals = (prices.loc['Close'] > 10)
            signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
            return signals

    def mock_get_prices(*args, **kwargs):
        # Two days of three hourly bars for two sids.
        dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02'])
        fields = ['Close']
        times = ['10:00:00', '11:00:00', '12:00:00']
        idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
        prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 7.5]}, index=idx)
        return prices

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = BuyBelow10ShortAbove10ContIntraday().backtest()
    # Normalize NaNs to 'nan' strings so the frame can be dict-compared.
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI23456'] = benchmarks['FI23456'].astype(float).round(7)
    # Bar-by-bar returns of FI23456; the non-benchmark column stays NaN.
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': ['nan', 'nan', 'nan', 'nan', 'nan', 'nan'], 'FI23456': [0.0, 0.1373106, (- 0.1257286), (- 0.0666667), 0.3673469, (- 0.4402985)]})
def test_benchmark_continuous_intraday_with_benchmark_db(self):
    """A continuous intraday strategy with an end-of-day BENCHMARK_DB
    should see the daily benchmark return only on the first bar of each
    day, with the remaining bars at 0.
    """
    class BuyBelow10ShortAbove10ContIntraday(Moonshot):
        # BENCHMARK sid FI34567 lives in the external end-of-day DB.
        DB = 'demo-stk-15min'
        BENCHMARK = 'FI34567'
        BENCHMARK_DB = 'etf-1d'

        def prices_to_signals(self, prices):
            long_signals = (prices.loc['Close'] <= 10)
            short_signals = (prices.loc['Close'] > 10)
            signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
            return signals

    def mock_get_prices(codes, *args, **kwargs):
        # Strategy DB query -> intraday frame; BENCHMARK_DB query -> an
        # end-of-day (Date-only indexed) frame for the benchmark sid.
        if (BuyBelow10ShortAbove10ContIntraday.DB in codes):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02'])
            fields = ['Close']
            times = ['10:00:00', '11:00:00', '12:00:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 7.5]}, index=idx)
            return prices
        else:
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02'])
            fields = ['Close']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI34567': [199.6, 210.45]}, index=idx)
            return prices

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master for the two strategy sids.
        master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
        securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
        securities.columns.name = 'Sid'
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
        with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
            results = BuyBelow10ShortAbove10ContIntraday().backtest()
    # Normalize NaNs to 'nan' strings so the frame can be dict-compared.
    results = results.where(results.notnull(), 'nan')
    benchmarks = results.loc['Benchmark'].reset_index()
    benchmarks['Date'] = benchmarks.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
    benchmarks['FI12345'] = benchmarks['FI12345'].astype(float).round(7)
    # NOTE(review): the external benchmark's daily return (199.6 -> 210.45
    # = 0.0543587) shows up on the first bar of 2018-05-02 under the
    # 'FI12345' column — presumably the benchmark series is placed in the
    # first strategy column; confirm against the results-layout code.
    self.assertDictEqual(benchmarks.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': [0.0, 0.0, 0.0, 0.0543587, 0.0, 0.0], 'FI23456': ['nan', 'nan', 'nan', 'nan', 'nan', 'nan']})
def get_config_from_root(root):
    """Read the ``[versioneer]`` section of ``setup.cfg`` under *root*.

    Returns a populated ``VersioneerConfig``. The ``VCS`` key is mandatory
    and a missing key/section raises via configparser; every other key is
    optional and defaults to ``None`` (or ``''`` for ``style``).
    """
    cfg_path = os.path.join(root, 'setup.cfg')
    parser = configparser.ConfigParser()
    with open(cfg_path, 'r') as handle:
        parser.read_file(handle)
    # VCS is required: let configparser raise if it is absent.
    vcs = parser.get('versioneer', 'VCS')

    def lookup(name):
        # Optional settings come back as None when not present.
        if not parser.has_option('versioneer', name):
            return None
        return parser.get('versioneer', name)

    cfg = VersioneerConfig()
    cfg.VCS = vcs
    cfg.style = lookup('style') or ''
    cfg.versionfile_source = lookup('versionfile_source')
    cfg.versionfile_build = lookup('versionfile_build')
    cfg.tag_prefix = lookup('tag_prefix')
    if cfg.tag_prefix in ("''", '""'):
        # Allow an explicitly-quoted empty tag prefix in setup.cfg.
        cfg.tag_prefix = ''
    cfg.parentdir_prefix = lookup('parentdir_prefix')
    cfg.verbose = lookup('verbose')
    return cfg
def setup() -> None:
    """Configure root logging for the bot.

    Optionally adds a rotating file handler, installs coloredlogs on
    stdout (without clobbering user-supplied COLOREDLOGS_* env settings),
    sets the root level from DEBUG_MODE, and applies trace-logger
    overrides. Order matters: handlers are attached before coloredlogs
    installs its own stream handler.
    """
    root_log = get_logger()
    if constants.FILE_LOGS:
        # Rotate at 5 MiB (5242880 bytes), keeping 7 backups.
        log_file = Path('logs', 'bot.log')
        log_file.parent.mkdir(exist_ok=True)
        file_handler = handlers.RotatingFileHandler(log_file, maxBytes=5242880, backupCount=7, encoding='utf8')
        file_handler.setFormatter(core_logging.log_format)
        root_log.addHandler(file_handler)
    if ('COLOREDLOGS_LEVEL_STYLES' not in os.environ):
        # Only override the defaults when the user has not configured
        # their own level styles via the environment.
        coloredlogs.DEFAULT_LEVEL_STYLES = {**coloredlogs.DEFAULT_LEVEL_STYLES, 'trace': {'color': 246}, 'critical': {'background': 'red'}, 'debug': coloredlogs.DEFAULT_LEVEL_STYLES['info']}
    if ('COLOREDLOGS_LOG_FORMAT' not in os.environ):
        # Reuse the project formatter's format string for console output.
        coloredlogs.DEFAULT_LOG_FORMAT = core_logging.log_format._fmt
    coloredlogs.install(level=core_logging.TRACE_LEVEL, logger=root_log, stream=sys.stdout)
    root_log.setLevel((logging.DEBUG if constants.DEBUG_MODE else logging.INFO))
    _set_trace_loggers()
def create_fixtures(dev: SmartDevice, outputdir: Path):
    """Dump each module's raw data for *dev* as a JSON fixture.

    Fixtures land in ``outputdir/<module>/<model>_<hw>_<sw>.json``;
    existing files are never overwritten.
    """
    for module_name, module in dev.modules.items():
        target_dir = outputdir / module_name
        if not target_dir.exists():
            target_dir.mkdir(exist_ok=True, parents=True)
        # Keep only the version number from e.g. "1.2.3 Build 4" strings.
        firmware = dev.hw_info['sw_ver'].split(' ', maxsplit=1)[0]
        fixture_name = f"{dev.model}_{dev.hw_info['hw_ver']}_{firmware}.json"
        fixture_file = target_dir / fixture_name
        if fixture_file.exists():
            continue
        typer.echo(f'Creating {fixture_file} for {dev.model}')
        with fixture_file.open('w') as out:
            json.dump(module.data, out, indent=4)
def get_fslocation_from_item(node: 'Node') -> Tuple[(Union[(str, Path)], Optional[int])]:
    """Best-effort (filesystem path, line number) for a collection node.

    Preference order: an explicit ``location`` attribute (first two
    elements), then the underlying ``obj``'s source location, then the
    node's ``fspath`` with line -1 (or ``'unknown location'`` if even
    that is missing).
    """
    explicit = getattr(node, 'location', None)
    if explicit is not None:
        # location is (fspath, lineno, domaininfo); drop the domain part.
        return explicit[:2]
    underlying = getattr(node, 'obj', None)
    if underlying is not None:
        return getfslineno(underlying)
    return (getattr(node, 'fspath', 'unknown location'), -1)
class RuleAPITests(AuthenticatedAPITestCase):
    """Tests for the rules API endpoint."""

    def setUp(self):
        super().setUp()
        # Drop the authentication the base class set up: these tests
        # exercise the endpoint as an anonymous client.
        self.client.force_authenticate(user=None)

    def test_can_access_rules_view(self):
        """The rules endpoint returns 200 and a JSON list."""
        url = reverse('api:rules')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.json(), list)

    def test_link_format_query_param_produces_different_results(self):
        """Markdown and HTML link formats must render differently."""
        url = reverse('api:rules')
        markdown_links_response = self.client.get((url + '?link_format=md'))
        html_links_response = self.client.get((url + '?link_format=html'))
        self.assertNotEqual(markdown_links_response.json(), html_links_response.json())

    def test_format_link_raises_value_error_for_invalid_target(self):
        """_format_link rejects unknown target formats."""
        with self.assertRaises(ValueError):
            RulesView._format_link('a', 'b', 'c')

    def test_get_returns_400_for_wrong_link_format(self):
        """An unrecognized link_format query param yields HTTP 400."""
        url = reverse('api:rules')
        response = self.client.get((url + '?link_format=unknown'))
        self.assertEqual(response.status_code, 400)
@pytest.mark.parametrize('subcommand, text, completions', [('create', '', ['jazz', 'rock']), ('create', 'ja', ['jazz ']), ('create', 'foo', []), ('creab', 'ja', [])])
def test_subcommand_completions(ac_app, subcommand, text, completions):
    """Tab completion of subcommand arguments.

    Restored decorator: the bare ``.parametrize(...)`` line was a garbled
    ``@pytest.mark.parametrize``. Cases cover: empty prefix (all choices),
    unique prefix, non-matching prefix, and an unknown subcommand.
    """
    line = 'music {} {}'.format(subcommand, text)
    endidx = len(line)
    begidx = (endidx - len(text))
    first_match = complete_tester(text, line, begidx, endidx, ac_app)
    if completions:
        assert (first_match is not None)
    else:
        assert (first_match is None)
    # The app's match list must equal the expected completions in the
    # app's own sort order.
    assert (ac_app.completion_matches == sorted(completions, key=ac_app.default_sort_key))
class JalaliDateFormatter(BaseFormatter):
    """Formats and parses Jalali (Persian) dates with strftime-style directives.

    Restored decorators: ``parser_regex`` and ``post_parsers`` are consumed
    as attributes (``re.match(self.parser_regex, ...)`` and
    ``for name in self.post_parsers``), so they must be properties, and
    ``filter_persian_digit`` is called with a single argument, so it must
    be a staticmethod — the ``@property``/``@staticmethod`` markers were
    lost in this copy of the file.
    """

    # Directive names whose parsed values need a post-processing pass,
    # applied in this order after the regex match.
    _post_parsers = ['persianday', 'persiandayzeropadded', 'persiandayofyear', 'persiandayofyearzeropadded', 'persianmonth', 'persianmonthzeropadded', 'persianyear', 'persianyearzeropadded', 'persianshortyear', 'persianshortyearzeropadded', 'localdateformat', 'monthabbr', 'monthabbr_ascii', 'monthname', 'monthnameascii', 'shortyear', 'dayofyear']

    def __init__(self, format_string, directive_db=None):
        """Build a formatter for *format_string*.

        :param format_string: strftime-style format string.
        :param directive_db: optional iterable of directive objects;
            defaults to ``DATE_FORMAT_DIRECTIVES``.
        """
        if (not directive_db):
            directive_db = DATE_FORMAT_DIRECTIVES
        self.format_string = format_string
        self.directives = directive_db
        self.directives_by_key = {d.key: d for d in self.directives}
        self.directives_by_name = {d.name: d for d in self.directives}
        self._parser_regex = self._create_parser_regex()

    def _create_parser_regex(self):
        """Translate the format string into an anchored parsing regex."""
        regex = u'^'
        index = 0
        for m in re.finditer(consts.FORMAT_DIRECTIVE_REGEX, self.format_string):
            directive_key = m.group()[1:]
            if (directive_key not in self.directives_by_key):
                # Unknown directive: leave the text as-is.
                continue
            directive = self.directives_by_key[directive_key]
            if (index < m.start()):
                # Copy literal text between directives verbatim.
                regex += self.format_string[index:m.start()]
            index = m.end()
            if (directive.key == u'%'):
                # '%%' escapes a literal percent sign.
                regex += u'%'
                continue
            regex += (u'(?P<%(group_name)s>%(regexp)s)' % dict(group_name=directive.key, regexp=directive.regex))
        regex += self.format_string[index:]
        regex += u'$'
        return regex

    @property
    def parser_regex(self):
        """The anchored regex string used by :meth:`parse`."""
        return self._parser_regex

    def iter_format_directives(self):
        """Yield (match, directive) pairs for each known directive."""
        for m in re.finditer(consts.FORMAT_DIRECTIVE_REGEX, self.format_string):
            key = m.group()[1:]
            if (key in self.directives_by_key):
                (yield (m, self.directives_by_key[key]))

    def format(self, jalali_date):
        """Render *jalali_date* according to the format string."""
        result = ''
        index = 0
        for (match, directive) in self.iter_format_directives():
            if (index < match.start()):
                result += self.format_string[index:match.start()]
            result += directive.format(jalali_date)
            index = match.end()
        result += self.format_string[index:]
        return result

    @staticmethod
    def filter_persian_digit(s):
        """Replace Persian digit characters in *s* with their ASCII forms."""
        for (p, e) in consts.PERSIAN_DIGIT_MAPPING:
            s = s.replace(p[1], p[0])
        return s

    def _parse(self, date_string):
        """Match *date_string* against the parser regex.

        :raises ValueError: when the string does not match the format.
        :returns: dict of directive name -> typed value.
        """
        m = re.match(self.parser_regex, self.filter_persian_digit(date_string))
        if (not m):
            raise ValueError((u"time data '%s' does not match format '%s' with generated regex: '%s'" % (date_string, self.format_string, self.parser_regex)))
        result = {}
        for (directive_key, v) in m.groupdict().items():
            directive = self.directives_by_key[directive_key]
            result[directive.name] = directive.type_(v)
        return result

    @property
    def post_parsers(self):
        """Ordered directive names to post-process after parsing."""
        return self._post_parsers

    def _parse_post_processor(self, parse_result):
        """Run each applicable directive's post_parser over the result."""
        for directive_name in self.post_parsers:
            if (directive_name in parse_result):
                self.directives_by_name[directive_name].post_parser(parse_result, self)

    def parse(self, date_string):
        """Parse *date_string* and return the post-processed result dict."""
        result = self._parse(date_string)
        self._parse_post_processor(result)
        return result
def _get_package(pl_name, version, robust, use_v8):
    """Download one platform's pdfium release archive into the data dir.

    Returns ``(pl_name, archive_path)`` on success. When *robust* is true
    a failed download is logged and ``(None, None)`` is returned instead
    of raising.
    """
    target_dir = DataDir / pl_name
    target_dir.mkdir(parents=True, exist_ok=True)
    prefix = 'pdfium-v8-' if use_v8 else 'pdfium-'
    archive_name = prefix + f'{ReleaseNames[pl_name]}.tgz'
    archive_url = f'{ReleaseURL}{version}/{archive_name}'
    archive_path = target_dir / archive_name
    print(f"'{archive_url}' -> '{archive_path}'")
    try:
        url_request.urlretrieve(archive_url, archive_path)
    except Exception:
        if not robust:
            raise
        # Best-effort mode: report the failure and carry on.
        traceback.print_exc()
        return (None, None)
    return (pl_name, archive_path)
def integration_value(direction: Direction, subsystem: Subsystem, partition: Cut, system_state: SystemStateSpecification, repertoire_distance: Optional[str]=None) -> float:
    """Evaluate *partition* of *subsystem* in the given causal *direction*.

    The partition is applied to the subsystem, a partitioned repertoire is
    computed over the whole subsystem, and the partition is scored with
    ``evaluate_partition`` under the chosen repertoire distance (falling
    back to ``config.REPERTOIRE_DISTANCE`` when not given).
    """
    repertoire_distance = fallback(repertoire_distance, config.REPERTOIRE_DISTANCE)
    cut_subsystem = subsystem.apply_cut(partition)
    if (repertoire_distance == 'GENERALIZED_INTRINSIC_DIFFERENCE'):
        # GID works on the forward repertoire evaluated at the specified
        # system state; squeeze drops singleton axes before indexing.
        # NOTE(review): assumes system_state[direction].state is a valid
        # index into the squeezed repertoire — confirm against callers.
        partitioned_repertoire = cut_subsystem.forward_repertoire(direction, subsystem.node_indices, subsystem.node_indices).squeeze()[system_state[direction].state]
    else:
        partitioned_repertoire = cut_subsystem.repertoire(direction, subsystem.node_indices, subsystem.node_indices)
    return subsystem.evaluate_partition(direction, subsystem.node_indices, subsystem.node_indices, partition, partitioned_repertoire=partitioned_repertoire, repertoire_distance=repertoire_distance, state=system_state[direction])
@add_start_docstrings('The bare MGP-STR Model transformer outputting raw hidden-states without any specific head on top.', MGP_STR_START_DOCSTRING)
class MgpstrModel(MgpstrPreTrainedModel):
    """Bare MGP-STR encoder: patch embeddings followed by the transformer encoder.

    Restored decorators: the bare ``_start_docstrings(...)`` calls in this
    copy were garbled ``@add_start_docstrings`` /
    ``@add_start_docstrings_to_model_forward`` decorators.
    """

    def __init__(self, config: MgpstrConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = MgpstrEmbeddings(config)
        self.encoder = MgpstrEncoder(config)

    def get_input_embeddings(self) -> nn.Module:
        # The patch projection layer serves as the input embedding module.
        return self.embeddings.proj

    @add_start_docstrings_to_model_forward(MGP_STR_INPUTS_DOCSTRING)
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Encode *pixel_values*; config values supply defaults for the
        output flags when they are not passed explicitly.

        :raises ValueError: if ``pixel_values`` is None.
        """
        output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if (not return_dict):
            return encoder_outputs
        return BaseModelOutput(last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
def score_bw(args):
    """Rescoring step 4: score generated translations with one or two models.

    For each scoring model, picks source/target languages (swapped for
    backwards models), selects the matching preprocessed data directory,
    and runs fairseq ``generate`` in --score-reference mode, writing the
    scores to per-model files. Scoring is skipped when the score file
    already exists or when the scoring model is the generator itself.
    """
    # Backwards model 1 scores target->source; otherwise source->target.
    if args.backwards1:
        scorer1_src = args.target_lang
        scorer1_tgt = args.source_lang
    else:
        scorer1_src = args.source_lang
        scorer1_tgt = args.target_lang
    if (args.score_model2 is not None):
        if args.backwards2:
            scorer2_src = args.target_lang
            scorer2_tgt = args.source_lang
        else:
            scorer2_src = args.source_lang
            scorer2_tgt = args.target_lang
    # A scoring model identical to the generator (with no source prefix
    # truncation) already produced scores during generation.
    rerank1_is_gen = ((args.gen_model == args.score_model1) and (args.source_prefix_frac is None))
    rerank2_is_gen = ((args.gen_model == args.score_model2) and (args.source_prefix_frac is None))
    (pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir) = rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
    score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1)
    if (args.score_model2 is not None):
        score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2)
    # Choose the preprocessed data orientation matching model 1.
    if args.right_to_left1:
        rerank_data1 = right_to_left_preprocessed_dir
    elif args.backwards1:
        rerank_data1 = backwards_preprocessed_dir
    else:
        rerank_data1 = left_to_right_preprocessed_dir
    # --score-reference makes generate score the given hypotheses instead
    # of decoding new ones.
    gen_param = ['--batch-size', str(128), '--score-reference', '--gen-subset', 'train']
    if ((not rerank1_is_gen) and (not os.path.isfile(score1_file))):
        print('STEP 4: score the translations for model 1')
        model_param1 = ['--path', args.score_model1, '--source-lang', scorer1_src, '--target-lang', scorer1_tgt]
        gen_model1_param = (([rerank_data1] + gen_param) + model_param1)
        gen_parser = options.get_generation_parser()
        input_args = options.parse_args_and_arch(gen_parser, gen_model1_param)
        # generate prints its scores to stdout; capture them in the file.
        with open(score1_file, 'w') as f:
            with redirect_stdout(f):
                generate.main(input_args)
    # score2_file is only evaluated when score_model2 is set (short-circuit),
    # so it is always defined here.
    if ((args.score_model2 is not None) and (not os.path.isfile(score2_file)) and (not rerank2_is_gen)):
        print('STEP 4: score the translations for model 2')
        if args.right_to_left2:
            rerank_data2 = right_to_left_preprocessed_dir
        elif args.backwards2:
            rerank_data2 = backwards_preprocessed_dir
        else:
            rerank_data2 = left_to_right_preprocessed_dir
        model_param2 = ['--path', args.score_model2, '--source-lang', scorer2_src, '--target-lang', scorer2_tgt]
        gen_model2_param = (([rerank_data2] + gen_param) + model_param2)
        gen_parser = options.get_generation_parser()
        input_args = options.parse_args_and_arch(gen_parser, gen_model2_param)
        with open(score2_file, 'w') as f:
            with redirect_stdout(f):
                generate.main(input_args)
def get_dist_info():
    """Return ``(rank, world_size)`` for the current process group.

    Falls back to ``(0, 1)`` when torch.distributed has not been
    initialized or is unavailable in this torch build.
    """
    if (torch.__version__ < '1.0'):
        # Legacy torch exposed only this private flag.
        initialized = dist._initialized
    else:
        # dist.is_initialized() does not exist on builds compiled without
        # distributed support (e.g. some CPU-only/Windows wheels), so
        # check availability first to avoid an AttributeError.
        initialized = (dist.is_available() and dist.is_initialized())
    if initialized:
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    else:
        rank = 0
        world_size = 1
    return (rank, world_size)
def _image_file(path):
    """List *path*'s directory entries, expanding image files to absolute paths.

    Image-file entries are replaced with their absolute paths; other
    entries are returned unchanged as bare names.
    NOTE(review): non-image entries remain in the result — confirm callers
    filter them out downstream.
    """
    abs_path = os.path.abspath(path)
    image_files = os.listdir(abs_path)
    for i in range(len(image_files)):
        # Join with the listed directory BEFORE the isdir test: the bare
        # entry name would be resolved against the CWD, not against *path*,
        # making the directory check wrong whenever CWD != path.
        entry_path = os.path.join(abs_path, image_files[i])
        if ((not os.path.isdir(entry_path)) and _is_image_file(image_files[i])):
            image_files[i] = entry_path
    return image_files
def c_array_initializer(components: list[str], *, indented: bool = False) -> str:
    """Render *components* as a C array initializer.

    Short lists come back on a single line (``{a, b, c}``); longer ones
    are wrapped so that each line stays under ~70 characters, with a
    four-space extra indent when *indented* is true.
    """
    pad = (' ' * 4) if indented else ''
    wrapped: list[str] = []   # completed ", "-joined lines
    pending: list[str] = []   # components accumulating on the current line
    width = 0                 # running width of the pending line
    for comp in components:
        # Start a new line only when the pending one is non-empty and
        # adding this component would reach the 70-column budget.
        if pending and (width + 2 + len(pad) + len(comp)) >= 70:
            wrapped.append(pad + ', '.join(pending))
            pending = [comp]
            width = len(comp)
        else:
            pending.append(comp)
            width += len(comp) + 2
    if not wrapped:
        # Everything fit on one line.
        return '{%s}' % ', '.join(pending)
    wrapped.append(pad + ', '.join(pending))
    return '{\n ' + ',\n '.join(wrapped) + '\n' + pad + '}'
def se_inception_v3(include_top=True, weights=None, input_tensor=None, input_shape=None, pooling=None, classes=1000):
    """Build an Inception-v3 classifier with a squeeze-and-excite (SE) block
    appended after every Inception mixing stage.

    Args:
        include_top: if True, add global average pooling and a softmax
            ``Dense(classes)`` head.
        weights: ``None`` (random init) or ``'imagenet'``.
            NOTE(review): this function only validates the value -- no
            pretrained weights are loaded anywhere below; confirm whether
            loading was intended.
        input_tensor: optional existing Keras tensor to build on.
        input_shape: optional shape tuple, validated/derived via
            ``_obtain_input_shape`` (default size 299, minimum 139).
        pooling: when ``include_top`` is False, ``'avg'``/``'max'`` adds a
            global pooling layer; any other value returns raw 4D features.
        classes: number of output classes (must be 1000 for
            ``weights='imagenet'`` with ``include_top=True``).

    Returns:
        A ``Model`` named ``'SE_Inception_V3'``.

    Raises:
        ValueError: for an unsupported ``weights`` value, or for
            ``weights='imagenet'`` with ``include_top`` and ``classes != 1000``.
    """
    if (weights not in {'imagenet', None}):
        raise ValueError('The `weights` argument should be either `None` (random initialization) or `imagenet` (pre-training on ImageNet).')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as imagenet with `include_top` as true, `classes` should be 1000')
    input_shape = _obtain_input_shape(input_shape, default_size=299, min_size=139, data_format=K.image_data_format(), require_flatten=include_top)
    if (input_tensor is None):
        img_input = Input(shape=input_shape)
    elif (not K.is_keras_tensor(input_tensor)):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # Channel axis depends on the backend's image data format.
    if (K.image_data_format() == 'channels_first'):
        channel_axis = 1
    else:
        channel_axis = 3
    # Stem: strided/valid convs and max-pools that downsample the input.
    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # Inception block 'mixed0': 1x1, 5x5, double-3x3 and pooled branches,
    # followed by an SE recalibration (the pattern repeats after each mix).
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed0')
    x = squeeze_excite_block(x)
    # Inception block 'mixed1' (branch_pool widens to 64 channels here).
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed1')
    x = squeeze_excite_block(x)
    # Inception block 'mixed2' (same shape as 'mixed1').
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed2')
    x = squeeze_excite_block(x)
    # Reduction block 'mixed3': strided branches halve the spatial size.
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
    x = squeeze_excite_block(x)
    # Inception block 'mixed4': factorized 7x7 (1x7 then 7x1) branches.
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed4')
    x = squeeze_excite_block(x)
    # Blocks 'mixed5' and 'mixed6': same topology as 'mixed4' with 160-wide
    # intermediate 7x7 branches.
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)
        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
        branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name=('mixed' + str((5 + i))))
        x = squeeze_excite_block(x)
    # Block 'mixed7': all 7x7 branches at 192 channels.
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed7')
    x = squeeze_excite_block(x)
    # Reduction block 'mixed8': strided 3x3 / 7x7x3 branches + max-pool.
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')
    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
    x = squeeze_excite_block(x)
    # Blocks 'mixed9' and 'mixed10': expanded 3x3 branches are themselves
    # split into parallel 1x3 / 3x1 convolutions and concatenated.
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)
        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], axis=channel_axis, name=('mixed9_' + str(i)))
        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
        branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name=('mixed' + str((9 + i))))
        x = squeeze_excite_block(x)
    # Classification head, or optional global pooling for feature extraction.
    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    elif (pooling == 'avg'):
        x = GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = GlobalMaxPooling2D()(x)
    # When building on an external tensor, anchor the model at its source
    # inputs so the full graph is captured.
    if (input_tensor is not None):
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='SE_Inception_V3')
    return model
def timeout_exponential_backoff(retries: int, timeout: float, maximum: float) -> Iterator[float]:
    """Yield retry timeouts: the initial *timeout* for the first *retries*
    attempts, then doubling each step, capped at *maximum* forever.
    """
    # Fixed phase: the starting timeout is emitted once, plus once more for
    # each remaining retry.
    yield timeout
    for _ in range(retries - 1):
        yield timeout
    # Exponential phase: double on every step until the cap is hit.
    while timeout < maximum:
        timeout = min(timeout * 2, maximum)
        yield timeout
    # Saturated phase: emit the cap indefinitely.
    while True:
        yield maximum
def _make_system(N, system):
    """Return ``(H, sc_ops)`` for one of the named stochastic test systems.

    *N* is the Hilbert-space dimension; *system* selects which Hamiltonian /
    stochastic-collapse-operator variant to build.
    """
    gamma = 0.25
    a = destroy(N)
    # The sqrt(gamma)*a collapse operator is shared by most variants.
    damped = np.sqrt(gamma) * a
    if system == 'simple':
        H = a.dag() * a
        sc_ops = [damped]
    elif system == '2 c_ops':
        H = QobjEvo([a.dag() * a])
        sc_ops = [damped, gamma * a * a]
    elif system == 'H td':
        H = [[a.dag() * a, f]]
        sc_ops = [np.sqrt(gamma) * QobjEvo(a)]
    elif system == 'complex':
        H = [a.dag() * a + a.dag() + a]
        sc_ops = [damped, gamma * a * a]
    elif system == 'c_ops td':
        H = [a.dag() * a]
        sc_ops = [[damped, f]]
    return (H, sc_ops)
def run_main():
    """Drive a Windows toolchain binary under wine.

    Translates POSIX paths in the argument list to wine ``Z:\\`` paths, sets
    up the MSVC-style environment variables, then runs the command through a
    slave process, retrying with a growing timeout on known transient wine
    failures (timeouts, stack overflow, lost wineserver, D8037).

    Returns the slave's exit code (0 on success).
    """
    # Bug fix: every diagnostic print below used the Python-2 chevron form
    # mangled into ``((print >> sys.stderr), msg)``, which raises TypeError
    # under Python 3; replaced with ``print(msg, file=sys.stderr)``.
    topdirs = [('/%s/' % d) for d in os.listdir('/')]
    def abs_path_start(path, pos):
        # True when the match at ``pos`` begins an absolute path: at the
        # start of the string or right after a ':' list separator.
        if (pos < 0):
            return False
        return ((pos == 0) or (path[(pos - 1)] == ':'))
    def fix_path(p):
        # Rewrite the earliest absolute-path suffix as a wine Z: path with
        # backslash separators; leave strings with no such suffix untouched.
        pp = None
        for pr in topdirs:
            pp2 = p.find(pr)
            if (abs_path_start(p, pp2) and ((pp is None) or (pp > pp2))):
                pp = pp2
        if (pp is not None):
            return ((p[:pp] + 'Z:') + p[pp:].replace('/', '\\'))
        return p
    parser = argparse.ArgumentParser()
    parser.add_argument('wine', action='store')
    parser.add_argument('-v', action='store', dest='version', default='120')
    parser.add_argument('-I', action='append', dest='incl_paths')
    parser.add_argument('mode', action='store')
    parser.add_argument('binary', action='store')
    parser.add_argument('free_args', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    wine = args.wine
    mode = args.mode
    binary = args.binary
    version = args.version
    incl_paths = args.incl_paths
    free_args = args.free_args
    bin_dir = os.path.dirname(binary)
    # Toolchain root is three levels above the binary's directory.
    tc_dir = os.path.dirname(os.path.dirname(os.path.dirname(bin_dir)))
    if (not incl_paths):
        incl_paths = [(tc_dir + '/VC/include'), (tc_dir + '/include')]
    cmd_out = find_cmd_out(free_args)
    env = os.environ.copy()
    env.pop('DISPLAY', None)  # keep wine headless
    env['WINEDLLOVERRIDES'] = 'msvcr{}=n'.format(version)
    env['WINEDEBUG'] = 'fixme-all'
    env['INCLUDE'] = ';'.join((fix_path(p) for p in incl_paths))
    env['VSINSTALLDIR'] = fix_path(tc_dir)
    env['VCINSTALLDIR'] = fix_path((tc_dir + '/VC'))
    env['WindowsSdkDir'] = fix_path(tc_dir)
    env['LIBPATH'] = fix_path((tc_dir + '/VC/lib/amd64'))
    env['LIB'] = fix_path((tc_dir + '/VC/lib/amd64'))
    cmd = ([binary] + [fix_path(x) for x in free_args])
    # Strip flags the wrapped tool does not need; ignore if absent.
    for x in ('/NOLOGO', '/nologo', '/FD'):
        try:
            cmd.remove(x)
        except ValueError:
            pass
    def run_process(sleep, tout):
        # Run the command in a slave invocation of this same script and
        # return (exit_code, combined_output).
        if sleep:
            time.sleep(sleep)
        args = {'cmd': cmd, 'env': env, 'mode': mode, 'tout': tout}
        slave_cmd = [sys.executable, '--python', sys.argv[0], wine, 'slave', json.dumps(args)]
        p = run_subprocess(slave_cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=False)
        (out, _) = p.communicate()
        return (p.wait(), out)
    def print_err_log(log):
        if (not log):
            return
        if (mode == 'cxx'):
            log = colorize(log)
        print(log, file=sys.stderr)
    tout = 15
    while True:
        (rc, out) = run_process(0, tout)
        if (rc in ((- signal.SIGALRM), signal.SIGALRM)):
            print_err_log(out)
            print('##append_tag##time out', file=sys.stderr)
        elif (out and (' stack overflow ' in out)):
            print('##append_tag##stack overflow', file=sys.stderr)
        elif (out and ('recvmsg: Connection reset by peer' in out)):
            print('##append_tag##wine gone', file=sys.stderr)
        elif (out and ('D8037' in out)):
            print('##append_tag##repair wine', file=sys.stderr)
            # Force wine to refresh its prefix on the next run.
            try:
                os.unlink(os.path.join(os.environ['WINEPREFIX'], '.update-timestamp'))
            except Exception as e:
                print(e, file=sys.stderr)
        else:
            print_err_log(out)
            if rc:
                return rc
            if cmd_out:
                if is_good_file(cmd_out):
                    return 0
                else:
                    print('##append_tag##no output', file=sys.stderr)
            else:
                return 0
        # Transient failure: retry with a tripled timeout.
        tout *= 3
# NOTE(review): the three lines below look like click decorators
# ('@click.command()' and two '@click.option(...)') whose '@click.' prefixes
# were stripped in a conversion; as written they are not valid Python
# (keyword arguments inside a tuple display) -- restore the decorators.
()
('--filename', default='samples/sample_wind_poitiers.csv', help='Input filename')
('--year', default=2014, help='Year')
def main(filename, year):
    """Plot a 3x4 grid of monthly wind roses for one *year* of wind data.

    Reads a CSV with 'Timestamp', 'direction' and 'speed' columns from
    *filename*, groups rows by year and month, and draws a filled plus
    outlined wind-rose contour per month.  Months absent from the data are
    left as empty titled subplots.
    """
    df_all = pd.read_csv(filename, parse_dates=['Timestamp'])
    df_all = df_all.set_index('Timestamp')
    # get_by_func is a project helper -- presumably maps a timestamp to its
    # year/month component; TODO confirm against its definition.
    f_year = get_by_func('year')
    df_all['by_page'] = df_all.index.map(f_year)
    f_month = get_by_func('month')
    df_all['by'] = df_all.index.map(f_month)
    # Hierarchical (year, month, timestamp) index for the .loc lookups below.
    df_all = df_all.reset_index().set_index(['by_page', 'by', 'Timestamp'])
    (nrows, ncols) = (3, 4)
    fig = plt.figure()
    bins = np.arange(0.01, 8, 1)
    fig.suptitle(('Wind speed - %d' % year))
    for month in range(1, 13):
        # The 'windrose' projection is registered by the windrose package.
        ax = fig.add_subplot(nrows, ncols, month, projection='windrose')
        title = datetime.datetime(year, month, 1).strftime('%b')
        ax.set_title(title)
        try:
            # NOTE(review): the chained .loc[year].loc[(year, month)] looks
            # suspect -- the first .loc already selects/drops the year level;
            # verify the second key against the resulting index shape.
            df = df_all.loc[year].loc[(year, month)]
        except KeyError:
            # No data for this month: leave the subplot empty.
            continue
        direction = df['direction'].values
        var = df['speed'].values
        ax.contourf(direction, var, bins=bins, cmap=cm.hot)
        ax.contour(direction, var, bins=bins, colors='black')
    plt.show()
# NOTE(review): '_fixtures(...)' below appears to be a '@with_fixtures(...)'
# decorator that lost its prefix in a conversion; as written it is a bare
# call whose result is discarded -- restore the decorator form.
_fixtures(WebFixture, AddressAppFixture)
def test_adding_an_address(web_fixture, address_app_fixture):
    """End-to-end check of the 'Add address' flow.

    Navigates home -> Add page, submits a name with an empty email, and
    verifies the app returns to the home page with the new entry listed.
    """
    browser = address_app_fixture.browser
    browser.open('/')
    browser.click(XPath.link().with_text('Add'))
    assert address_app_fixture.is_on_add_page()
    browser.type(XPath.input_labelled('Name'), 'John Doe')
    # Email is left empty; the listing assertion below expects ''.
    browser.type(XPath.input_labelled('Email'), '')
    browser.click(XPath.button_labelled('Save'))
    assert address_app_fixture.is_on_home_page()
    assert address_app_fixture.address_is_listed_as('John Doe', '')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.