code |
|---|
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [0.001]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.model_parallel_size = 1
args.distributed_backend = 'gloo'
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_init_host = 'localhost'
args.distributed_port = (port + 1)
args.local_world_size = args.distributed_world_size
cfg = OmegaConf.create()
cfg.optimization = OmegaConf.create()
cfg.common = OmegaConf.create()
cfg.distributed_training = OmegaConf.create()
cfg.dataset = OmegaConf.create()
cfg.bmuf = OmegaConf.create()
cfg.optimizer = OmegaConf.create()
cfg.bmuf.global_sync_iter = args.global_sync_iter
cfg.bmuf.block_momentum = args.block_momentum
cfg.bmuf.block_lr = args.block_lr
cfg.dataset.batch_size = args.batch_size
cfg.optimization.lr = args.lr
cfg.optimizer.momentum = args.momentum
cfg.optimizer.weight_decay = args.weight_decay
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.use_nbm = args.use_nbm
cfg.bmuf.average_sync = args.average_sync
cfg.common.model_parallel_size = args.model_parallel_size
cfg.distributed_training.distributed_backend = args.distributed_backend
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.distributed_training.distributed_init_method = args.distributed_init_method
cfg.distributed_training.distributed_port = args.distributed_port
return (cfg, args) |
def exception_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("name: 'pythonnet' force_backward: true\n input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }\n layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'\n python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } }\n ")
return f.name |
def test_importorskip_module_level(pytester: Pytester) -> None:
pytester.makepyfile('\n import pytest\n foobarbaz = pytest.importorskip("foobarbaz")\n\n def test_foo():\n pass\n ')
result = pytester.runpytest()
result.stdout.fnmatch_lines(['*collected 0 items / 1 skipped*']) |
def output_node2vec(g, tmp_node_vec_fname, node_vec_fname, options):
with open(tmp_node_vec_fname, 'r') as f:
with open(node_vec_fname, 'w') as fo:
fo.write(f'size={options.dim}, alpha={options.alpha}, windows={options.window}, negative={options.neg}, walk_num={options.walk_num}, walk_len={options.walk_length}\n')
id2node = dict([(v, k) for (k, v) in g.node2id.items()])
first = True
for line in f:
if first:
first = False
continue
(id_, vectors) = line.strip().split(' ', 1)
line = ('%s\t%s\n' % (id2node[int(id_)], vectors))
fo.write(line) |
class struct_s_pxe_cpb_fill_header_fragmented(ctypes.Structure):
_pack_ = True
_fields_ = [('SrcAddr', (ctypes.c_ubyte * 32)), ('DestAddr', (ctypes.c_ubyte * 32)), ('PacketLen', ctypes.c_uint32), ('Protocol', ctypes.c_uint16), ('MediaHeaderLen', ctypes.c_uint16), ('FragCnt', ctypes.c_uint16), ('reserved', ctypes.c_uint16), ('PADDING_0', (ctypes.c_ubyte * 4)), ('FragDesc', (struct__struct_121 * 16))] |
class kNNClassificationEvaluatorPytorch(Evaluator):
def __init__(self, sentences_train, y_train, sentences_test, y_test, k=1, batch_size=32, limit=None, **kwargs):
super().__init__(**kwargs)
if (limit is not None):
sentences_train = sentences_train[:limit]
y_train = y_train[:limit]
sentences_test = sentences_test[:limit]
y_test = y_test[:limit]
self.sentences_train = sentences_train
self.y_train = y_train
self.sentences_test = sentences_test
self.y_test = y_test
self.batch_size = batch_size
self.k = k
def __call__(self, model, test_cache=None):
print('use kNNClassificationEvaluatorPytorch')
scores = {}
max_accuracy = 0
max_f1 = 0
max_ap = 0
X_train = np.asarray(model.encode(self.sentences_train, batch_size=self.batch_size))
if (test_cache is None):
X_test = np.asarray(model.encode(self.sentences_test, batch_size=self.batch_size))
test_cache = X_test
else:
X_test = test_cache
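# compare kNN performance under three (dis)similarity measures; each is converted to a distance where smaller means more similar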
for metric in ['cosine', 'euclidean', 'dot']:
if (metric == 'cosine'):
distances = (1 - self._cos_sim(X_test, X_train))
elif (metric == 'euclidean'):
distances = self._euclidean_dist(X_test, X_train)
elif (metric == 'dot'):
distances = (- self._dot_score(X_test, X_train))
neigh_indices = torch.topk(distances, k=self.k, dim=1, largest=False).indices
y_train = torch.tensor(self.y_train)
y_pred = torch.mode(y_train[neigh_indices], dim=1).values
accuracy = accuracy_score(self.y_test, y_pred)
f1 = f1_score(self.y_test, y_pred, average='macro')
ap = average_precision_score(self.y_test, y_pred)
scores[('accuracy_' + metric)] = accuracy
scores[('f1_' + metric)] = f1
scores[('ap_' + metric)] = ap
max_accuracy = max(max_accuracy, accuracy)
max_f1 = max(max_f1, f1)
max_ap = max(max_ap, ap)
scores['accuracy'] = max_accuracy
scores['f1'] = max_f1
scores['ap'] = max_ap
return (scores, test_cache)
@staticmethod
def _cos_sim(a: Tensor, b: Tensor):
if (not isinstance(a, torch.Tensor)):
a = torch.tensor(a)
if (not isinstance(b, torch.Tensor)):
b = torch.tensor(b)
if (len(a.shape) == 1):
a = a.unsqueeze(0)
if (len(b.shape) == 1):
b = b.unsqueeze(0)
a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
return torch.mm(a_norm, b_norm.transpose(0, 1))
@staticmethod
def _euclidean_dist(a: Tensor, b: Tensor):
if (not isinstance(a, torch.Tensor)):
a = torch.tensor(a)
if (not isinstance(b, torch.Tensor)):
b = torch.tensor(b)
if (len(a.shape) == 1):
a = a.unsqueeze(0)
if (len(b.shape) == 1):
b = b.unsqueeze(0)
return torch.cdist(a, b, p=2)
@staticmethod
def _dot_score(a: Tensor, b: Tensor):
if (not isinstance(a, torch.Tensor)):
a = torch.tensor(a)
if (not isinstance(b, torch.Tensor)):
b = torch.tensor(b)
if (len(a.shape) == 1):
a = a.unsqueeze(0)
if (len(b.shape) == 1):
b = b.unsqueeze(0)
return torch.mm(a, b.transpose(0, 1)) |
class FixNumliterals(fixer_base.BaseFix):
_accept_type = token.NUMBER
def match(self, node):
return (node.value.startswith('0') or (node.value[(- 1)] in 'Ll'))
def transform(self, node, results):
val = node.value
if (val[(- 1)] in 'Ll'):
val = val[:(- 1)]
elif (val.startswith('0') and val.isdigit() and (len(set(val)) > 1)):
val = ('0o' + val[1:])
return Number(val, prefix=node.prefix) |
def dense_stack_tds(td_list: (Sequence[TensorDictBase] | LazyStackedTensorDict), dim: int=None) -> T:
if isinstance(td_list, LazyStackedTensorDict):
dim = td_list.stack_dim
td_list = td_list.tensordicts
elif (dim is None):
raise ValueError('If a list of tensordicts is provided, stack_dim must not be None')
shape = list(td_list[0].shape)
shape.insert(dim, len(td_list))
out = td_list[0].unsqueeze(dim).expand(shape).clone()
return torch.stack(td_list, dim=dim, out=out) |
class FeatureExtractorUtilTester(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = []
response_mock.raise_for_status.side_effect = HTTPError
_ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
with mock.patch('transformers.utils.hub.requests.head', return_value=response_mock) as mock_head:
_ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
mock_head.assert_called() |
class ShoppingUI(UserInterface):
def assemble(self):
shopping_cart = ShoppingCart.for_current_session()
home = self.define_view('/', title='Paypal Example')
home.set_slot('main', PurchaseForm.factory(shopping_cart))
order_summary_page = self.define_view('/order_summary', title='Order Summary')
order_summary_page.set_slot('main', PurchaseSummary.factory(shopping_cart))
self.define_transition(shopping_cart.events.pay_event, home, order_summary_page)
self.define_transition(shopping_cart.events.clear_event, order_summary_page, home)
self.define_page(MainPage) |
def change_size_unit(total):
if (total < (1 << 10)):
return '{:.2f} B'.format(total)
elif (total < (1 << 20)):
return '{:.2f} KB'.format((total / (1 << 10)))
elif (total < (1 << 30)):
return '{:.2f} MB'.format((total / (1 << 20)))
else:
return '{:.2f} GB'.format((total / (1 << 30))) |
class TestModuleFinder():
def find(self, path, *args, **kwargs):
return set(ModuleFinder.find(str(path), *args, **kwargs))
EXAMPLES = {'simple_folder': (['file.py', 'other.py'], {}, ['file', 'other']), 'exclude': (['file.py', 'other.py'], {'exclude': ['f*']}, ['other']), 'include': (['file.py', 'fole.py', 'other.py'], {'include': ['f*'], 'exclude': ['fo*']}, ['file']), 'invalid-name': (['my-file.py', 'other.file.py'], {}, [])}
@pytest.mark.parametrize('example', EXAMPLES.keys())
def test_finder(self, tmp_path, example):
(files, kwargs, expected_modules) = self.EXAMPLES[example]
ensure_files(tmp_path, files)
assert (self.find(tmp_path, **kwargs) == set(expected_modules))
@pytest.mark.skipif((not has_symlink()), reason='Symlink support required')
def test_symlinked_packages_are_included(self, tmp_path):
src = '_myfiles/file.py'
ensure_files(tmp_path, [src])
os.symlink((tmp_path / src), (tmp_path / 'link.py'))
assert (self.find(tmp_path) == {'link'}) |
@pytest.fixture()
def pickle_files_wo_callback_data(user_data, chat_data, bot_data, conversations):
data = {'user_data': user_data, 'chat_data': chat_data, 'bot_data': bot_data, 'conversations': conversations}
with Path('pickletest_user_data').open('wb') as f:
pickle.dump(user_data, f)
with Path('pickletest_chat_data').open('wb') as f:
pickle.dump(chat_data, f)
with Path('pickletest_bot_data').open('wb') as f:
pickle.dump(bot_data, f)
with Path('pickletest_conversations').open('wb') as f:
pickle.dump(conversations, f)
with Path('pickletest').open('wb') as f:
pickle.dump(data, f)
return True |
class TestDevNet(unittest.TestCase):
def setUp(self):
self.n_train = 200
self.n_test = 100
self.contamination = 0.1
self.roc_floor = 0.8
(self.X_train, self.X_test, self.y_train, self.y_test) = generate_data(n_train=self.n_train, n_test=self.n_test, n_features=10, contamination=self.contamination, random_state=42)
anom_id = np.where((self.y_train == 1))[0]
known_anom_id = np.random.choice(anom_id, 10, replace=False)
y_semi = np.zeros_like(self.y_train, dtype=int)
y_semi[known_anom_id] = 1
device = ('cuda' if torch.cuda.is_available() else 'cpu')
self.clf = RoSAS(epochs=1, hidden_dims=20, device=device, random_state=42)
self.clf.fit(self.X_train, y_semi)
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and (self.clf.decision_scores_ is not None))
assert (hasattr(self.clf, 'labels_') and (self.clf.labels_ is not None))
assert (hasattr(self.clf, 'threshold_') and (self.clf.threshold_ is not None))
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_labels_confidence(self):
(pred_labels, confidence) = self.clf.predict(self.X_test, return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def tearDown(self):
pass |
def lattice_to_kws_index(clat, utterance_id, max_silence_frames=50, max_states=(- 1), allow_partial=True, destructive=False):
if destructive:
index = _kws_functions._lattice_to_kws_index_destructive(clat, utterance_id, max_silence_frames, max_states, allow_partial)
else:
index = _kws_functions._lattice_to_kws_index(clat, utterance_id, max_silence_frames, max_states, allow_partial)
return _fst.KwsIndexVectorFst(index) |
def decompress_and_unpickle(key: str, serialized: bytes, flags: int) -> Any:
if (flags & PickleFlags.ZLIB):
serialized = zlib.decompress(serialized)
flags ^= PickleFlags.ZLIB
if (flags == 0):
return serialized
if (flags in (PickleFlags.INTEGER, PickleFlags.LONG)):
return int(serialized)
if (flags == PickleFlags.PICKLE):
try:
return pickle.loads(serialized)
except Exception:
logging.info('Pickle error', exc_info=True)
return None
logging.info('unrecognized flags')
return serialized |
def test_SagaException():
try:
raise se.SagaException('SagaException')
except se.SagaException as e:
assert ('SagaException' in e.get_message()), str(e)
assert ('SagaException' in str(e)), str(e)
try:
raise se.SagaException('SagaException')
except se.NotImplemented:
assert False
except Exception as e:
assert ('SagaException' in e.get_message()), str(e)
assert ('SagaException' in str(e)), str(e) |
def run_coro_with_timeout(aw: Coroutine, loop: asyncio.AbstractEventLoop, timeout: float) -> Any:
try:
return asyncio.run_coroutine_threadsafe(aw, loop).result((millis_to_seconds(timeout) + _LOADED_SYSTEM_TIMEOUT))
except concurrent.futures.TimeoutError as ex:
raise EventLoopBlocked from ex |
class Conv3d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[(int, Tuple[(int, ...)])]=3, stride: Union[(int, Tuple[(int, ...)])]=1, dilation: int=1, bias: bool=False, transposed: bool=False) -> None:
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = make_ntuple(kernel_size, ndim=3)
self.stride = make_ntuple(stride, ndim=3)
self.dilation = dilation
self.transposed = transposed
self.kernel_volume = int(np.prod(self.kernel_size))
if (self.kernel_volume > 1):
self.kernel = nn.Parameter(torch.zeros(self.kernel_volume, in_channels, out_channels))
else:
self.kernel = nn.Parameter(torch.zeros(in_channels, out_channels))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def extra_repr(self) -> str:
s = '{in_channels}, {out_channels}, kernel_size={kernel_size}'
if (self.stride != ((1,) * len(self.stride))):
s += ', stride={stride}'
if (self.dilation != 1):
s += ', dilation={dilation}'
if (self.bias is None):
s += ', bias=False'
if self.transposed:
s += ', transposed=True'
return s.format(**self.__dict__)
def reset_parameters(self) -> None:
std = (1 / math.sqrt(((self.out_channels if self.transposed else self.in_channels) * self.kernel_volume)))
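# uniform init bounded by 1/sqrt(fan), where fan = (out channels for transposed conv, else in channels) * kernel volume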
self.kernel.data.uniform_((- std), std)
if (self.bias is not None):
self.bias.data.uniform_((- std), std)
def forward(self, input: SparseTensor) -> SparseTensor:
return F.conv3d(input, self.kernel, kernel_size=self.kernel_size, bias=self.bias, stride=self.stride, dilation=self.dilation, transposed=self.transposed) |
class _PyModule(PyDefinedObject, AbstractModule):
def __init__(self, pycore, ast_node, resource):
self.resource = resource
self.concluded_data = []
AbstractModule.__init__(self)
PyDefinedObject.__init__(self, pycore, ast_node, None)
def absolute_name(self) -> str:
return self.get_name()
def _get_concluded_data(self):
new_data = _ConcludedData()
self.concluded_data.append(new_data)
return new_data
def _forget_concluded_data(self):
for data in self.concluded_data:
data._invalidate()
def get_resource(self):
return self.resource |
def convert_observation_field_params(params: RequestParams) -> RequestParams:
if ('observation_fields' in params):
params['observation_field_values_attributes'] = params.pop('observation_fields')
obs_fields = params.get('observation_field_values_attributes')
if isinstance(obs_fields, dict):
params['observation_field_values_attributes'] = [{'observation_field_id': k, 'value': v} for (k, v) in obs_fields.items()]
return params |
def test_preprocess_input():
x = np.random.uniform(0, 255, (2, 10, 10, 3))
assert (utils.preprocess_input(x).shape == x.shape)
out1 = utils.preprocess_input(x, 'channels_last')
out2 = utils.preprocess_input(np.transpose(x, (0, 3, 1, 2)), 'channels_first')
assert_allclose(out1, out2.transpose(0, 2, 3, 1))
x = np.random.uniform(0, 255, (10, 10, 3))
assert (utils.preprocess_input(x).shape == x.shape)
out1 = utils.preprocess_input(x, 'channels_last')
out2 = utils.preprocess_input(np.transpose(x, (2, 0, 1)), 'channels_first')
assert_allclose(out1, out2.transpose(1, 2, 0)) |
class TimeDiversityBinning():
param_names = ['binning']
params = [('HeadTailBreaks', 'Quantiles', 'EqualInterval')]
def setup(self, *args):
test_file_path = mm.datasets.get_path('bubenec')
self.df_buildings = gpd.read_file(test_file_path, layer='buildings')
self.df_streets = gpd.read_file(test_file_path, layer='streets')
self.df_tessellation = gpd.read_file(test_file_path, layer='tessellation')
self.df_buildings['height'] = np.linspace(10.0, 30.0, 144)
self.df_tessellation['area'] = mm.Area(self.df_tessellation).series
self.sw = mm.sw_high(k=3, gdf=self.df_tessellation, ids='uID')
def time_Simpson(self, binning):
mm.Simpson(self.df_tessellation, 'area', self.sw, 'uID', binning)
def time_Shannon(self, binning):
mm.Shannon(self.df_tessellation, 'area', self.sw, 'uID', binning) |
def create_metadata(title, author=None):
if (author is None):
author = 'PyMedPhys Contributors'
metadata = {'metadata': {'title': title, 'upload_type': 'dataset', 'creators': [{'name': author}], 'description': '<p>This is an automated upload from the PyMedPhys library.</p>', 'license': 'Apache-2.0', 'access_right': 'open'}}
return json.dumps(metadata) |
def average_distance_auc(reference, query, min_threshold=0, max_threshold=0.01, plot=False):
kdtree = sklearn.neighbors.KDTree(reference)
(distances, _) = kdtree.query(query, k=1)
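# build a curve of the fraction of query points whose nearest reference neighbour lies within each threshold, then normalise its area to [0, 1]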
x = np.linspace(min_threshold, max_threshold)
y = [((distances <= xi).sum() / distances.size) for xi in x]
auc = (sklearn.metrics.auc((x - min_threshold), y) / (max_threshold - min_threshold))
if plot:
import matplotlib.pyplot as plt
plt.subplot(121)
plt.hist(distances)
plt.subplot(122)
plt.plot(x, y)
plt.xlim(0, max_threshold)
plt.ylim(0, 1)
plt.show()
return auc |
def test_unix_temporal_crs__coordinate_system():
crs = CRS('TIMECRS[Unix time,TDATUM[Unix epoch,TIMEORIGIN[1970-01-01T00:00:00Z]],CS[TemporalCount,1],AXIS[Time,future,TIMEUNIT[second]]]')
assert (crs.cs_to_cf() == [{'standard_name': 'time', 'long_name': 'time', 'calendar': 'proleptic_gregorian', 'units': 'second since 1970-01-01T00:00:00', 'axis': 'T'}]) |
def test_simulation_9():
with Simulation(MODEL_WEIR_SETTING_PATH) as sim:
J1 = Nodes(sim)['J1']
def init_function():
J1.initial_depth = 15
sim.initial_conditions(init_function)
for (ind, step) in enumerate(sim):
if (ind == 0):
assert (J1.depth > 14) |
class EvoNorm2dB0(nn.Module):
def __init__(self, num_features, apply_act=True, momentum=0.1, eps=0.001, **_):
super().__init__()
self.apply_act = apply_act
self.momentum = momentum
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.v = (nn.Parameter(torch.ones(num_features)) if apply_act else None)
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if (self.v is not None):
nn.init.ones_(self.v)
def forward(self, x):
_assert((x.dim() == 4), 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, (- 1), 1, 1)
if (self.v is not None):
if self.training:
var = x.float().var(dim=(0, 2, 3), unbiased=False)
n = (x.numel() / x.shape[1])
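# update the running variance as an EMA of the batch variance; n / (n - 1) converts the biased batch estimate to an unbiased one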
self.running_var.copy_(((self.running_var * (1 - self.momentum)) + ((var.detach() * self.momentum) * (n / (n - 1)))))
else:
var = self.running_var
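# EvoNorm-B0 denominator: elementwise max of the batch (or running) std and (v * x + per-instance std)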
left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x)
v = self.v.to(x_dtype).view(v_shape)
right = ((x * v) + instance_std(x, self.eps))
x = (x / left.max(right))
return ((x * self.weight.to(x_dtype).view(v_shape)) + self.bias.to(x_dtype).view(v_shape)) |
def open_file_chooser_dialog(title='Choose a file', multiple=False):
dialog = Gtk.FileChooserDialog(title, None, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.set_select_multiple(multiple)
response = dialog.run()
result = None
if (response == Gtk.ResponseType.OK):
result = dialog.get_filenames()
dialog.destroy()
return result |
def process_data(points_name, dataset='test'):
locs = []
feats = []
point_ids = []
for (idx, i) in enumerate(range(val_reps)):
scan.open_scan(points_name)
label_name = points_name.replace('bin', 'label').replace('velodyne', 'labels')
if (dataset == 'val'):
scan.open_label(label_name)
label = scan.sem_label
label = label.astype(np.int32)
else:
label = [0]
remissions = scan.remissions
coords = scan.points
point_num = len(coords)
if config['Segmentation']['use_coords']:
feature = np.concatenate([coords, remissions.reshape((- 1), 1)], 1)
else:
feature = remissions.reshape((- 1), 1)
coords = np.ascontiguousarray((coords - coords.mean(0)))
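# random augmentation: flip along the x axis with probability 0.5, scale, then rotate around the z axis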
m = np.eye(3)
m[0][0] *= ((np.random.randint(0, 2) * 2) - 1)
m *= config['Segmentation']['scale']
theta = ((np.random.rand() * 2) * math.pi)
m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0], [(- math.sin(theta)), math.cos(theta), 0], [0, 0, 1]])
coords = ((np.matmul(coords, m) + (config['Segmentation']['full_scale'][1] / 2)) + np.random.uniform((- 2), 2, 3))
m = coords.min(0)
M = coords.max(0)
offset = (((- m) + (np.clip((((config['Segmentation']['full_scale'][1] - M) + m) - 0.001), 0, None) * np.random.rand(3))) + (np.clip((((config['Segmentation']['full_scale'][1] - M) + m) + 0.001), None, 0) * np.random.rand(3)))
coords += offset
idxs = ((coords.min(1) >= 0) * (coords.max(1) < config['Segmentation']['full_scale'][1]))
coords = coords[idxs]
feature = feature[idxs]
coords = torch.Tensor(coords).long()
locs.append(torch.cat([coords, torch.LongTensor(coords.shape[0], 1).fill_(idx)], 1))
feats.append(torch.Tensor(feature))
point_ids.append(torch.from_numpy(np.nonzero(idxs)[0]))
locs = torch.cat(locs, 0)
feats = torch.cat(feats, 0)
point_ids = torch.cat(point_ids, 0)
labels = torch.Tensor(label)
return {'seg_coords': locs, 'seg_features': feats, 'y': labels.long(), 'point_ids': point_ids, 'length': point_num} |
def register_argparse_argument_parameter(param_name: str, param_type: Optional[Type[Any]]) -> None:
attr_name = f'{_CUSTOM_ATTRIB_PFX}{param_name}'
if ((param_name in CUSTOM_ACTION_ATTRIBS) or hasattr(argparse.Action, attr_name)):
raise KeyError(f'Custom parameter {param_name} already exists')
if (not re.search('^[A-Za-z_][A-Za-z0-9_]*$', param_name)):
raise KeyError(f'Invalid parameter name {param_name} - cannot be used as a python identifier')
getter_name = f'get_{param_name}'
def _action_get_custom_parameter(self: argparse.Action) -> Any:
return getattr(self, attr_name, None)
setattr(argparse.Action, getter_name, _action_get_custom_parameter)
setter_name = f'set_{param_name}'
def _action_set_custom_parameter(self: argparse.Action, value: Any) -> None:
if (param_type and (not isinstance(value, param_type))):
raise TypeError(f'{param_name} must be of type {param_type}, got: {value} ({type(value)})')
setattr(self, attr_name, value)
setattr(argparse.Action, setter_name, _action_set_custom_parameter)
CUSTOM_ACTION_ATTRIBS.add(param_name) |
def test_correctness_voronoi():
(head, tail, weight) = _voronoi(cau_coords)
known_head = np.array([0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 4])
known_tail = np.array([1, 2, 4, 0, 2, 0, 1, 3, 4, 2, 0, 2])
np.testing.assert_array_equal(known_head, head)
np.testing.assert_array_equal(known_tail, tail)
np.testing.assert_array_equal(np.ones(head.shape), weight)
(head, tail, weight) = _voronoi(lap_coords)
known_head = np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4])
known_tail = np.array([1, 2, 3, 4, 0, 3, 4, 0, 3, 0, 1, 2, 0, 1])
np.testing.assert_array_equal(known_head, head)
np.testing.assert_array_equal(known_tail, tail)
np.testing.assert_array_equal(np.ones(head.shape), weight) |
def test_FullMultiplicativeForm_kracka2010ranking():
dm = skcriteria.mkdm(matrix=[[33.95, 23.78, 11.45, 39.97, 29.44, 167.1, 3.852], [38.9, 4.17, 6.32, 0.01, 4.29, 132.52, 25.184], [37.59, 9.36, 8.23, 4.35, 10.22, 136.71, 10.845], [30.44, 37.59, 13.91, 74.08, 45.1, 198.34, 2.186], [36.21, 14.79, 9.17, 17.77, 17.06, 148.3, 6.61], [37.8, 8.55, 7.97, 2.35, 9.25, 134.83, 11.935]], objectives=[min, min, min, min, max, min, max], alternatives=['A1', 'A2', 'A3', 'A4', 'A5', 'A6'], criteria=['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
expected = RankResult('FullMultiplicativeForm', ['A1', 'A2', 'A3', 'A4', 'A5', 'A6'], [5, 1, 3, 6, 4, 2], {'score': np.log([3.4343, 148689.356, 120.3441, 0.7882, 16.2917, 252.9155])})
transformer = VectorScaler(target='matrix')
dm = transformer.transform(dm)
ranker = FullMultiplicativeForm()
result = ranker.evaluate(dm)
assert result.values_equals(expected)
assert (result.method == expected.method)
assert np.allclose(result.e_.score, expected.e_.score, atol=0.0001) |
def pg_config_dictionary(*pg_config_path, encoding='utf-8', timeout=8):
default_output = get_command_output(pg_config_path, encoding=encoding, timeout=timeout)
if (default_output is not None):
d = {}
for x in default_output.splitlines():
if ((not x) or x.isspace() or (x.find('=') == (- 1))):
continue
(k, v) = x.split('=', 1)
d[k.lower().strip()] = v.strip()
return d
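# fallback when running pg_config with no arguments yields no output: collect option names from --help, query them all in one call, and zip the resulting lines back to their options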
opt = []
for l in get_command_output(pg_config_path, 'help', encoding=encoding, timeout=timeout).splitlines():
dash_pos = l.find('--')
if (dash_pos == (- 1)):
continue
sp_pos = l.find(' ', dash_pos)
opt.append(l[(dash_pos + 2):sp_pos])
if ('help' in opt):
opt.remove('help')
if ('version' in opt):
opt.remove('version')
d = dict(zip(opt, get_command_output(pg_config_path, *opt, encoding=encoding, timeout=timeout).splitlines()))
d['version'] = get_command_output(pg_config_path, 'version', encoding=encoding, timeout=timeout).strip()
return d |
@pytest.fixture(scope='module')
def grpc_port(greeter_pb2, greeter_pb2_grpc):
class Servicer(greeter_pb2_grpc.GreeterServicer):
def SayHello(self, message, context):
metadata = []
for (key, value) in context.invocation_metadata():
metadata.append((key, value))
metadata = tuple(metadata)
return greeter_pb2.HelloReply(message=base64.b64encode(pickle.dumps(metadata)))
with run_grpc_service_in_process(functools.partial(greeter_pb2_grpc.add_GreeterServicer_to_server, Servicer())) as port:
(yield port) |
def test_atmost():
vp = IDPool()
n = 20
b = 50
assert (n <= b)
lits = [vp.id(v) for v in range(1, (n + 1))]
top = vp.top
G = CardEnc.atmost(lits, b, vpool=vp)
assert (len(G.clauses) == 0)
try:
assert (vp.top >= top)
except AssertionError as e:
print(f'''
vp.top = {vp.top} (expected >= {top})
''')
raise e |
class SegmentationNet10a(VGGNet):
cfg = [(64, 1), (128, 1), ('M', None), (256, 1), (256, 1), (512, 2), (512, 2)]
def __init__(self, config):
super(SegmentationNet10a, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.trunk = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)
self.head = SegmentationNet10aHead(config, output_k=config.output_k, cfg=SegmentationNet10a.cfg)
self._initialize_weights()
def forward(self, x):
x = self.trunk(x)
x = self.head(x)
return x |
def generate_html_response():
html_content = '\n <!doctype html>\n <html>\n <head>\n <title>PyScript Service Worker</title>\n </head>\n <body>\n <h1>PyScript from a service worker </h1>\n <h2>FastAPI demo</h2>\n <ul>\n <li>Test some random <a href="./json">json</a></li>\n <li>Test some random <a href="./emoji">emoji</a></li>\n </ul>\n </body>\n </html>\n '
return HTMLResponse(content=html_content, status_code=200) |
class FC3_LogVolData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.fstype = kwargs.get('fstype', '')
self.grow = kwargs.get('grow', False)
self.maxSizeMB = kwargs.get('maxSizeMB', 0)
self.name = kwargs.get('name', '')
self.format = kwargs.get('format', True)
self.percent = kwargs.get('percent', 0)
self.recommended = kwargs.get('recommended', False)
self.size = kwargs.get('size', None)
self.preexist = kwargs.get('preexist', False)
self.vgname = kwargs.get('vgname', '')
self.mountpoint = kwargs.get('mountpoint', '')
def __eq__(self, y):
if (not y):
return False
return ((self.vgname == y.vgname) and (self.name == y.name))
def __ne__(self, y):
return (not (self == y))
def _getArgsAsStr(self):
retval = ''
if self.fstype:
retval += (' --fstype="%s"' % self.fstype)
if self.grow:
retval += ' --grow'
if self.maxSizeMB:
retval += (' --maxsize=%d' % self.maxSizeMB)
if (not self.format):
retval += ' --noformat'
if self.percent:
retval += (' --percent=%d' % self.percent)
if self.recommended:
retval += ' --recommended'
if self.size:
retval += (' --size=%d' % self.size)
if self.preexist:
retval += ' --useexisting'
return retval
def __str__(self):
retval = BaseData.__str__(self)
args = self._getArgsAsStr()
args += (' --name=%s' % self.name)
args += (' --vgname=%s' % self.vgname)
retval += ('logvol %s%s\n' % (self.mountpoint, args))
return retval |
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--num-workers', default=1, type=int, metavar='N', help='how many subprocesses to use for data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true', help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N', help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N', help='maximum number of sentences in a batch')
group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N', help='batch size will be a multiplier of this value')
parser.add_argument('--dataset-impl', metavar='FORMAT', choices=get_available_dataset_impl(), help='output dataset implementation')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT', choices=['train', 'valid', 'test'], help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT', help='comma separated list of data subsets to use for validation (train, valid, valid1, test, test1)')
group.add_argument('--validate-interval', type=int, default=1, metavar='N', help='validate every N epochs')
group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N', help='specified random seed for validation')
group.add_argument('--disable-validation', action='store_true', help='disable validation')
group.add_argument('--max-tokens-valid', type=int, metavar='N', help='maximum number of tokens in a validation batch (defaults to --max-tokens)')
group.add_argument('--max-sentences-valid', type=int, metavar='N', help='maximum number of sentences in a validation batch (defaults to --max-sentences)')
group.add_argument('--curriculum', default=0, type=int, metavar='N', help="don't shuffle batches for first N epochs")
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT', help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N', help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID', help='id of the shard to generate (id < num_shards)')
return group |
def _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, config_settings):
from zipfile import ZipFile
with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
pass
whl_file = os.path.join(metadata_directory, whl_basename)
with ZipFile(whl_file) as zipf:
dist_info = _dist_info_files(zipf)
zipf.extractall(path=metadata_directory, members=dist_info)
return dist_info[0].split('/')[0] |
class CommandTester():
def __init__(self, command: Command) -> None:
self._command = command
self._io = BufferedIO()
self._inputs: list[str] = []
self._status_code: (int | None) = None
def command(self) -> Command:
return self._command
def io(self) -> BufferedIO:
return self._io
def status_code(self) -> (int | None):
return self._status_code
def execute(self, args: str='', inputs: (str | None)=None, interactive: (bool | None)=None, verbosity: (Verbosity | None)=None, decorated: (bool | None)=None, supports_utf8: bool=True) -> int:
application = self._command.application
input_: (StringInput | ArgvInput) = StringInput(args)
if ((application is not None) and application.definition.has_argument('command') and (self._command.name is not None)):
name = self._command.name
if (' ' in name):
argv = [application.name, self._command.name, *input_._tokens]
input_ = ArgvInput(argv)
else:
input_ = StringInput(((name + ' ') + args))
self._io.set_input(input_)
assert isinstance(self._io.output, BufferedOutput)
assert isinstance(self._io.error_output, BufferedOutput)
self._io.output.set_supports_utf8(supports_utf8)
self._io.error_output.set_supports_utf8(supports_utf8)
if (inputs is not None):
self._io.input.set_stream(StringIO(inputs))
if (interactive is not None):
self._io.interactive(interactive)
if (verbosity is not None):
self._io.set_verbosity(verbosity)
if (decorated is not None):
self._io.decorated(decorated)
self._status_code = self._command.run(self._io)
return self._status_code |
@with_fixtures(WebFixture)
def test_check_missing_form(web_fixture):
fixture = web_fixture
class ModelObject():
fields = ExposedNames()
fields.name = (lambda i: Field())
class MyPanel(Div):
def __init__(self, view):
super().__init__(view)
model_object = ModelObject()
forgotten_form = Form(view, 'myform')
self.add_child(TextInput(forgotten_form, model_object.fields.name))
wsgi_app = fixture.new_wsgi_app(child_factory=MyPanel.factory())
browser = Browser(wsgi_app)
expected_message = 'Could not find form for <TextInput name=myform-name>. Its form, <Form form id="myform".*> is not present on the current page'
with expected(ProgrammerError, test=expected_message):
browser.open('/') |
class Namer():
def __init__(self, debug_trail: DebugTrail, path_to_suffix: Mapping[(CrownPath, str)], path: CrownPath):
self.debug_trail = debug_trail
self.path_to_suffix = path_to_suffix
self._path = path
def _with_path_suffix(self, basis: str) -> str:
if (not self._path):
return basis
return ((basis + '_') + self.path_to_suffix[self._path])
def path(self) -> CrownPath:
return self._path
def v_data(self) -> str:
return self._with_path_suffix('data')
def v_known_keys(self) -> str:
return self._with_path_suffix('known_keys')
def v_required_keys(self) -> str:
return self._with_path_suffix('required_keys')
def v_extra(self) -> str:
return self._with_path_suffix('extra')
def v_has_not_found_error(self) -> str:
return self._with_path_suffix('has_not_found_error')
def with_trail(self, error_expr: str) -> str:
if (self.debug_trail in (DebugTrail.FIRST, DebugTrail.ALL)):
if (len(self._path) == 0):
return error_expr
if (len(self._path) == 1):
return f'append_trail({error_expr}, {self._path[0]!r})'
return f'extend_trail({error_expr}, {self._path!r})'
return error_expr
def emit_error(self, error_expr: str) -> str:
if (self.debug_trail == DebugTrail.ALL):
return f'errors.append({self.with_trail(error_expr)})'
return f'raise {self.with_trail(error_expr)}' |
class PyzoLogger(QtWidgets.QWidget):
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
self._logger_shell = PyzoLoggerShell(self)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self._logger_shell, 1)
self.layout.setSpacing(0)
margin = pyzo.config.view.widgetMargin
self.layout.setContentsMargins(margin, margin, margin, margin)
self.setLayout(self.layout)
def updateZoom(self):
self._logger_shell.setZoom(pyzo.config.view.zoom)
def updateFont(self):
self._logger_shell.setFont(pyzo.config.view.fontname) |
def is_valid_bn_fold(conv_linear: NodeProto, model: ModelProto, fold_backward: bool) -> bool:
valid = True
if (conv_linear.op_type in LinearType):
w = retrieve_constant_input(conv_linear, model, WEIGHT_INDEX)[0]
if (w is None):
valid = False
if (not fold_backward):
if (conv_linear.op_type == 'Conv'):
valid &= all(((item == 0) for item in get_node_attribute(conv_linear, 'pads')))
valid &= (get_node_attribute(conv_linear, 'group') == 1)
elif (conv_linear.op_type == 'ConvTranspose'):
valid = False
elif (conv_linear.op_type == 'ConvTranspose'):
valid &= (get_node_attribute(conv_linear, 'group') in (1, get_input_output_channels(conv_linear, model)[0]))
return valid |
class App(ttk.Frame):
def __init__(self, parent):
ttk.Frame.__init__(self, parent)
for index in range(4):
self.columnconfigure(index=index, weight=1)
self.rowconfigure(index=(index + 1), weight=1)
self.result = tk.StringVar(value='')
self.setup_widgets()
def setup_widgets(self):
self.label = ttk.Label(self, anchor='e', textvariable=self.result, font=('-size', 15), padding=5)
self.label.grid(row=0, column=0, columnspan=4, sticky='ew')
for (index, key) in enumerate('147C2580369=+-*/'):
ttk.Button(self, text=key, style=('TButton' if (key != '=') else 'Accent.TButton'), command=partial(self.button_pressed, key)).grid(row=((index % 4) + 1), column=(index // 4), sticky='nsew', padx=2, pady=2)
def button_pressed(self, key):
if (key == 'C'):
self.result.set('')
elif (key == '='):
self.result.set(str(round(eval(self.result.get()))))
else:
self.result.set((self.result.get() + key)) |
class Effect2054(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, skill, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Shield Resistance Amplifier')), 'explosiveDamageResistanceBonus', (skill.getModifiedItemAttr('hardeningBonus') * skill.level), **kwargs) |
def validate(model, data_loader):
print('validating ... ', flush=True, end='')
val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')
model.eval()
with torch.no_grad():
for pack in data_loader:
img = pack['img']
label = pack['label'].cuda(non_blocking=True)
x = model(img)
loss1 = F.multilabel_soft_margin_loss(x, label)
val_loss_meter.add({'loss1': loss1.item()})
model.train()
print(('loss: %.4f' % val_loss_meter.pop('loss1')))
return |
def pytest_generate_tests(metafunc):
if getattr(metafunc, 'function', False):
if getattr(metafunc.function, 'pytestmark', False):
marks = metafunc.function.pytestmark
order_marks = [mark for mark in marks if (mark.name == 'order')]
if (len(order_marks) > 1):
metafunc.function.pytestmark = [mark for mark in marks if (mark.name != 'order')]
args = [pytest.param(_get_mark_description(mark), marks=[mark]) for mark in order_marks]
if ('order' not in metafunc.fixturenames):
metafunc.fixturenames.append('order')
metafunc.parametrize('order', args) |
def test_ScanArgs_remove_outer_output():
hmm_model_env = create_test_hmm()
scan_args = hmm_model_env['scan_args']
hmm_model_env['scan_op']
Y_t = hmm_model_env['Y_t']
Y_rv = hmm_model_env['Y_rv']
hmm_model_env['sigmas_in']
hmm_model_env['sigmas_t']
Gamma_rv = hmm_model_env['Gamma_rv']
Gamma_in = hmm_model_env['Gamma_in']
S_in = hmm_model_env['S_in']
S_t = hmm_model_env['S_t']
rng_in = hmm_model_env['rng_in']
scan_updates = hmm_model_env['scan_updates']
scan_args_copy = copy(scan_args)
test_v = Y_rv
rm_info = scan_args_copy.remove_from_fields(test_v, rm_dependents=True)
(removed_nodes, _) = zip(*rm_info)
assert (Y_t in removed_nodes)
assert (len(scan_args_copy.inner_out_nit_sot) == 0)
assert (Y_rv not in scan_args_copy.outer_outputs)
assert (len(scan_args_copy.outer_out_nit_sot) == 0)
assert (S_t in scan_args_copy.inner_out_sit_sot)
assert (S_in in scan_args_copy.outer_out_sit_sot)
assert (Gamma_in in scan_args_copy.inner_in_non_seqs)
assert (Gamma_rv in scan_args_copy.outer_in_non_seqs)
assert (rng_in in scan_args_copy.inner_out_shared)
assert (list(scan_updates.values()) == scan_args.outer_out_shared) |
@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with cosine. Consider --lr-scheduler=fixed instead.')
warmup_end_lr = args.max_lr
if (args.warmup_init_lr < 0):
args.warmup_init_lr = args.lr[0]
self.min_lr = args.lr[0]
self.max_lr = args.max_lr
assert (self.max_lr > self.min_lr), 'max_lr must be more than lr'
self.t_mult = args.t_mult
self.period = args.lr_period_updates
if (self.period <= 0):
assert (args.max_update >= 0), 'Either --max_update or --lr-period-updates must be set'
self.period = (args.max_update - args.warmup_updates)
if (args.warmup_updates > 0):
self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
else:
self.lr_step = 1
self.warmup_updates = args.warmup_updates
self.lr_shrink = args.lr_shrink
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
parser.add_argument('--max-lr', type=float, metavar='LR', help='max learning rate, must be more than args.lr')
parser.add_argument('--t-mult', default=1, type=float, metavar='LR', help='factor to grow the length of each period')
parser.add_argument('--lr-period-updates', default=(- 1), type=float, metavar='LR', help='initial number of updates per period')
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing')
def step(self, epoch, val_loss=None):
super().step(epoch, val_loss)
return self.optimizer.get_lr()
def step_update(self, num_updates):
if (num_updates < self.args.warmup_updates):
self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
else:
curr_updates = (num_updates - self.args.warmup_updates)
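# cosine annealing with warm restarts: locate the current restart cycle i, its length t_i, and the offset t_curr within it (cycle length grows geometrically when t_mult != 1)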
if (self.t_mult != 1):
i = math.floor(math.log((1 - ((curr_updates / self.period) * (1 - self.t_mult))), self.t_mult))
t_i = ((self.t_mult ** i) * self.period)
t_curr = (curr_updates - (((1 - (self.t_mult ** i)) / (1 - self.t_mult)) * self.period))
else:
i = math.floor((curr_updates / self.period))
t_i = self.period
t_curr = (curr_updates - (self.period * i))
lr_shrink = (self.lr_shrink ** i)
min_lr = (self.min_lr * lr_shrink)
max_lr = (self.max_lr * lr_shrink)
self.lr = (min_lr + ((0.5 * (max_lr - min_lr)) * (1 + math.cos(((math.pi * t_curr) / t_i)))))
self.optimizer.set_lr(self.lr)
return self.lr |
class QuestionAnsweringArgumentHandler(ArgumentHandler):
def normalize(self, item):
if isinstance(item, SquadExample):
return item
elif isinstance(item, dict):
for k in ['question', 'context']:
if (k not in item):
raise KeyError('You need to provide a dictionary with keys {question:..., context:...}')
elif (item[k] is None):
raise ValueError(f'`{k}` cannot be None')
elif (isinstance(item[k], str) and (len(item[k]) == 0)):
raise ValueError(f'`{k}` cannot be empty')
return QuestionAnsweringPipeline.create_sample(**item)
raise ValueError(f'{item} argument needs to be of type (SquadExample, dict)')
def __call__(self, *args, **kwargs):
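# normalize the many accepted call conventions (positional args, X=, data=, question=/context=) into a list of {'question', 'context'} dicts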
if ((args is not None) and (len(args) > 0)):
if (len(args) == 1):
inputs = args[0]
elif ((len(args) == 2) and ({type(el) for el in args} == {str})):
inputs = [{'question': args[0], 'context': args[1]}]
else:
inputs = list(args)
elif ('X' in kwargs):
inputs = kwargs['X']
elif ('data' in kwargs):
inputs = kwargs['data']
elif (('question' in kwargs) and ('context' in kwargs)):
if (isinstance(kwargs['question'], list) and isinstance(kwargs['context'], str)):
inputs = [{'question': Q, 'context': kwargs['context']} for Q in kwargs['question']]
elif (isinstance(kwargs['question'], list) and isinstance(kwargs['context'], list)):
if (len(kwargs['question']) != len(kwargs['context'])):
raise ValueError("Questions and contexts don't have the same lengths")
inputs = [{'question': Q, 'context': C} for (Q, C) in zip(kwargs['question'], kwargs['context'])]
elif (isinstance(kwargs['question'], str) and isinstance(kwargs['context'], str)):
inputs = [{'question': kwargs['question'], 'context': kwargs['context']}]
else:
raise ValueError("Arguments can't be understood")
else:
raise ValueError(f'Unknown arguments {kwargs}')
if isinstance(inputs, dict):
inputs = [inputs]
elif isinstance(inputs, Iterable):
inputs = [i for i in inputs]
else:
raise ValueError(f'Invalid arguments {kwargs}')
for (i, item) in enumerate(inputs):
inputs[i] = self.normalize(item)
return inputs |
class GammaIncInv(BinaryScalarOp):
nfunc_spec = ('scipy.special.gammaincinv', 2, 1)
@staticmethod
def st_impl(k, x):
return scipy.special.gammaincinv(k, x)
def impl(self, k, x):
return GammaIncInv.st_impl(k, x)
def grad(self, inputs, grads):
(k, x) = inputs
(gz,) = grads
return [grad_not_implemented(self, 0, k), (((gz * exp(gammaincinv(k, x))) * gamma(k)) * (gammaincinv(k, x) ** (1 - k)))]
def c_code(self, *args, **kwargs):
raise NotImplementedError() |
def set_deployment_placement_options(deployment_config: dict, scaling_config: ScalingConfig):
scaling_config = scaling_config.as_air_scaling_config()
deployment_config.setdefault('ray_actor_options', {})
replica_actor_resources = {'CPU': deployment_config['ray_actor_options'].get('num_cpus', 1), 'GPU': deployment_config['ray_actor_options'].get('num_gpus', 0), **deployment_config['ray_actor_options'].get('resources', {})}
if (('placement_group_bundles' in deployment_config) or ('placement_group_strategy' in deployment_config)):
raise ValueError('placement_group_bundles and placement_group_strategy must not be specified in deployment_config. Use scaling_config to configure replica placement group.')
deployment_config['placement_group_bundles'] = ([replica_actor_resources] + scaling_config.as_placement_group_factory().bundles)
deployment_config['placement_group_strategy'] = scaling_config.placement_strategy
return deployment_config |
@pytest.mark.parametrize('output_is_path', [True, False])
@pytest.mark.filterwarnings('ignore::sgkit.io.vcfzarr_reader.DimensionNameForFixedFormatFieldWarning')
def test_zarr_to_vcf(shared_datadir, tmp_path, output_is_path):
path = path_for_test(shared_datadir, 'sample.vcf.gz')
intermediate = tmp_path.joinpath('intermediate.vcf.zarr').as_posix()
output = tmp_path.joinpath('output.vcf').as_posix()
kwargs = zarr_array_sizes(path)
vcf_to_zarr(path, intermediate, fields=['INFO/*', 'FORMAT/*'], mixed_ploidy=True, **kwargs)
if output_is_path:
output = tmp_path.joinpath('output.vcf').as_posix()
zarr_to_vcf(intermediate, output)
else:
output_str = StringIO()
zarr_to_vcf(intermediate, output_str)
with open(output, 'w') as f:
f.write(output_str.getvalue())
v = VCF(output)
assert (v.samples == ['NA00001', 'NA00002', 'NA00003'])
variant = next(v)
assert (variant.CHROM == '19')
assert (variant.POS == 111)
assert (variant.ID is None)
assert (variant.REF == 'A')
assert (variant.ALT == ['C'])
assert (variant.QUAL == pytest.approx(9.6))
assert (variant.FILTER is None)
assert (variant.genotypes == [[0, 0, True], [0, 0, True], [0, 1, False]])
assert_array_equal(variant.format('HQ'), [[10, 15], [10, 10], [3, 3]]) |
@assertFailure_fast
def test_long_destroyers_loop():
(x, y, z) = inputs()
e = dot(dot(add_in_place(x, y), add_in_place(y, z)), add(z, x))
g = create_fgraph([x, y, z], [e])
assert g.consistent()
TopoSubstitutionNodeRewriter(add, add_in_place).rewrite(g)
assert g.consistent()
assert (str(g) != 'FunctionGraph(Dot(Dot(AddInPlace(x, y), AddInPlace(y, z)), AddInPlace(z, x)))')
e2 = dot(dot(add_in_place(x, y), add_in_place(y, z)), add_in_place(z, x))
with pytest.raises(InconsistencyError):
create_fgraph(*clone([x, y, z], [e2])) |
class PresetEchoesHints(PresetTab, Ui_PresetEchoesHints):
def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):
super().__init__(editor, game_description, window_manager)
self.setupUi(self)
self.hint_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop)
for (i, stk_hint_mode) in enumerate(SkyTempleKeyHintMode):
self.hint_sky_temple_key_combo.setItemData(i, stk_hint_mode)
self.hint_sky_temple_key_combo.currentIndexChanged.connect(self._on_stk_combo_changed)
@classmethod
def tab_title(cls) -> str:
return 'Hints'
@classmethod
def uses_patches_tab(cls) -> bool:
return False
def _on_stk_combo_changed(self, new_index: int):
with self._editor as editor:
editor.set_configuration_field('hints', dataclasses.replace(editor.configuration.hints, sky_temple_keys=self.hint_sky_temple_key_combo.currentData()))
def on_preset_changed(self, preset: Preset):
set_combo_with_value(self.hint_sky_temple_key_combo, preset.configuration.hints.sky_temple_keys) |
def get_example_xml(song_path, rating, lastplayed):
song_uri = fsn2uri(song_path)
mount_uri = fsn2uri(find_mount_point(song_path))
return ('<?xml version="1.0" standalone="yes"?>\n<rhythmdb version="1.9">\n <entry type="song">\n <title>Music</title>\n <genre>Unknown</genre>\n <track-number>7</track-number>\n <duration>199</duration>\n <file-size>4799124</file-size>\n <location>%s</location>\n <mountpoint>%s</mountpoint>\n <mtime></mtime>\n <first-seen></first-seen>\n <last-seen></last-seen>\n <last-played>%d</last-played>\n <play-count>1</play-count>\n <bitrate>191</bitrate>\n <rating>%d</rating>\n <date>731881</date>\n <media-type>audio/mpeg</media-type>\n <composer>Unknown</composer>\n </entry>\n</rhythmdb>' % (song_uri, mount_uri, lastplayed, rating)).encode('utf-8') |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.setLevel((logging.INFO if (jax.process_index() == 0) else logging.ERROR))
if (jax.process_index() == 0):
transformers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
logger.info(f'Training/evaluation parameters {training_args}')
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer)
elif model_args.text_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer)
else:
raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script. You can do it from another script, save it, and load it from here, using --tokenizer_name.')
model = FlaxHybridCLIP.from_text_vision_pretrained(model_args.text_model_name_or_path, model_args.vision_model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), text_from_pt=model_args.from_pt, vision_from_pt=model_args.from_pt)
config = model.config
set_seed(training_args.seed)
preprocess = Transform(config.vision_config.image_size)
preprocess = torch.jit.script(preprocess)
train_dataset = ImageTextDataset(data_args.data_dir, data_args.train_file, captions_per_image=2, transform=preprocess)
eval_dataset = ImageTextDataset(data_args.data_dir, data_args.validation_file, captions_per_image=1, transform=preprocess)
num_epochs = int(training_args.num_train_epochs)
train_batch_size = (int(training_args.per_device_train_batch_size) * jax.device_count())
eval_batch_size = (int(training_args.per_device_eval_batch_size) * jax.device_count())
steps_per_epoch = (len(train_dataset) // train_batch_size)
total_train_steps = (steps_per_epoch * num_epochs)
def collate_fn(examples):
pixel_values = torch.stack([example[0] for example in examples]).permute(0, 2, 3, 1).numpy()
captions = [example[1] for example in examples]
inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding='max_length', truncation=True, return_tensors='np')
batch = {'pixel_values': pixel_values, 'input_ids': inputs['input_ids'], 'attention_mask': inputs['attention_mask']}
return batch
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=True, collate_fn=collate_fn)
eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=eval_batch_size, shuffle=False, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=True, collate_fn=collate_fn)
if (has_tensorboard and (jax.process_index() == 0)):
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath('logs').as_posix())
rng = jax.random.PRNGKey(training_args.seed)
(rng, dropout_rng) = jax.random.split(rng)
linear_decay_lr_schedule_fn = create_learning_rate_fn(len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate)
adamw = optax.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay)
state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
def cross_entropy(logits, axis):
logprobs = jax.nn.log_softmax(logits, axis=axis)
nll = jnp.diag(logprobs)
ce = (- jnp.mean(nll))
return ce
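# symmetric contrastive loss: average the cross-entropy computed along each axis of the image-text similarity matrix (diagonal entries are the matched pairs)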
def clip_loss(similarity):
loss = ((cross_entropy(similarity, axis=0) + cross_entropy(similarity, axis=1)) / 2)
return loss
def train_step(state, batch):
(dropout_rng, new_dropout_rng) = jax.random.split(state.dropout_rng)
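# split the dropout RNG so this step uses a fresh key while the successor key is carried forward in the training state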
def compute_loss(params):
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = clip_loss(logits)
return loss
grad_fn = jax.value_and_grad(compute_loss)
(loss, grad) = grad_fn(state.params)
grad = jax.lax.pmean(grad, 'batch')
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
metrics = {'loss': loss, 'learning_rate': linear_decay_lr_schedule_fn(state.step)}
metrics = jax.lax.pmean(metrics, axis_name='batch')
return (new_state, metrics)
def eval_step(params, batch):
logits = model(**batch, params=params, train=False)[0]
loss = clip_loss(logits)
metrics = {'loss': loss}
metrics = jax.lax.pmean(metrics, axis_name='batch')
return metrics
p_train_step = jax.pmap(train_step, 'batch', donate_argnums=(0,))
p_eval_step = jax.pmap(eval_step, 'batch')
state = state.replicate()
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {num_epochs}')
logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}')
logger.info(f' Total train batch size (w. parallel & distributed) = {train_batch_size}')
logger.info(f' Total optimization steps = {total_train_steps}')
train_time = 0
(rng, input_rng) = jax.random.split(rng)
epochs = tqdm(range(num_epochs), desc=f'Epoch ... (1/{num_epochs})', position=0)
for epoch in epochs:
train_start = time.time()
(rng, input_rng) = jax.random.split(rng)
train_metrics = []
steps_per_epoch = (len(train_dataset) // train_batch_size)
train_step_progress_bar = tqdm(total=steps_per_epoch, desc='Training...', position=1, leave=False)
for batch in train_loader:
batch = shard(batch)
(state, train_metric) = p_train_step(state, batch)
train_metrics.append(train_metric)
train_step_progress_bar.update(1)
train_time += (time.time() - train_start)
train_metric = unreplicate(train_metric)
train_step_progress_bar.close()
epochs.write(f"Epoch... ({(epoch + 1)}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})")
eval_metrics = []
eval_steps = (len(eval_dataset) // eval_batch_size)
eval_step_progress_bar = tqdm(total=eval_steps, desc='Evaluating...', position=2, leave=False)
for batch in eval_loader:
batch = shard(batch)
metrics = p_eval_step(state.params, batch)
eval_metrics.append(metrics)
eval_step_progress_bar.update(1)
eval_metrics = get_metrics(eval_metrics)
eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics)
eval_step_progress_bar.close()
desc = f"Epoch... ({(epoch + 1)}/{num_epochs} | Eval Loss: {eval_metrics['loss']})"
epochs.write(desc)
epochs.desc = desc
if (has_tensorboard and (jax.process_index() == 0)):
cur_step = (epoch * (len(train_dataset) // train_batch_size))
write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step)
if (jax.process_index() == 0):
params = jax.device_get(unreplicate(state.params))
model.save_pretrained(training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f'Saving weights and logs of epoch {(epoch + 1)}') |
class PHP(CNF, object):
def __init__(self, nof_holes, kval=1, topv=0, verb=False):
super(PHP, self).__init__()
vpool = IDPool(start_from=(topv + 1))
var = (lambda i, j: vpool.id('v_{0}_{1}'.format(i, j)))
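# at-least-one clauses: each of the k*n+1 pigeons must occupy some hole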
for i in range(1, ((kval * nof_holes) + 2)):
self.append([var(i, j) for j in range(1, (nof_holes + 1))])
pigeons = range(1, ((kval * nof_holes) + 2))
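# at-most-k clauses: for every hole, forbid any (k+1)-subset of pigeons from all being placed in it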
for j in range(1, (nof_holes + 1)):
for comb in itertools.combinations(pigeons, (kval + 1)):
self.append([(- var(i, j)) for i in comb])
if verb:
head = 'c {0}PHP formula for'.format(('' if (kval == 1) else (str(kval) + '-')))
head += ' {0} pigeons and {1} holes'.format(((kval * nof_holes) + 1), nof_holes)
self.comments.append(head)
for i in range(1, ((kval * nof_holes) + 2)):
for j in range(1, (nof_holes + 1)):
self.comments.append('c (pigeon, hole) pair: ({0}, {1}); bool var: {2}'.format(i, j, var(i, j))) |
def exportFighters(fighters):
FIGHTER_ORDER = ('Light Fighter', 'Heavy Fighter', 'Support Fighter')
def fighterSorter(fighter):
groupName = Market.getInstance().getGroupByItem(fighter.item).name
return (FIGHTER_ORDER.index(groupName), fighter.item.typeName)
fighterLines = []
for fighter in sorted(fighters, key=fighterSorter):
fighterLines.append('{} x{}'.format(fighter.item.typeName, fighter.amount))
return '\n'.join(fighterLines) |
def test_ff_cannot_write_to_struct_field():
@bitstruct
class C():
bar: Bits16
@bitstruct
class B():
foo: Bits32
bar: ([([C] * 5)] * 5)
class A(ComponentLevel3):
def construct(s):
s.wire = Wire(B)
@update_ff
def ffs():
s.wire.bar <<= 1
try:
_test_model(A)
except UpdateFFNonTopLevelSignalError as e:
print('{} is thrown\n{}'.format(e.__class__.__name__, e))
return
raise Exception("Should've thrown UpdateFFNonTopLevelSignalError.") |
class VOC(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
try:
shuffle = kwargs.pop('shuffle')
except:
shuffle = False
num_workers = kwargs.pop('num_workers')
self.dataset = VOCDataset(**kwargs)
super(VOC, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None) |
def main_fn(path_config_file, extra_args={}):
(env_name, env_extra_args, output_file, seed_number, lowU_train_val, highU_train_val, lowU_test_val, highU_test_val, max_episode_length, num_data_train, num_data_test, save_video, disable_substep, control_policy, n_rollout, num_data_colocation, extra_noise_colocation) = load_config_yaml(path_config_file, extra_args)
domain_name = env_name
task_name = env_extra_args['task_name']
np.random.seed(seed_number)
env = suite.load(domain_name, task_name, task_kwargs={'random': seed_number})
print('Load environment\t : ENV_NAME={}, TASK_NAME={}'.format(env_name, task_name))
if disable_substep:
old_substep_val = int(env._n_sub_steps)
else:
old_substep_val = 1
env._n_sub_steps = int((env._n_sub_steps / old_substep_val))
actual_dt = (env._n_sub_steps * env.physics.model.opt.timestep)
print('Load done.')
n_state = (env.physics.model.nq + env.physics.model.nv)
n_control = env.physics.model.nu
print(' DM time step = {} -> n_steps = {}, opt_time_steps = {} '.format(actual_dt, env._n_sub_steps, env.physics.model.opt.timestep))
lowU_train_val = (lowU_train_val if (control_policy is None) else (- control_policy.get('noise_train', 0.0)))
highU_train_val = (highU_train_val if (control_policy is None) else control_policy.get('noise_train', 0.0))
lowU_train = np.array([lowU_train_val for i in range(n_control)])
highU_train = np.array([highU_train_val for i in range(n_control)])
lowU_test_val = (lowU_test_val if (control_policy is None) else (- control_policy.get('noise_test', 0.0)))
highU_test_val = (highU_test_val if (control_policy is None) else control_policy.get('noise_test', 0.0))
lowU_test = np.array([lowU_test_val for i in range(n_control)])
highU_test = np.array([highU_test_val for i in range(n_control)])
max_traj = int(np.max(np.array(num_data_train)))
(xTrainList, uTrainList, xnextTrainList, iMq_acc, qacc) = generate_data(lowU_train, highU_train, env, num_data=max_traj, max_length=max_episode_length, repeat_u=old_substep_val, control_policy=control_policy, n_rollout=n_rollout)
print(np.array(qacc).shape, np.array(iMq_acc).shape)
regTerm = (1.0 / np.mean(np.abs((np.array(qacc).T / np.sum(iMq_acc, axis=1)))))
print('## Quotient F/acc = {}'.format(regTerm))
(xTest, uTest, xnextTest, _, _) = generate_data(lowU_test, highU_test, env, num_data=num_data_test, max_length=max_episode_length, repeat_u=old_substep_val, control_policy=control_policy, n_rollout=n_rollout)
(xColoc, uColoc, _, _, _) = generate_data((lowU_test - extra_noise_colocation), (highU_test + extra_noise_colocation), env, num_data=num_data_colocation, max_length=max_episode_length, repeat_u=old_substep_val, control_policy=control_policy, n_rollout=1)
mLog = SampleLog(xTrain=xTrainList, xTrainExtra=(None, (xColoc, uColoc[0], None)), uTrain=uTrainList, xnextTrain=xnextTrainList, lowU_train=lowU_train, highU_train=highU_train, xTest=xTest, xTestExtra=None, uTest=uTest, xnextTest=xnextTest, lowU_test=lowU_test, highU_test=highU_test, env_name=env_name, env_extra_args=env_extra_args, m_rng=seed_number, seed_number=seed_number, qp_indx=None, qp_base=(env.physics.model.nq, env.physics.model.nv, regTerm), n_state=n_state, n_control=n_control, actual_dt=actual_dt, control_policy=control_policy, disable_substep=(disable_substep, num_data_train, max_episode_length), n_rollout=n_rollout)
mFile = open((output_file + '.pkl'), 'wb')
pickle.dump(mLog, mFile)
mFile.close()
print('Env extra args : {}'.format(env_extra_args))
print('Number inputs\t\t : {}'.format(n_control))
print('Time step dt\t\t : {}'.format(actual_dt))
print('Initial seed number\t : {}'.format(seed_number))
print('Training control range\t : {}'.format([(x, y) for (x, y) in zip(lowU_train, highU_train)]))
print('Testing control range\t : {}'.format([(x, y) for (x, y) in zip(lowU_test, highU_test)]))
print('Control policy\t : {}'.format(control_policy))
print('Resulting RNG\t\t : {}'.format(seed_number))
print('Training size\t\t : {}'.format(num_data_train))
print('Testing size\t\t : {}'.format(num_data_test))
print('No. rollout\t\t : {}'.format(n_rollout))
print('Size data:\t\t Train = {}, Test = {}'.format(len(xTrainList), len(xTest))) |
def test_bits_to_int():
rs = np.random.RandomState(52)
bitstrings = rs.choice([0, 1], size=(100, 23))
nums = bits_to_ints(bitstrings)
assert (nums.shape == (100,))
for (num, bs) in zip(nums, bitstrings):
ref_num = cirq.big_endian_bits_to_int(bs.tolist())
assert (num == ref_num)
(num,) = bits_to_ints([1, 0])
assert (num == 2) |
def init():
cache_path = standarddir.cache()
data_path = standarddir.data()
QWebSettings.setIconDatabasePath(standarddir.cache())
QWebSettings.setOfflineWebApplicationCachePath(os.path.join(cache_path, 'application-cache'))
QWebSettings.globalSettings().setLocalStoragePath(os.path.join(data_path, 'local-storage'))
QWebSettings.setOfflineStoragePath(os.path.join(data_path, 'offline-storage'))
settings = QWebSettings.globalSettings()
_set_user_stylesheet(settings)
_set_cookie_accept_policy(settings)
_set_cache_maximum_pages(settings)
_init_user_agent()
config.instance.changed.connect(_update_settings)
global global_settings
global_settings = WebKitSettings(QWebSettings.globalSettings())
global_settings.init_settings() |
def recursively_load_weights(fairseq_model, hf_model, is_headless):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.wav2vec2.feature_extractor
for (name, value) in fairseq_dict.items():
is_used = False
if ('conv_layers' in name):
load_conv_layer(name, value, feature_extractor, unused_weights, (hf_model.config.feat_extract_norm == 'group'))
is_used = True
else:
for (key, mapped_key) in MAPPING.items():
mapped_key = (('wav2vec2.' + mapped_key) if (mapped_key not in TOP_LEVEL_KEYS) else mapped_key)
if ((key in name) or (key.split('w2v_model.')[(- 1)] == name.split('.')[0])):
is_used = True
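# '*' in the mapped key is a placeholder for the transformer layer index, recovered from the fairseq parameter name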
if ('*' in mapped_key):
layer_index = name.split(key)[0].split('.')[(- 2)]
mapped_key = mapped_key.replace('*', layer_index)
if ('weight_g' in name):
weight_type = 'weight_g'
elif ('weight_v' in name):
weight_type = 'weight_v'
elif ('bias' in name):
weight_type = 'bias'
elif ('weight' in name):
weight_type = 'weight'
else:
weight_type = None
set_recursively(hf_model, mapped_key, value, name, weight_type)
continue
if (not is_used):
unused_weights.append(name)
logger.warning(f'Unused weights: {unused_weights}') |
def read_lmv_tofits(fileobj):
from astropy.io import fits
(data, header) = read_lmv(fileobj)
data = data.squeeze()
bad_kws = ['NAXIS4', 'CRVAL4', 'CRPIX4', 'CDELT4', 'CROTA4', 'CUNIT4', 'CTYPE4']
cards = [(fits.header.Card(keyword=k, value=v[0], comment=v[1]) if isinstance(v, tuple) else fits.header.Card(''.join((s for s in k if (s in string.printable))), (''.join((s for s in v if (s in string.printable))) if isinstance(v, six.string_types) else v))) for (k, v) in six.iteritems(header) if (k not in bad_kws)]
Header = fits.Header(cards)
hdu = fits.PrimaryHDU(data=data, header=Header)
return hdu |
class TestRequestsBackend():
@pytest.mark.parametrize('test_data,expected', [(False, '0'), (True, '1'), ('12', '12'), (12, '12'), (12.0, '12.0'), (complex((- 2), 7), '(-2+7j)')])
def test_prepare_send_data_non_strings(self, test_data, expected) -> None:
assert isinstance(expected, str)
files = {'file': ('file.tar.gz', '12345', 'application/octet-stream')}
post_data = {'test_data': test_data}
result = requests_backend.RequestsBackend.prepare_send_data(files=files, post_data=post_data, raw=False)
assert (result.json is None)
assert result.content_type.startswith('multipart/form-data')
assert isinstance(result.data, MultipartEncoder)
assert isinstance(result.data.fields['test_data'], str)
assert (result.data.fields['test_data'] == expected) |
def test_cmdstep_cmd_is_dict_default_save_true():
obj = CmdStep('blahname', Context({'cmd': {'run': 'blah', 'save': True}}), is_shell=False)
assert (not obj.is_shell)
assert (obj.logger.name == 'blahname')
assert (obj.context == Context({'cmd': {'run': 'blah', 'save': True}}))
assert (obj.commands == [Command('blah', cwd=None, is_shell=False, is_save=True)]) |
class Effect11943(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'thermalDamage', ship.getModifiedItemAttr('shipBonusGD1'), skill='Gallente Destroyer', **kwargs) |
class BertDataLoader(DataLoader):
def __iter__(self):
while True:
while self._empty():
self._fill_buf()
if ((self.start + self.batch_size) >= self.end):
instances = self.buffer[self.start:]
else:
instances = self.buffer[self.start:(self.start + self.batch_size)]
self.start += self.batch_size
src = []
tgt_mlm = []
is_next = []
seg = []
masked_words_num = 0
for ins in instances:
masked_words_num += len(ins[1])
if (masked_words_num == 0):
continue
for ins in instances:
src.append(ins[0])
tgt_mlm.append(([0] * len(ins[0])))
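# record the MLM target token id at each masked position; unmasked positions keep label 0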
for mask in ins[1]:
tgt_mlm[(- 1)][mask[0]] = mask[1]
is_next.append(ins[2])
seg.append(((([1] * ins[3][0]) + ([2] * (ins[3][1] - ins[3][0]))) + ([PAD_ID] * (len(ins[0]) - ins[3][1]))))
(yield (torch.LongTensor(src), torch.LongTensor(tgt_mlm), torch.LongTensor(is_next), torch.LongTensor(seg))) |
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2], last_phase=True)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.out_dim = (512 * block.expansion)
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, last_phase=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
if last_phase:
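# final stage: build all but the last block as usual, then flag the last block with last=True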
for _ in range(1, (blocks - 1)):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer, last=True))
else:
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x_1 = self.layer1(x)
x_2 = self.layer2(x_1)
x_3 = self.layer3(x_2)
x_4 = self.layer4(x_3)
pooled = self.avgpool(x_4)
features = torch.flatten(pooled, 1)
return {'fmaps': [x_1, x_2, x_3, x_4], 'features': features}
def forward(self, x):
return self._forward_impl(x)
def last_conv(self):
if hasattr(self.layer4[(- 1)], 'conv3'):
return self.layer4[(- 1)].conv3
else:
return self.layer4[(- 1)].conv2 |
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
config = CanineConfig()
model = CanineModel(config)
model.eval()
print(f'Building PyTorch model from configuration: {config}')
load_tf_weights_in_canine(model, config, tf_checkpoint_path)
print(f'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(pytorch_dump_path)
tokenizer = CanineTokenizer()
print(f'Save tokenizer files to {pytorch_dump_path}')
tokenizer.save_pretrained(pytorch_dump_path) |
@pytest.mark.parametrize('matrix_server_count', [2])
@pytest.mark.parametrize('number_of_transports', [2])
def test_matrix_message_sync(matrix_transports):
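# send 5 messages while both transports are running, then 5 more while transport1 is stopped, and assert they are delivered once transport1 restarts (message sync)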
(transport0, transport1) = matrix_transports
transport0_messages = set()
transport1_messages = set()
transport0_message_handler = MessageHandler(transport0_messages)
transport1_message_handler = MessageHandler(transport1_messages)
raiden_service0 = MockRaidenService(transport0_message_handler)
raiden_service1 = MockRaidenService(transport1_message_handler)
raiden_service1.handle_and_track_state_changes = MagicMock()
transport0.start(raiden_service0, None)
transport1.start(raiden_service1, None)
queue_identifier = QueueIdentifier(recipient=transport1._raiden_service.address, canonical_identifier=factories.UNIT_CANONICAL_ID)
raiden0_queues = views.get_all_messagequeues(views.state_from_raiden(raiden_service0))
raiden0_queues[queue_identifier] = []
for i in range(5):
message = Processed(message_identifier=MessageID(i), signature=EMPTY_SIGNATURE)
raiden0_queues[queue_identifier].append(message)
transport0._raiden_service.sign(message)
transport0.send_async([MessagesQueue(queue_identifier, [(message, transport1.address_metadata)])])
with Timeout(TIMEOUT_MESSAGE_RECEIVE):
while (not (len(transport0_messages) == 5)):
gevent.sleep(0.1)
while (not (len(transport1_messages) == 5)):
gevent.sleep(0.1)
for i in range(5):
assert any(((m.message_identifier == i) for m in transport1_messages))
for i in range(5):
assert any(((m.delivered_message_identifier == i) for m in transport0_messages))
raiden0_queues[queue_identifier] = []
transport1.stop()
for i in range(10, 15):
message = Processed(message_identifier=MessageID(i), signature=EMPTY_SIGNATURE)
raiden0_queues[queue_identifier].append(message)
transport0._raiden_service.sign(message)
transport0.send_async([MessagesQueue(queue_identifier, [(message, transport1.address_metadata)])])
transport1.start(transport1._raiden_service, None)
with gevent.Timeout(TIMEOUT_MESSAGE_RECEIVE):
while (len(transport1_messages) != 10):
gevent.sleep(0.1)
while (len(transport0_messages) != 10):
gevent.sleep(0.1)
for i in range(10, 15):
assert any(((m.message_identifier == i) for m in transport1_messages))
for i in range(10, 15):
assert any(((m.delivered_message_identifier == i) for m in transport0_messages)) |
@plugin.step(id='vmware-node-reboot', name='Reboot VMware VM', description='Reboot the node(s) by starting the VMware VM on which the node is configured', outputs={'success': NodeScenarioSuccessOutput, 'error': NodeScenarioErrorOutput})
def node_reboot(cfg: NodeScenarioConfig) -> typing.Tuple[(str, typing.Union[(NodeScenarioSuccessOutput, NodeScenarioErrorOutput)])]:
with kube_helper.setup_kubernetes(None) as cli:
vsphere = vSphere(verify=cfg.verify_session)
core_v1 = client.CoreV1Api(cli)
watch_resource = watch.Watch()
node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.REBOOT, core_v1)
nodes_rebooted = {}
for name in node_list:
try:
for _ in range(cfg.runs):
logging.info('Starting node_reboot_scenario injection')
logging.info('Rebooting the node %s ', name)
vsphere.reboot_instances(name)
if (not cfg.skip_openshift_checks):
kube_helper.wait_for_unknown_status(name, cfg.timeout, watch_resource, core_v1)
kube_helper.wait_for_ready_status(name, cfg.timeout, watch_resource, core_v1)
nodes_rebooted[int(time.time_ns())] = Node(name=name)
logging.info('Node with instance ID: %s has rebooted successfully', name)
logging.info('node_reboot_scenario has been successfully injected!')
except Exception as e:
logging.error('Failed to reboot node instance. Test Failed')
logging.error('node_reboot_scenario injection failed! Error was: %s', str(e))
return ('error', NodeScenarioErrorOutput(format_exc(), kube_helper.Actions.REBOOT))
return ('success', NodeScenarioSuccessOutput(nodes_rebooted, kube_helper.Actions.REBOOT)) |
def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
assert (weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0])
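# per-sample hinge losses (averaged over C, H, W), then a weighted mean over the batch using the exemplar weights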
loss_real = torch.mean(F.relu((1.0 - logits_real)), dim=[1, 2, 3])
loss_fake = torch.mean(F.relu((1.0 + logits_fake)), dim=[1, 2, 3])
loss_real = ((weights * loss_real).sum() / weights.sum())
loss_fake = ((weights * loss_fake).sum() / weights.sum())
d_loss = (0.5 * (loss_real + loss_fake))
return d_loss |
def download_and_unzip_post(config, rootpath, hot_run=True, disable_progress=False):
resource = config['category']
destination = os.path.relpath(config['destination'])
postdata = config['urls']['post']
url = postdata.pop('url')
file_path = os.path.join(destination, os.path.basename(url))
if hot_run:
if os.path.exists(file_path):
os.remove(file_path)
logger.info(f"Downloading resource '{resource}' from cloud '{url}'.")
progress_retrieve(url, file_path, data=postdata, disable_progress=disable_progress)
if config.get('unzip', False):
with ZipFile(file_path, 'r') as zipfile:
zipfile.extractall(destination)
os.remove(file_path)
logger.info(f"Downloaded resource '{resource}' from cloud '{url}'.")
return True |
class Object(object):
def __init__(self, tagname, inamevals):
self._tagname = tagname
self._data = []
for kv in inamevals:
self._data.append(list(kv))
def inamevals_to_save(self):
for (k, v) in self._data:
(yield (k, to_xstr(v)))
def inamevals(self):
for (k, v) in self._data:
(yield (k, v))
def __iter__(self):
for (k, _) in self._data:
(yield k)
def rename_attribute(self, old, new):
for kv in self._data:
if (kv[0] == old):
kv[0] = new
def drop_attribute(self, k):
self._data = [kv for kv in self._data if (kv[0] != k)]
def replace(self, other):
self._tagname = other._tagname
self._data = copy.deepcopy(other._data)
def __setitem__(self, k, v):
for kv in self._data:
if (kv[0] == k):
kv[1] = v
return
self._data.append([k, v])
def __getitem__(self, item):
for kv in self._data:
if (kv[0] == item):
return kv[1]
raise KeyError(item)
def get(self, *args):
if (len(args) == 1):
return self.__getitem__(args[0])
else:
try:
return self.__getitem__(args[0])
except KeyError:
return args[1] |
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
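# NOTE: `tokenizer` is not an argument here; in the original transformers code it is a module-level global set by the multiprocessing initializer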
features = []
if (is_training and (not example.is_impossible)):
start_position = example.start_position
end_position = example.end_position
actual_text = ' '.join(example.doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = ' '.join(whitespace_tokenize(example.answer_text))
if (actual_text.find(cleaned_answer_text) == (- 1)):
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
return []
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
if (is_training and (not example.is_impossible)):
tok_start_position = orig_to_tok_index[example.start_position]
if (example.end_position < (len(example.doc_tokens) - 1)):
tok_end_position = (orig_to_tok_index[(example.end_position + 1)] - 1)
else:
tok_end_position = (len(all_doc_tokens) - 1)
(tok_start_position, tok_end_position) = _improve_answer_span(all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text)
spans = []
truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length)
sequence_added_tokens = (((tokenizer.max_len - tokenizer.max_len_single_sentence) + 1) if ('roberta' in str(type(tokenizer))) else (tokenizer.max_len - tokenizer.max_len_single_sentence))
sequence_pair_added_tokens = (tokenizer.max_len - tokenizer.max_len_sentences_pair)
span_doc_tokens = all_doc_tokens
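# slide a window over the document: each span pairs the (truncated) query with a chunk of document tokens, advancing by doc_stride until the whole document is covered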
while ((len(spans) * doc_stride) < len(all_doc_tokens)):
encoded_dict = tokenizer.encode_plus((truncated_query if (tokenizer.padding_side == 'right') else span_doc_tokens), (span_doc_tokens if (tokenizer.padding_side == 'right') else truncated_query), max_length=max_seq_length, return_overflowing_tokens=True, pad_to_max_length=True, stride=(((max_seq_length - doc_stride) - len(truncated_query)) - sequence_pair_added_tokens), truncation_strategy=('only_second' if (tokenizer.padding_side == 'right') else 'only_first'))
paragraph_len = min((len(all_doc_tokens) - (len(spans) * doc_stride)), ((max_seq_length - len(truncated_query)) - sequence_pair_added_tokens))
if (tokenizer.pad_token_id in encoded_dict['input_ids']):
non_padded_ids = encoded_dict['input_ids'][:encoded_dict['input_ids'].index(tokenizer.pad_token_id)]
else:
non_padded_ids = encoded_dict['input_ids']
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
for i in range(paragraph_len):
index = (((len(truncated_query) + sequence_added_tokens) + i) if (tokenizer.padding_side == 'right') else i)
token_to_orig_map[index] = tok_to_orig_index[((len(spans) * doc_stride) + i)]
encoded_dict['paragraph_len'] = paragraph_len
encoded_dict['tokens'] = tokens
encoded_dict['token_to_orig_map'] = token_to_orig_map
encoded_dict['truncated_query_with_special_tokens_length'] = (len(truncated_query) + sequence_added_tokens)
encoded_dict['token_is_max_context'] = {}
encoded_dict['start'] = (len(spans) * doc_stride)
encoded_dict['length'] = paragraph_len
spans.append(encoded_dict)
if ('overflowing_tokens' not in encoded_dict):
break
span_doc_tokens = encoded_dict['overflowing_tokens']
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]['paragraph_len']):
is_max_context = _new_check_is_max_context(spans, doc_span_index, ((doc_span_index * doc_stride) + j))
index = (j if (tokenizer.padding_side == 'left') else (spans[doc_span_index]['truncated_query_with_special_tokens_length'] + j))
spans[doc_span_index]['token_is_max_context'][index] = is_max_context
for span in spans:
cls_index = span['input_ids'].index(tokenizer.cls_token_id)
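# p_mask: 1 for tokens that cannot be part of the answer (the query and special tokens), 0 for context tokens and the CLS token (kept for the no-answer prediction)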
p_mask = np.array(span['token_type_ids'])
p_mask = np.minimum(p_mask, 1)
if (tokenizer.padding_side == 'right'):
p_mask = (1 - p_mask)
p_mask[np.where((np.array(span['input_ids']) == tokenizer.sep_token_id))[0]] = 1
p_mask[cls_index] = 0
span_is_impossible = example.is_impossible
start_position = 0
end_position = 0
if (is_training and (not span_is_impossible)):
doc_start = span['start']
doc_end = ((span['start'] + span['length']) - 1)
out_of_span = False
if (not ((tok_start_position >= doc_start) and (tok_end_position <= doc_end))):
out_of_span = True
if out_of_span:
start_position = cls_index
end_position = cls_index
span_is_impossible = True
else:
if (tokenizer.padding_side == 'left'):
doc_offset = 0
else:
doc_offset = (len(truncated_query) + sequence_added_tokens)
start_position = ((tok_start_position - doc_start) + doc_offset)
end_position = ((tok_end_position - doc_start) + doc_offset)
features.append(SquadFeatures(span['input_ids'], span['attention_mask'], span['token_type_ids'], cls_index, p_mask.tolist(), example_index=0, unique_id=0, paragraph_len=span['paragraph_len'], token_is_max_context=span['token_is_max_context'], tokens=span['tokens'], token_to_orig_map=span['token_to_orig_map'], start_position=start_position, end_position=end_position))
return features |
def write_metadata(metadata, out_dir):
with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
for m in metadata:
f.write(('|'.join([str(x) for x in m]) + '\n'))
frames = sum([m[2] for m in metadata])
hours = ((frames * hparams.frame_shift_ms) / (3600 * 1000))
print(('Wrote %d utterances, %d frames (%.2f hours)' % (len(metadata), frames, hours)))
print(('Max input length: %d' % max((len(m[3]) for m in metadata))))
print(('Max output length: %d' % max((m[2] for m in metadata)))) |
class PlaylistModel(TrackCurrentModel):
order: Order
sourced = False
def __init__(self, order_cls: type[Order]=OrderInOrder):
super().__init__(object)
self.order = order_cls()
def next(self):
iter_ = self.current_iter
print_d(('Using %s.next_explicit() to get next song' % self.order))
self.current_iter = self.order.next_explicit(self, iter_)
def next_ended(self):
iter_ = self.current_iter
print_d(('Using %s.next_implicit() to get next song' % self.order))
self.current_iter = self.order.next_implicit(self, iter_)
def previous(self):
iter_ = self.current_iter
self.current_iter = self.order.previous_explicit(self, iter_)
def go_to(self, song_or_iter, explicit=False, source=None):
assert ((source is None) or (source is self))
print_d(('Told to go to %r' % getattr(song_or_iter, 'key', song_or_iter)))
iter_ = None
if isinstance(song_or_iter, Gtk.TreeIter):
iter_ = song_or_iter
elif (song_or_iter is not None):
self.last_current = song_or_iter
iter_ = self.find(song_or_iter)
if explicit:
self.current_iter = self.order.set_explicit(self, iter_)
else:
self.current_iter = self.order.set_implicit(self, iter_)
return self.current_iter
def set(self, songs: Sequence[Any]):
self.order.reset(self)
super().set(songs)
def remove(self, iter_):
self.order.reset(self)
super().remove(iter_)
def clear(self):
self.order.reset(self)
super().clear()
def reset(self):
self.go_to(None)
self.order.reset(self)
if (not self.is_empty()):
self.next() |
def test_tags_disabled_namespace(v2_protocol, basic_images, liveserver_session, app_reloader, liveserver, registry_server_executor):
credentials = ('devtable', 'password')
registry_server_executor.on(liveserver).disable_namespace('buynlarge')
v2_protocol.tags(liveserver_session, credentials=credentials, namespace='buynlarge', repo_name='orgrepo', expected_failure=Failures.NAMESPACE_DISABLED) |
class AsmCmdGotoLinked(AsmCmdBase):
_id = 20
_menuText = QT_TRANSLATE_NOOP('asm3', 'Select linked object')
_tooltip = QT_TRANSLATE_NOOP('asm3', 'Select the linked object')
_accel = 'A, G'
_toolbarName = ''
@classmethod
def getIconName(cls):
return 'LinkSelect'
@classmethod
def Activated(cls):
from .assembly import isTypeOf, AsmElement, AsmElementLink, AsmElementGroup
sels = FreeCADGui.Selection.getSelectionEx('', 0, True)
if (not sels):
return
if (not sels[0].SubElementNames):
FreeCADGui.runCommand('Std_LinkSelectLinked')
return
subname = sels[0].SubElementNames[0]
obj = sels[0].Object.getSubObject(subname, retType=1)
if (not isTypeOf(obj, (AsmElementLink, AsmElement))):
FreeCADGui.runCommand('Std_LinkSelectLinked')
return
import Part
from . import assembly
subname = assembly.flattenLastSubname(sels[0].Object, subname)
subname = Part.splitSubname(subname)[0].split('.')
link = obj.LinkedObject
if isinstance(link, tuple):
linkSub = link[1]
link = link[0]
else:
linkSub = (link.Name + '.')
link = obj.Proxy.getAssembly().getElementGroup()
if isTypeOf(obj, AsmElementLink):
subname = subname[:(- 4)]
if (not isTypeOf(link, AsmElementGroup)):
subname.append('2')
else:
subname = subname[:(- 2)]
subname[(- 1)] = '2'
subname.append(link.Name)
prefix = subname
linkSub = linkSub.split('.')
subname = '.'.join((prefix + linkSub))
sobj = sels[0].Object.getSubObject(subname, retType=1)
if (not sobj):
logger.error('Cannot find sub object {}.{}', objName(sels[0].Object), subname)
return
if ((not linkSub[(- 1)]) and linkSub[(- 2)].startswith('$')):
linkSub[(- 2)] = sobj.Name
subname = '.'.join((prefix + linkSub))
FreeCADGui.Selection.pushSelStack()
FreeCADGui.Selection.clearSelection()
FreeCADGui.Selection.addSelection(sels[0].Object, subname)
FreeCADGui.Selection.pushSelStack()
FreeCADGui.runCommand('Std_TreeSelection')
@classmethod
def IsActive(cls):
return _isCommandActive('Std_LinkSelectLinked') |
def trans_mat_all_days(animal_day_transmats, animal_id, dpi=200, figsize=(8, 4)):
day_transmats = animal_day_transmats[animal_id]
ncol = len(day_transmats)
(fig, ax) = plt.subplots(1, ncol, dpi=dpi, figsize=figsize)
ax = ax.ravel()
for (ind, (day, trans_mat_tup)) in enumerate(day_transmats.items()):
if (ind == (len(day_transmats) - 1)):
colorbar = True
else:
colorbar = False
(mat, states) = (trans_mat_tup.matrix, trans_mat_tup.states)
trans_mat(mat, states, ax=ax[ind], colorbar=colorbar)
ax[ind].set_title(day) |
def find_span(sentence, search_text, start=0):
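# return the span of `sentence` (spaCy-style tokens) whose text matches `search_text` case-insensitively, scanning from token index `start`; returns None if no match is found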
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i:].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i:]:
end_idx = (next_tok.idx + len(next_tok.text))
if ((end_idx - start_idx) == len_to_consume):
span = sentence[tok.i:(next_tok.i + 1)]
return span
return None |
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train() |
class ProfilerTracer(torch.fx.Tracer):
def trace(self, root, concrete_args=None):
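# temporarily patch record_function's __enter__/__exit__ so profiler ranges become profiler._record_function_enter/_exit nodes in the traced FX graph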
orig_record_function_enter = torch.autograd.profiler.record_function.__enter__
orig_record_function_exit = torch.autograd.profiler.record_function.__exit__
def fake_profiler_enter(_self):
nonlocal self
handle_proxy = self.create_proxy(kind='call_function', target=torch.ops.profiler._record_function_enter, args=(_self.name,), kwargs={})
assert (getattr(_self, '_fx_profiler_ctx', None) is None)
setattr(_self, '_fx_profiler_ctx', handle_proxy)
return handle_proxy
def fake_profiler_exit(_self, exc_type, exc_value, traceback):
assert hasattr(_self, '_fx_profiler_ctx')
handle_proxy = _self._fx_profiler_ctx
torch.ops.profiler._record_function_exit(handle_proxy)
setattr(_self, '_fx_profiler_ctx', None)
torch.autograd.profiler.record_function.__enter__ = fake_profiler_enter
torch.autograd.profiler.record_function.__exit__ = fake_profiler_exit
try:
return super().trace(root, concrete_args)
finally:
torch.autograd.profiler.record_function.__enter__ = orig_record_function_enter
torch.autograd.profiler.record_function.__exit__ = orig_record_function_exit |
@register_optimizer('rmsprop_tf')
class RMSPropTF(ClassyOptimizer):
def __init__(self, lr: float=0.1, momentum: float=0, weight_decay: float=0, alpha: float=0.99, eps: float=1e-08, centered: bool=False) -> None:
super().__init__()
self._lr = lr
self._momentum = momentum
self._weight_decay = weight_decay
self._alpha = alpha
self._eps = eps
self._centered = centered
def prepare(self, param_groups):
self.optimizer = RMSpropTFOptimizer(param_groups, lr=self._lr, momentum=self._momentum, weight_decay=self._weight_decay, alpha=self._alpha, eps=self._eps, centered=self._centered)
@classmethod
def from_config(cls, config: Dict[(str, Any)]) -> 'RMSPropTF':
config.setdefault('lr', 0.1)
config.setdefault('momentum', 0.0)
config.setdefault('weight_decay', 0.0)
config.setdefault('alpha', 0.99)
config.setdefault('eps', 1e-08)
config.setdefault('centered', False)
for key in ['momentum', 'alpha']:
assert ((config[key] >= 0.0) and (config[key] < 1.0) and (type(config[key]) == float)), f"Config must contain a '{key}' in [0, 1) for RMSPropTF optimizer"
assert is_pos_float(config['eps']), f"Config must contain a positive 'eps' for RMSPropTF optimizer"
assert isinstance(config['centered'], bool), "Config must contain a boolean 'centered' param for RMSPropTF optimizer"
return cls(lr=config['lr'], momentum=config['momentum'], weight_decay=config['weight_decay'], alpha=config['alpha'], eps=config['eps'], centered=config['centered']) |
def test_raises_if_no_generic_params_supplied(converter: Union[(Converter, BaseConverter)]):
data = TClass(1, 'a')
with pytest.raises(StructureHandlerNotFoundError, match='Unsupported type: ~T. Register a structure hook for it.|Missing type for generic argument T, specify it when structuring.') as exc:
converter.structure(asdict(data), TClass)
assert (exc.value.type_ is T) |
def load_runtime_vs_ns(fname, xlabel='Sample size $n$', show_legend=True, xscale='linear', yscale='linear'):
func_xvalues = (lambda agg_results: agg_results['ns'])
ex = 1
def func_title(agg_results):
(repeats, _, n_methods) = agg_results['job_results'].shape
alpha = agg_results['alpha']
title = ('%s. %d trials. $\\alpha$ = %.2g.' % (agg_results['prob_label'], repeats, alpha))
return title
results = plot.plot_runtime(ex, fname, func_xvalues, xlabel=xlabel, func_title=func_title)
plt.title('')
plt.gca().legend(loc='best').set_visible(show_legend)
if show_legend:
plt.legend(bbox_to_anchor=(1.0, 1.05))
if (xscale is not None):
plt.xscale(xscale)
if (yscale is not None):
plt.yscale(yscale)
return results |
@route(bp, '/testTree', methods=['GET'])
def test_tree():
res = ResMsg()
data = [{'id': 1, 'father_id': None, 'name': '01'}, {'id': 2, 'father_id': 1, 'name': '0101'}, {'id': 3, 'father_id': 1, 'name': '0102'}, {'id': 4, 'father_id': 1, 'name': '0103'}, {'id': 5, 'father_id': 2, 'name': '010101'}, {'id': 6, 'father_id': 2, 'name': '010102'}, {'id': 7, 'father_id': 2, 'name': '010103'}, {'id': 8, 'father_id': 3, 'name': '010201'}, {'id': 9, 'father_id': 4, 'name': '010301'}, {'id': 10, 'father_id': 9, 'name': ''}, {'id': 11, 'father_id': 9, 'name': ''}]
new_tree = Tree(data=data)
data = new_tree.build_tree()
res.update(data=data)
return res.data |
def _test():
import torch
pretrained = False
models = [condensenet74_c4_g4, condensenet74_c8_g8]
for model in models:
net = model(pretrained=pretrained)
net.eval()
weight_count = _calc_width(net)
print('m={}, {}'.format(model.__name__, weight_count))
assert ((model != condensenet74_c4_g4) or (weight_count == 4773944))
assert ((model != condensenet74_c8_g8) or (weight_count == 2935416))
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000)) |
class BTOOLS_OT_add_balcony(bpy.types.Operator):
bl_idname = 'btools.add_balcony'
bl_label = 'Add Balcony'
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
props: bpy.props.PointerProperty(type=BalconyProperty)
@classmethod
def poll(cls, context):
return ((context.object is not None) and (context.mode == 'EDIT_MESH'))
def execute(self, context):
self.props.init(get_selected_face_dimensions(context))
return build(context, self.props)
def draw(self, context):
self.props.draw(context, self.layout) |
def resamp(x, type, shift, extmod):
if (shift is None):
shift = 1
if (extmod is None):
extmod = 'per'
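# types 0 and 1 resample x directly; types 2 and 3 apply the same operation to the transpose of x and transpose the result back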
if ((type == 0) or (type == 1)):
y = resampc(x, type, shift, extmod)
elif ((type == 2) or (type == 3)):
y = resampc(x.T, (type - 2), shift, extmod).T
else:
# raising here avoids returning an unbound `y` when an invalid type is passed
raise ValueError('The second input (type) must be one of {0, 1, 2, 3}')
return y |
def test_envunset_doesnt_exist():
try:
del os.environ['ARB_DELETE_SNARK']
except KeyError:
pass
context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'env': {'unset': ['ARB_DELETE_SNARK']}})
assert pypyr.steps.env.env_unset(context)
assert ('ARB_DELETE_SNARK' not in os.environ) |
class MultiCorpusSampledDataset(FairseqDataset):
def __init__(self, datasets: Dict[(str, FairseqDataset)], sampling_func: Callable[([List], int)]=None):
super().__init__()
assert isinstance(datasets, OrderedDict)
self.datasets = datasets
if (sampling_func is None):
sampling_func = uniform_sampler
self.sampling_func = sampling_func
self.total_num_instances = 0
for (_, dataset) in datasets.items():
assert isinstance(dataset, FairseqDataset)
self.total_num_instances += len(dataset)
self._ordered_indices = None
def __len__(self):
return self.total_num_instances
def ordered_indices(self):
if (self._ordered_indices is None):
self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
return np.arange(len(self))
def _map_index_to_dataset(self, key: int, index: int):
assert (self._ordered_indices is not None), 'Must call MultiCorpusSampledDataset.ordered_indices() first'
mapped_index = (index % len(self.datasets[key]))
return self._ordered_indices[key][mapped_index]
def __getitem__(self, index: int):
return OrderedDict([(key, dataset[self._map_index_to_dataset(key, index)]) for (key, dataset) in self.datasets.items()])
def collater(self, samples: List[Dict]):
if (len(samples) == 0):
return None
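# choose a single constituent dataset for this batch via sampling_func and collate every sample with that dataset's own collater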
selected_key = self.sampling_func(list(self.datasets.keys()))
selected_samples = [sample[selected_key] for sample in samples]
return self.datasets[selected_key].collater(selected_samples)
def num_tokens(self, index: int):
return max((dataset.num_tokens(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))
def size(self, index: int):
return max((dataset.size(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))
def supports_prefetch(self):
return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))
def prefetch(self, indices):
for (key, dataset) in self.datasets.items():
dataset.prefetch([self._map_index_to_dataset(key, index) for index in indices]) |
def main():
args = parse_args()
cfg_path = args.config
cfg = Config.fromfile(cfg_path)
(_, fullname) = os.path.split(cfg_path)
(fname, ext) = os.path.splitext(fullname)
root_workdir = cfg.pop('root_workdir')
workdir = os.path.join(root_workdir, fname)
os.makedirs(workdir, exist_ok=True)
test_cfg = cfg['test']
inference_cfg = cfg['inference']
common_cfg = cfg['common']
common_cfg['workdir'] = workdir
common_cfg['distribute'] = args.distribute
runner = TestRunner(test_cfg, inference_cfg, common_cfg)
runner.load_checkpoint(args.checkpoint)
runner() |
def test_event_filter_for_payments():
secret = factories.make_secret()
identifier = PaymentID(1)
target = TargetAddress(factories.make_address())
event1 = EventPaymentSentSuccess(token_network_registry_address=UNIT_TOKEN_NETWORK_REGISTRY_ADDRESS, token_network_address=UNIT_TOKEN_NETWORK_ADDRESS, identifier=identifier, amount=PaymentAmount(5), target=target, secret=secret, route=[])
assert event_filter_for_payments(event=event1, partner_address=None)
assert event_filter_for_payments(event=event1, partner_address=Address(target))
assert (not event_filter_for_payments(event=event1, partner_address=factories.make_address()))
initiator = InitiatorAddress(factories.make_address())
event2 = EventPaymentReceivedSuccess(token_network_registry_address=UNIT_TOKEN_NETWORK_REGISTRY_ADDRESS, token_network_address=UNIT_TOKEN_NETWORK_ADDRESS, identifier=identifier, amount=PaymentAmount(5), initiator=initiator)
assert event_filter_for_payments(event=event2, partner_address=None)
assert event_filter_for_payments(event=event2, partner_address=Address(initiator))
assert (not event_filter_for_payments(event=event2, partner_address=factories.make_address()))
event3 = EventPaymentSentFailed(token_network_registry_address=UNIT_TOKEN_NETWORK_REGISTRY_ADDRESS, token_network_address=UNIT_TOKEN_NETWORK_ADDRESS, identifier=identifier, target=target, reason='whatever')
assert event_filter_for_payments(event=event3, partner_address=None)
assert event_filter_for_payments(event=event3, partner_address=Address(target))
assert (not event_filter_for_payments(event=event3, partner_address=factories.make_address())) |