code stringlengths 101 5.91M |
|---|
def _require_without_generator(value, name):
if (value is not None):
return value
else:
raise ValueError(f"{name}: expected a value for 'n_samples', 'input_dim', and 'multiplicity' when 'generator' is not provided, found {name}=None.") |
# NOTE(review): the decorator's leading '@' was lost in extraction, turning
# it into a no-op expression statement; restored so the taichi test harness
# actually configures this test.
@_utils.test(require=ti.extension.quant, debug=True)
def test_1D_quant_array_negative():
    """A 7-bit quantized int field packed into 32-bit words must round-trip
    small negative values."""
    N = 4
    qi7 = ti.types.quant.int(7)  # signed 7-bit quantized integer type
    x = ti.field(dtype=qi7)
    # Pack N quantized ints along axis i into a single 32-bit word.
    ti.root.quant_array(ti.i, N, max_num_bits=32).place(x)

    def assign():
        for i in range(N):
            assert x[i] == 0  # fields are zero-initialized
            x[i] = -i
            assert x[i] == -i

    assign()
def _model_to_graph(model, args, verbose=False, input_names=None, output_names=None, operator_export_type=OperatorExportTypes.ONNX, example_outputs=None, _retain_param_name=False, do_constant_folding=True, _disable_torch_constant_prop=False, fixed_batch_size=False, training=None, use_new_jit_passes=False, dynamic_axes=None):
    """Convert *model* into an ONNX-exportable JIT graph.

    Returns ``(graph, params_dict, torch_out)``: the optimized JIT graph, a
    mapping from parameter debug-names to parameter tensors, and the output
    of running the traced model (``None`` for script modules).
    """
    # Deferred import: the opset version is module-global state set earlier
    # in the export call chain.
    from torch.onnx.symbolic_helper import _export_onnx_opset_version
    # Normalize single-tensor inputs/outputs to sequences.
    if isinstance(args, torch.Tensor):
        args = (args,)
    if isinstance(example_outputs, torch.Tensor):
        example_outputs = [example_outputs]
    (graph, params, torch_out) = _create_jit_graph(model, args, _retain_param_name, use_new_jit_passes)
    # Graph inputs are the real inputs followed by parameters, so the
    # trailing len(params) debug names identify the parameters.
    input_and_param_names = [val.debugName() for val in graph.inputs()]
    param_names = input_and_param_names[(len(input_and_param_names) - len(params)):]
    params_dict = dict(zip(param_names, params))
    graph = _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop=_disable_torch_constant_prop, fixed_batch_size=fixed_batch_size, params_dict=params_dict, use_new_jit_passes=use_new_jit_passes, dynamic_axes=dynamic_axes, input_names=input_names)
    from torch.onnx.symbolic_helper import _onnx_shape_inference
    if (isinstance(model, torch.jit.ScriptModule) or isinstance(model, torch.jit.ScriptFunction)):
        # Script modules are not executed here, so output shapes must come
        # from the user-supplied example outputs.
        assert (example_outputs is not None), 'example_outputs must be provided when exporting a ScriptModule or ScriptFunction.'
        (out_vars, _) = torch.jit._flatten(tuple(example_outputs))
        torch._C._jit_pass_onnx_assign_output_shape(graph, out_vars, _onnx_shape_inference)
    if (torch_out is not None):
        # Traced path: shapes come from the actual trace outputs.
        (output_tensors, _) = torch._C._jit_flatten(torch_out)
        torch._C._jit_pass_onnx_assign_output_shape(graph, output_tensors, _onnx_shape_inference)
    _set_input_and_output_names(graph, input_names, output_names)
    (flatten_args, _) = torch._C._jit_flatten(args)
    # Sanity check: the optimized graph still has one input per flattened
    # argument plus one per parameter.
    assert ((len(params) + len(flatten_args)) == sum((1 for _ in graph.inputs())))
    # Recompute the name -> parameter mapping: _optimize_graph may rename inputs.
    input_and_param_names = [val.debugName() for val in graph.inputs()]
    param_names = input_and_param_names[(len(input_and_param_names) - len(params)):]
    params_dict = dict(zip(param_names, params))
    # NOTE(review): is_originally_training is not defined in this view —
    # presumably a module-level flag captured before export; confirm.
    if ((training is None) or (training == TrainingMode.EVAL) or ((training == TrainingMode.PRESERVE) and (not is_originally_training))):
        # Eval-mode exports can fold training-only patterns (e.g. dropout).
        params_dict = torch._C._jit_pass_onnx_eval_peephole(graph, params_dict)
    if (do_constant_folding and (_export_onnx_opset_version in torch.onnx.constant_folding_opset_versions)):
        params_dict = torch._C._jit_pass_onnx_constant_fold(graph, params_dict, _export_onnx_opset_version)
        torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
    params_dict = torch._C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
    if (_export_onnx_opset_version < 9):
        # Opsets < 9 only support float constants.
        torch._C._jit_pass_onnx_cast_all_constant_to_floating(graph)
    if verbose:
        print(graph)
    params_dict = torch._C._jit_pass_filter_non_tensor_arguments(params_dict)
    torch._C._jit_decay_packed_param_input_types(graph)
    return (graph, params_dict, torch_out)
def __build_pyramid(models, features):
    """Build one model pyramid per (name, model) pair in *models*."""
    pyramids = []
    for model_name, model in models:
        pyramids.append(__build_model_pyramid(model_name, model, features))
    return pyramids
# NOTE(review): the '@pytest.mark' prefix was lost in extraction, leaving a
# bare '.parametrize(...)' (a syntax error); restored.
@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_knn_graph_large(dtype, device):
    """knn_graph must agree with a scipy cKDTree ground truth on 1000 points."""
    x = torch.randn(1000, 3, dtype=dtype, device=device)
    # loop=True keeps self-loops, matching cKDTree.query which returns each
    # point as its own nearest neighbor.
    edge_index = knn_graph(x, k=5, flow='target_to_source', loop=True)
    tree = scipy.spatial.cKDTree(x.cpu().numpy())
    (_, col) = tree.query(x.cpu(), k=5)
    truth = set([(i, j) for (i, ns) in enumerate(col) for j in ns])
    assert (to_set(edge_index.cpu()) == truth)
def calculate_bow_node_edge_feats(data_write_dir, rel2idx):
    """Compute per-object bag-of-words features over edge relation names for
    every scan pickle under ``data_write_dir/data`` and write them back.

    rel2idx maps relation name -> index; each object's feature vector counts
    the relation names of the edges it participates in (as edge[0]).
    """
    print('[INFO] Starting BOW Feature Calculation For Node Edge Features...')
    scan_ids = os.listdir(osp.join(data_write_dir, 'data'))
    # Strip the 4-character '.pkl' extension to recover scan ids.
    scan_ids = sorted([scan_id[:(- 4)] for scan_id in scan_ids])
    idx_2_rel = {idx: relation_name for (relation_name, idx) in rel2idx.items()}
    # Vocabulary: one slot per relation name, in rel2idx key order.
    wordToIx = {}
    for key in rel2idx.keys():
        wordToIx[key] = len(wordToIx)
    print('[INFO] Size of Node Edge Vocabulary - {}'.format(len(wordToIx)))
    print('[INFO] Generated Vocabulary, Calculating BOW Features...')
    for scan_id in scan_ids:
        data_dict_filename = osp.join(data_write_dir, 'data', '{}.pkl'.format(scan_id))
        data_dict = common.load_pkl_data(data_dict_filename)
        # NOTE(review): this assignment is dead — 'edge' is reassigned inside
        # the loop below before any use.
        edge = data_dict['edges']
        objects_ids = data_dict['objects_id']
        triples = data_dict['triples']
        edges = data_dict['edges']
        # Relation-name list per object; stays None for objects with no edges.
        entities_edge_names = ([None] * len(objects_ids))
        for idx in range(len(edges)):
            edge = edges[idx]
            entity_idx = edge[0]
            # triples[idx][2] holds the relation index for this edge.
            rel_name = idx_2_rel[triples[idx][2]]
            if (rel_name == 'inside'):
                # NOTE(review): looks like a leftover debug print for the
                # 'inside' relation; kept as-is.
                print(scan_id)
            if (entities_edge_names[entity_idx] is None):
                entities_edge_names[entity_idx] = [rel_name]
            else:
                entities_edge_names[entity_idx].append(rel_name)
        # Stack one BOW row per object (order matches objects_ids).
        entity_edge_feats = None
        for entity_edge_names in entities_edge_names:
            entity_edge_feat = np.expand_dims(make_bow_vector(entity_edge_names, wordToIx), 0)
            entity_edge_feats = (entity_edge_feat if (entity_edge_feats is None) else np.concatenate((entity_edge_feats, entity_edge_feat), axis=0))
        data_dict['bow_vec_object_edge_feats'] = entity_edge_feats
        assert (data_dict['bow_vec_object_edge_feats'].shape[0] == data_dict['objects_count'])
        common.write_pkl_data(data_dict, data_dict_filename)
    print('[INFO] Completed BOW Feature Calculation For Node Edge Features.')
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype):
    """Return a random square matrix (or batch) with distinct, nonzero
    singular values k/(n+1) for k = 1..n — hence guaranteed full rank.

    *shape* must be square in its last two dimensions.
    """
    assert (shape[(- 1)] == shape[(- 2)])
    t = make_tensor(shape, device=device, dtype=dtype)
    # u and vh are orthonormal; we keep them and replace the singular values.
    (u, _, vh) = torch.linalg.svd(t, full_matrices=False)
    # Singular values are real even for complex inputs.
    real_dtype = (t.real.dtype if t.dtype.is_complex else t.dtype)
    # Distinct values 1/(n+1), 2/(n+1), ..., n/(n+1): all nonzero and pairwise different.
    s = torch.arange(1.0, (shape[(- 1)] + 1), dtype=real_dtype, device=device).mul_((1.0 / (shape[(- 1)] + 1)))
    # NOTE(review): the original line was missing the '@' matmul operator
    # (a syntax error); restored to reconstruct A = U diag(s) V^H.
    return (u * s.to(dtype)) @ vh
def parse_args(args=None):
    """Build and evaluate the command-line parser for knowledge-graph
    embedding training/evaluation.

    *args* defaults to ``sys.argv[1:]``; returns the parsed Namespace.
    """
    parser = argparse.ArgumentParser(description='Training and Testing Knowledge Graph Embedding Models', usage='train.py [<args>] [-h | --help]')
    add = parser.add_argument

    # Run-mode switches.
    add('--cuda', action='store_true', help='use GPU')
    add('--do_train', action='store_true')
    add('--do_valid', action='store_true')
    add('--do_test', action='store_true')
    add('--evaluate_train', action='store_true', help='Evaluate on training data')

    # Dataset selection.
    add('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')
    add('--regions', type=int, nargs='+', default=None, help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')
    add('--data_path', type=str, default=None)

    # Model hyper-parameters.
    add('--model', default='TransE', type=str)
    add('-de', '--double_entity_embedding', action='store_true')
    add('-dr', '--double_relation_embedding', action='store_true')
    add('-n', '--negative_sample_size', default=128, type=int)
    add('-d', '--hidden_dim', default=500, type=int)
    add('-g', '--gamma', default=12.0, type=float)
    add('-adv', '--negative_adversarial_sampling', action='store_true')
    add('-a', '--adversarial_temperature', default=1.0, type=float)

    # Optimization.
    add('-b', '--batch_size', default=1024, type=int)
    add('-r', '--regularization', default=0.0, type=float)
    add('--test_batch_size', default=4, type=int, help='valid/test batch size')
    add('--uni_weight', action='store_true', help='Otherwise use subsampling weighting like in word2vec')
    add('-lr', '--learning_rate', default=0.0001, type=float)
    add('-cpu', '--cpu_num', default=10, type=int)

    # Checkpointing and schedule.
    add('-init', '--init_checkpoint', default=None, type=str)
    add('-save', '--save_path', default=None, type=str)
    add('--max_steps', default=100000, type=int)
    add('--warm_up_steps', default=None, type=int)
    add('--save_checkpoint_steps', default=10000, type=int)
    add('--valid_steps', default=10000, type=int)
    add('--log_steps', default=100, type=int, help='train log every xx steps')
    add('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')

    # Filled in programmatically from the dataset.
    add('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    add('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    add('--eval_type', default='random', type=str)

    return parser.parse_args(args)
def degree_lowest_rational_function(r, x):
    """Return the valuation difference val_x(numerator) - val_x(denominator)
    of the rational function *r*, viewed in the fraction field of its parent.
    """
    from sage.rings.fraction_field import FractionField

    field = FractionField(r.parent())
    element = field(r)
    numer = element.numerator().polynomial(x)
    denom = element.denominator().polynomial(x)
    return numer.valuation() - denom.valuation()
def test_wrap_experiment_name_parameters_none():
    """name_parameters='none' must make the wrapped experiment raise."""
    # NOTE(review): the decorator's leading '@' was lost in extraction,
    # leaving a no-op expression statement; restored so _experiment actually
    # wraps test_exp.
    @_experiment(name_parameters='none')
    def test_exp(ctxt=None, seed=1):
        del ctxt
        del seed

    with pytest.raises(ValueError, match='wrap_experiment.name_parameters'):
        test_exp()
def _AllReduceBlobs(blob_names, devices, model, net, rendezvous, use_nccl, max_concurrent_distributed_ops):
    """Dispatch blob all-reduce to the single-host or distributed variant.

    Distributed reduction is used only with a rendezvous spanning more than
    one shard; otherwise the reduction stays on this host.
    """
    distributed = rendezvous is not None and rendezvous['num_shards'] > 1
    if distributed:
        _AllReduceBlobsDistributed(blob_names, devices, model, net, rendezvous, max_concurrent_distributed_ops)
    else:
        _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl)
def test_unknown_language_tokenizer(unknown_language_name):
    """An unknown language code can reuse an existing tokenizer model when
    allow_unknown_language is set."""
    base_pipe = stanza.Pipeline('en', dir=TEST_MODELS_DIR, processors='tokenize', download_method=None)
    model_path = base_pipe.processors['tokenize'].config['model_path']
    pipe = stanza.Pipeline(unknown_language_name, processors='tokenize', allow_unknown_language=True, tokenize_model_path=model_path, download_method=None)
    doc = pipe('This is a test')
    tokens = [word.text for word in doc.sentences[0].words]
    assert tokens == ['This', 'is', 'a', 'test']
def unsqueeze_expand_flat_dim0(x, num):
    """Tile *x* `num` times along a new leading dimension, then fold that
    dimension into dim 0: result shape is (num * x.size(0), *x.shape[1:]).
    """
    tiled = x.unsqueeze(dim=0).expand(num, *((-1,) * x.ndim))
    return tiled.reshape(num * x.size(0), *x.size()[1:])
class SingleThreadASGIRunner(SingleThreadRunner):
    """Serial test runner for ASGI applications."""

    def _execute_impl(self, results: TestResultSet) -> Generator[(events.ExecutionEvent, None, None)]:
        # Delegate to the shared single-threaded loop with the ASGI test
        # template; execution events are re-yielded to the caller as-is.
        (yield from self._run_tests(maker=self.schema.get_all_tests, template=asgi_test, settings=self.hypothesis_settings, generation_config=self.generation_config, seed=self.seed, checks=self.checks, max_response_time=self.max_response_time, targets=self.targets, results=results, headers=self.headers, store_interactions=self.store_interactions, dry_run=self.dry_run))
class BPRLoss(ModelLayer):
    """Bayesian Personalized Ranking loss layer.

    Expects an input record with a scalar 'pos_prediction' and a list-typed
    'neg_prediction'; emits a single float32 scalar loss blob.
    """

    def __init__(self, model, input_record, name='bpr_loss', **kwargs):
        super(BPRLoss, self).__init__(model, name, input_record, **kwargs)
        # Validate the expected record layout up front.
        assert schema.is_schema_subset(schema.Struct(('pos_prediction', schema.Scalar()), ('neg_prediction', schema.List(np.float32))), input_record)
        # Loss blobs must never be served as predictions.
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
        self.output_schema = schema.Scalar(np.float32, self.get_next_blob_reference('output'))

    def add_ops(self, net):
        """Emit the BPR ops: sum over softplus(neg_score - pos_score)."""
        neg_score = self.input_record.neg_prediction['values']()
        # Repeat each positive score once per corresponding negative sample
        # so the subtraction below is element-wise.
        pos_score = net.LengthsTile([self.input_record.pos_prediction(), self.input_record.neg_prediction['lengths']()], net.NextScopedBlob('pos_score_repeated'))
        softplus = net.Softplus([net.Sub([neg_score, pos_score])])
        net.ReduceFrontSum(softplus, self.output_schema.field_blobs())
def load_grid(fname):
    """Parse *fname* into groups of 3-column rows.

    Lines containing '#' anywhere are skipped.  Blank lines separate groups.
    Each data line is split on whitespace; fields beyond the second are
    joined into a single third column.  Returns a list of groups, each a
    list of ``[col1, col2, col3]`` rows.
    """
    with open(fname, 'r') as handle:
        lines = handle.readlines()
    groups = []
    current = []
    for line in lines:
        if '#' in line:
            continue
        fields = line.split()
        if fields:
            merged = fields[:2] + [''.join(fields[2:])]
            # A line with a single field cannot form a 3-column row.
            assert len(merged) == 3, 'File cannot be parsed'
            current.append(merged)
        elif current:
            groups.append(current)
            current = []
    # Flush the trailing group when the file does not end with a blank line.
    if current:
        groups.append(current)
    return groups
def get_args():
    """Parse the source/target directory options for the squad-class
    converter from sys.argv; defaults live under ``~/data``."""
    home = os.path.expanduser('~')
    default_source = os.path.join(home, 'data', 'squad')
    default_target = os.path.join(home, 'data', 'squad-class')
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--source_dir', default=default_source)
    parser.add_argument('-t', '--target_dir', default=default_target)
    return parser.parse_args()
# NOTE(review): both '@pytest.mark' prefixes were lost in extraction, leaving
# bare '.skipif(...)'/'.parametrize(...)' lines (syntax errors); restored.
@pytest.mark.skipif((not GPUs_available), reason='No GPU is available to test CUDA function')
@pytest.mark.parametrize(['no_of_packets', 'iterations'], [(200000, 5)])
def test_full_formal_integral(no_of_packets, iterations, config_verysimple, simulation_verysimple):
    """The CUDA formal integral must match the Numba implementation."""
    sim = simulation_verysimple
    # Two integrators over identical state: one forced to Numba, one CUDA.
    formal_integrator_numba = FormalIntegrator(sim.simulation_state, sim.plasma, sim.transport)
    formal_integrator_cuda = FormalIntegrator(sim.simulation_state, sim.plasma, sim.transport)
    # At least 80 interpolation shells, or twice the model's shell count.
    formal_integrator_numba.interpolate_shells = max((2 * formal_integrator_numba.model.no_of_shells), 80)
    formal_integrator_cuda.interpolate_shells = max((2 * formal_integrator_cuda.model.no_of_shells), 80)
    res_numba = formal_integrator_numba.make_source_function()
    att_S_ul_numba = res_numba[0].flatten(order='F')
    Jred_lu_numba = res_numba[1].values.flatten(order='F')
    Jblue_lu_numba = res_numba[2].flatten(order='F')
    res_cuda = formal_integrator_cuda.make_source_function()
    att_S_ul_cuda = res_cuda[0].flatten(order='F')
    Jred_lu_cuda = res_cuda[1].values.flatten(order='F')
    Jblue_lu_cuda = res_cuda[2].flatten(order='F')
    formal_integrator_numba.generate_numba_objects()
    # Force the Numba backend on the first integrator so the comparison is
    # CUDA vs. Numba rather than CUDA vs. CUDA.
    formal_integrator_numba.integrator = NumbaFormalIntegrator(formal_integrator_numba.numba_radial_1d_geometry, formal_integrator_numba.numba_model, formal_integrator_numba.opacity_state, formal_integrator_numba.points)
    formal_integrator_cuda.generate_numba_objects()
    L_cuda = formal_integrator_cuda.integrator.formal_integral(formal_integrator_cuda.model.t_inner, sim.transport.spectrum.frequency, sim.transport.spectrum.frequency.shape[0], att_S_ul_cuda, Jred_lu_cuda, Jblue_lu_cuda, formal_integrator_cuda.transport.tau_sobolevs_integ, formal_integrator_cuda.transport.electron_densities_integ, formal_integrator_cuda.points)[0]
    L_numba = formal_integrator_numba.integrator.formal_integral(formal_integrator_numba.model.t_inner, sim.transport.spectrum.frequency, sim.transport.spectrum.frequency.shape[0], att_S_ul_numba, Jred_lu_numba, Jblue_lu_numba, formal_integrator_numba.transport.tau_sobolevs_integ, formal_integrator_numba.transport.electron_densities_integ, formal_integrator_numba.points)[0]
    ntest.assert_allclose(L_cuda, L_numba, rtol=1e-14)
class MyContextManager():
    """Context manager that yields the LCM of two random ints drawn from
    [1, 10); results are reproducible per seed.

    NOTE(review): two stray '_blocker' statements (extraction artifacts that
    would raise NameError when the class body executes) were removed.
    """

    def __init__(self, seed):
        # Dedicated generator so draws are deterministic for a given seed.
        self.rng = np.random.default_rng(seed)

    def __enter__(self):
        a = self.rng.integers(1, 10)
        b = self.rng.integers(1, 10)
        print(f'Computing LCM of {a} and {b}')
        return np.lcm(a, b)

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        pass
def plot_transition_matrix(transition_matrix):
    """Render *transition_matrix* (transposed) as a heatmap and return the
    rendered figure as a numpy image array."""
    transition_matrix = validate_numpy_array(transition_matrix)
    fig = plt.figure(figsize=(20, 20))
    ax = sns.heatmap(data=transition_matrix.T, annot=False, cbar=True)
    # NOTE(review): axis labels say 'Time step'/'Hidden states', suggesting
    # the input may be a per-timestep probability matrix rather than a square
    # state-to-state matrix — confirm against callers.
    ax.set_ylabel('Hidden states')
    ax.set_xlabel('Time step')
    ax.set_title('Transition probabilities')
    ax.invert_yaxis()
    plt.tight_layout()
    # Draw before rasterizing so the canvas buffer is populated.
    fig.canvas.draw()
    data = save_figure_to_numpy(fig)
    plt.close()
    return data
def get_transformations(mean, std, resize_size, crop_size, mode='train', jit_script=False):
    """Assemble the image preprocessing pipeline.

    Train mode resizes then random-crops with horizontal flipping; any other
    mode resizes straight to the crop size.  With *jit_script* the steps are
    wrapped in an nn.Sequential and TorchScript-compiled, otherwise composed
    with torchvision's Compose.
    """
    if mode == 'train':
        steps = [
            torchvision.transforms.Resize((resize_size, resize_size)),
            torchvision.transforms.RandomCrop((crop_size, crop_size)),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std),
        ]
    else:
        steps = [
            torchvision.transforms.Resize((crop_size, crop_size)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std),
        ]
    if jit_script:
        return torch.jit.script(torch.nn.Sequential(*steps))
    return torchvision.transforms.Compose(steps)
def _is_p_power_mod(a, p, N):
    """Test whether *a* is a p-th power modulo *N*, factor by factor.

    Works prime power q^e by prime power of N; returns False as soon as one
    local condition fails, True otherwise.
    """
    for (q, e) in N.factor():
        v = a.valuation(q)
        # a is divisible by q^e: no constraint from this prime power.
        if (v >= e):
            continue
        # The exact power of q dividing a must itself be a p-th power.
        if (v % p):
            return False
        # Unit part of a at q, and the remaining exponent to satisfy.
        aa = (a / (q ** v))
        ee = (e - v)
        if (q != p):
            if ((q % p) == 1):
                # p-th powers in GF(q)* are the ((q-1)/p)-th roots of unity;
                # when p does not divide q-1 every unit is a p-th power.
                if ((GF(q)(aa) ** ((q - 1) / p)) != 1):
                    return False
        elif (ee > 1):
            # q == p: local conditions at the prime p itself.
            if (p % 2):
                # Odd p: unit must be a p-th power mod p^2.
                if ((Integers((p ** 2))(aa) ** (p - 1)) != 1):
                    return False
            elif (ee == 2):
                # p == 2, two remaining factors: square iff aa = 1 mod 4.
                if ((aa % 4) != 1):
                    return False
            elif ((aa % 8) != 1):
                # p == 2, three or more factors: square iff aa = 1 mod 8.
                return False
    return True
def cython_import_all(filename, globals, **kwds):
    """Compile *filename* via cython_import and copy all of its public
    (non-underscore-prefixed) names into the *globals* mapping."""
    module = cython_import(filename, **kwds)
    public = {name: value for name, value in module.__dict__.items() if not name.startswith('_')}
    globals.update(public)
def convert_name(name):
    """Translate a TF-style scope path (I3D checkpoint naming) into the
    PyTorch dotted attribute name.

    Handles 'Mixed_*' blocks, bare 'Conv*' stems, 'Branch_k' sub-paths
    (mapped to b{k}a/b{k}b, with a Mixed_5b special case), 'Logits', and a
    fixed table of leaf renames.  Raises for unrecognized segments.
    """
    rename = {'conv_3d': 'conv3d', 'batch_norm': 'bn', 'w:0': 'weight', 'b:0': 'bias', 'moving_mean:0': 'running_mean', 'moving_variance:0': 'running_var', 'beta:0': 'bias'}
    parts = name.split('/')
    converted = []
    idx = 0
    while idx < len(parts):
        part = parts[idx]
        if 'Mixed' in part:
            converted.append(part)
        elif 'Conv' in part and 'Mixed' not in name:
            # Stem convolutions keep their scope name verbatim.
            converted.append(part)
        elif 'Branch' in part:
            branch = int(part.split('_')[-1])
            # Consume the following segment too: it determines the suffix.
            idx += 1
            part = parts[idx]
            if 'Mixed_5b' in name and branch == 2:
                # Mixed_5b branch 2 distinguishes its convs by kernel size.
                if '1x1' in part:
                    converted.append(f'b{branch}a')
                elif '3x3' in part:
                    converted.append(f'b{branch}b')
                else:
                    raise Exception()
            elif 'a' in part:
                converted.append('b0' if branch == 0 else f'b{branch}a')
            elif 'b' in part:
                converted.append(f'b{branch}b')
            else:
                raise Exception
        elif part == 'Logits':
            converted.append('logits')
            # Skip the segment after 'Logits' entirely.
            idx += 1
        elif part in rename:
            converted.append(rename[part])
        else:
            raise Exception(f'No match found for seg {part} in name {name}')
        idx += 1
    return '.'.join(converted)
# NOTE(review): a decorator above this class was reduced to the bare
# argument list "(reason='the class is not fully tested')" during extraction
# — a syntax error as written, most likely a skip/experimental marker.
# Converted to this comment because the decorator name is unrecoverable here.
class Neo4jDirectedBreadthFirstNeighbors():
    """Breadth-first neighbor sampler over a directed Neo4j graph.

    Samples fixed numbers of in- and out-neighbors per hop via Cypher.
    """

    def __init__(self, graph):
        if (not isinstance(graph, Neo4jStellarDiGraph)):
            raise TypeError('Graph must be a Neo4jStellarDiGraph.')
        self.graph = graph

    def run(self, nodes=None, n=1, in_size=None, out_size=None):
        """Sample neighbors for *nodes*, repeated *n* times each.

        in_size/out_size give per-hop sample counts; returns the flat list
        of hop sample lists, starting with the repeated head nodes.
        """
        # Each head node is repeated n times so n independent walks start.
        head_nodes = [head_node for head_node in nodes for _ in range(n)]
        hops = [[head_nodes]]
        in_sample_query = _bfs_neighbor_query(sampling_direction='IN', id_property=self.graph.cypher_id_property, node_label=self.graph.cypher_node_label)
        out_sample_query = _bfs_neighbor_query(sampling_direction='OUT', id_property=self.graph.cypher_id_property, node_label=self.graph.cypher_node_label)
        for (in_num, out_num) in zip(in_size, out_size):
            last_hop = hops[(- 1)]
            this_hop = []
            # Expand every node list from the previous hop in both directions.
            for cur_nodes in last_hop:
                neighbor_records = self.graph.graph_db.run(in_sample_query, parameters={'node_id_list': cur_nodes, 'num_samples': in_num})
                this_hop.append(neighbor_records.data()[0]['next_samples'])
                neighbor_records = self.graph.graph_db.run(out_sample_query, parameters={'node_id_list': cur_nodes, 'num_samples': out_num})
                this_hop.append(neighbor_records.data()[0]['next_samples'])
            hops.append(this_hop)
        # Flatten the per-hop lists into a single list of node lists.
        return sum(hops, [])
class SideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Space Invaders world variant with one obstacle near each side edge."""

    def create_world(self, parent):
        """Build the base world, then attach left and right side obstacles."""
        super(SideObstacleSpaceInvadersWorld, self).create_world(parent)
        mid_height = self._height / 2
        self.obstacle1 = SideObstacle(world=self, position=(10, mid_height))
        parent.add(self.obstacle1, z=1)
        self.obstacle2 = SideObstacle(world=self, position=(self._width - 10, mid_height))
        parent.add(self.obstacle2, z=1)
def print_atoms(molname, forcepred, cgbeads, molecule, hbonda, hbondd, partitioning, ringatoms, ringatoms_flat, trial=False):
    """Assign names/types to coarse-grained beads and build the [atoms] text.

    Returns (atomnames, beadtypes, text); on an alogps failure the third
    element is the integer errval from gen_molecule_smi instead of text —
    callers must handle both shapes.
    """
    logger.debug('Entering print_atoms()')
    atomnames = []
    beadtypes = []
    text = ''
    for bead in range(len(cgbeads)):
        # SMILES fragment, Wildman-Crippen logP and formal charge of this bead.
        try:
            (smi_frag, wc_log_p, charge) = substruct2smi(molecule, partitioning, bead, cgbeads, ringatoms)
        except Exception:
            raise
        # Derive a provisional name from letter frequencies in the SMILES,
        # skipping purely numeric characters.
        atom_name = ''
        for (character, count) in sorted(six.iteritems(letter_occurrences(smi_frag))):
            try:
                float(character)
            except ValueError:
                if (count == 1):
                    atom_name += '{:s}'.format(character)
                else:
                    atom_name += '{:s}{:s}'.format(character, str(count))
        (mol_frag, errval) = gen_molecule_smi(smi_frag)
        # NOTE(review): charge_frag is computed again just below when
        # errval == 0 — this first call appears redundant.
        charge_frag = get_charge(mol_frag)
        if (errval == 0):
            charge_frag = get_charge(mol_frag)
            try:
                if (charge_frag == 0):
                    # Neutral fragments get an alogps estimate; charged ones 0.
                    alogps = smi2alogps(forcepred, smi_frag, wc_log_p, (bead + 1), trial)
                else:
                    alogps = 0.0
            except (NameError, TypeError, ValueError):
                # Early exit: errval takes the place of the text element.
                return (atomnames, beadtypes, errval)
            # Does any atom of this bead accept hydrogen bonds?
            hbond_a_flag = 0
            for at in hbonda:
                if (partitioning[at] == bead):
                    hbond_a_flag = 1
                    break
            # ...or donate them?
            hbond_d_flag = 0
            for at in hbondd:
                if (partitioning[at] == bead):
                    hbond_d_flag = 1
                    break
            in_ring = (cgbeads[bead] in ringatoms_flat)
            bead_type = determine_bead_type(alogps, charge, hbond_a_flag, hbond_d_flag, in_ring)
            # Unique name: first letter of the bead type + 2-digit counter.
            atom_name = ''
            name_index = 0
            while ((atom_name in atomnames) or (name_index == 0)):
                name_index += 1
                atom_name = '{:1s}{:02d}'.format(bead_type[0], name_index)
            atomnames.append(atom_name)
            if (not trial):
                text = (text + '    {:<5d} {:5s}   1     {:5s}  {:7s} {:<5d}    {:2d}      ; {:s}\n'.format((bead + 1), bead_type, molname, atom_name, (bead + 1), charge, smi_frag))
            beadtypes.append(bead_type)
    text = (text + '\n')
    return (atomnames, beadtypes, text)
# NOTE(review): two decorator lines were reduced to the bare names '_torch' /
# '_pytesseract' during extraction (NameError as written); restored to the
# @require_torch / @require_pytesseract gates used for this test class.
@require_torch
@require_pytesseract
class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for LayoutLMv2FeatureExtractor: resizing and optional OCR."""

    feature_extraction_class = (LayoutLMv2FeatureExtractor if is_pytesseract_available() else None)

    def setUp(self):
        self.feature_extract_tester = LayoutLMv2FeatureExtractionTester(self)

    # NOTE(review): call sites below access self.feat_extract_dict without
    # parentheses, so the '@property' decorator was evidently stripped in
    # extraction; restored.
    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))
        self.assertTrue(hasattr(feature_extractor, 'apply_ocr'))

    def test_batch_feature(self):
        # Covered by the per-format tests below.
        pass

    def test_call_pil(self):
        # PIL inputs: single image and batched list, with OCR outputs.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoding = feature_extractor(image_inputs[0], return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))

    def test_call_numpy(self):
        # numpy array inputs: single image and batched list.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))

    def test_call_pytorch(self):
        # torch tensor inputs: single image and batched list.
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.size, self.feature_extract_tester.size))

    def test_layoutlmv2_integration_test(self):
        # End-to-end check against a fixture document: OCR words/boxes and
        # the fixed 224x224 pixel output, with and without OCR.
        feature_extractor = LayoutLMv2FeatureExtractor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = feature_extractor(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', 'Introductory', 'Remarks', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # With OCR disabled, only pixel values are produced.
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        encoding = feature_extractor(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
def ComputeRHS(dU, rk):
    """Fill *dU* with the spectral Navier-Stokes right-hand side.

    NOTE(review): U, U_hat, TV, Curl, Cross, curl, P_hat, K, K2, K_over_K2
    and nu are module-level solver state not visible here — presumably the
    spectral DNS namespace; confirm.
    """
    # Refresh real-space velocity from spectral coefficients on all but the
    # first Runge-Kutta substage (stage 0 reuses the previous transform).
    if (rk > 0):
        U[:] = TV.backward(U_hat, U)
    curl[:] = Curl(U_hat, curl)
    # Nonlinear convection term U x curl(U).
    dU = Cross(U, curl, dU)
    # Pressure projection: remove the compressible part of dU.
    P_hat[:] = np.sum((dU * K_over_K2), 0, out=P_hat)
    dU -= (P_hat * K)
    # Viscous dissipation in spectral space.
    dU -= ((nu * K2) * U_hat)
    return dU
def parse_cpu_trace(thread_records):
    """Convert raw profiler records into FunctionEvents sorted by start time.

    First pass finds the global start marker and per-device CUDA start
    events; second pass pairs push/pop records into events.
    """
    next_id = 0
    start_record = None
    # Per-device '__cuda_start_event' record, used as a CUDA time origin.
    cuda_records = {}
    functions = []
    # Stack of (id, record) for currently-open 'push' records.
    record_stack = []
    string_table = StringTable()

    def adjusted_time(cuda_record):
        # Translate a CUDA timestamp into the CPU clock: elapsed on the
        # device plus the CPU offset of that device's time origin.
        assert (cuda_record.device() != (- 1))
        cuda_time_0 = cuda_records[cuda_record.device()]
        return (cuda_time_0.cuda_elapsed_us(cuda_record) + start_record.cpu_elapsed_us(cuda_time_0))

    # Pass 1: locate the profile start marker and CUDA origins.
    for record in itertools.chain(*thread_records):
        if (record.name() == '__start_profile'):
            start_record = record
        elif (record.name() == '__cuda_start_event'):
            assert (record.device() != (- 1))
            cuda_records[record.device()] = record
    assert (start_record is not None)
    # Pass 2: pair push/pop records into FunctionEvents.
    for record in itertools.chain(*thread_records):
        if (record.kind() == 'mark'):
            continue
        elif (record.kind() == 'push'):
            record_stack.append((next_id, record))
            next_id += 1
        elif (record.kind() == 'pop'):
            # Records nest, so the most recent push matches this pop.
            (function_id, start) = record_stack.pop()
            fe = FunctionEvent(id=function_id, name=string_table[start.name()], thread=start.thread_id(), cpu_start=start_record.cpu_elapsed_us(start), cpu_end=start_record.cpu_elapsed_us(record))
            if start.has_cuda():
                cuda_start = adjusted_time(start)
                cuda_end = adjusted_time(record)
                fe.append_kernel(start.name(), start.device(), cuda_start, cuda_end)
            functions.append(fe)
    functions.sort(key=(lambda evt: evt.cpu_interval.start))
    return functions
def graph_transform_dense_mpi(worker_id, meta_graph_def, op_library_path, config):
    """Rewrite a training meta-graph for dense-gradient MPI (Horovod) replication.

    Pins every variable that is neither sparse-updated nor a global-gradient
    accumulator onto this worker's device, then adds aggregation ops for each
    dense gradient.  Helpers such as get_all_control_consumers,
    _add_aggregation_ops, sparse_var_update_op_types and parallax_log come
    from the surrounding codebase.

    Returns:
        Tuple of (exported meta_graph_def, dict mapping variable-op name to
        the aggregated gradient tensor name).
    """
    with tf.Graph().as_default() as graph:
        tf.train.import_meta_graph(meta_graph_def)
        num_workers = hvd.size()  # NOTE(review): unused below -- presumably kept for symmetry with a sparse variant; confirm.
        op_to_control_consumer_ops = get_all_control_consumers(graph)
        trainable_variable_ops = [var.op for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
        global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        local_variables = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
        # Variables receiving sparse (non-Tensor, i.e. IndexedSlices) gradients.
        sparse_var_ops = set([gradients_info._target.op for gradients_info in tf.get_collection(tf.GraphKeys.GRADIENTS_INFO) if (not isinstance(gradients_info._grad, tf.Tensor))])
        for op in tf.get_default_graph().get_operations():
            if (op.type in sparse_var_update_op_types.keys()):
                sparse_var_ops.add(op.inputs[UPDATE_OP_VAR_POS].op)
        global_grad_ops = [var.op for var in tf.get_collection(PARALLAX_GLOBAL_GRADS)]
        # Place each dense variable on this worker's device.
        for var in (global_variables + local_variables):
            op = var.op
            if ((op not in sparse_var_ops) and (op not in global_grad_ops)):
                op._set_device(('/job:worker/task:%d' % worker_id))
        var_op_to_agg_grad = {}
        with tf.device(('/job:worker/task:%d' % worker_id)):
            for gradients_info in tf.get_collection(tf.GraphKeys.GRADIENTS_INFO):
                target_tensor = gradients_info._target
                grad = gradients_info._grad
                if (target_tensor.op not in trainable_variable_ops):
                    parallax_log.debug(('Gradient for non-trainable variable %s is created, ignore' % target_tensor.op.name))
                    continue
                # Only dense gradients (plain Tensors) are aggregated here.
                if isinstance(grad, tf.Tensor):
                    _add_aggregation_ops(gradients_info, op_to_control_consumer_ops, config)
                    var_op_to_agg_grad[target_tensor.op.name] = gradients_info._grad.name
        return (tf.train.export_meta_graph(graph=graph), var_op_to_agg_grad)
# Fix: the parametrize call was orphaned (lost its '@pytest.mark' prefix in
# extraction), which is a SyntaxError as written.
@pytest.mark.parametrize(['test_input', 'expected_result'], [(1, 'I'), (5, 'V'), (19, 'XIX'), (556, 'DLVI'), (1400, 'MCD'), (1999, 'MCMXCIX'), (3000, 'MMM')])
def test_int_to_roman(test_input, expected_result):
    """int_to_roman maps each sample integer to its Roman numeral and rejects floats."""
    assert (int_to_roman(test_input) == expected_result)
    # Fractional input must raise rather than silently truncate.
    with pytest.raises(TypeError):
        int_to_roman(1.5)
class _ASPP(nn.Module):
def __init__(self, in_ch, out_ch, rates):
super(_ASPP, self).__init__()
for (i, rate) in enumerate(rates):
self.add_module('c{}'.format(i), nn.Conv2d(in_ch, out_ch, 3, 1, padding=rate, dilation=rate, bias=True))
for m in self.children():
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.constant_(m.bias, 0)
def forward(self, x):
return sum([stage(x) for stage in self.children()]) |
def extract_meta_review(category='Video_Games'):
    """Extract item metadata and 5-core reviews for one Amazon category.

    Reads `files/<category>/raw/meta_<category>.json.gz` and
    `files/<category>/raw/<category>_5.json.gz`, then writes
    `asin2meta.pt` (dict: asin -> metadata dict) and `reviews.pt`
    (list of (user, asin, rating, 'YYYYMMDD') tuples) into
    `files/<category>/processed`.

    Fixes: gzip handles are now closed via context managers; dropped the
    unused `i = 0` counter.
    """
    processed_dir = f'files/{category}/processed'
    raw_dir = f'files/{category}/raw'
    asin2meta = {}
    with gzip.open(f'{raw_dir}/meta_{category}.json.gz', 'r') as meta_file:
        for raw in tqdm(meta_file):
            record = json.loads(raw)
            # A KeyError here means the dump's schema changed -- fail loudly.
            asin2meta[record['asin']] = {
                'asin': record['asin'],
                'brand': record['brand'],
                'category_list': record['category'],
                'main_category': record['main_cat'],
                'also_view': record['also_view'],
                'also_buy': record['also_buy'],
                'title': record['title'],
            }
    os.makedirs(processed_dir, exist_ok=True)
    torch.save(asin2meta, os.path.join(processed_dir, 'asin2meta.pt'))
    review_list = []
    with gzip.open(f'{raw_dir}/{category}_5.json.gz', 'r') as review_file:
        for raw in tqdm(review_file):
            record = json.loads(raw)
            # 'reviewTime' looks like 'MM DD, YYYY'; normalize to 'YYYYMMDD'.
            splitted = record['reviewTime'].replace(',', '').split(' ')
            mon = splitted[0].zfill(2)
            day = splitted[1][:2].zfill(2)
            year = splitted[2]
            review_list.append((record['reviewerID'], record['asin'], record['overall'], f'{year}{mon}{day}'))
    torch.save(review_list, os.path.join(processed_dir, 'reviews.pt'))
class TestRedisStoreHandlerOp(TestCase):
    """Exercises the Caffe2 Redis store handler against a live Redis server
    (location from REDIS_HOST / REDIS_PORT, defaulting to localhost:6379)."""

    def setUp(self):
        super().setUp()
        # Unique key prefix so concurrent runs do not collide in Redis.
        self.uuid = (str(uuid.uuid4()) + '/')

    def tearDown(self):
        super().tearDown()

    def create_store_handler(self):
        handler_name = 'store_handler'
        create_op = core.CreateOperator('RedisStoreHandlerCreate', [], [handler_name], prefix=self.uuid, host=os.getenv('REDIS_HOST', 'localhost'), port=int(os.getenv('REDIS_PORT', 6379)))
        workspace.RunOperatorOnce(create_op)
        return handler_name

    def test_set_get(self):
        StoreOpsTests.test_set_get(self.create_store_handler)

    def test_get_timeout(self):
        with self.assertRaises(StoreHandlerTimeoutError):
            StoreOpsTests.test_get_timeout(self.create_store_handler)
(nopython=True, cache=True)
def _label_switching_(A_indptr, A_indices, A_data, num_nodes, alpha=0.5, itnum_max=50):
x = np.ones(num_nodes)
deg = np.zeros(num_nodes)
Nc = np.zeros(num_nodes)
Np = np.zeros(num_nodes)
cids = np.arange(num_nodes)
for nid in range(num_nodes):
deg[nid] = np.sum(A_data[A_indptr[nid]:A_indptr[(nid + 1)]])
Nc[cids[nid]] += x[nid]
Np[cids[nid]] += (1 - x[nid])
M = (np.sum(deg) / 2)
rho = (M / ((num_nodes * (num_nodes - 1)) / 2))
for _it in range(itnum_max):
order = np.random.choice(num_nodes, size=num_nodes, replace=False)
updated_node_num = 0
for (_k, node_id) in enumerate(order):
neighbors = A_indices[A_indptr[node_id]:A_indptr[(node_id + 1)]]
weight = A_data[A_indptr[node_id]:A_indptr[(node_id + 1)]]
clist = np.unique(cids[neighbors])
next_cid = (- 1)
qself = 0
dqmax = 0
for cprime in clist:
for xprime in [0, 1]:
neis = neighbors[(cids[neighbors] == cprime)]
non_pp_edges = (x[neis] + ((1 - x[neis]) * xprime))
dq = ((1 - alpha) * np.sum((weight[(cids[neighbors] == cprime)] * non_pp_edges)))
Nc_prime = (Nc[cprime] - (xprime * (cprime == cids[node_id])))
Np_prime = (Np[cprime] - ((1 - xprime) * (cprime == cids[node_id])))
dq -= ((alpha * rho) * (Nc_prime + (Np_prime * xprime)))
if ((cprime == cids[node_id]) and (xprime == x[node_id])):
qself = dq
continue
if (dqmax < dq):
next_cid = cprime
next_x = xprime
dqmax = dq
dqmax -= qself
if (dqmax <= 1e-16):
continue
Nc[cids[node_id]] -= x[node_id]
Np[cids[node_id]] -= (1 - x[node_id])
Nc[next_cid] += next_x
Np[next_cid] += (1 - next_x)
cids[node_id] = next_cid
x[node_id] = next_x
updated_node_num += 1
if ((updated_node_num / num_nodes) < 0.001):
break
return (cids, x) |
class MDPEnvironment(Environment):
    """Adapter exposing the `blackhc.mdp` example scenarios through the
    multi-agent Environment interface.

    There is a single agent id ('agent'); observations, rewards and done flags
    are fanned out into per-agent dicts.
    """

    def __init__(self, **configs):
        super().__init__(**configs)
        try:
            from blackhc import mdp
            from blackhc.mdp import example as mdp_examples
        except ImportError as e:
            Logger.error('please run `pip install -e .[dev]` before using MDPEnvironment')
            raise e
        # Registry of the example MDPs shipped with blackhc.mdp.
        scenarios = {'one_round_dmdp': mdp_examples._one_round_dmdp, 'two_round_dmdp': mdp_examples._two_round_dmdp, 'one_round_nmdp': mdp_examples._one_round_nmdp, 'two_round_nmdp': mdp_examples._two_round_nmdp, 'multi_round_nmdp': mdp_examples._multi_round_nmdp}
        env_id = configs['env_id']
        if (env_id not in scenarios):
            raise ValueError(f'env_id={env_id} not a legal environment id, you should init mdp environments from one of them: {scenarios.keys()}')
        self.env = scenarios[env_id]().to_env()
        self._possible_agents = ['agent']

    # Fix: restored @property on the three accessors below -- the
    # dict.fromkeys(self.possible_agents, ...) call sites in this class require
    # an iterable, not a bound method, so the decorators were almost certainly
    # lost upstream.
    @property
    def possible_agents(self) -> List[AgentID]:
        """All agent ids (always the single 'agent')."""
        return self._possible_agents

    @property
    def observation_spaces(self) -> Dict[(AgentID, gym.Space)]:
        """Per-agent observation space, shared from the wrapped env."""
        return dict.fromkeys(self.possible_agents, self.env.observation_space)

    @property
    def action_spaces(self) -> Dict[(AgentID, gym.Space)]:
        """Per-agent action space, shared from the wrapped env."""
        return dict.fromkeys(self.possible_agents, self.env.action_space)

    def time_step(self, actions: Dict[(AgentID, Any)]) -> Tuple[(Dict[(AgentID, Any)], Dict[(AgentID, float)], Dict[(AgentID, bool)], Dict[(AgentID, Any)])]:
        """Advance the wrapped MDP one step using the single agent's action."""
        (obs, rew, done, info) = self.env._step(actions['agent'])
        obs = dict.fromkeys(self.possible_agents, obs)
        rew = dict.fromkeys(self.possible_agents, rew)
        done = dict.fromkeys(self.possible_agents, done)
        # NOTE(review): returns a 5-tuple (leading None state) although the
        # annotation declares 4 elements -- mirrors reset()'s convention.
        return (None, obs, rew, done, info)

    def reset(self, max_step: int=None) -> Union[(None, Sequence[Dict[(AgentID, Any)]])]:
        """Reset the wrapped MDP; returns (None, per-agent observation dict)."""
        super(MDPEnvironment, self).reset(max_step=max_step)
        observation = self.env._reset()
        return (None, dict.fromkeys(self.possible_agents, observation))

    def close(self):
        return self.env.close()

    def render(self, *args, **kwargs):
        return self.env._render()

    def seed(self, seed: int=None):
        return self.env.seed(seed)
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
    """Yield (lineno, function name, message strings) for every gettext call
    found below `node`.

    Non-constant arguments are reported as None.  With babel_style=False the
    Nones are filtered out and single strings are not unwrapped.
    """
    for call in node.find_all(nodes.Call):
        # Only direct calls to a known gettext function by name count.
        if (not isinstance(call.node, nodes.Name)) or (call.node.name not in gettext_functions):
            continue
        strings = []
        for arg in call.args:
            is_const_str = isinstance(arg, nodes.Const) and isinstance(arg.value, string_types)
            strings.append(arg.value if is_const_str else None)
        # Keyword and dynamic arguments can never be extracted statically.
        strings.extend(None for _ in call.kwargs)
        if call.dyn_args is not None:
            strings.append(None)
        if call.dyn_kwargs is not None:
            strings.append(None)
        if not babel_style:
            strings = tuple(s for s in strings if s is not None)
            if not strings:
                continue
        elif len(strings) == 1:
            strings = strings[0]
        else:
            strings = tuple(strings)
        yield (call.lineno, call.node.name, strings)
def plot_point(res, marker='o', color=None):
    """Plot an optimization result on the module-level `ax` axes.

    `res` is expected to expose a scipy-style `res.x` 2-vector; the +512 offset
    recenters coordinates onto a [0, 1024] grid -- presumably to match the
    plotted objective's domain; TODO confirm against the surrounding script.
    """
    ax.plot((512 + res.x[0]), (512 + res.x[1]), marker=marker, color=color, ms=10)
def cal_recall(predicts, labels, user_ids, k):
    """Mean recall@k over users.

    For each user with at least two scored items, rank items by predicted
    score and compute hits-in-top-k / min(#relevant, k).  Users with no
    relevant items are skipped.  Returns NaN when no user qualifies
    (np.mean of an empty array), matching the original behavior.

    Fixes: the bare `except: continue` (which silently swallowed *any*
    error, not just division by zero) is replaced by an explicit
    total_rel == 0 check; positional top-k slicing is made explicit
    with `.iloc`.
    """
    frame = pd.DataFrame({'user': np.squeeze(user_ids),
                          'predict': np.squeeze(predicts),
                          'label': np.squeeze(labels)})
    recalls = []
    for user_id in frame.user.unique():
        user_rows = frame[(frame['user'] == user_id)]
        if (user_rows.shape[0] < 2):
            continue  # need at least two items to rank
        user_rows = user_rows.sort_values(by=['predict'], ascending=False)
        total_rel = min(user_rows['label'].sum(), k)
        if total_rel == 0:
            continue  # no relevant items for this user -> undefined recall
        hits_at_k = user_rows['label'].iloc[0:k].sum()
        recalls.append(float(hits_at_k) / float(total_rel))
    return np.mean(np.array(recalls))
def get_cursor_pos(window):
    """Query GLFW for the cursor position inside `window`.

    Returns (x, y) as plain Python floats.
    """
    x_out = ctypes.c_double()
    y_out = ctypes.c_double()
    _glfw.glfwGetCursorPos(window, ctypes.pointer(x_out), ctypes.pointer(y_out))
    return (x_out.value, y_out.value)
class Bottleneck(nn.Module):
    """3D ResNet bottleneck with a spatial-only (1x3x3) middle convolution.

    conv1 (1x1x1) reduces channels, conv2 (1x3x3) convolves spatially,
    conv3 (1x1x1) expands by `expansion`; an optional `downsample` module
    adapts the identity shortcut.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, padding=(0, 1, 1), downsample=None):
        super().__init__()
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=(1, 1, 1), bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 3), stride=stride, padding=padding, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, (planes * 4), kernel_size=(1, 1, 1), bias=False)
        self.bn3 = nn.BatchNorm3d((planes * 4))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, passed through `downsample` when provided.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
def adjust_learning_rate(optimizer, epoch, gammas, schedule, lr):
    """Step-decay schedule: multiply `lr` by each gamma whose milestone epoch
    has been reached, write the result into every param group, and return it."""
    assert (len(gammas) == len(schedule)), 'length of gammas and schedule should be equal'
    for (gamma, milestone) in zip(gammas, schedule):
        if epoch < milestone:
            break  # milestones are ordered; later ones cannot apply either
        lr = lr * gamma
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def validate_cz_dic(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Czech DIC (VAT id) values via `dic.is_valid`.

    Series input yields an element-wise boolean Series; DataFrame input
    validates `column` when given, otherwise every cell; any other input is
    validated as a single scalar.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(dic.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(dic.is_valid)
        return df.applymap(dic.is_valid)
    return dic.is_valid(df)
# Fix: restored the '@' on the harness decorator (the bare "_utils.test()" line
# was a no-op statement) and the '@ti.kernel' on the inner function, both
# apparently stripped in extraction -- the asserted TaichiSyntaxError is raised
# when the kernel is compiled at call time.  Confirm against the upstream test.
@_utils.test()
def test_atomic_min_rvalue_as_frist_op():
    """ti.atomic_min must reject a non-writable (list rvalue) first operand.

    NOTE(review): "frist" is a typo for "first"; kept because the test name is
    part of the suite's collected surface.
    """
    @ti.kernel
    def func():
        y = ti.Vector([1, 2, 3])
        z = ti.atomic_min([3, 2, 1], y)
    with pytest.raises(ti.TaichiSyntaxError) as e:
        func()
    assert ('atomic_min' in str(e.value))
    assert ('cannot use a non-writable target as the first operand of' in str(e.value))
def test_noconvert_args(msg):
    """pybind11 binding test: `py::arg().noconvert()` must block implicit
    argument conversions while plain args still allow them.

    The expected multiline strings are emitted by the C++ ArgInspector test
    helpers; the `msg` fixture normalizes their whitespace.
    """
    a = m.ArgInspector()
    # Default args allow conversion.
    assert (msg(a.f('hi')) == '\n loading ArgInspector1 argument WITH conversion allowed. Argument value = hi\n ')
    # First arg of g() is noconvert; optional args fall back to defaults.
    assert (msg(a.g('this is a', 'this is b')) == '\n loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a\n loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b\n 13\n loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)\n ')
    assert (msg(a.g('this is a', 'this is b', 42)) == '\n loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a\n loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b\n 42\n loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)\n ')
    assert (msg(a.g('this is a', 'this is b', 42, 'this is d')) == '\n loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a\n loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b\n 42\n loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d\n ')
    assert (a.h('arg 1') == 'loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1')
    assert (msg(m.arg_inspect_func('A1', 'A2')) == '\n loading ArgInspector2 argument WITH conversion allowed. Argument value = A1\n loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2\n ')
    # floats_preferred converts int -> float; floats_only (noconvert) rejects it.
    assert (m.floats_preferred(4) == 2.0)
    assert (m.floats_only(4.0) == 2.0)
    with pytest.raises(TypeError) as excinfo:
        m.floats_only(4)
    assert (msg(excinfo.value) == '\n floats_only(): incompatible function arguments. The following argument types are supported:\n 1. (f: float) -> float\n\n Invoked with: 4\n ')
    # ints_preferred accepts bool (implicit conversion); ints_only rejects float.
    assert (m.ints_preferred(4) == 2)
    assert (m.ints_preferred(True) == 0)
    with pytest.raises(TypeError) as excinfo:
        m.ints_preferred(4.0)
    assert (msg(excinfo.value) == '\n ints_preferred(): incompatible function arguments. The following argument types are supported:\n 1. (i: int) -> int\n\n Invoked with: 4.0\n ')
    assert (m.ints_only(4) == 2)
    with pytest.raises(TypeError) as excinfo:
        m.ints_only(4.0)
    assert (msg(excinfo.value) == '\n ints_only(): incompatible function arguments. The following argument types are supported:\n 1. (i: int) -> int\n\n Invoked with: 4.0\n ')
def build_tiny_model_summary(results, organization=None, token=None):
    """Build a summary mapping base architecture name -> tokenizer/processor/model classes.

    `results` is the per-config output of the tiny-model creation script.  When
    `organization` is given, each entry additionally records the `sha` of the
    `<organization>/tiny-random-<arch>` Hub repo; entries whose repo info
    cannot be fetched are dropped.  Relies on module-level FRAMEWORKS,
    COMPOSITE_MODELS, hf_api and logger.

    Idiom fixes: iterate processor keys directly instead of discarding
    `.items()` values; use a tuple with `str.endswith`.
    """
    tiny_model_summary = {}
    for config_name in results:
        processors = list(results[config_name]['processor'])
        tokenizer_classes = sorted(x for x in processors if x.endswith(('TokenizerFast', 'Tokenizer')))
        processor_classes = sorted(x for x in processors if x not in tokenizer_classes)
        for framework in FRAMEWORKS:
            if (framework not in results[config_name]):
                continue
            for arch_name in results[config_name][framework]:
                model_classes = [arch_name]
                # PyTorch/TensorFlow classes share a base name modulo the 'TF' prefix.
                base_arch_name = (arch_name[2:] if arch_name.startswith('TF') else arch_name)
                if (results[config_name][framework][arch_name]['model'] is None):
                    model_classes = []
                if (base_arch_name not in tiny_model_summary):
                    tiny_model_summary[base_arch_name] = {}
                tiny_model_summary[base_arch_name].update({'tokenizer_classes': tokenizer_classes, 'processor_classes': processor_classes})
                tiny_model_summary[base_arch_name]['model_classes'] = sorted((tiny_model_summary[base_arch_name].get('model_classes', []) + model_classes))
                if (organization is not None):
                    repo_name = f'tiny-random-{base_arch_name}'
                    # Composite models (e.g. encoder-decoder) live under their composite repo name.
                    if (base_arch_name in COMPOSITE_MODELS):
                        repo_name = f'tiny-random-{COMPOSITE_MODELS[base_arch_name]}'
                    repo_id = f'{organization}/{repo_name}'
                    try:
                        commit_hash = hf_api.repo_info(repo_id, token=token).sha
                    except Exception:
                        logger.warning(f'''Failed to get information for {repo_id}.
{traceback.format_exc()}''')
                        # Without a sha the entry is unusable -- drop it entirely.
                        del tiny_model_summary[base_arch_name]
                        continue
                    tiny_model_summary[base_arch_name]['sha'] = commit_hash
    return tiny_model_summary
def load_jsonl(fp: str) -> List[dict]:
    """Parse a JSON-Lines file: one JSON document per line, in order."""
    with open(fp, 'r') as handle:
        return [json.loads(raw_line) for raw_line in handle]
def convert_to_score(s, binarize_thres=None):
    """Parse `s` as a float score; optionally binarize against a threshold.

    With `binarize_thres` set, returns 1.0 when the score clears the
    threshold and 0.0 otherwise.
    """
    value = float(s)
    if binarize_thres is None:
        return value
    return float(value >= binarize_thres)
class TrainingSetup():
    """Describes one benchmark run: dataset+model pair, optimizer+config, epoch loops.

    NOTE(review): the annotated class-level fields (with defaults on the last
    two) strongly suggest this was declared with a @dataclass decorator lost
    upstream -- without it no __init__ is generated.  Confirm before relying
    on keyword construction.
    """
    dataset_and_model: Tuple  # (dataset_name, model_name)
    optimizer_with_config: Tuple[(TestOptimizer, Dict)]  # (optimizer wrapper, config dict)
    epochs: int
    batch_size: Optional[int] = None
    n_train_samples: Optional[int] = None

    def dataset(self):
        """Dataset name of this setup."""
        return self.dataset_and_model[0]

    def model(self):
        """Instantiate the model named in dataset_and_model."""
        model = get_model(self.dataset_and_model[1])
        return model

    def organize_training_data(self):
        """Build the training data for this setup via the project's get_dataset."""
        return get_dataset(dataset_name=self.dataset(), batch_size=self.batch_size, n_samples=self.n_train_samples)

    def test_optimizer(self):
        """The TestOptimizer wrapper under benchmark."""
        return self.optimizer_with_config[0]

    def set_optimizer_with_model_parameters(self, params, config):
        """Bind model parameters and a config dict into a concrete optimizer."""
        return self.test_optimizer().config_from_dict(params=params, data=config)

    def optimizer_configuration(self):
        """Raw optimizer config dict."""
        return self.optimizer_with_config[1]

    def optimizer_name(self):
        """Readable optimizer label, with `_i` / `_autosched` suffixes when the
        corresponding config flags are set."""
        test_optim = self.test_optimizer()
        independent_batch = self.optimizer_configuration().get('independent_batch')
        autoschedule = self.optimizer_configuration().get('autoschedule')
        _name = test_optim.name
        if (independent_batch is not None):
            if independent_batch:
                _name = f'{_name}_i'
        if (autoschedule is not None):
            if autoschedule:
                _name = f'{_name}_autosched'
        return _name

    def init_result_dict(self):
        """Seed the per-run results dict with identifying metadata."""
        (dataset, model_name) = self.dataset_and_model
        return {'data': dataset, 'model': model_name, 'name': self.optimizer_name()}

    def train_epoch_with_sls(self, model, optimizer, train_set, test_set, train_loader, criterion, train_iter_loss_list, metric_lists: Dict):
        """One epoch with an SLS-style optimizer (loss comes back from step(closure))."""
        begin = time.time()
        model.train()
        for (batch_index, (data, target)) in enumerate(train_loader):
            if use_GPU:
                (data, target) = (data.cuda(), target.cuda())
            closure = (lambda : criterion(model, data, target))
            optimizer.zero_grad()
            loss = optimizer.step(closure=closure)
            train_iter_loss_list.append(loss.item())
        end = time.time()
        train_loss = compute_loss(model, train_set)
        test_acc = compute_accuracy(model, test_set)
        self.update_evaluation_metrics(metric_lists=metric_lists, train_loss=train_loss, test_acc=test_acc, time_per_epoch=(end - begin))

    def train_epoch_with_smb(self, model, train_set, test_set, train_loader, optimizer, criterion, metric_lists):
        """One epoch with an SMB-style optimizer (the closure does its own zero_grad)."""
        begin = time.time()
        model.train()
        for (batch_index, (data, target)) in enumerate(train_loader):
            if use_GPU:
                (data, target) = (data.cuda(), target.cuda())
            def closure():
                optimizer.zero_grad()
                loss = criterion(model, data, target)
                return loss
            optimizer.step(closure=closure)
        end = time.time()
        train_loss = compute_loss(model, train_set)
        test_acc = compute_accuracy(model, test_set)
        self.update_evaluation_metrics(metric_lists=metric_lists, train_loss=train_loss, test_acc=test_acc, time_per_epoch=(end - begin))

    def _train_epoch_backprop(self, model, train_set, test_set, train_loader, optimizer, criterion, metric_lists):
        # Shared zero_grad/forward/backward/step loop; the Adam and SGD epoch
        # methods were byte-for-byte duplicates of this.
        begin = time.time()
        model.train()
        for (batch_index, (data, target)) in enumerate(train_loader):
            if use_GPU:
                (data, target) = (data.cuda(), target.cuda())
            optimizer.zero_grad()
            loss = criterion(model, data, target)
            loss.backward()
            optimizer.step()
        end = time.time()
        train_loss = compute_loss(model, train_set)
        test_acc = compute_accuracy(model, test_set)
        self.update_evaluation_metrics(metric_lists=metric_lists, train_loss=train_loss, test_acc=test_acc, time_per_epoch=(end - begin))

    def train_epoch_with_adam(self, model, train_set, test_set, train_loader, optimizer, criterion, metric_lists):
        """One epoch with Adam (standard backprop loop)."""
        self._train_epoch_backprop(model, train_set, test_set, train_loader, optimizer, criterion, metric_lists)

    def train_epoch_with_sgd(self, model, train_set, test_set, train_loader, optimizer, criterion, metric_lists):
        """One epoch with SGD (standard backprop loop)."""
        self._train_epoch_backprop(model, train_set, test_set, train_loader, optimizer, criterion, metric_lists)

    def update_evaluation_metrics(self, metric_lists: Dict, train_loss, test_acc, time_per_epoch):
        """Append this epoch's metrics to the running metric lists.

        Fix: added the missing `self` parameter -- every call site above uses
        `self.update_evaluation_metrics(metric_lists=...)`, which previously
        raised TypeError ("got multiple values for argument 'metric_lists'").
        """
        metric_lists['train_loss_list'].append(train_loss)
        metric_lists['test_acc_list'].append(test_acc)
        metric_lists['run_time_list'].append(time_per_epoch)
def register_Ns3RrcDlDcchMessage_methods(root_module, cls):
    """Register the Python binding surface for ns3::RrcDlDcchMessage.

    Pybindgen-generated-style code: copy constructor, default constructor,
    the public (de)serialization/print virtuals, and two protected helpers.
    Keep edits mechanical -- this mirrors the C++ class declaration.
    """
    cls.add_constructor([param('ns3::RrcDlDcchMessage const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_virtual=True)
    cls.add_method('PreSerialize', 'void', [], is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('DeserializeDlDcchMessage', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('SerializeDlDcchMessage', 'void', [param('int', 'msgType')], is_const=True, visibility='protected')
    return
class BaseFileHandler(metaclass=ABCMeta):
    """Abstract interface for per-format file handlers (json/yaml/pickle-style).

    NOTE(review): the three no-op methods below are almost certainly
    @abstractmethod stubs whose decorators were lost upstream (the class
    already uses ABCMeta); each concrete handler is expected to override them.
    """
    # Whether the serialized form is text (str) rather than bytes; subclasses
    # that serialize to bytes should flip this.
    str_like = True

    def load_from_fileobj(self, file, **kwargs):
        """Deserialize from an open file object. Override in subclasses."""
        pass

    def dump_to_fileobj(self, obj, file, **kwargs):
        """Serialize `obj` into an open file object. Override in subclasses."""
        pass

    def dump_to_str(self, obj, **kwargs):
        """Serialize `obj` to a string. Override in subclasses."""
        pass

    def load_from_path(self, filepath, mode='r', **kwargs):
        """Open `filepath` (text mode by default) and delegate to load_from_fileobj."""
        with open(filepath, mode) as f:
            return self.load_from_fileobj(f, **kwargs)

    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        """Open `filepath` for writing and delegate to dump_to_fileobj."""
        with open(filepath, mode) as f:
            self.dump_to_fileobj(obj, f, **kwargs)
class Unflatten(Module):
    r"""Unflattens one tensor dimension into the given target shape.

    `dim` may be an int (then `unflattened_size` is a tuple/list of ints) or a
    named-dimension string (then `unflattened_size` is a tuple of
    (name, size) pairs).
    """
    NamedShape = Tuple[Tuple[(str, int)]]
    __constants__ = ['dim', 'unflattened_size']
    dim: Union[(int, str)]
    unflattened_size: Union[(_size, NamedShape)]

    def __init__(self, dim: Union[(int, str)], unflattened_size: Union[(_size, NamedShape)]) -> None:
        super().__init__()
        # Validate the size argument against the kind of dim supplied.
        if isinstance(dim, int):
            self._require_tuple_int(unflattened_size)
        elif isinstance(dim, str):
            self._require_tuple_tuple(unflattened_size)
        else:
            raise TypeError('invalid argument type for dim parameter')
        self.dim = dim
        self.unflattened_size = unflattened_size

    def _require_tuple_tuple(self, input):
        # Named-dim form: must be a tuple whose elements are all tuples.
        if not isinstance(input, tuple):
            raise TypeError('unflattened_size must be a tuple of tuples, '
                            f'but found type {type(input).__name__}')
        for pos, element in enumerate(input):
            if not isinstance(element, tuple):
                raise TypeError('unflattened_size must be tuple of tuples, '
                                f'but found element of type {type(element).__name__} at pos {pos}')

    def _require_tuple_int(self, input):
        # Positional form: tuple or list whose elements are all ints.
        if not isinstance(input, (tuple, list)):
            raise TypeError(f'unflattened_size must be a tuple of ints, but found type {type(input).__name__}')
        for pos, element in enumerate(input):
            if not isinstance(element, int):
                raise TypeError('unflattened_size must be tuple of ints, '
                                f'but found element of type {type(element).__name__} at pos {pos}')

    def forward(self, input: Tensor) -> Tensor:
        return input.unflatten(self.dim, self.unflattened_size)

    def extra_repr(self) -> str:
        return f'dim={self.dim}, unflattened_size={self.unflattened_size}'
class TCManager(ABC):
    """Driver for an external type checker: runs it on a file and grades the outcome.

    Per-checker error-code metadata is read from the packaged errcodes.toml.
    `_build_tc_cmd`, `_check_tc_outcome`, `_parse_tc_output` and
    `_report_errors` are subclass hook points -- their bare `pass` bodies
    suggest stripped @abstractmethod decorators; confirm upstream.
    """

    def __init__(self, tc, timeout):
        # `tc` selects this checker's section of errcodes.toml; `timeout` is
        # passed to subprocess.run (seconds).
        self._timeout = timeout
        errcodes = toml.load(pkg_resources.resource_filename(__name__, 'errcodes.toml'))[tc]
        self._all_errcodes = errcodes['all']
        self._inc_errcodes = errcodes['included']

    def _build_tc_cmd(self, fpath):
        """Return the argv list that type-checks `fpath`. Override in subclasses."""
        pass

    def _type_check(self, fpath):
        """Run the checker from the target file's directory.

        Returns (returncode, stdout lines); raises TypeCheckingTooLong when
        the subprocess exceeds the timeout.
        """
        try:
            cwd = os.getcwd()
            # Run from the file's directory so relative config files resolve.
            os.chdir(dirname(fpath))
            result = subprocess.run(self._build_tc_cmd(basename(fpath)), capture_output=True, text=True, timeout=self._timeout)
            retcode = result.returncode
            outlines = result.stdout.splitlines()
            return (retcode, outlines)
        except subprocess.TimeoutExpired:
            raise TypeCheckingTooLong
        finally:
            # Always restore the working directory, even after a timeout.
            os.chdir(cwd)

    def _check_tc_outcome(self, returncode, outlines):
        """Raise CustomError/CustomWarning for unacceptable output. Override in subclasses."""
        pass

    def light_assess(self, fpath):
        """Pass/fail gate: True when the checker's outcome is acceptable."""
        logger.info(f'Light assessing {fpath}.')
        try:
            (retcode, outlines) = self._type_check(fpath)
            self._check_tc_outcome(retcode, outlines)
            logger.info('Passed the light assessment.')
            return True
        except CustomError as e:
            logger.error(str(e))
            return False
        except CustomWarning as e:
            logger.warning(str(e))
            return False

    def _parse_tc_output(self, returncode, outlines):
        """Parse raw checker output into a structured result. Override in subclasses."""
        pass

    def _report_errors(self, parsed_result):
        """Log/record the parsed diagnostics. Override in subclasses."""
        pass

    def heavy_assess(self, fpath):
        """Full analysis pass; returns the parsed result, or None (implicitly)
        when a CustomError occurs."""
        try:
            (retcode, outlines) = self._type_check(fpath)
            parsed_result = self._parse_tc_output(retcode, outlines)
            self._report_errors(parsed_result)
            return parsed_result
        except CustomError as e:
            logger.error(str(e))
def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[(Tuple, bool)]:
    """Resolve a padding spec to (padding amount, needs-dynamic-padding flag).

    `padding` may be None (symmetric TF-"same"-style padding), an explicit
    number/tuple (passed through), or one of the strings 'same' / 'valid'.
    'same' with a stride or an odd effective kernel falls back to dynamic
    (runtime) padding.
    """
    def _symmetric_pad():
        # Standard symmetric padding for the effective (dilated) kernel.
        return ((stride - 1) + (dilation * (kernel_size - 1))) // 2
    if padding is None:
        return (_symmetric_pad(), False)
    if not isinstance(padding, str):
        # Explicit numeric/tuple padding passes straight through.
        return (padding, False)
    mode = padding.lower()
    if mode == 'valid':
        return (0, False)
    if mode == 'same':
        if stride == 1 and ((dilation * (kernel_size - 1)) % 2) == 0:
            # Static symmetric padding is exact in this case.
            return (_symmetric_pad(), False)
        # Otherwise padding must be computed per-input at runtime.
        return (0, True)
    # Unknown string: fall back to symmetric padding.
    return (_symmetric_pad(), False)
def _initialize_comm(comm: Optional[MPI.Comm]=None) -> MPI.Comm:
    """Return `comm`, defaulting to the global FEniCS world communicator."""
    return fenics.MPI.comm_world if comm is None else comm
class VideoDataset(data.Dataset):
    """Frame-based video dataset (TSN-style segment sampling).

    Each line of `list_file` describes one video ('path num_frames label',
    wrapped into project-level VideoRecord objects).  `num_segments` frame
    indices are sampled per clip; `new_length` consecutive frames are loaded
    from each index.  Supports 'RGB', 'RGBDiff' and 'Flow' modalities.
    """

    def __init__(self, root_path, list_file, num_segments=3, new_length=1, modality='RGB', image_tmpl='img_{:05d}.jpg', transform=None, force_grayscale=False, random_shift=True, test_mode=False, num_clips=1):
        # NOTE(review): force_grayscale is accepted but never stored/used.
        self.root_path = root_path
        self.list_file = list_file
        self.num_segments = num_segments
        self.new_length = new_length
        self.modality = modality
        self.image_tmpl = image_tmpl
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode
        self.num_clips = num_clips
        if (self.modality == 'RGBDiff'):
            # RGBDiff needs one extra frame to form the temporal differences.
            self.new_length += 1
        self._parse_list()

    def _load_image(self, directory, idx):
        """Load frame `idx` of a video; returns a list of PIL images.

        NOTE(review): falls through and implicitly returns None for any
        modality other than RGB/RGBDiff/Flow -- confirm that can't happen.
        """
        if ((self.modality == 'RGB') or (self.modality == 'RGBDiff')):
            try:
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
            except Exception:
                # Fall back to the first frame so a corrupt file doesn't kill training.
                print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
        elif (self.modality == 'Flow'):
            # Optical flow is stored as separate grayscale x/y images.
            x_img = Image.open(os.path.join(self.root_path, directory.replace('frames', 'flow_x'), self.image_tmpl.format(idx).replace('image_', 'flow_x_'))).convert('L')
            y_img = Image.open(os.path.join(self.root_path, directory.replace('frames', 'flow_y'), self.image_tmpl.format(idx).replace('image_', 'flow_y_'))).convert('L')
            return [x_img, y_img]

    def _parse_list(self):
        """Read list_file into VideoRecords, dropping videos with < 3 frames."""
        tmp = [x.strip().split(' ') for x in open(self.list_file)]
        tmp = [item for item in tmp if (int(item[1]) >= 3)]
        self.video_list = [VideoRecord(item) for item in tmp]
        print(('video number:%d' % len(self.video_list)))

    def _sample_indices(self, record):
        """Randomly jittered segment start indices for training (1-based)."""
        if (self.modality == 'Flow'):
            # Flow needs frame pairs, so one fewer usable frame.
            num_frames = (record.num_frames - 1)
        else:
            num_frames = record.num_frames
        average_duration = (((num_frames - self.new_length) + 1) // self.num_segments)
        if (average_duration > 0):
            # One random offset inside each equal-length segment.
            offsets = (np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments))
        elif (((num_frames - self.new_length) + 1) > self.num_segments):
            # Too short for equal segments: sample sorted random positions.
            offsets = np.sort(randint(((num_frames - self.new_length) + 1), size=self.num_segments))
        else:
            # Shorter than num_segments: repeat the first frame.
            offsets = np.zeros((self.num_segments,))
        return (offsets + 1)

    def _get_val_indices(self, record):
        """Deterministic, segment-centered indices for validation (1-based)."""
        if (self.modality == 'Flow'):
            num_frames = (record.num_frames - 1)
        else:
            num_frames = record.num_frames
        if (num_frames > ((self.num_segments + self.new_length) - 1)):
            tick = (((num_frames - self.new_length) + 1) / float(self.num_segments))
            offsets = np.array([int(((tick / 2.0) + (tick * x))) for x in range(self.num_segments)])
        else:
            offsets = np.zeros((self.num_segments,))
        return (offsets + 1)

    def _get_test_indices(self, record):
        """Deterministic indices for testing; num_clips=2 yields two offset grids.

        NOTE(review): `offsets` is unbound (UnboundLocalError) for
        num_clips > 2 -- only 1 and 2 are supported.
        """
        if (self.modality == 'Flow'):
            num_frames = (record.num_frames - 1)
        else:
            num_frames = record.num_frames
        tick = (((num_frames - self.new_length) + 1) / float(self.num_segments))
        if (self.num_clips == 1):
            offsets = (np.array([int(((tick / 2.0) + (tick * x))) for x in range(self.num_segments)]) + 1)
        elif (self.num_clips == 2):
            offsets = [(np.array([int((tick * x)) for x in range(self.num_segments)]) + 1), (np.array([int(((tick * x) + (tick / 2.0))) for x in range(self.num_segments)]) + 1)]
        return offsets

    def __getitem__(self, index):
        """Return (transformed frames, label) for the index-th video."""
        record = self.video_list[index]
        if (not self.test_mode):
            segment_indices = (self._sample_indices(record) if self.random_shift else self._get_val_indices(record))
        else:
            segment_indices = self._get_test_indices(record)
        return self.get(record, segment_indices)

    def get(self, record, indices):
        """Load frames at `indices` and apply the transform.

        For num_clips > 1, `indices` is a list of per-clip index arrays and the
        transformed clips are stacked along a new leading dimension.
        """
        if (self.num_clips > 1):
            process_data_final = []
            for k in range(self.num_clips):
                images = list()
                for seg_ind in indices[k]:
                    p = int(seg_ind)
                    for i in range(self.new_length):
                        seg_imgs = self._load_image(record.path, p)
                        images.extend(seg_imgs)
                        # Advance, clamping at the last available frame.
                        if (p < record.num_frames):
                            p += 1
                (process_data, label) = self.transform((images, record.label))
                process_data_final.append(process_data)
            process_data_final = torch.stack(process_data_final, 0)
            return (process_data_final, label)
        else:
            images = list()
            for seg_ind in indices:
                p = int(seg_ind)
                for i in range(self.new_length):
                    seg_imgs = self._load_image(record.path, p)
                    images.extend(seg_imgs)
                    if (p < record.num_frames):
                        p += 1
            (process_data, label) = self.transform((images, record.label))
            return (process_data, label)

    def __len__(self):
        return len(self.video_list)
def compute_precision_at_k(targs, preds, k):
    """Precision@k for one multi-label sample: fraction of the k highest-scored
    classes that are relevant (targs == 1).  Returns 0.0 when nothing is relevant."""
    check_inputs(targs, preds)
    relevant = np.flatnonzero((targs == 1))
    if relevant.size == 0:
        return 0.0
    # Indices of the k highest scores; [::-1] keeps the original tie order.
    top_k = np.argsort(preds)[::(- 1)][:k]
    return (float(len(np.intersect1d(top_k, relevant))) / k)
class HMRHead(BaseModule):
    """HMR regression head: iteratively refines SMPL pose/shape/camera from features.

    Starting from mean parameters (or zeros), an MLP predicts residual updates
    for `n_iter` rounds; the 6D pose is finally converted to 24 rotation
    matrices via the project-level rot6d_to_rotmat.
    """

    def __init__(self, feat_dim, smpl_mean_params=None, npose=144, nbeta=10, ncam=3, hdim=1024, init_cfg=None):
        super(HMRHead, self).__init__(init_cfg=init_cfg)
        # Input to the MLP is the image feature concatenated with the current
        # pose/shape/cam estimate.
        self.fc1 = nn.Linear((((feat_dim + npose) + nbeta) + ncam), hdim)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(hdim, hdim)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(hdim, npose)
        self.decshape = nn.Linear(hdim, nbeta)
        self.deccam = nn.Linear(hdim, ncam)
        # Small-gain init keeps the first residual updates near zero.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        if (smpl_mean_params is None):
            init_pose = torch.zeros([1, npose])
            init_shape = torch.zeros([1, nbeta])
            init_cam = torch.FloatTensor([[1, 0, 0]])
        else:
            # Load SMPL mean parameters (npz with 'pose', 'shape', 'cam').
            mean_params = np.load(smpl_mean_params)
            init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
            init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)
            init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        # Buffers so the initial estimates move with the module's device/dtype.
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)

    def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
        """Regress SMPL parameters from features.

        Accepts a 4D (B, C, H, W) feature map (global-average-pooled), a 3D
        (B, T, L) sequence (flattened, outputs reshaped back per frame), or a
        2D (B, L) feature; also accepts a list/tuple of pyramid features
        (only the last one is used).
        """
        if (isinstance(x, list) or isinstance(x, tuple)):
            x = x[(- 1)]
        output_seq = False
        if (len(x.shape) == 4):
            # Global average pool over the spatial dims.
            x = x.mean(dim=(- 1)).mean(dim=(- 1))
        elif (len(x.shape) == 3):
            output_seq = True
            (B, T, L) = x.shape
            x = x.view((- 1), L)
        batch_size = x.shape[0]
        if (init_pose is None):
            init_pose = self.init_pose.expand(batch_size, (- 1))
        if (init_shape is None):
            init_shape = self.init_shape.expand(batch_size, (- 1))
        if (init_cam is None):
            init_cam = self.init_cam.expand(batch_size, (- 1))
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # Iterative error feedback: each round predicts a residual update.
        for i in range(n_iter):
            xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = (self.decpose(xc) + pred_pose)
            pred_shape = (self.decshape(xc) + pred_shape)
            pred_cam = (self.deccam(xc) + pred_cam)
        # 6D rotation representation -> 24 joint rotation matrices.
        pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        if output_seq:
            # Restore the (batch, time) layout for sequence inputs.
            pred_rotmat = pred_rotmat.view(B, T, 24, 3, 3)
            pred_shape = pred_shape.view(B, T, 10)
            pred_cam = pred_cam.view(B, T, 3)
        output = {'pred_pose': pred_rotmat, 'pred_shape': pred_shape, 'pred_cam': pred_cam}
        return output
def eval_vae(epoch, args, trainer, eval_data):
    """Evaluate a QA/QG VAE model on a dev set.

    For every batch, generates questions/answer spans from both the posterior
    (conditioned on context+question+answer) and the prior (context only),
    collects QA start/end logits, writes SQuAD-style predictions to
    ``<model_dir>/pred.json`` and scores them.

    Args:
        epoch: current epoch number (unused in the body — TODO confirm it is
            kept only for the caller's signature).
        args: namespace providing bert_model, device, model_dir and dev_dir.
        trainer: model wrapper exposing generate_posterior / generate_prior /
            generate_answer_logits.
        eval_data: (eval_loader, eval_examples, eval_features) triple.

    Returns:
        (ret, bleu, all_results): QA metrics from ``evaluate``, QG BLEU score,
        and the per-example generation records.
    """
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    RawResult = collections.namedtuple('RawResult', ['unique_id', 'start_logits', 'end_logits'])
    (eval_loader, eval_examples, eval_features) = eval_data
    all_results = []
    qa_results = []
    qg_results = {}
    res_dict = {}
    # Global feature index across batches; assumes eval_features is aligned
    # with the loader's iteration order — TODO confirm against the caller.
    example_index = (- 1)
    for batch in tqdm(eval_loader, desc='Eval iter', leave=False, position=4):
        (c_ids, q_ids, a_ids, start, end) = batch_to_device(batch, args.device)
        batch_size = c_ids.size(0)
        batch_c_ids = c_ids.cpu().tolist()
        batch_q_ids = q_ids.cpu().tolist()
        batch_start = start.cpu().tolist()
        batch_end = end.cpu().tolist()
        # Posterior generation conditions on (context, question, answer).
        (batch_posterior_q_ids, batch_posterior_start, batch_posterior_end, posterior_z_prob) = trainer.generate_posterior(c_ids, q_ids, a_ids)
        (batch_start_logits, batch_end_logits) = trainer.generate_answer_logits(c_ids, q_ids, a_ids)
        (batch_posterior_q_ids, batch_posterior_start, batch_posterior_end) = (batch_posterior_q_ids.cpu().tolist(), batch_posterior_start.cpu().tolist(), batch_posterior_end.cpu().tolist())
        posterior_z_prob = posterior_z_prob.cpu()
        # Prior generation conditions on the context only.
        (batch_prior_q_ids, batch_prior_start, batch_prior_end, prior_z_prob) = trainer.generate_prior(c_ids)
        (batch_prior_q_ids, batch_prior_start, batch_prior_end) = (batch_prior_q_ids.cpu().tolist(), batch_prior_start.cpu().tolist(), batch_prior_end.cpu().tolist())
        prior_z_prob = prior_z_prob.cpu()
        for i in range(batch_size):
            example_index += 1
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index]
            unique_id = int(eval_feature.unique_id)
            # Decode token ids back to text for logging / BLEU computation.
            context = to_string(batch_c_ids[i], tokenizer)
            real_question = to_string(batch_q_ids[i], tokenizer)
            posterior_question = to_string(batch_posterior_q_ids[i], tokenizer)
            prior_question = to_string(batch_prior_q_ids[i], tokenizer)
            # Answer spans are inclusive, hence the +1 on the end index.
            real_answer = to_string(batch_c_ids[i][batch_start[i]:(batch_end[i] + 1)], tokenizer)
            posterior_answer = to_string(batch_c_ids[i][batch_posterior_start[i]:(batch_posterior_end[i] + 1)], tokenizer)
            prior_answer = to_string(batch_c_ids[i][batch_prior_start[i]:(batch_prior_end[i] + 1)], tokenizer)
            all_results.append(Result(context=context, real_question=real_question, posterior_question=posterior_question, prior_question=prior_question, real_answer=real_answer, posterior_answer=posterior_answer, prior_answer=prior_answer, posterior_z_prob=posterior_z_prob[i], prior_z_prob=prior_z_prob[i]))
            qg_results[unique_id] = posterior_question
            res_dict[unique_id] = real_question
            qa_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits))
    # SQuAD-style QA evaluation on the predictions written to disk.
    output_prediction_file = os.path.join(args.model_dir, 'pred.json')
    write_predictions(eval_examples, eval_features, qa_results, n_best_size=20, max_answer_length=30, do_lower_case=True, output_prediction_file=output_prediction_file, verbose_logging=False, version_2_with_negative=False, null_score_diff_threshold=0, noq_position=True)
    with open(args.dev_dir) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(os.path.join(args.model_dir, 'pred.json')) as prediction_file:
        predictions = json.load(prediction_file)
    ret = evaluate(dataset, predictions)
    # BLEU of generated (posterior) questions against the gold questions.
    bleu = eval_qg(res_dict, qg_results)
    return (ret, bleu, all_results)
class lora_sdr_lora_tx(gr.hier_block2):
    """GNU Radio hierarchical block implementing a LoRa transmit chain.

    Message path: whitening -> header -> CRC -> Hamming encoding ->
    interleaving -> Gray demapping -> chirp modulation. Payload frames enter
    via the 'in' message port; the modulated complex baseband signal leaves
    on stream output 0.
    """
    def __init__(self, bw=125000, cr=1, has_crc=True, impl_head=False, samp_rate=250000, sf=7, ldro_mode=2, frame_zero_padd=(2 ** 7)):
        # 0 stream inputs, 1 complex stream output; data arrives as messages.
        gr.hier_block2.__init__(self, 'lora_sdr_lora_tx', gr.io_signature(0, 0, 0), gr.io_signature(1, 1, (gr.sizeof_gr_complex * 1)))
        self.message_port_register_hier_in('in')
        # Parameters. NOTE(review): ldro_mode is passed to the interleaver
        # below but never stored on self, so unlike the other parameters it
        # has no getter/setter — confirm this is intentional.
        self.bw = bw
        self.cr = cr
        self.has_crc = has_crc
        self.impl_head = impl_head
        self.samp_rate = samp_rate
        self.sf = sf
        self.frame_zero_padd = frame_zero_padd
        # Processing blocks of the TX chain.
        self.lora_sdr_whitening_0 = lora_sdr.whitening(False, False, ',', 'packet_len')
        self.lora_sdr_modulate_0 = lora_sdr.modulate(sf, samp_rate, bw, [8, 16], frame_zero_padd, 8)
        self.lora_sdr_interleaver_0 = lora_sdr.interleaver(cr, sf, ldro_mode, bw)
        self.lora_sdr_header_0 = lora_sdr.header(impl_head, has_crc, cr)
        self.lora_sdr_hamming_enc_0 = lora_sdr.hamming_enc(cr, sf)
        self.lora_sdr_gray_demap_0 = lora_sdr.gray_demap(sf)
        self.lora_sdr_add_crc_0 = lora_sdr.add_crc(has_crc)
        # Wire the chain: whitening -> header -> crc -> hamming ->
        # interleaver -> gray demap -> modulate -> hier output.
        self.msg_connect((self, 'in'), (self.lora_sdr_whitening_0, 'msg'))
        self.connect((self.lora_sdr_add_crc_0, 0), (self.lora_sdr_hamming_enc_0, 0))
        self.connect((self.lora_sdr_gray_demap_0, 0), (self.lora_sdr_modulate_0, 0))
        self.connect((self.lora_sdr_hamming_enc_0, 0), (self.lora_sdr_interleaver_0, 0))
        self.connect((self.lora_sdr_header_0, 0), (self.lora_sdr_add_crc_0, 0))
        self.connect((self.lora_sdr_interleaver_0, 0), (self.lora_sdr_gray_demap_0, 0))
        self.connect((self.lora_sdr_modulate_0, 0), (self, 0))
        self.connect((self.lora_sdr_whitening_0, 0), (self.lora_sdr_header_0, 0))
    # GRC-style accessors. NOTE(review): only set_cr and set_sf propagate the
    # new value to the underlying blocks; the other setters just store it.
    def get_bw(self):
        return self.bw
    def set_bw(self, bw):
        self.bw = bw
    def get_cr(self):
        return self.cr
    def set_cr(self, cr):
        # Forward the new coding rate to every block that depends on it.
        self.cr = cr
        self.lora_sdr_hamming_enc_0.set_cr(self.cr)
        self.lora_sdr_header_0.set_cr(self.cr)
        self.lora_sdr_interleaver_0.set_cr(self.cr)
    def get_has_crc(self):
        return self.has_crc
    def set_has_crc(self, has_crc):
        self.has_crc = has_crc
    def get_impl_head(self):
        return self.impl_head
    def set_impl_head(self, impl_head):
        self.impl_head = impl_head
    def get_samp_rate(self):
        return self.samp_rate
    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
    def get_sf(self):
        return self.sf
    def set_sf(self, sf):
        # Forward the new spreading factor to every block that depends on it.
        self.sf = sf
        self.lora_sdr_gray_demap_0.set_sf(self.sf)
        self.lora_sdr_hamming_enc_0.set_sf(self.sf)
        self.lora_sdr_interleaver_0.set_sf(self.sf)
        self.lora_sdr_modulate_0.set_sf(self.sf)
class Prior(ABC):
    """Abstract base class for expert priors.

    Only stores the configuration and target device; the hooks below are
    no-ops that subclasses may override.
    """

    def __init__(self, config):
        # Keep the full config around and cache the device entry.
        self.config = config
        self.device = config['device']

    def add_expert(self):
        """Hook invoked when a new expert is added; no-op by default."""

    def record_usage(self, usage, index=None):
        """Hook for recording expert usage statistics; no-op by default."""

    def nl_prior(self, normalize=False):
        """Return the negative-log prior; no-op (None) by default."""
def test_multiple_network():
    """Trace several modules into one graph scope, then verify each traced
    network reproduces the corresponding module's direct outputs."""
    creators = [
        ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]),
        ModuleCreator(ResUnit(16), [(4, 3, 32, 32)]),
        ModuleCreator(NestedTestNet(), [(4, 3, 32, 32), (4, 3, 32, 32)]),
    ]
    # Trace every module into the same graph definition.
    with nn.graph_def.graph() as g:
        for creator in creators:
            proto_inputs = [nn.ProtoVariable(shape) for shape in creator.input_shape]
            creator.module(*proto_inputs)
    # Each traced network must match its module's reference outputs.
    for creator, network in zip(creators, g.networks.values()):
        inputs = creator.get_variable_inputs()
        outputs = network(*inputs)
        ref_outputs = creator.module(*inputs)
        forward_variable_and_check_equal(outputs, ref_outputs)
# NOTE(review): the '@pytest.mark' prefix of this decorator was lost (the line
# began with a bare '.parametrize(...)', a syntax error); restored here. The
# file is expected to already import pytest for its parametrized tests.
@pytest.mark.parametrize('hint,expected', [(list, Instance(TypeInfo(list), (AnyType(),))), (list[int], Instance(TypeInfo(list), (Instance(TypeInfo(int)),))), (list[int], Instance(TypeInfo(list), (Instance(TypeInfo(int)),))), (set[int], Instance(TypeInfo(set), (Instance(TypeInfo(int)),))), (set, Instance(TypeInfo(set), (AnyType(),))), (set[int], Instance(TypeInfo(set), (Instance(TypeInfo(int)),))), (set[int], Instance(TypeInfo(set), (Instance(TypeInfo(int)),))), (dict[(int, str)], Instance(TypeInfo(dict), (Instance(TypeInfo(int)), Instance(TypeInfo(str))))), (dict[(int, str)], Instance(TypeInfo(dict), (Instance(TypeInfo(int)), Instance(TypeInfo(str))))), ((int | str), UnionType((Instance(TypeInfo(int)), Instance(TypeInfo(str))))), (Union[(int, str)], UnionType((Instance(TypeInfo(int)), Instance(TypeInfo(str))))), (Union[(int, type(None))], UnionType((NoneType(), Instance(TypeInfo(int))))), (tuple[(int, str)], TupleType((Instance(TypeInfo(int)), Instance(TypeInfo(str))))), (tuple[(int, str)], TupleType((Instance(TypeInfo(int)), Instance(TypeInfo(str))))), (tuple, TupleType((AnyType(),), unknown_size=True)), (Any, AnyType()), (type(None), NoneType()), (A, AnyType()), (list, Instance(TypeInfo(list), (AnyType(),))), (tuple, TupleType((AnyType(),), unknown_size=True))])
def test_convert_type_hints(hint, expected):
    """Converting a Python type hint yields the expected internal type, both
    by equality and by repr."""
    graph = TypeSystem()
    assert (graph.convert_type_hint(hint) == expected)
    assert (repr(graph.convert_type_hint(hint)) == repr(expected))
def parse_win_mp_grid(f):
    """Extract the Monkhorst-Pack grid from an open Wannier90 ``.win`` file.

    Scans ``f`` for the first line containing ``mp_grid`` and parses the
    space-separated integers after the first ':'.

    Args:
        f: an open text file handle.

    Returns:
        The parsed list of ints, or None when no ``mp_grid`` line is found.
    """
    # Iterate the handle lazily instead of materializing it with readlines().
    for line in f:
        if 'mp_grid' in line:
            return parse_line_list(line.split(':')[1], ' ', int)
    # Make the "not found" result explicit instead of an implicit fall-through.
    return None
class NoiseScheduleEDM():
    """EDM-style noise schedule wrapper.

    The marginal statistics follow the EDM parameterization — alpha(t) = 1,
    sigma(t) = t and lambda(t) = -log(t) — for both schedule variants; the
    'discrete' branch additionally precomputes a clipped log-alpha table.
    """

    def __init__(self, schedule='linear', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20.0, dtype=torch.float32):
        if schedule not in ['discrete', 'linear']:
            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'".format(schedule))
        self.schedule = schedule
        if schedule == 'discrete':
            # Recover log(alpha_t) from per-step betas if given, otherwise
            # from the cumulative alpha products.
            if betas is not None:
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.T = 1.0
            self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1)).to(dtype=dtype)
            self.total_N = self.log_alpha_array.shape[1]
            # Time grid matching the (possibly clipped) alpha table.
            self.t_array = torch.linspace(0.0, 1.0, self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)
        else:
            self.T = 80.0
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1

    def numerical_clip_alpha(self, log_alphas, clipped_lambda=(- 5.1)):
        """Drop trailing log-alpha entries whose half-logSNR lambda falls below
        the clip threshold, for numerical stability near t = T."""
        log_sigmas = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_alphas))
        lambs = log_alphas - log_sigmas
        # lambs decreases along the schedule; searching the flipped (ascending)
        # array counts how many trailing entries lie below the threshold.
        n_clip = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)
        if n_clip > 0:
            log_alphas = log_alphas[:(- n_clip)]
        return log_alphas

    def marginal_log_mean_coeff(self, t):
        # log(alpha(t)) == 0 under the EDM parameterization.
        return torch.zeros_like(t)

    def marginal_alpha(self, t):
        # alpha(t) == 1.
        return torch.ones_like(t)

    def marginal_std(self, t):
        # sigma(t) = t.
        return t

    def marginal_lambda(self, t):
        # lambda(t) = log(alpha/sigma) = -log(t).
        return -torch.log(t)

    def inverse_lambda(self, lamb):
        # t = exp(-lambda), flattened to 1-D.
        return torch.exp(-lamb).reshape((-1,))
class AutoModel(object):
    """Factory that instantiates the concrete model class matching a config.

    Not meant to be constructed directly; use :meth:`from_config` or
    :meth:`from_pretrained` instead.
    """
    def __init__(self):
        raise EnvironmentError('AutoModel is designed to be instantiated using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or `AutoModel.from_config(config)` methods.')

    # BUG FIX: both factory methods take `cls` but were not declared as
    # classmethods (the decorators were evidently stripped), so calling
    # AutoModel.from_config(cfg) would have bound the config to `cls`.
    @classmethod
    def from_config(cls, config):
        """Instantiate a model (with fresh weights) from a configuration.

        Raises:
            ValueError: if no entry of MODEL_MAPPING matches the config type.
        """
        for (config_class, model_class) in MODEL_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_MAPPING.keys()))))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate a model with pretrained weights.

        A 'config' kwarg may supply the configuration; otherwise it is
        resolved via AutoConfig from the given name/path.

        Raises:
            ValueError: if no entry of MODEL_MAPPING matches the config type.
        """
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        for (config_class, model_class) in MODEL_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_MAPPING.keys()))))
class OpenAssistantScenario(Scenario):
    """Scenario built from LAION's Open Assistant (oasst1) conversation data."""

    name = 'open_assistant'
    description = "The conversation dataset released by LAION's Open Assistant project."
    tags = ['instructions']

    def __init__(self, language: str):
        super().__init__()
        # Language filter for messages; 'all' disables filtering.
        self.language: str = language

    def get_instances(self, output_path: str) -> List[Instance]:
        def matches_target_language(msg):
            # 'all' accepts every message regardless of its language tag.
            return (self.language == 'all') or (msg['lang'] == self.language)

        def get_split_instances(split: Dataset, split_tag: str):
            # Pass 1: collect conversation-opening human prompts.
            initial_prompts: Dict[(str, str)] = {}
            for msg in split:
                assert (msg['model_name'] is None)
                if ((msg['parent_id'] is None) and matches_target_language(msg)):
                    assert (msg['role'] == 'prompter')
                    assert (msg['message_id'] not in initial_prompts)
                    initial_prompts[msg['message_id']] = msg['text']
            # Pass 2: gather assistant replies attached to those prompts.
            prompt_responses: DefaultDict[(str, List[str])] = defaultdict(list)
            for msg in split:
                if ((msg['role'] == 'assistant') and (msg['parent_id'] in initial_prompts) and matches_target_language(msg)):
                    prompt_responses[msg['parent_id']].append(msg['text'])
            # Turn every prompt plus its (possibly empty) replies into an Instance.
            instances: List[Instance] = []
            for (msg_id, prompt) in initial_prompts.items():
                replies = prompt_responses.get(msg_id, [])
                references = [Reference(Output(text=ref), tags=[CORRECT_TAG]) for ref in replies]
                instances.append(Instance(input=Input(text=prompt), references=references, split=split_tag))
            return instances

        dataset: Any = load_dataset('OpenAssistant/oasst1')
        train_instances = get_split_instances(dataset['train'], TRAIN_SPLIT)
        valid_instances = get_split_instances(dataset['validation'], VALID_SPLIT)
        return (train_instances + valid_instances)
class ConvTranspose3d(_ConvTransposeNd):
    # The full user-facing documentation is assembled into __doc__ from shared
    # note templates so the {groups_note}/{cudnn_reproducibility_note}
    # placeholders stay consistent across the conv modules.
    __doc__ = ('Applies a 3D transposed convolution operator over an input image composed of several input\n    planes.\n    The transposed convolution operator multiplies each input value element-wise by a learnable kernel,\n    and sums over the outputs from all input feature planes.\n\n    This module can be seen as the gradient of Conv3d with respect to its input.\n    It is also known as a fractionally-strided convolution or\n    a deconvolution (although it is not an actual deconvolution operation as it does\n    not compute a true inverse of convolution). For more information, see the visualizations\n    `here`_ and the `Deconvolutional Networks`_ paper.\n\n    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.\n\n    * :attr:`stride` controls the stride for the cross-correlation.\n\n    * :attr:`padding` controls the amount of implicit zero padding on both\n      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note\n      below for details.\n\n    * :attr:`output_padding` controls the additional size added to one side\n      of the output shape. See note below for details.\n\n    * :attr:`dilation` controls the spacing between the kernel points; also known as the a trous algorithm.\n      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.\n\n    {groups_note}\n\n    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`\n    can either be:\n\n    - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions\n    - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,\n      the second `int` for the height dimension and the third `int` for the width dimension\n\n    Note:\n        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``\n        amount of zero padding to both sizes of the input. 
This is set so that\n        when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`\n        are initialized with same parameters, they are inverses of each other in\n        regard to the input and output shapes. However, when ``stride > 1``,\n        :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output\n        shape. :attr:`output_padding` is provided to resolve this ambiguity by\n        effectively increasing the calculated output shape on one side. Note\n        that :attr:`output_padding` is only used to find output shape, but does\n        not actually add zero-padding to output.\n\n    Note:\n        {cudnn_reproducibility_note}\n\n    Args:\n        in_channels (int): Number of channels in the input image\n        out_channels (int): Number of channels produced by the convolution\n        kernel_size (int or tuple): Size of the convolving kernel\n        stride (int or tuple, optional): Stride of the convolution. Default: 1\n        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding\n            will be added to both sides of each dimension in the input. Default: 0\n        output_padding (int or tuple, optional): Additional size added to one side\n            of each dimension in the output shape. Default: 0\n        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``\n        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n    '.format(**reproducibility_notes, **convolution_notes) + '\n\n    Shape:\n        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where\n\n    .. math::\n          D_{out} = (D_{in} - 1) \\times \\text{stride}[0] - 2 \\times \\text{padding}[0] + \\text{dilation}[0]\n                    \\times (\\text{kernel\\_size}[0] - 1) + \\text{output\\_padding}[0] + 1\n    .. 
math::\n          H_{out} = (H_{in} - 1) \\times \\text{stride}[1] - 2 \\times \\text{padding}[1] + \\text{dilation}[1]\n                    \\times (\\text{kernel\\_size}[1] - 1) + \\text{output\\_padding}[1] + 1\n    .. math::\n          W_{out} = (W_{in} - 1) \\times \\text{stride}[2] - 2 \\times \\text{padding}[2] + \\text{dilation}[2]\n                    \\times (\\text{kernel\\_size}[2] - 1) + \\text{output\\_padding}[2] + 1\n\n\n    Attributes:\n        weight (Tensor): the learnable weights of the module of shape\n                         :math:`(\\text{in\\_channels}, \\frac{\\text{out\\_channels}}{\\text{groups}},`\n                         :math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]}, \\text{kernel\\_size[2]})`.\n                         The values of these weights are sampled from\n                         :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n                         :math:`k = \\frac{groups}{C_\\text{out} * \\prod_{i=0}^{2}\\text{kernel\\_size}[i]}`\n        bias (Tensor):   the learnable bias of the module of shape (out_channels)\n                         If :attr:`bias` is ``True``, then the values of these weights are\n                         sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n                         :math:`k = \\frac{groups}{C_\\text{out} * \\prod_{i=0}^{2}\\text{kernel\\_size}[i]}`\n\n    Examples::\n\n        >>> # With square kernels and equal stride\n        >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)\n        >>> # non-square kernels and unequal stride and with padding\n        >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))\n        >>> input = torch.randn(20, 16, 10, 50, 100)\n        >>> output = m(input)\n\n    .. _`here`:\n    .. _`Deconvolutional Networks`:\n    ')
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t=1, padding: _size_3_t=0, output_padding: _size_3_t=0, groups: int=1, bias: bool=True, dilation: _size_3_t=1, padding_mode: str='zeros', device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Normalize every scalar-or-tuple argument to a 3-tuple (D, H, W).
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        # The `True` flag marks this module as transposed for the base class.
        super(ConvTranspose3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, True, output_padding, groups, bias, padding_mode, **factory_kwargs)
    def forward(self, input: Tensor, output_size: Optional[List[int]]=None) -> Tensor:
        if (self.padding_mode != 'zeros'):
            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')
        assert isinstance(self.padding, tuple)
        # Resolve the output_padding needed to hit the requested output_size
        # (transposed convs map several input sizes to one output size).
        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)
        return F.conv_transpose3d(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)
def test_coerce_to_bytes_with_none():
    """None must pass through the _to_bytes_io coercion decorator untouched."""
    # NOTE(review): restored the '@' that was missing before `_to_bytes_io`;
    # a bare name expression on its own line would not decorate `func` at all,
    # leaving the coercion path untested.
    @_to_bytes_io
    def func(fh):
        assert (fh is None)
    func(None)
class Memory(object):
    """Tiny recurrent state holder driven by an external memory network."""

    def __init__(self, initial_feature, memory_net):
        # `memory_net` is unused at construction time; the initial feature
        # seeds the hidden state directly.
        self.h_state = initial_feature

    def update(self, new_feature, memory_net):
        """Advance the hidden state by a single step."""
        self.h_state = memory_net(new_feature, self.h_state)

    def train_update(self, feature_sequence, memory_net):
        """Roll the memory across a whole feature sequence and keep the final
        state; the first feature initializes the state."""
        for step, feature in enumerate(feature_sequence):
            if step:
                state = memory_net(feature, state)
            else:
                state = feature
        self.h_state = state
class KRTToRCBijectionTypeDTwisted(KRTToRCBijectionTypeD, KRTToRCBijectionTypeA2Even):
    """Bijection from Kirillov-Reshetikhin tableaux to rigged configurations
    for twisted type D, mixing the type D spinor handling (doubling/halving)
    with the type A_{2n}^(2) letter steps."""
    def run(self, verbose=False):
        """Apply the bijection to every factor of the tensor product and
        return the resulting (immutable) rigged configuration.

        When ``verbose``, the intermediate tableaux and rigged configuration
        are printed after every processed letter.
        """
        if verbose:
            from sage.combinat.rigged_configurations.tensor_product_kr_tableaux_element import TensorProductOfKirillovReshetikhinTableauxElement
        # Factors are consumed right-to-left, columns right-to-left, letters
        # bottom-to-top.
        for cur_crystal in reversed(self.tp_krt):
            r = cur_crystal.parent().r()
            for (col_number, cur_column) in enumerate(reversed(cur_crystal.to_array(False))):
                self.cur_path.insert(0, [])
                # Spinor column: switch to the doubled rigged configuration.
                if (r == self.n):
                    if verbose:
                        print('')
                        print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))
                        print('')
                        print(repr(self.ret_rig_con))
                        print('\n')
                        print('Applying doubling map')
                    self.doubling_map()
                self.cur_dims.insert(0, [0, 1])
                for letter in reversed(cur_column):
                    self.cur_dims[0][0] += 1
                    val = letter.value
                    if verbose:
                        print('')
                        print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))
                        print('')
                        print(repr(self.ret_rig_con))
                        print('\n')
                    self.cur_path[0].insert(0, [letter])
                    self.next_state(val)
                # Undo the doubling once the spinor column is finished.
                if (r == self.n):
                    if verbose:
                        print('')
                        print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))
                        print('')
                        print(repr(self.ret_rig_con))
                        print('\n')
                        print('Applying halving map')
                    self.halving_map()
                # Merge the finished column into the previous one.
                if (col_number > 0):
                    for (i, letter_singleton) in enumerate(self.cur_path[0]):
                        self.cur_path[1][i].insert(0, letter_singleton[0])
                    self.cur_dims[1][1] += 1
                    self.cur_path.pop(0)
                    self.cur_dims.pop(0)
        # Final vacancy-number sweep before freezing the result.
        for a in range(self.n):
            self._update_vacancy_nums(a)
        self.ret_rig_con.set_immutable()
        return self.ret_rig_con
    def next_state(self, val):
        """Advance the bijection by one letter ``val``.

        Delegates to the type A_{2n}^(2) step for 'E' and positive letters;
        barred letters (val < 0, presented here as negatives) get the twisted
        type D treatment below, with the special "case (Q, S)" bookkeeping on
        the last partition.
        """
        n = self.n
        tableau_height = (len(self.cur_path[0]) - 1)
        if (val == 'E'):
            KRTToRCBijectionTypeA2Even.next_state(self, val)
            return
        elif (val > 0):
            KRTToRCBijectionTypeA.next_state(self, val)
            if (tableau_height >= (n - 1)):
                self._correct_vacancy_nums()
            return
        pos_val = (- val)
        if (pos_val == 0):
            # NOTE(review): pos_val - 1 == -1 here, i.e. this reads the last
            # partition (index n - 1) — confirm this aliasing is intended.
            if (len(self.ret_rig_con[(pos_val - 1)]) > 0):
                max_width = self.ret_rig_con[(n - 1)][0]
            else:
                max_width = 1
            max_width = self.ret_rig_con[(n - 1)].insert_cell(max_width)
            width_n = (max_width + 1)
            # Insert a cell into every partition from the top down to the
            # current tableau height, updating riggings as we go.
            for a in reversed(range(tableau_height, (n - 1))):
                max_width = self.ret_rig_con[a].insert_cell(max_width)
                self._update_vacancy_nums((a + 1))
                self._update_partition_values((a + 1))
            self._update_vacancy_nums(tableau_height)
            if (tableau_height >= (n - 1)):
                self._correct_vacancy_nums()
            self._update_partition_values(tableau_height)
            if (tableau_height > 0):
                self._update_vacancy_nums((tableau_height - 1))
                self._update_partition_values((tableau_height - 1))
            # Make the new row in the last partition quasi-singular.
            p = self.ret_rig_con[(n - 1)]
            num_rows = len(p)
            for i in range(num_rows):
                if (p._list[i] == width_n):
                    j = (i + 1)
                    while ((j < num_rows) and (p._list[j] == width_n) and (p.vacancy_numbers[j] == p.rigging[j])):
                        j += 1
                    p.rigging[(j - 1)] -= 1
                    break
            return
        # Barred letter (val < 0, val != 0): track, per partition, the width at
        # which a "case S" insertion happened.
        case_S = ([None] * n)
        pos_val = (- val)
        if (len(self.ret_rig_con[(pos_val - 1)]) > 0):
            max_width = self.ret_rig_con[(pos_val - 1)][0]
        else:
            max_width = 1
        for a in range((pos_val - 1), (n - 1)):
            max_width = self.ret_rig_con[a].insert_cell(max_width)
            case_S[a] = max_width
        # Special handling of the last partition: find a singular or
        # quasi-singular row (case QS) to extend, or append a new row.
        partition = self.ret_rig_con[(n - 1)]
        num_rows = len(partition)
        case_QS = False
        for i in range((num_rows + 1)):
            if (i == num_rows):
                # Fell off the end: append a fresh row (length 1 or 2
                # depending on whether a quasi-singular row was found).
                max_width = 0
                if case_QS:
                    partition._list.append(1)
                    partition.vacancy_numbers.append(None)
                    j = (len(partition._list) - 1)
                    while ((j >= 0) and (partition._list[j] == 1)):
                        j -= 1
                    partition.rigging.insert((j + 1), None)
                    width_n = 1
                else:
                    j = (len(partition._list) - 1)
                    while ((j >= 0) and (partition._list[j] <= 2)):
                        j -= 1
                    partition._list.insert((j + 1), 2)
                    partition.vacancy_numbers.insert((j + 1), None)
                    partition.rigging.insert((j + 1), None)
                break
            elif (partition._list[i] <= max_width):
                if (partition.vacancy_numbers[i] == partition.rigging[i]):
                    # Singular row found.
                    max_width = partition._list[i]
                    if case_QS:
                        partition._list[i] += 1
                        width_n = partition._list[i]
                        partition.rigging[i] = None
                    else:
                        # No quasi-singular row seen: lengthen this row by 2,
                        # keeping the partition sorted.
                        j = (i - 1)
                        while ((j >= 0) and (partition._list[j] <= (max_width + 2))):
                            partition.rigging[(j + 1)] = partition.rigging[j]
                            j -= 1
                        partition._list.pop(i)
                        partition._list.insert((j + 1), (max_width + 2))
                        partition.rigging[(j + 1)] = None
                    break
                elif (((partition.vacancy_numbers[i] - 1) == partition.rigging[i]) and (not case_QS)):
                    # Quasi-singular row: extend by 1 and keep scanning.
                    case_QS = True
                    partition._list[i] += 1
                    partition.rigging[i] = None
        # Propagate insertions back down to the tableau height.
        for a in reversed(range(tableau_height, (n - 1))):
            if (case_S[a] == max_width):
                self._insert_cell_case_S(self.ret_rig_con[a])
            else:
                max_width = self.ret_rig_con[a].insert_cell(max_width)
            self._update_vacancy_nums((a + 1))
            self._update_partition_values((a + 1))
        self._update_vacancy_nums(tableau_height)
        if (tableau_height >= (n - 1)):
            self._correct_vacancy_nums()
        self._update_partition_values(tableau_height)
        if (pos_val <= tableau_height):
            for a in range((pos_val - 1), tableau_height):
                self._update_vacancy_nums(a)
                self._update_partition_values(a)
            if (pos_val > 1):
                self._update_vacancy_nums((pos_val - 2))
                self._update_partition_values((pos_val - 2))
        elif (tableau_height > 0):
            self._update_vacancy_nums((tableau_height - 1))
            self._update_partition_values((tableau_height - 1))
        # Restore quasi-singularity of the row extended in case QS.
        if case_QS:
            num_rows = len(partition)
            for i in range(num_rows):
                if (partition._list[i] == width_n):
                    j = (i + 1)
                    while ((j < num_rows) and (partition._list[j] == width_n) and (partition.vacancy_numbers[j] == partition.rigging[j])):
                        j += 1
                    partition.rigging[(j - 1)] -= 1
                    break
def saveScore(outPath, outValue, *args):
    """Persist ``outValue`` under the nested key path ``args`` in a JSON file.

    A sibling ``<outPath>.flag`` file acts as a crude cross-process lock:
    wait until it disappears, create it, update the JSON document, remove it.

    Args:
        outPath: path of the JSON score file.
        outValue: value stored at the end of the key path.
        *args: nested keys; every component is stringified (JSON object keys
            are always strings).
    """
    flagPath = (outPath + '.flag')
    # Busy-wait until no other writer holds the flag.
    while os.path.isfile(flagPath):
        time.sleep(1)
    open(flagPath, 'a').close()
    try:
        if os.path.isfile(outPath):
            with open(outPath, 'r') as file:
                try:
                    outDict = json.load(file)
                except ValueError:
                    # Corrupt/partial file: start from an empty document
                    # (mirrors the existing non-dict fallback below).
                    outDict = {}
            if (not isinstance(outDict, dict)):
                outDict = {}
        else:
            outDict = {}
        fullDict = outDict
        # Walk (creating as needed) the nested dicts for all but the last key.
        for item in args[:(- 1)]:
            outDict = outDict.setdefault(str(item), {})
        # BUG FIX: stringify the final key like the intermediate ones;
        # previously a non-str final key diverged from the JSON round-trip
        # (which loads all keys as strings) and could produce duplicate
        # branches on the next update.
        outDict[str(args[(- 1)])] = outValue
        with open(outPath, 'w') as file:
            json.dump(fullDict, file, indent=2)
    finally:
        # BUG FIX: always release the lock; previously an exception while
        # reading/writing left the flag file behind and deadlocked every
        # future writer.
        os.remove(flagPath)
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (in percent) for classification scores.

    Args:
        output: (batch, num_classes) scores/logits.
        target: (batch,) integer class labels.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, holding the top-k accuracy
        in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # correct[k_idx, sample] == True when the (k_idx+1)-th prediction hits.
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: `correct[:k]` is non-contiguous (pred was transposed),
            # so `.view(-1)` raises a RuntimeError in recent PyTorch for
            # k > 1; `.reshape(-1)` copies when needed and always works.
            correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
            res.append(correct_k.mul_((100.0 / batch_size)))
        return res
def test_bbsplus_and_range():
    """Prove knowledge of a BBS+ signature together with a range proof that
    the signed message lies in [18, 9999]."""
    from zksk.primitives.rangeproof import RangeStmt
    from zksk.utils import make_generators

    group_pair = BilinearGroupPair()
    keypair = BBSPlusKeypair.generate(group_pair, 9)
    pk, sk = keypair.pk, keypair.sk
    generators, h0 = keypair.generators, keypair.h0

    # Obtain a signature on the (secret) message value.
    signer = BBSPlusSignatureCreator(pk)
    message = Bn(30)
    commitment = signer.commit([message])
    presig = sk.sign(commitment.com_message)
    signature = signer.obtain_signature(presig)

    e, s, m = Secret(signature.e), Secret(signature.s), Secret(message)
    sig_stmt = BBSPlusSignatureStmt([e, s, m], pk, signature)

    # Commit to the same message again for the range statement.
    g, h = make_generators(2, group_pair.G1)
    randomizer = Secret(value=group_pair.G1.order().random())
    com = (m * g) + (randomizer * h)
    range_stmt = RangeStmt(com.eval(), g, h, 18, 9999, m, randomizer)

    conjunction = sig_stmt & range_stmt
    proof = conjunction.prove()
    assert conjunction.verify(proof)
def fpAbs(a, ctx=None):
    """Create a Z3 floating-point absolute-value expression for `a`."""
    context = _get_ctx(ctx)
    coerced = _coerce_fp_expr_list([a], context)
    return FPRef(Z3_mk_fpa_abs(context.ref(), coerced[0].as_ast()), context)
def test_optimization_result_try_get_optimal_point_for_successful_optimization() -> None:
    """The optimal point of a successful record is the dataset minimizer."""
    dataset = mk_dataset([[0.25, 0.25], [0.5, 0.4]], [[0.8], [0.7]])
    record = Record({FOO: dataset}, {FOO: _PseudoTrainableQuadratic()}, None)
    result: OptimizationResult[None] = OptimizationResult(Ok(record), [])
    point, value, index = result.try_get_optimal_point()
    # The second observation (0.7) is the minimum.
    npt.assert_allclose(point, [0.5, 0.4])
    npt.assert_allclose(value, [0.7])
    npt.assert_allclose(index, 1)
# NOTE(review): the decorator's '@' and name were lost, leaving a bare
# keyword-argument tuple (a syntax error). `nopython=True` matches numba's
# `jit` signature; confirm the exact decorator against version control.
@jit(nopython=True, fastmath=True, cache=True)
def apply_bmask_1D(u, mask):
    """Zero out the columns of `u` wherever the boundary mask is 0.

    Args:
        u: 2-D array, modified in place.
        mask: 1-D array of length u.shape[1]; entries equal to 0 mark
            columns to blank.

    Returns:
        The same array `u` (for chaining).
    """
    for j in range(u.shape[1]):
        if (mask[j] == 0):
            for i in range(u.shape[0]):
                u[(i, j)] = 0
    return u
class Player2Vec(Algorithm):
    """TF1 graph for Player2Vec: per-view GCN embeddings fused by attention,
    followed by a sigmoid classifier with L2 regularization and gradient
    clipping."""
    def __init__(self, session, meta, nodes, class_size, gcn_output1, embedding, encoding):
        # meta: number of adjacency views; nodes: graph size; embedding:
        # input feature dim; encoding: fused feature dim.
        self.meta = meta
        self.nodes = nodes
        self.class_size = class_size
        self.gcn_output1 = gcn_output1
        self.embedding = embedding
        self.encoding = encoding
        # Graph inputs: stacked adjacency views 'a', node features 'x', the
        # indices of the current batch, labels 't' and optimizer scalars.
        self.placeholders = {'a': tf.placeholder(tf.float32, [self.meta, self.nodes, self.nodes], 'adj'), 'x': tf.placeholder(tf.float32, [self.nodes, self.embedding], 'nxf'), 'batch_index': tf.placeholder(tf.int32, [None], 'index'), 't': tf.placeholder(tf.float32, [None, self.class_size], 'labels'), 'lr': tf.placeholder(tf.float32, [], 'learning_rate'), 'mom': tf.placeholder(tf.float32, [], 'momentum'), 'num_features_nonzero': tf.placeholder(tf.int32)}
        (loss, probabilities) = self.forward_propagation()
        (self.loss, self.probabilities) = (loss, probabilities)
        # L2 penalty over all trainable variables.
        self.l2 = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.01), tf.trainable_variables())
        self.pred = tf.one_hot(tf.argmax(self.probabilities, 1), class_size)
        print(self.pred.shape)
        self.correct_prediction = tf.equal(tf.argmax(self.probabilities, 1), tf.argmax(self.placeholders['t'], 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, 'float'))
        print('Forward propagation finished.')
        self.sess = session
        # Adam with gradients clipped to [-5, 5] before applying.
        self.optimizer = tf.train.AdamOptimizer(self.placeholders['lr'])
        gradients = self.optimizer.compute_gradients((self.loss + self.l2))
        capped_gradients = [(tf.clip_by_value(grad, (- 5.0), 5.0), var) for (grad, var) in gradients if (grad is not None)]
        self.train_op = self.optimizer.apply_gradients(capped_gradients)
        self.init = tf.global_variables_initializer()
        print('Backward propagation finished.')
    def forward_propagation(self):
        """Build the forward graph; returns (loss, sigmoid probabilities)."""
        with tf.variable_scope('gcn'):
            # One GCN embedding per adjacency view, flattened and stacked.
            gcn_emb = []
            for i in range(self.meta):
                gcn_out = tf.reshape(GCN(self.placeholders, self.gcn_output1, self.embedding, self.encoding, index=i).embedding(), [1, (self.nodes * self.encoding)])
                gcn_emb.append(gcn_out)
            gcn_emb = tf.concat(gcn_emb, 0)
            assert (gcn_emb.shape == [self.meta, (self.nodes * self.encoding)])
            print('GCN embedding over!')
        with tf.variable_scope('attention'):
            # Fuse the views with a tanh attention layer.
            gat_out = AttentionLayer.attention(inputs=gcn_emb, attention_size=1, v_type='tanh')
            gat_out = tf.reshape(gat_out, [self.nodes, self.encoding])
            print('Embedding with attention over!')
        with tf.variable_scope('classification'):
            # Select the batch rows via one-hot matmul, then a linear layer.
            batch_data = tf.matmul(tf.one_hot(self.placeholders['batch_index'], self.nodes), gat_out)
            W = tf.get_variable(name='weights', shape=[self.encoding, self.class_size], initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='bias', shape=[1, self.class_size], initializer=tf.zeros_initializer())
            # NOTE(review): the result of this transpose is discarded (and
            # perm=[0, 1] is the identity anyway) — this line is a no-op.
            tf.transpose(batch_data, perm=[0, 1])
            logits = (tf.matmul(batch_data, W) + b)
            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.placeholders['t'], logits=logits)
        return (loss, tf.nn.sigmoid(logits))
    def train(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Run one optimization step; returns (loss, acc, pred, prob)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        outs = self.sess.run([self.train_op, self.loss, self.accuracy, self.pred, self.probabilities], feed_dict=feed_dict)
        loss = outs[1]
        acc = outs[2]
        pred = outs[3]
        prob = outs[4]
        return (loss, acc, pred, prob)
    def test(self, x, a, t, b, learning_rate=0.01, momentum=0.9):
        """Evaluate without updating; returns (acc, pred, probabilities, tags)."""
        feed_dict = utils.construct_feed_dict(x, a, t, b, learning_rate, momentum, self.placeholders)
        (acc, pred, probabilities, tags) = self.sess.run([self.accuracy, self.pred, self.probabilities, self.correct_prediction], feed_dict=feed_dict)
        return (acc, pred, probabilities, tags)
def auto_str(cls):
    """Class decorator that injects a generic __str__ rendering every
    instance attribute as `ClassName(attr=value, ...)`."""
    def __str__(self):
        attr_text = ', '.join(('%s=%s' % pair) for pair in vars(self).items())
        return '%s(%s)' % (type(self).__name__, attr_text)
    cls.__str__ = __str__
    return cls
def extractRelUIndexes(sequence, layers):
    """For each requested scale in `layers`, return the index in `sequence`
    of the first ReLU module at that scale.

    Scales start at 1 and advance after every pooling module. Note that
    `layers` is sorted in place.
    """
    layers.sort()
    indices = []
    wanted = 0          # position in `layers` of the next scale to capture
    scale = 1           # current scale, bumped after each pooling module
    captured = False    # whether a ReLU was already taken at this scale
    pos = 0
    while wanted < len(layers) and pos < len(sequence):
        module = sequence[pos]
        if isinstance(module, torch.nn.ReLU) and not captured and scale == layers[wanted]:
            captured = True
            indices.append(pos)
            wanted += 1
        if isinstance(module, (torch.nn.MaxPool2d, torch.nn.AvgPool2d)):
            captured = False
            scale += 1
        pos += 1
    return indices
class ProgressMonitor(Plugin):
    """Trainer plugin tracking per-epoch sample consumption as a percentage."""

    stat_name = 'progress'

    def __init__(self):
        # Trigger on every iteration and on every epoch.
        super(ProgressMonitor, self).__init__([(1, 'iteration'), (1, 'epoch')])

    def _stats(self):
        # All hooks share one dict under trainer.stats['progress'].
        return self.trainer.stats.setdefault(self.stat_name, {})

    def register(self, trainer):
        """Bind to `trainer` and initialize the progress counters."""
        self.trainer = trainer
        stats = self._stats()
        stats['samples_used'] = 0
        stats['epoch_size'] = len(trainer.dataset)
        stats['log_iter_fields'] = ['{samples_used}/{epoch_size}', '({percent:.2f}%)']

    def iteration(self, iteration, input, *args):
        """Account for one consumed sample and refresh the percentage."""
        stats = self._stats()
        stats['samples_used'] += 1
        stats['percent'] = (100.0 * stats['samples_used']) / stats['epoch_size']

    def epoch(self, *args):
        """Reset the counters at an epoch boundary."""
        stats = self._stats()
        stats['samples_used'] = 0
        stats['percent'] = 0
def build_model_tabular(args, dims, regularization_fns=None):
    """Assemble a SequentialFlow of CNF blocks for tabular data.

    Builds `args.num_blocks` continuous normalizing flows; when
    `args.batch_norm` is set, moving batch-norm layers are interleaved so the
    chain starts and ends with one (BN, CNF, BN, CNF, ..., BN).
    """
    hidden_dims = tuple(int(d) for d in args.dims.split('-'))

    def make_cnf():
        # Dynamics network -> ODE function (with divergence estimator) -> CNF.
        diffeq = layers.ODEnet(hidden_dims=hidden_dims, input_shape=(dims,), strides=None, conv=False, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
        odefunc = layers.ODEfunc(diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
        return layers.CNF(odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver)

    chain = [make_cnf() for _ in range(args.num_blocks)]
    if args.batch_norm:
        interleaved = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag)]
        for cnf in chain:
            interleaved.append(cnf)
            interleaved.append(layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag))
        chain = interleaved
    model = layers.SequentialFlow(chain)
    set_cnf_options(args, model)
    return model
class CrossSelfTransformer(nn.Module):
    """Stack of `depth` blocks, each applying pre-normed cross-attention to a
    context, self-attention, and a feed-forward layer, all with residuals."""

    def __init__(self, latent_dim, input_dim, depth, heads, dim_head, ff_expansion=4, attn_dropout=0.0, ff_dropout=0.0):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            cross = PreNorm(latent_dim, Attention(latent_dim, context_dim=input_dim, heads=heads, dim_head=dim_head, dropout=attn_dropout), context_dim=input_dim)
            selfa = PreNorm(latent_dim, Attention(latent_dim, heads=heads, dim_head=dim_head, dropout=attn_dropout))
            ffwd = PreNorm(latent_dim, FeedForward(latent_dim, mult=ff_expansion, dropout=ff_dropout))
            self.layers.append(nn.ModuleList([cross, selfa, ffwd]))

    def forward(self, x, context, mask=None, context_mask=None):
        """Transform `x` (latents) while attending to `context` each block."""
        for cross_attn, self_attn, ff in self.layers:
            x = cross_attn(x, context=context, mask=context_mask) + x
            x = self_attn(x, mask=mask) + x
            x = ff(x) + x
        return x
class GraphAttentionEmbedding(torch.nn.Module):
    # Temporal graph attention layer: each edge's feature is its raw message
    # concatenated with an encoding of the relative event time, fed through a
    # 2-head TransformerConv.
    def __init__(self, in_channels, out_channels, msg_dim, time_enc):
        super().__init__()
        self.time_enc = time_enc
        # Edge feature width = message width + time-encoding width.
        edge_dim = (msg_dim + time_enc.out_channels)
        # out_channels // 2 per head; the 2 heads concatenate back to out_channels.
        self.conv = TransformerConv(in_channels, (out_channels // 2), heads=2, dropout=0.1, edge_dim=edge_dim)
    def forward(self, x, last_update, edge_index, t, msg):
        # Relative time per edge, indexed by the source node (edge_index[0]).
        # NOTE(review): computed as last_update - t, i.e. negative when the event
        # is newer than the node's last update — confirm this sign convention
        # matches what time_enc expects.
        rel_t = (last_update[edge_index[0]] - t)
        rel_t_enc = self.time_enc(rel_t.to(x.dtype))
        edge_attr = torch.cat([rel_t_enc, msg], dim=(- 1))
        return self.conv(x, edge_index, edge_attr)
def count_nfe(model):
    """Total number of function evaluations recorded by every ODEfunc in `model`."""
    total = [0]  # mutable cell so the visitor closure can accumulate

    def visit(module):
        if isinstance(module, layers.ODEfunc):
            total[0] += module.num_evals()

    model.apply(visit)
    return total[0]
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    """Pick the lower-bound corner of a random crop.

    `data_shape` is (batch, channels, spatial...). For each spatial axis the
    corner is sampled uniformly inside the margins when there is room;
    otherwise the crop is centered on that axis.
    """
    num_spatial = len(data_shape) - 2
    lbs = []
    for i in range(num_spatial):
        extent = data_shape[i + 2]
        high = extent - crop_size[i] - margins[i]
        if high > margins[i]:
            lbs.append(np.random.randint(margins[i], high))
        else:
            # Not enough room to respect the margin: center the crop.
            lbs.append((extent - crop_size[i]) // 2)
    return lbs
def test_check_type_of_target() -> None:
    """Fitting MapieCalibrator on a continuous target must raise ValueError."""
    X = [0.5, 0.2, 0.4, 0.8, 3.8]
    # Float targets — presumably classified as 'continuous' and therefore not
    # among the calibrator's allowed target types (see the matched message).
    y = [0.4, 0.2, 3.6, 3, 0.2]
    mapie_cal = MapieCalibrator()
    with pytest.raises(ValueError, match='.*Make sure to have one of the allowed targets:*'):
        mapie_cal.fit(X, y)
def lecun_normal_(tensor):
    """In-place LeCun-normal init: delegates to variance_scaling_ with fan-in
    mode and a truncated-normal distribution."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class DenseNet(nn.Module):
    """DenseNet for small images: three dense blocks separated by compressing
    transition layers, then BN-ReLU, 8x8 average pooling and a linear head.

    `depth` must satisfy depth = 3n + 4; `growthRate` is the number of channels
    each dense layer adds; `compressionRate` shrinks channels at transitions.
    """

    def __init__(self, depth=22, block=Bottleneck, dropRate=0, num_classes=10, growthRate=12, compressionRate=2):
        super(DenseNet, self).__init__()
        assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        # Layers per dense block: BasicBlock has one conv per layer, Bottleneck
        # two (hence the divisor 6). Floor division is required — with `/` the
        # BasicBlock branch produced a float and range(n) below raised TypeError.
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6
        self.growthRate = growthRate
        self.dropRate = dropRate
        # self.inplanes tracks the running channel count through the network.
        self.inplanes = growthRate * 2
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1, bias=False)
        self.dense1 = self._make_denseblock(block, n)
        self.trans1 = self._make_transition(compressionRate)
        self.dense2 = self._make_denseblock(block, n)
        self.trans2 = self._make_transition(compressionRate)
        self.dense3 = self._make_denseblock(block, n)
        self.bn = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(self.inplanes, num_classes)
        # He-style initialization for convolutions; identity-like batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_denseblock(self, block, blocks):
        """Stack `blocks` dense layers; each layer adds growthRate channels."""
        layers = []
        for i in range(blocks):
            layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))
            self.inplanes += self.growthRate
        return nn.Sequential(*layers)

    def _make_transition(self, compressionRate):
        """Transition layer compressing the channel count by `compressionRate`."""
        inplanes = self.inplanes
        # // on positive ints already floors; int(math.floor(...)) was redundant.
        outplanes = self.inplanes // compressionRate
        self.inplanes = outplanes
        return Transition(inplanes, outplanes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.trans1(self.dense1(x))
        x = self.trans2(self.dense2(x))
        x = self.dense3(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def _load_word_clusters(path):
clusters = dict()
with path.open(encoding='utf8') as f:
for line in f:
split = line.rstrip().split('\t')
if (not split):
continue
clusters[split[0]] = split[1]
return clusters |
def load(folder: Union[str, Path]):
    """Load a numpy bundle rooted at `folder`.

    The key separator passed to the bundle loader depends on whether the
    path is absolute ('/') or relative ('.').
    """
    root = Path(folder)
    separator = '/' if root.is_absolute() else '.'
    return load_from_numpy_bundle(root, separator)
def shard_params(params, params_spec, mesh):
    # Distribute `params` over the device mesh according to `params_spec`:
    # an identity pjit whose in/out shardings force a resharding of the tree.
    shard_fn = pjit((lambda x: x), in_shardings=(params_spec,), out_shardings=params_spec)
    # pjit must run inside the mesh context to resolve the named axes.
    with mesh:
        return shard_fn(params)
def novelty_local_competition(individual: IndividualLike, container: Sequence, k: int=1, dist: Union[(str, Callable)]='euclidean', ignore_first: bool=False, default_novelty: float=0.1, default_local_competition: float=1.0) -> Tuple[(float, float)]:
    """Score `individual` against `container` for novelty search with local competition.

    Novelty is the mean feature distance to the k nearest neighbours; local
    competition is the fraction of those neighbours whose fitness `individual`
    dominates. For an empty container the provided defaults are returned.
    Set `ignore_first` when `individual` itself is in `container` so its
    zero-distance self-match is skipped.
    """
    if (len(container) == 0):
        return (default_novelty, default_local_competition)
    distances: Sequence = features_distances(individual, container, dist)
    nearest_neighbours_dists: Sequence
    nearest_neighbours: Sequence
    if ignore_first:
        # Drop the single closest match (assumed to be `individual` itself).
        nn: Sequence = sorted(zip(distances, container))[1:(k + 1)]
    else:
        nn = sorted(zip(distances, container))[:k]
    # NOTE(review): sorting (distance, individual) pairs falls back to comparing
    # individuals on distance ties — this assumes IndividualLike is orderable;
    # confirm, or ties could raise TypeError.
    (nearest_neighbours_dists, nearest_neighbours) = tuple(zip(*nn))
    novelty: float = np.mean(nearest_neighbours_dists)
    # Note: divides by k even if fewer than k neighbours were available.
    local_competition: float = (sum((individual.fitness.dominates(ind.fitness) for ind in nearest_neighbours)) / float(k))
    return (novelty, local_competition)
def checkpoint(nets, history, args, epoch_num):
    """Persist training history plus encoder/decoder weights under args.ckpt."""
    print('Saving checkpoints...')
    net_encoder, net_decoder, crit = nets
    suffix_latest = 'epoch_{}.pth'.format(epoch_num)
    torch.save(history, '{}/history_{}'.format(args.ckpt, suffix_latest))
    torch.save(net_encoder.state_dict(), '{}/encoder_{}'.format(args.ckpt, suffix_latest))
    torch.save(net_decoder.state_dict(), '{}/decoder_{}'.format(args.ckpt, suffix_latest))
def _impl(array, highlevel, behavior, attrs):
    # Element-wise swapcase over the string/bytestring leaves of an awkward array.
    from awkward._connect.pyarrow import import_pyarrow_compute
    # NOTE(review): the argument 'e' looks like a truncated caller/feature name
    # used in the pyarrow-missing error message — confirm against
    # import_pyarrow_compute's signature before relying on it.
    pc = import_pyarrow_compute('e')
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array)
    # utf8_swapcase handles str leaves, ascii_swapcase handles bytes leaves
    # (bytestrings are converted to strings first).
    out = ak._do.recursively_apply(layout, ak.operations.str._get_ufunc_action(pc.utf8_swapcase, pc.ascii_swapcase, bytestring_to_string=True))
    return ctx.wrap(out, highlevel=highlevel)
def remove_stopwords(sentence):
    """Drop every whitespace-separated token of `sentence` found in STOPWORDS."""
    kept = (word for word in sentence.split() if word not in STOPWORDS)
    return ' '.join(kept)
def shell(args: List[str]):
    """Run `args` as a subprocess (argument list, no shell) and log the command.

    A non-zero exit code is logged via hlog, not raised.
    """
    cmd = shlex.join(args)
    hlog(f'Executing: {cmd}')
    exit_code = subprocess.call(args)
    if (exit_code != 0):
        hlog(f'Failed with exit code {exit_code}: {cmd}')
class FlaxPreTrainedModel(ABC):
    """Base class for Flax models: holds the config, the flax module and the
    parameter tree, and implements loading/saving via from_pretrained /
    save_pretrained.

    NOTE(review): `config`, `module`, `params` and `required_params` read like
    @property getters (and `params(self, params)` like its setter) but carry no
    decorators here — they appear stripped. As written, the second `def params`
    simply redefines the first, and `self.params = ...` assigns a plain
    instance attribute. Likewise `from_pretrained` is written as a classmethod
    (`cls` first arg) without a decorator. Confirm against the upstream source.
    """
    # Set by concrete subclasses to their PretrainedConfig subclass.
    config_class = None
    # Name under which the bare (headless) model's weights are nested.
    base_model_prefix = ''
    def __init__(self, config: PretrainedConfig, module: nn.Module, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32):
        if (config is None):
            raise ValueError('config cannot be None')
        if (module is None):
            raise ValueError('module cannot be None')
        self._config = config
        self._module = module
        self.key = PRNGKey(seed)
        self.dtype = dtype
        # Random init makes the model usable immediately; from_pretrained may
        # later overwrite (a subset of) these weights with checkpoint values.
        random_params = self.init(self.key, input_shape)
        # Flattened key paths every valid parameter tree must provide.
        self._required_params = set(flatten_dict(unfreeze(random_params)).keys())
        self.params = random_params
    def init(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> Dict:
        # Subclasses must build and initialize the module parameters here.
        raise NotImplementedError(f'init method has to be implemented for {self}')
    def config(self) -> PretrainedConfig:
        # Accessor for the model configuration (see class NOTE about @property).
        return self._config
    def module(self) -> nn.Module:
        # Accessor for the underlying flax module.
        return self._module
    def params(self) -> Union[(Dict, FrozenDict)]:
        # Accessor for the parameter tree.
        return self._params
    def required_params(self) -> Set:
        # Flattened parameter key paths required for a complete tree.
        return self._required_params
    def params(self, params: Union[(Dict, FrozenDict)]):
        # Setter-style definition: validate completeness, then store frozen.
        if isinstance(params, FrozenDict):
            params = unfreeze(params)
        param_keys = set(flatten_dict(params).keys())
        if (len((self.required_params - param_keys)) > 0):
            raise ValueError(f'Some parameters are missing. Make sure that `params` include the following parameters {(self.required_params - param_keys)}')
        self._params = freeze(params)
    def convert_from_pytorch(pt_state: Dict, config: PretrainedConfig) -> Dict:
        # Map a PyTorch state dict onto the Flax parameter layout; implemented
        # per architecture in subclasses.
        raise NotImplementedError()
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], dtype: jnp.dtype=jnp.float32, *model_args, **kwargs):
        """Instantiate the model and load weights from a local path, a file, or
        the model hub; `from_pt=True` converts a PyTorch checkpoint."""
        config = kwargs.pop('config', None)
        cache_dir = kwargs.pop('cache_dir', None)
        from_pt = kwargs.pop('from_pt', False)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        local_files_only = kwargs.pop('local_files_only', False)
        use_auth_token = kwargs.pop('use_auth_token', None)
        revision = kwargs.pop('revision', None)
        if (is_offline_mode() and (not local_files_only)):
            logger.info('Offline mode: forcing local_files_only=True')
            local_files_only = True
        # Resolve the config first; unused kwargs are forwarded to the model.
        if (not isinstance(config, PretrainedConfig)):
            config_path = (config if (config is not None) else pretrained_model_name_or_path)
            (config, model_kwargs) = cls.config_class.from_pretrained(config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, **kwargs)
        else:
            model_kwargs = kwargs
        model_kwargs['dtype'] = dtype
        # Locate the weights file: local dir > local file/URL > hub identifier.
        if (pretrained_model_name_or_path is not None):
            if os.path.isdir(pretrained_model_name_or_path):
                if (from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME))):
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                else:
                    raise EnvironmentError('Error no file named {} found in directory {} or `from_pt` set to False'.format([FLAX_WEIGHTS_NAME, WEIGHTS_NAME], pretrained_model_name_or_path))
            elif (os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path)):
                archive_file = pretrained_model_name_or_path
            else:
                archive_file = hf_bucket_url(pretrained_model_name_or_path, filename=(WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME), revision=revision)
            try:
                resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token)
            except EnvironmentError as err:
                logger.error(err)
                msg = f'''Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:
- '{pretrained_model_name_or_path}' is a correct model identifier listed on '
- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {WEIGHTS_NAME}.
'''
                raise EnvironmentError(msg)
            if (resolved_archive_file == archive_file):
                logger.info(f'loading weights file {archive_file}')
            else:
                logger.info(f'loading weights file {archive_file} from cache at {resolved_archive_file}')
        else:
            # NOTE(review): with a None path, resolved_archive_file stays None
            # and the open() below will raise TypeError — confirm callers never
            # pass None here.
            resolved_archive_file = None
        # Deserialize the weights (optionally converting from PyTorch).
        with open(resolved_archive_file, 'rb') as state_f:
            try:
                if from_pt:
                    import torch
                    state = torch.load(state_f)
                    state = convert_state_dict_from_pt(cls, state, config)
                else:
                    state = from_bytes(cls, state_f.read())
            except UnpicklingError:
                raise EnvironmentError(f'Unable to convert pytorch model {archive_file} to Flax deserializable object. ')
        model = cls(config, *model_args, **model_kwargs)
        # Strip the base-model prefix when loading a bare model from a
        # checkpoint saved with a task head.
        if ((cls.base_model_prefix not in dict(model.params)) and (cls.base_model_prefix in state)):
            state = state[cls.base_model_prefix]
        state = flatten_dict(state)
        random_state = flatten_dict(unfreeze(model.params))
        missing_keys = (model.required_params - set(state.keys()))
        unexpected_keys = (set(state.keys()) - model.required_params)
        # Keep the randomly-initialized values for any missing parameters.
        for missing_key in missing_keys:
            state[missing_key] = random_state[missing_key]
        if (len(unexpected_keys) > 0):
            logger.warning(f'''Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).''')
        else:
            logger.info(f'''All model checkpoint weights were used when initializing {model.__class__.__name__}.
''')
        if (len(missing_keys) > 0):
            logger.warning(f'''Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
        else:
            logger.info(f'''All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.''')
        model.params = unflatten_dict(state)
        return model
    def save_pretrained(self, save_directory: Union[(str, os.PathLike)]):
        """Write the config and msgpack-serialized weights into `save_directory`
        so they can be reloaded with from_pretrained."""
        if os.path.isfile(save_directory):
            logger.error('Provided path ({}) should be a directory, not a file'.format(save_directory))
            return
        os.makedirs(save_directory, exist_ok=True)
        save_directory = os.path.abspath(save_directory)
        self.config.save_pretrained(save_directory)
        with open(os.path.join(save_directory, FLAX_WEIGHTS_NAME), 'wb') as f:
            model_bytes = to_bytes(self.params)
            f.write(model_bytes)
def operations_from_log(log_path: str) -> Generator[(tuple[(Operation, str, (str | None))], None, None)]:
    """Yield (operation, path, checksum) triples parsed from a file-operation log.

    Lines look like `write: <path> #<checksum>`, `append: <path> #<checksum>`
    or `delete: <path>`; the literal marker 'File Operation Logger' is
    stripped first. Yields nothing when the log file does not exist.
    `checksum` is None for deletes and for write/append entries lacking one.
    """
    try:
        log = open(log_path, 'r', encoding='utf-8')
    except FileNotFoundError:
        return
    # `with` guarantees the handle is closed even when the consumer abandons
    # the generator mid-iteration or a parse error escapes; the original only
    # closed the file after full exhaustion.
    with log:
        for line in log:
            line = line.replace('File Operation Logger', '').strip()
            if not line:
                continue
            operation, tail = line.split(': ', maxsplit=1)
            operation = operation.strip()
            if operation in ('write', 'append'):
                try:
                    path, checksum = (x.strip() for x in tail.rsplit(' #', maxsplit=1))
                except ValueError:
                    # Logger.warn is deprecated; warning() is the supported name.
                    logger.warning(f"File log entry lacks checksum: '{line}'")
                    path, checksum = tail.strip(), None
                yield (operation, path, checksum)
            elif operation == 'delete':
                yield (operation, tail.strip(), None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.