code stringlengths 101 5.91M |
|---|
def subgraphs_to_query(subgraphs, db):
    """Build a GraphQuery from a ('all_of' | 'one_of', spec, spec, ...) list.

    ``subgraphs[0]`` picks the combinator; ``subgraphs[1]`` seeds the query
    and every later spec is folded in via intersection ('all_of') or union
    ('one_of').  Any other tag raises KeyError.
    """
    mode = subgraphs[0]
    query = GraphQuery(graph_db=db, induced_subgraphs=subgraphs[1])
    if mode == 'all_of':
        combine = query.intersect
    elif mode == 'one_of':
        combine = query.union
    else:
        raise KeyError('unable to initiate query: illegal input format for induced_subgraphs')
    for spec in subgraphs[2:]:
        combine(GraphQuery(graph_db=db, induced_subgraphs=spec), in_place=True)
    return query
def test_2d_2d_stride_trick():
    """A zero-strided int32 view rebuilt via ak.from_buffers must share memory."""
    backing = np.array([101], dtype=np.int32)
    strided = np.lib.stride_tricks.as_strided(backing, (40, 3), strides=(0, 0))
    buffers = {'node0-data': strided}
    form = '\n {\n "class": "NumpyArray",\n "primitive": "int32",\n "form_key": "node0",\n "inner_shape": [3]\n }\n '
    # Full length and a shorter length must both reinterpret without copying.
    for length in (len(strided), len(strided) - 4):
        result = ak.from_buffers(form, length, buffers, highlevel=False)
        assert np.shares_memory(result.data, strided)
# NOTE(review): the call below and the bare '.unused' inside the class look
# like mangled decorators — presumably '@register_model("convtransformer")'
# and '@torch.jit.unused' in the original fairseq source; confirm.
_model('convtransformer')
class ConvTransformerModel(FairseqEncoderDecoderModel):
    """Convolutional-frontend Transformer encoder-decoder (speech-to-text)."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    def add_args(parser):
        """Register model-specific CLI options on *parser*.

        NOTE(review): no self/cls parameter — presumably decorated with
        @staticmethod in the original; confirm.
        """
        parser.add_argument('--input-feat-per-channel', type=int, metavar='N', help='encoder input dimension per input channel')
        parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension (extra linear layer if different from decoder embed dim)')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        parser.add_argument('--load-pretrained-encoder-from', type=str, metavar='STR', help='model to take encoder weights from (for initialization)')
        parser.add_argument('--load-pretrained-decoder-from', type=str, metavar='STR', help='model to take decoder weights from (for initialization)')
        parser.add_argument('--conv-out-channels', type=int, metavar='INT', help='the number of output channels of conv layer')

    def build_encoder(cls, args):
        """Build the encoder, optionally initializing from a pretrained checkpoint.

        NOTE(review): takes cls — presumably @classmethod in the original.
        """
        encoder = ConvTransformerEncoder(args)
        if getattr(args, 'load_pretrained_encoder_from', None):
            encoder = checkpoint_utils.load_pretrained_component_from_model(component=encoder, checkpoint=args.load_pretrained_encoder_from)
        return encoder

    def build_decoder(cls, args, task, embed_tokens):
        """Build the decoder, optionally initializing from a pretrained checkpoint.

        NOTE(review): takes cls — presumably @classmethod in the original.
        """
        decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens)
        if getattr(args, 'load_pretrained_decoder_from', None):
            decoder = checkpoint_utils.load_pretrained_component_from_model(component=decoder, checkpoint=args.load_pretrained_decoder_from)
        return decoder

    def build_model(cls, args, task):
        """Assemble the full model for *task* (presumably a @classmethod)."""
        # Fill in any missing args with the architecture's defaults first.
        base_architecture(args)

        def build_embedding(dictionary, embed_dim):
            # Embedding sized to the target dictionary, padding-aware.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)
        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        encoder = cls.build_encoder(args)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        return cls(encoder, decoder)

    # NOTE(review): '.unused' below is a mangled decorator — presumably
    # '@torch.jit.unused'; and set_batch_first takes no self, so presumably
    # @staticmethod as well.  Confirm against the original.
    .unused
    def set_batch_first(lprobs):
        # Tag the tensor so downstream consumers know its layout is batch-first.
        lprobs.batch_first = True

    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Return (log-)probabilities from the decoder output.

        During training the result is marked batch-first via set_batch_first.
        """
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        if self.training:
            self.set_batch_first(lprobs)
        return lprobs

    def output_layout(self):
        # Batch x Time x Dim layout identifier for this model's outputs.
        return 'BTD'
    '\n The forward method inherited from the base class has a **kwargs argument in\n its input, which is not supported in torchscript. This method overrites the forward\n method definition without **kwargs.\n '
    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Torchscript-friendly forward (no **kwargs): encode then decode."""
        encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
# NOTE(review): the line below is a mangled decorator — presumably
# '@pytest.mark.parametrize("sampling_strategy", [...])'; confirm.
.parametrize('sampling_strategy', ['auto', 'majority', 'not minority', 'not majority', 'all'])
def test_random_under_sampler_strings(sampling_strategy):
    """Every documented string sampling strategy must be accepted without error."""
    # Imbalanced 3-class toy problem; fit_resample raising would fail the test.
    (X, y) = make_classification(n_samples=100, n_clusters_per_class=1, n_classes=3, weights=[0.1, 0.3, 0.6], random_state=0)
    RandomUnderSampler(sampling_strategy=sampling_strategy).fit_resample(X, y)
def register_function(lib, item, ignore_errors):
    """Resolve and configure one ctypes function on *lib*.

    *item* is a tuple ``(name[, argtypes[, restype[, errcheck]]])``.  The
    named attribute is looked up on *lib* and its ctypes metadata is filled
    in from the remaining tuple fields.  A missing symbol either returns
    silently (when *ignore_errors* is true) or raises LibclangError.
    """
    try:
        func = getattr(lib, item[0])
    except AttributeError as e:
        msg = (str(e) + '. Please ensure that your python bindings are compatible with your libclang.so version.')
        if ignore_errors:
            return
        # Chain the original AttributeError so the root cause stays visible
        # in the traceback (the original raise dropped the context).
        raise LibclangError(msg) from e
    if len(item) >= 2:
        func.argtypes = item[1]
    if len(item) >= 3:
        func.restype = item[2]
    if len(item) == 4:
        func.errcheck = item[3]
def soft_update_from_to(source, target, tau):
    """Polyak-average *source* parameters into *target* in place.

    Each target parameter becomes ``(1 - tau) * target + tau * source``.
    """
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        blended = tgt_param.data * (1.0 - tau) + src_param.data * tau
        tgt_param.data.copy_(blended)
class ParallelAvoidance():
    """Scenario generator: one pedestrian met head-on by randomized oncomers."""

    def __init__(self, ped_ped=None, *, b_center=0.0, **kwargs):
        # Falsy ped_ped falls back to the default potential (original used `or`).
        self.ped_ped = ped_ped or potentials.PedPedPotential(2.1)
        self.b_center = b_center
        self.simulator = Simulator(ped_ped=self.ped_ped, **kwargs)

    def generate(self, n):
        """Simulate *n* encounters (21 steps each); returns the list of runs."""
        leader_speed = 0.7 + 0.4 * torch.rand(1).item()
        # State rows: [x, y, vx, vy, goal_x, goal_y]
        leader = [(- 5.0), 0.0, leader_speed, 0.0, 6.0, 0.0]
        initial_states = []
        lateral_offsets = 0.75 * torch.randn(n) + self.b_center
        for offset in lateral_offsets:
            oncoming_speed = 1.2 + 0.2 * torch.rand(1).item()
            oncoming = [7.0, offset, (- oncoming_speed), 0.0, (- 7.0), offset]
            raw_state = np.array([leader, oncoming])
            initial_states.append(self.simulator.normalize_state(raw_state))
        with torch.no_grad():
            return [self.simulator.run(state, 21) for state in initial_states]
# NOTE(review): the bare tuple below looks like a mangled registration
# decorator — presumably '@Registry.register("data.resisc45", "class")';
# confirm against the original VTAB/task_adaptation source.
('data.resisc45', 'class')
class Resisc45Data(base.ImageTfdsData):
    """RESISC-45 remote-sensing image dataset, split from the TFDS 'train' set.

    TFDS only publishes a single 'train' split, so train/val/test are carved
    out of it by the *_SPLIT_PERCENT module constants.
    """

    def __init__(self, data_dir=None):
        dataset_builder = tfds.builder('resisc45:3.*.*', data_dir=data_dir)
        dataset_builder.download_and_prepare()
        # Absolute example counts for each derived split.
        num_examples = dataset_builder.info.splits['train'].num_examples
        train_count = ((num_examples * TRAIN_SPLIT_PERCENT) // 100)
        val_count = ((num_examples * VALIDATION_SPLIT_PERCENT) // 100)
        test_count = ((num_examples * TEST_SPLIT_PERCENT) // 100)
        # TFDS slice expressions for every split the benchmark expects,
        # including the fixed-size train800/val200 probes.
        tfds_splits = {'train': 'train[:{}]'.format(train_count), 'val': 'train[{}:{}]'.format(train_count, (train_count + val_count)), 'trainval': 'train[:{}]'.format((train_count + val_count)), 'test': 'train[{}:]'.format((train_count + val_count)), 'train800': 'train[:800]', 'val200': 'train[{}:{}]'.format(train_count, (train_count + 200)), 'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, (train_count + 200))}
        num_samples_splits = {'train': train_count, 'val': val_count, 'trainval': (train_count + val_count), 'test': test_count, 'train800': 800, 'val200': 200, 'train800val200': 1000}
        super(Resisc45Data, self).__init__(dataset_builder=dataset_builder, tfds_splits=tfds_splits, num_samples_splits=num_samples_splits, num_preprocessing_threads=400, shuffle_buffer_size=10000, base_preprocess_fn=base.make_get_and_cast_tensors_fn({'image': ('image', None), 'label': ('label', None)}), num_classes=dataset_builder.info.features['label'].num_classes)
def test_rag_generator():
    """Smoke test: a full RagGenerator.generate round trip must report no error."""
    settings = dict(
        client_name='openai',
        model_name='text-curie-001',
        context_dir='data/home_search/v0',
        max_output_token=256,
        top_k_api=10,
        top_k_example=3,
        query_template='{api_docs}\n{examples}\nTask: {query}\nActions:\n',
    )
    generator = RagGenerator(**settings)
    prompt, text, error = generator.generate('Find a home with 12 bed above $961000 in Birmingham.')
    assert error is None, error
def separate_and_evaluate(track, args, ext):
    """Separate *track*, optionally persist estimates, and score with museval.

    Clears *ext*'s memory cache before returning the museval scores.
    """
    estimates = test.separate(track.audio, args)
    out_dir = args.out_dir
    if out_dir:
        mus.save_estimates(estimates, track, out_dir)
    scores = museval.eval_mus_track(track, estimates, output_dir=out_dir)
    ext.clear_memory_cache()
    return scores
def get_args():
    """Parse command-line arguments for FFN training."""
    return add_ffn_train_args(argparse.ArgumentParser()).parse_args()
def check_dist_restriction(options, check_target=False):
    """Validate dist-restricting flags against the rest of *options*.

    Raises CommandError when --python-version/--platform/--abi/--implementation
    are combined with sdist-dependent dependency resolution, or (when
    *check_target* is true) used without --target.
    """
    restricted = any([
        options.python_version,
        options.platform,
        options.abi,
        options.implementation,
    ])
    binary_only = FormatControl(set(), {':all:'})
    needs_sdist_deps = options.format_control != binary_only and not options.ignore_dependencies
    if restricted and needs_sdist_deps:
        raise CommandError('When restricting platform and interpreter constraints using --python-version, --platform, --abi, or --implementation, either --no-deps must be set, or --only-binary=:all: must be set and --no-binary must not be set (or must be set to :none:).')
    if check_target and restricted and not options.target_dir:
        raise CommandError("Can not use any platform or abi specific options unless installing via '--target'")
def processFiles(fname, prefix, dset, trunc):
    """Convert a JSONL file of parsed Java classes into a CONCODE-style dataset.

    Reads one JSON object per line from *fname*, tags member-variable /
    member-function references in the code tokens, derives a grammar rule
    sequence, and writes at most *trunc* examples to ``<prefix>.dataset``.
    *dset* == 'train' records NL strings in the module-level ``trainNls`` so
    later splits can drop overlapping examples.

    NOTE(review): relies on module-level names ``trainNls``, ``processNlToks``
    and ``getProductions`` defined elsewhere in the file; the bare ``except``
    blocks drop into pdb — presumably debugging leftovers.
    """
    dataset = []
    codeVocab = collections.Counter()
    nlVocab = collections.Counter()
    i = 0
    didnt_parse = 0
    for line in open(fname, 'r'):
        i += 1
        if ((i % 10000) == 0):
            print(i)  # progress heartbeat
        js = json.loads(line)
        code = js['renamed']
        codeVocab.update(code)
        nlToks = processNlToks(js['nlToks'])
        nlVocab.update(nlToks)
        # Normalize code tokens to ASCII and strip form-feeds/whitespace.
        codeToks = [cTok.encode('ascii', 'replace').decode().replace('\x0c', '').strip() for cTok in code]
        if ((len(nlToks) == 0) or (len(codeToks) == 0)):
            continue
        # Guarantee non-empty member maps with placeholder entries.
        if (len(js['memberVariables']) == 0):
            js['memberVariables']['placeHolder'] = 'PlaceHolder'
        if (len(js['memberFunctions']) == 0):
            js['memberFunctions']['placeHolder'] = [['placeholderType']]
        (methodNames, methodReturns, methodParamNames, methodParamTypes) = ([], [], [], [])
        # Flatten every overload of every member function into parallel lists.
        for methodName in js['memberFunctions']:
            for methodInstance in js['memberFunctions'][methodName]:
                methodNames.append(methodName)
                methodReturns.append(('None' if (methodInstance[0] is None) else methodInstance[0]))
                if (len(methodInstance) == 1):
                    # No parameters recorded: insert a sentinel param entry.
                    methodInstance += ['NoParams noParams']
                # Each param entry is "type... name": last token is the name.
                methodParamNames.append([methodInstance[p].split()[(- 1)] for p in range(1, len(methodInstance))])
                methodParamTypes.append([' '.join(methodInstance[p].split()[:(- 1)]).replace('final ', '') for p in range(1, len(methodInstance))])
        memberVarNames = [key.split('=')[0].encode('ascii', 'replace').decode() for (key, value) in js['memberVariables'].items()]
        memberVarTypes = [value.encode('ascii', 'replace').decode() for (key, value) in js['memberVariables'].items()]
        # Prefix tokens that reference class members: a member variable used
        # bare (or via 'this.') and a member function at a call site.
        for t in range(0, len(codeToks)):
            if ((codeToks[t] in memberVarNames) and ((codeToks[(t - 1)] != '.') or ((codeToks[(t - 1)] == '.') and (codeToks[(t - 2)] == 'this')))):
                codeToks[t] = ('concodeclass_' + codeToks[t])
            elif ((codeToks[t] == '(') and (codeToks[(t - 1)] in methodNames) and ((codeToks[(t - 2)] != '.') or ((codeToks[(t - 2)] == '.') and (codeToks[(t - 3)] == 'this')))):
                codeToks[(t - 1)] = ('concodefunc_' + codeToks[(t - 1)])
        try:
            # Wrap the method in a dummy class so the parser accepts it.
            rule_seq = getProductions((('class TestClass { ' + ' '.join(codeToks)) + ' }'))
        except:
            import pdb
            pdb.set_trace()
        if (rule_seq is None):
            didnt_parse += 1
            continue
        if (dset == 'train'):
            trainNls.append(' '.join(nlToks))
        elif (' '.join(nlToks) in trainNls):
            # Drop dev/test examples whose NL already appears in train.
            continue
        try:
            # Linearized context strings: NL + fields (+ full method signatures
            # for seq2seq; names/returns only for seq2seq_nop).
            seq2seq = ((((' '.join(nlToks).lower() + ' concode_field_sep ') + ' concode_elem_sep '.join([((vtyp + ' ') + vnam) for (vnam, vtyp) in zip(memberVarNames, memberVarTypes)])) + ' concode_field_sep ') + ' concode_elem_sep '.join([((((mret + ' ') + mname) + ' concode_func_sep ') + ' concode_func_sep '.join((((mpt + ' ') + mpn) for (mpt, mpn) in zip(mpts, mpns)))) for (mret, mname, mpts, mpns) in zip(methodReturns, methodNames, methodParamTypes, methodParamNames)])).split()
            seq2seq_nop = ((((' '.join(nlToks).lower() + ' concode_field_sep ') + ' concode_elem_sep '.join([((vtyp + ' ') + vnam) for (vnam, vtyp) in zip(memberVarNames, memberVarTypes)])) + ' concode_field_sep ') + ' concode_elem_sep '.join([((mret + ' ') + mname) for (mret, mname) in zip(methodReturns, methodNames)])).split()
        except:
            import pdb
            pdb.set_trace()
        dataset.append({'nl': nlToks, 'code': codeToks, 'idx': js['idx'], 'varNames': memberVarNames, 'varTypes': memberVarTypes, 'rules': rule_seq, 'methodNames': methodNames, 'methodReturns': methodReturns, 'methodParamNames': methodParamNames, 'methodParamTypes': methodParamTypes, 'seq2seq': seq2seq, 'seq2seq_nop': seq2seq_nop})
        if (len(dataset) == trunc):
            break
    f = open((prefix + '.dataset'), 'w')
    f.write(json.dumps(dataset, indent=4))
    f.close()
    print(('Total code vocab: ' + str(len(codeVocab))))
    print(('Total nl vocab: ' + str(len(nlVocab))))
    print(('Total didnt parse: ' + str(didnt_parse)))
class class_cls(nn.Module):
    """Classifier head: input -> fc1(512) -> BN -> ReLU -> fc2(nclass).

    NOTE(review): ``self.fc``, ``self.logic`` and ``self.bn_fc2`` are built
    but never used in forward, and the ``att`` argument is ignored —
    presumably kept for interface compatibility with sibling heads; confirm.
    """

    def __init__(self, input_dim, nclass):
        super(class_cls, self).__init__()
        # Module construction order is preserved so weights_init sees the
        # same initialization sequence as before.
        self.fc = nn.Linear(input_dim, nclass)
        self.logic = nn.LogSoftmax(dim=1)
        self.fc1 = nn.Linear(input_dim, 512)
        self.bn1_fc = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, nclass)
        self.bn_fc2 = nn.BatchNorm1d(nclass)
        self.apply(weights_init)

    def forward(self, x, att):
        hidden = F.relu(self.bn1_fc(self.fc1(x)))
        return self.fc2(hidden)
def is_PowerSeriesRing(R):
    """Return True iff *R* is a univariate (single-generator) power series ring."""
    return isinstance(R, PowerSeriesRing_generic) and R.ngens() == 1
def get_public_fields(obj):
    """Return the names of *obj*'s public, non-callable attributes.

    Bug fix: the original passed the attribute *name* (a string) to
    inspect.isbuiltin/isfunction/ismethod, which is always False for a
    string, so methods were never filtered out.  We now inspect the
    attribute's value instead.
    """
    fields = []
    for attr in dir(obj):
        if attr.startswith('_'):
            continue
        value = getattr(obj, attr)
        if inspect.isbuiltin(value) or inspect.isfunction(value) or inspect.ismethod(value):
            continue
        fields.append(attr)
    return fields
def single_or_rankzero():
    """True when running without a communicator or on communicator rank 0."""
    comm = _current_communicator
    return not comm or comm.rank == 0
class UniformPolicy(Policy, Serializable):
    """Policy that samples actions uniformly from [-1, 1]^Da, ignoring observations."""

    def __init__(self, env_spec):
        Serializable.quick_init(self, locals())
        self._Da = env_spec.action_space.flat_dim
        super(UniformPolicy, self).__init__(env_spec)

    def get_action(self, observation):
        """Sample a single uniform action; no agent info dict."""
        sample = np.random.uniform((- 1.0), 1.0, self._Da)
        return (sample, None)

    def get_actions(self, observations):
        pass

    def log_diagnostics(self, paths):
        pass

    def get_params_internal(self, **tags):
        pass

    def get_action_with_raw(self, observation):
        """Sample an action and also return its pre-tanh (raw) value."""
        action = np.random.uniform((- 1.0), 1.0, self._Da)
        return (action, np.arctanh(action))
# NOTE(review): the two lines below look like mangled decorators — presumably
# '@pytest.mark.skipif(...)' and '@test_utils.test()' in the original taichi
# test file; confirm.
.skipif((not has_pytorch()), reason='Pytorch not installed.')
_utils.test()
def test_torch_zero():
    """Zero-sized torch tensors must be accepted as ndarray kernel arguments."""
    # NOTE(review): the inner function presumably carried a '@ti.kernel'
    # decorator that was stripped; its body is intentionally empty — the test
    # only checks argument passing.
    def test_torch(arr: ti.types.ndarray()):
        pass
    test_torch(torch.zeros(0, dtype=torch.int32))
    test_torch(torch.zeros((0, 5), dtype=torch.int32))
    test_torch(torch.zeros((5, 0, 5), dtype=torch.int32))
class TestAAPhi(unittest.TestCase):
    """Cross-check the fast aa_phi against the slow reference implementation."""

    def test_aa_phi(self):
        # Random Haar unitaries of sizes 1..4, photon numbers 1..4.
        for m in range(1, 5):
            unitary = b.util.haar_rand(m)
            for n in range(1, 5):
                fast = b.aa_phi(unitary, n)
                reference = aa_phi_slow(unitary, n)
                self.assertTrue(np.allclose(fast, reference))
class Siren(nn.Module):
    """SIREN: an MLP of sine-activated layers, optionally ending in a linear layer."""

    def __init__(self, in_features, hidden_features, hidden_layers, out_features, outermost_linear=False, first_omega_0=30, hidden_omega_0=30.0):
        super().__init__()
        # Layers are created in the same order as before so RNG-driven
        # initialization is unchanged.
        layers = [SineLayer(in_features, hidden_features, is_first=True, omega_0=first_omega_0)]
        for _ in range(hidden_layers):
            layers.append(SineLayer(hidden_features, hidden_features, is_first=False, omega_0=hidden_omega_0))
        if outermost_linear:
            final_linear = nn.Linear(hidden_features, out_features)
            # SIREN init for the last linear layer: +/- sqrt(6/fan_in)/omega.
            bound = np.sqrt((6 / hidden_features)) / hidden_omega_0
            with torch.no_grad():
                final_linear.weight.uniform_((- bound), bound)
            layers.append(final_linear)
        else:
            layers.append(SineLayer(hidden_features, out_features, is_first=False, omega_0=hidden_omega_0))
        self.net = nn.Sequential(*layers)

    def forward(self, coords):
        # Detach so gradients w.r.t. the returned coords start fresh here.
        coords = coords.clone().detach().requires_grad_(True)
        return (self.net(coords), coords)

    def forward_with_activations(self, coords, retain_grad=False):
        """Run the network, recording every intermediate activation by name."""
        activations = OrderedDict()
        counter = 0
        x = coords.clone().detach().requires_grad_(True)
        activations['input'] = x
        for layer in self.net:
            if isinstance(layer, SineLayer):
                (x, intermed) = layer.forward_with_intermediate(x)
                if retain_grad:
                    x.retain_grad()
                    intermed.retain_grad()
                # Sine layers also record the pre-activation value.
                activations['_'.join((str(layer.__class__), ('%d' % counter)))] = intermed
                counter += 1
            else:
                x = layer(x)
                if retain_grad:
                    x.retain_grad()
            activations['_'.join((str(layer.__class__), ('%d' % counter)))] = x
            counter += 1
        return activations
def _try_numeric(val: str) -> ((float | str) | None):
if (val == ''):
return None
try:
return float(val)
except ValueError:
return val |
def _get_line_to_branch_coverage(subject_properties, trace):
    """Map line number -> CoverageEntry aggregating branch coverage per line.

    Every predicate contributes two 'existing' branch outcomes on its line;
    an outcome counts as covered when its recorded distance is exactly 0.0.
    Perf fix: the original tested ``(predicate, 0.0) in dict.items()`` — an
    O(n) scan per predicate — replaced by an equivalent O(1) ``dict.get``.
    """
    line_to_branch_coverage = {}
    for predicate, meta in subject_properties.existing_predicates.items():
        lineno = meta.line_no
        if lineno not in line_to_branch_coverage:
            line_to_branch_coverage[lineno] = CoverageEntry()
        line_to_branch_coverage[lineno] += CoverageEntry(existing=2)
        if trace.true_distances.get(predicate) == 0.0:
            line_to_branch_coverage[lineno] += CoverageEntry(covered=1)
        if trace.false_distances.get(predicate) == 0.0:
            line_to_branch_coverage[lineno] += CoverageEntry(covered=1)
    return line_to_branch_coverage
def load_checkpoints(model, path, device):
    """Load 'model_state' weights from the checkpoint at *path* into *model*.

    The checkpoint is mapped onto *device* while loading.  Raises a clear
    KeyError when the checkpoint has no 'model_state' entry — the original
    passed None to load_state_dict, which failed with an opaque TypeError.
    """
    checkpoint = torch.load(path, map_location=device)
    try:
        model_state = checkpoint['model_state']
    except KeyError:
        raise KeyError(f"checkpoint at {path!r} has no 'model_state' entry") from None
    model.load_state_dict(model_state)
def make_builder(out_file, impl, vocab_size=None):
    """Return a dataset builder for *out_file* matching the storage *impl*.

    'mmap' picks the dtype best fitting *vocab_size*; 'fasta' is unsupported;
    anything else falls back to the plain indexed builder.
    """
    if impl == 'mmap':
        return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
    if impl == 'fasta':
        raise NotImplementedError
    return IndexedDatasetBuilder(out_file)
def test_tf_grad_log_sm():
    """Gradient of a tiny log-softmax forward recursion (TF1 graph-mode demo).

    NOTE(review): uses the TF1 API (tf.Session, tf.is_finite) — incompatible
    with TF2 eager execution without compat shims.
    """
    import tensorflow as tf
    print('TF version:', tf.__version__)
    with tf.Session() as session:
        x = tf.constant([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        y = tf.nn.log_softmax(x)
        # Forward-algorithm state; -inf marks states not yet reachable.
        scores = [0.0, float('-inf'), float('-inf')]
        def combine(s_, y_):
            # Only add the emission where the score is finite, so that
            # -inf states stay -inf instead of producing NaNs.
            return tf.where(tf.is_finite(s_), (s_ + y_), s_)
        for t in range(2):
            ys = y[t]
            # Stay / advance-by-one transitions, merged in log space.
            scores = [combine(scores[0], ys[0]), tf.reduce_logsumexp([combine(scores[0], ys[1]), combine(scores[1], ys[1])]), tf.reduce_logsumexp([combine(scores[1], ys[2]), combine(scores[2], ys[2])])]
        z = scores[(- 1)]
        (dx,) = tf.gradients(z, x)
        print(session.run(dx))
# NOTE(review): the line below looks like a mangled decorator — presumably
# '@test_utils.test(require=ti.extension.bls)'; confirm against the original.
_utils.test(require=ti.extension.bls)
def test_scattering():
    """BLS particle-grid smoke test in scatter mode without grid offsets."""
    bls_particle_grid(N=128, ppc=10, block_size=8, scatter=True, use_offset=False)
class BiSeNet(nn.Module):
    """BiSeNet segmentation network: context path + feature fusion + 3 output heads."""

    def __init__(self, num_class):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath()
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, num_class)
        self.conv_out16 = BiSeNetOutput(128, 64, num_class)
        self.conv_out32 = BiSeNetOutput(128, 64, num_class)

    def forward(self, x, return_feat=False):
        """Return (out, out16, out32[, feat, feat16, feat32]) at input resolution."""
        height, width = x.size()[2:]
        feat_res8, feat_cp8, feat_cp16 = self.cp(x)
        # The 1/8-resolution residual features stand in for a spatial path.
        feat_fuse = self.ffm(feat_res8, feat_cp8)
        out, feat = self.conv_out(feat_fuse)
        out16, feat16 = self.conv_out16(feat_cp8)
        out32, feat32 = self.conv_out32(feat_cp16)

        def upsample(tensor):
            # Bilinear upsample back to the input's spatial size.
            return F.interpolate(tensor, (height, width), mode='bilinear', align_corners=True)

        out, out16, out32 = upsample(out), upsample(out16), upsample(out32)
        if not return_feat:
            return (out, out16, out32)
        return (out, out16, out32, upsample(feat), upsample(feat16), upsample(feat32))
def test_multiclip_mono():
    """ClipGroup.get_target must mix mono clips to a (1, 100) target.

    Exercised twice: once with clips whose audio is a flat (100,) array and
    once with (1, 100) arrays; averaged targets stay within [-1, 1], while
    average=False sums (two clips -> bound 2).

    NOTE(review): the second TestClipGroup1 shadows the first (same name),
    and `f` / `clip_audio_property` presumably carried @property decorators
    that were stripped; confirm against the original soundata tests.
    """
    class TestClip(core.Clip):
        def __init__(self, key, data_home='foo', dataset_name='foo', index=None, metadata=None):
            self.key = key

        def f(self):
            # Mono audio as a flat (100,) array plus a sample rate.
            return (np.random.uniform((- 1), 1, 100), 1000)

    class TestClipGroup1(core.ClipGroup):
        def __init__(self, clipgroup_id, data_home, dataset_name, index, clip_class, metadata):
            super().__init__(clipgroup_id, data_home, dataset_name, index, clip_class, metadata)

        def to_jams(self):
            return None

        def clip_audio_property(self):
            # Name of the clip attribute that yields (audio, sample_rate).
            return 'f'
    index = {'clipgroups': {'ab': {'clips': ['a', 'b', 'c']}}}
    clipgroup_id = 'ab'
    dataset_name = 'test'
    data_home = 'tests/resources/sound_datasets'
    clipgroup = TestClipGroup1(clipgroup_id, data_home, dataset_name, index, TestClip, (lambda : None))
    target1 = clipgroup.get_target(['a', 'c'])
    assert (target1.shape == (1, 100))
    assert (np.max(np.abs(target1)) <= 1)
    # Without averaging, two summed clips can reach amplitude 2.
    target1 = clipgroup.get_target(['a', 'c'], average=False)
    assert (target1.shape == (1, 100))
    assert (np.max(np.abs(target1)) <= 2)

    class TestClip1(core.Clip):
        def __init__(self, key, data_home='foo', dataset_name='foo', index=None, metadata=None):
            self.key = key

        def f(self):
            # Same audio but already shaped (1, 100).
            return (np.random.uniform((- 1), 1, (1, 100)), 1000)

    class TestClipGroup1(core.ClipGroup):
        def __init__(self, clipgroup_id, data_home, dataset_name, index, clip_class, metadata):
            super().__init__(clipgroup_id, data_home, dataset_name, index, clip_class, metadata)

        def to_jams(self):
            return None

        def clip_audio_property(self):
            return 'f'
    index = {'clipgroups': {'ab': {'clips': ['a', 'b', 'c']}}}
    clipgroup_id = 'ab'
    dataset_name = 'test'
    data_home = 'tests/resources/sound_datasets'
    # NOTE(review): this still passes TestClip (not TestClip1) — presumably
    # intentional reuse, but worth confirming.
    clipgroup = TestClipGroup1(clipgroup_id, data_home, dataset_name, index, TestClip, (lambda : None))
    target1 = clipgroup.get_target(['a', 'c'])
    assert (target1.shape == (1, 100))
    assert (np.max(np.abs(target1)) <= 1)
    target1 = clipgroup.get_target(['a', 'c'], average=False)
    assert (target1.shape == (1, 100))
    assert (np.max(np.abs(target1)) <= 2)
def step(epoch):
    """Piecewise-constant schedule over EP epochs: 0, 1/3, 2/3, then 1.

    NOTE(review): EP is a module-level constant defined elsewhere in the file.
    """
    if epoch < EP / 4:
        return 0
    elif epoch < EP / 2:
        return 1 / 3
    elif epoch < EP * 3 / 4:
        return 2 / 3
    return 1
def generate_seq_indexes(indexes):
    """Yield every combination (as a list) taking one element per sub-list.

    The empty spec yields a single empty combination, so the recursion
    bottoms out cleanly.
    """
    if indexes:
        head, *tail = indexes
        for prefix in head:
            for suffix in generate_seq_indexes(tail):
                yield [prefix, *suffix]
    else:
        yield []
def load_existing_model(args, cfg, cfg_train, reload_ckpt_path, device):
    """Pick the model class for *cfg*, then restore model + optimizer state.

    Returns (model, optimizer, start_step) loaded from *reload_ckpt_path*.
    """
    fourier_grid_datasets = ('waymo', 'mega', 'nerfpp')
    if cfg.data.dataset_type in fourier_grid_datasets or cfg.model == 'FourierGrid':
        model_class = FourierGridModel
    elif cfg.data.ndc:
        model_class = dmpigo.DirectMPIGO
    elif cfg.data.unbounded_inward:
        model_class = dcvgo.DirectContractedVoxGO
    else:
        model_class = dvgo.DirectVoxGO
    model = utils.load_model(model_class, reload_ckpt_path).to(device)
    optimizer = utils.create_optimizer_or_freeze_model(model, cfg_train, global_step=0)
    model, optimizer, start = utils.load_checkpoint(model, optimizer, reload_ckpt_path, args.no_reload_optimizer)
    return (model, optimizer, start)
def update_average(model_tgt, model_src, beta):
    """EMA update: tgt <- beta * tgt + (1 - beta) * src, parameter-wise.

    Bug fix: the update is wrapped in torch.no_grad() — calling copy_ on a
    leaf parameter that requires grad raises a RuntimeError outside a
    no-grad context, and the EMA step must not be tracked by autograd.
    """
    param_dict_src = dict(model_src.named_parameters())
    with torch.no_grad():
        for p_name, p_tgt in model_tgt.named_parameters():
            p_src = param_dict_src[p_name]
            # The two models must not share parameter storage.
            assert p_src is not p_tgt
            p_tgt.copy_((beta * p_tgt) + ((1.0 - beta) * p_src))
class AlgebraicNumber_base(sage.structure.element.FieldElement):
    """Common base for algebraic numbers (QQbar) and algebraic reals (AA).

    Each element carries an exact symbolic description (``self._descr``, an
    ANDescr subclass) plus a cached interval approximation (``self._value``)
    that is refined on demand.
    """

    def __init__(self, parent, x):
        """Wrap *x* (rational, ANDescr, or Gaussian number-field element)."""
        sage.structure.element.FieldElement.__init__(self, parent)
        if isinstance(x, (int, sage.rings.integer.Integer, sage.rings.rational.Rational)):
            self._descr = ANRational(x)
        elif isinstance(x, ANDescr):
            self._descr = x
        elif ((parent is QQbar) and isinstance(x, NumberFieldElement_gaussian)):
            # Gaussian rationals embed via the QQ(i) generator; a reversed
            # embedding flips the sign of the imaginary part.
            if x.parent()._standard_embedding:
                self._descr = ANExtensionElement(QQbar_I_generator, QQbar_I_nf(x.list()))
            else:
                self._descr = ANExtensionElement(QQbar_I_generator, QQbar_I_nf([x[0], (- x[1])]))
        else:
            raise TypeError('Illegal initializer for algebraic number')
        prec = 64
        self._value = self._descr._interval_fast(prec)
        # Keep doubling precision until the interval is meaningful.
        while self._value.is_NaN():
            prec = (2 * prec)
            self._value = self._descr._interval_fast(prec)

    def _repr_(self):
        """String form; honors the parent's 'radical' display option."""
        if isinstance(self._descr, ANRational):
            return repr(self._descr)
        if (isinstance(self._descr, ANExtensionElement) and (self._descr._generator is QQbar_I_generator)):
            return repr(self._descr._value)
        if (self.parent().options.display_format == 'radical'):
            radical = self.radical_expression()
            # radical_expression returns self when no radical form was found.
            if (radical is not self):
                return repr(radical)
        if (self.parent() is QQbar):
            return repr(CIF(self._value))
        else:
            return repr(RIF(self._value))

    def _latex_(self):
        """LaTeX form, mirroring _repr_'s display logic."""
        from sage.misc.latex import latex
        if isinstance(self._descr, ANRational):
            return latex(self._descr._value)
        if (isinstance(self._descr, ANExtensionElement) and (self._descr._generator is QQbar_I_generator)):
            return latex(self._descr._value)
        if (self.parent().options.display_format == 'radical'):
            radical = self.radical_expression()
            if (radical is not self):
                return latex(radical)
        return repr(self).replace('*I', ' \\sqrt{-1}')

    def _sage_input_(self, sib, coerce):
        """Produce sage_input form via the descriptor's handler."""
        (v, complicated) = self._descr.handle_sage_input(sib, coerce, (self.parent() is QQbar))
        # NOTE(review): 'complicated or True' always caches — presumably
        # deliberate (cache every constructed value); confirm upstream intent.
        if (complicated or True):
            sib.id_cache(self, v, 'v')
        return v

    def _mul_(self, other):
        # Dispatch on the pair of descriptor types to the right algorithm.
        sk = type(self._descr)
        ok = type(other._descr)
        return type(self)(_binop_algo[(sk, ok)](self, other, operator.mul))

    def _div_(self, other):
        if (not other):
            raise ZeroDivisionError('division by zero in algebraic field')
        sk = type(self._descr)
        ok = type(other._descr)
        return type(self)(_binop_algo[(sk, ok)](self, other, operator.truediv))

    def __invert__(self):
        if (not self):
            raise ZeroDivisionError('division by zero in algebraic field')
        return type(self)(self._descr.invert(self))

    def _add_(self, other):
        sk = type(self._descr)
        ok = type(other._descr)
        return type(self)(_binop_algo[(sk, ok)](self, other, operator.add))

    def _sub_(self, other):
        sk = type(self._descr)
        ok = type(other._descr)
        return type(self)(_binop_algo[(sk, ok)](self, other, operator.sub))

    def _neg_(self):
        return type(self)(self._descr.neg(self))

    def __abs__(self):
        # Absolute value is always real, hence an AlgebraicReal.
        return AlgebraicReal(self._descr.abs(self))

    def __hash__(self):
        """Hash via an exact interval of a shifted copy.

        The offset makes it improbable that two mathematically distinct
        elements round to the same exact interval.
        """
        if (self.parent() is AA):
            return hash((self + AA_hash_offset).interval_exact(RIF))
        else:
            return hash((self + QQbar_hash_offset).interval_exact(CIF))

    def __bool__(self):
        """True iff the number is nonzero; tries cheap tests before exactifying."""
        # Fast path: interval excludes zero.
        if (not self._value.contains_zero()):
            return True
        elif self._value.is_zero():
            # Interval is exactly zero: normalize the descriptor to rational 0.
            if (not isinstance(self._descr, ANRational)):
                self._set_descr(ANRational(QQ.zero()))
            return False
        sd = self._descr
        # Nonzero by construction for extension elements.
        if isinstance(sd, ANExtensionElement):
            return True
        elif isinstance(sd, ANRational):
            return bool(sd._value)
        elif (isinstance(sd, ANUnaryExpr) and (sd._op != 'real') and (sd._op != 'imag')):
            # Unary ops other than real/imag preserve (non)zeroness.
            ans = bool(sd._arg)
            if (not ans):
                self._set_descr(ANRational(QQ.zero()))
            return ans
        elif (isinstance(sd, ANBinaryExpr) and (sd._op is operator.mul)):
            # A product is zero iff either factor is zero.
            ans = (bool(sd._left) and bool(sd._right))
            if (not ans):
                self._set_descr(ANRational(QQ.zero()))
            return ans
        elif (isinstance(sd, ANBinaryExpr) and (sd._op is operator.truediv)):
            # A quotient is zero iff the numerator is zero.
            ans = bool(sd._left)
            if (not ans):
                self._set_descr(ANRational(QQ.zero()))
            return ans
        # Try a bit more interval precision before doing exact work.
        if (self._value.prec() < 128):
            self._more_precision()
            if (not self._value.contains_zero()):
                return True
        # For sums/differences, compare the two operands symbolically.
        if isinstance(sd, ANBinaryExpr):
            op = sd._op
            left = sd._left
            # Normalize to a subtraction: self == left - right.
            right = (sd._right if (op is operator.sub) else (- sd._right))
            lp = left.minpoly()
            rp = right.minpoly()
            if (lp != rp):
                # Different minimal polynomials => operands differ => nonzero.
                return True
            c = cmp_elements_with_same_minpoly(left, right, left.minpoly())
            if (c is not None):
                if (c == 0):
                    self._set_descr(ANRational(QQ.zero()))
                return bool(c)
        # Last resort: compute an exact representation and retry.
        self.exactify()
        return bool(self)

    def is_square(self):
        """Every QQbar element is a square; in AA only non-negatives are."""
        if (self.parent() is AA):
            return bool((self >= 0))
        else:
            return True

    def is_integer(self):
        return (self in ZZ)

    def sqrt(self, all=False, extend=True):
        """Square root; *all* returns both roots, *extend* allows leaving AA."""
        if self.is_zero():
            if all:
                return [self]
            else:
                return self
        if ((self.parent() is AA) and (self < 0) and (not extend)):
            if (not all):
                raise ValueError(lazy_string('%s is not a square in AA, being negative. Use extend = True for a square root in QQbar.', self))
            else:
                return []
        # Powering by 1/2 via the inverse of ZZ(2).
        root = (self ** (~ ZZ(2)))
        if all:
            return [root, (- root)]
        else:
            return root

    def nth_root(self, n, all=False):
        """Principal n-th root, or (with *all*) every n-th root in QQbar."""
        if (not all):
            return (self ** (~ ZZ(n)))
        else:
            root = (QQbar(self) ** (~ ZZ(n)))
            zlist = [root]
            zeta = QQbar.zeta(n)
            # Multiply by successive n-th roots of unity.
            for k in range(1, n):
                root *= zeta
                zlist.append(root)
            return zlist

    def as_number_field_element(self, minimal=False, embedded=False, prec=53):
        """Return (field, element, homomorphism) realizing self in a number field."""
        return number_field_elements_from_algebraics(self, minimal=minimal, embedded=embedded, prec=prec)

    def exactify(self):
        """Replace the descriptor by an exact (rational/extension) one."""
        od = self._descr
        if isinstance(od, (ANRational, ANExtensionElement)):
            return
        self._set_descr(self._descr.exactify())

    def _set_descr(self, new_descr):
        """Install *new_descr* and tighten the cached interval against it."""
        self._descr = new_descr
        new_val = self._descr._interval_fast(self.parent().default_interval_prec())
        # Intersect, reconciling real vs. complex interval types.
        if (is_RealIntervalFieldElement(new_val) and is_ComplexIntervalFieldElement(self._value)):
            self._value = self._value.real().intersection(new_val)
        elif (is_RealIntervalFieldElement(self._value) and is_ComplexIntervalFieldElement(new_val)):
            self._value = self._value.intersection(new_val.real())
        else:
            self._value = self._value.intersection(new_val)

    def simplify(self):
        """Exactify, then simplify the descriptor if it is not already simple."""
        self.exactify()
        od = self._descr
        if od.is_simple():
            return
        self._set_descr(od.simplify(self))

    def _exact_field(self):
        """Generator of the number field containing an exact form of self."""
        sd = self._descr
        if isinstance(sd, (ANRational, ANExtensionElement)):
            return sd.generator()
        self.exactify()
        return self._exact_field()

    def _exact_value(self):
        """Exact descriptor of self (exactifying first if needed)."""
        sd = self._descr
        if isinstance(sd, (ANRational, ANExtensionElement)):
            return sd
        self.exactify()
        return self._descr

    def _more_precision(self):
        """Double the precision of the cached interval approximation."""
        prec = self._value.prec()
        self._value = self._descr._interval_fast((prec * 2))

    def minpoly(self):
        """Minimal polynomial over QQ (cached after first computation)."""
        try:
            return self._minimal_polynomial
        except AttributeError:
            self.exactify()
            self._minimal_polynomial = self._descr.minpoly()
            return self._minimal_polynomial

    def degree(self):
        """Degree of the minimal polynomial."""
        return self.minpoly().degree()

    def interval_fast(self, field):
        """Interval in *field*, refining until our precision suffices."""
        while (self._value.prec() < field.prec()):
            self._more_precision()
        return field(self._value)

    def interval_diameter(self, diam):
        """Refine until the interval's diameter is at most *diam*; return it."""
        if (diam <= 0):
            raise ValueError('diameter must be positive in interval_diameter')
        while (self._value.diameter() > diam):
            self._more_precision()
        return self._value

    def interval(self, field):
        """Interval in *field* tight to about one ulp of the field's precision.

        Real (ball/interval) targets accept a complex value only when its
        imaginary part vanishes.
        """
        target = (RR(1.0) >> field.prec())
        val = self.interval_diameter(target)
        if (isinstance(field, (RealIntervalField_class, RealBallField)) and is_ComplexIntervalFieldElement(val)):
            if val.imag().is_zero():
                return field(val.real())
            elif self.imag().is_zero():
                return field(self.real())
            else:
                raise TypeError(lazy_string('unable to convert %s to real interval', self))
        else:
            return field(val)
    # Conversion hooks for arb/acb and mpfi interval fields all use interval().
    _arb_ = _acb_ = _complex_mpfi_ = _real_mpfi_ = interval

    def radical_expression(self):
        """Symbolic radical form of self, or self when none is identifiable.

        Matches self against the symbolic roots of its minimal polynomial by
        interval overlap, doubling precision until a unique match remains.
        """
        from sage.symbolic.ring import SR
        poly = self.minpoly()
        if is_ComplexIntervalFieldElement(self._value):
            interval_field = self._value.parent()
        else:
            interval_field = ComplexIntervalField(self._value.prec())
        roots = poly.roots(SR, multiplicities=False)
        if (len(roots) != poly.degree()):
            # Some roots have no symbolic expression: give up.
            return self
        while True:
            candidates = []
            for root in roots:
                if interval_field(root).overlaps(interval_field(self._value)):
                    candidates.append(root)
            if (len(candidates) == 1):
                return candidates[0]
            roots = candidates
            interval_field = interval_field.to_prec((interval_field.prec() * 2))

    def _maxima_init_(self, I=None):
        """Maxima representation via rational or radical form, if available."""
        try:
            return self._rational_()._maxima_init_()
        except ValueError:
            pass
        rad = self.radical_expression()
        if isinstance(rad.parent(), sage.rings.abc.SymbolicRing):
            return rad._maxima_init_()
        raise NotImplementedError('cannot find radical expression')
def save_rates(ckpt_path, handle):
    """Load a runner from ``ckpt_path`` and record its best validation error.

    NOTE(review): relies on module-level names (``variant``, ``grid``,
    ``val_errs``, ``osp``, ``init_by_ckpt``, ``DATASET_MODES``) that are
    not visible here -- confirm they are defined in this module.
    ``spikes``/``rates`` and the ``rate_output_*`` paths are computed but
    unused in this body.
    """
    (runner, spikes, rates) = init_by_ckpt(ckpt_path, mode=DATASET_MODES.val)
    if (('maze' in variant) or ('m700' in variant)):
        # Maze-style variants are evaluated against a fixed held-out data path.
        runner.config.defrost()
        runner.config.DATA.DATAPATH = '/snel/share/data/ndt_paper/m1_maze/heldout_trial/2296_trials/0_seed'
        runner.config.freeze()
    rate_output_pth = f'/snel/share/joel/ndt_rates/psth_match'
    rate_output_pth = osp.join(rate_output_pth, ('grid' if grid else 'pbt'))
    rate_output_fn = f'{handle}_{variant}.h5'
    val_errs[handle] = runner.best_val['value'].cpu().numpy()
def test_box():
    """Builders survive being passed through a function (boxing round trip).

    Fixes the original's copy-paste drift: the counter variable ``out5``
    was reassigned for two different builders, and the numbering skipped
    around.  All "fresh builder snapshots to []" cases are now one
    data-driven loop.
    """

    def f3(x):
        # Identity function standing in for a boxed/unboxed round trip.
        return x

    # A Numpy builder keeps its accumulated contents across the round trip.
    builder = lb.Numpy(np.int32)
    out1 = f3(builder)
    assert ak.to_list(out1.snapshot()) == []
    for x in range(15):
        out1.append(x)
    out2 = f3(out1)
    assert ak.to_list(out2.snapshot()) == list(range(15))

    # Every builder flavor should round-trip and start out empty.
    fresh_builders = [
        lb.Empty(),
        lb.ListOffset(np.int64, lb.Numpy(np.int32)),
        lb.ListOffset(np.int64, lb.Numpy(np.int64)),
        lb.ListOffset(np.int32, lb.Empty()),
        lb.ListOffset(np.int32, lb.ListOffset(np.int64, lb.Numpy(np.int64))),
        lb.Regular(lb.Numpy(np.float64), 3),
        lb.ListOffset(np.int32, lb.Regular(lb.Numpy(np.float64), 3)),
        lb.IndexedOption(np.int32, lb.Numpy(np.float64)),
        lb.IndexedOption(np.int64, lb.Numpy(np.float64)),
        lb.ByteMasked(lb.Numpy(np.float64), valid_when=True),
        lb.BitMasked(np.uint8, lb.Numpy(np.float64), True, True),
        lb.Unmasked(lb.Numpy(np.int64)),
        lb.Record([lb.Numpy(np.float64), lb.Numpy(np.int64), lb.Numpy(np.uint8)], ['one', 'two', 'three']),
        lb.Tuple([lb.Numpy(np.float64), lb.ListOffset(np.int64, lb.Numpy(np.int32))]),
        lb.Union(np.int8, np.int64, [lb.Numpy(np.float64), lb.ListOffset(np.int64, lb.Numpy(np.int32))]),
    ]
    for fresh in fresh_builders:
        out = f3(fresh)
        assert ak.to_list(out.snapshot()) == []
def value_func(obs, critic_policy=None, qf1=None, qf2=None):
    """Estimate V(obs) as the minimum of the twin Q-values at the policy's action."""
    actions = critic_policy(obs)[0]
    state_action = torch.cat([obs, actions], dim=-1)
    q1 = qf1(state_action)
    q2 = qf2(state_action)
    return torch.min(q1, q2)
def test_semisupervisedtrainingplan_metrics():
    """A one-epoch SCANVI run logs classification metrics for both phases."""
    adata = scvi.data.synthetic_iid(n_labels=3)
    scvi.model.SCANVI.setup_anndata(adata, labels_key='labels', unlabeled_category='label_0', batch_key='batch')
    model = scvi.model.SCANVI(adata)
    model.train(max_epochs=1, check_val_every_n_epoch=1)
    expected_metrics = (METRIC_KEYS.ACCURACY_KEY, METRIC_KEYS.F1_SCORE_KEY, METRIC_KEYS.CLASSIFICATION_LOSS_KEY)
    for mode in ('train', 'validation'):
        for metric in expected_metrics:
            key = f'{mode}_{metric}'
            assert key in model.history_
def get_rnn_cell(cell_class, num_units, num_layers=1, keep_prob=1.0, dropout_input_keep_prob=None, dropout_output_keep_prob=None, reuse=None):
    """Build a (possibly stacked, possibly dropout-wrapped) tf.contrib RNN cell.

    Args:
        cell_class: one of 'RNN', 'GRU', 'LSTM'.
        num_units: hidden units per layer.
        num_layers: number of stacked layers.
        keep_prob: default keep probability for both dropout directions.
        dropout_input_keep_prob / dropout_output_keep_prob: per-direction
            overrides; default to ``keep_prob`` when None.
        reuse: variable-scope reuse flag passed to the cell constructors.

    Returns:
        A single cell, or a MultiRNNCell when ``num_layers > 1``.

    Raises:
        ValueError: for an unrecognized ``cell_class`` (previously ``cell``
            silently stayed None and failed far downstream).
    """
    if (dropout_input_keep_prob is None):
        dropout_input_keep_prob = keep_prob
    if (dropout_output_keep_prob is None):
        dropout_output_keep_prob = keep_prob
    # Fix: apply dropout when ANY keep probability is below 1.0.  The old
    # check only looked at ``keep_prob``, silently ignoring explicit
    # per-direction values passed while keep_prob stayed at 1.0.
    use_dropout = (dropout_input_keep_prob < 1.0) or (dropout_output_keep_prob < 1.0)
    cells = []
    for _ in range(num_layers):
        if (cell_class == 'RNN'):
            cell = tf.contrib.rnn.BasicRNNCell(num_units=num_units, reuse=reuse)
        elif (cell_class == 'GRU'):
            cell = tf.contrib.rnn.GRUCell(num_units=num_units, reuse=reuse)
        elif (cell_class == 'LSTM'):
            cell = tf.contrib.rnn.BasicLSTMCell(num_units=num_units, state_is_tuple=True, reuse=reuse)
        else:
            raise ValueError('unknown cell_class: {!r} (expected RNN, GRU or LSTM)'.format(cell_class))
        if use_dropout:
            cell = tf.contrib.rnn.DropoutWrapper(cell=cell, input_keep_prob=dropout_input_keep_prob, output_keep_prob=dropout_output_keep_prob)
        cells.append(cell)
    if (len(cells) > 1):
        # NOTE(review): other call sites use tf.contrib.rnn.* directly; confirm
        # tf.contrib.rnn.core_rnn_cell.MultiRNNCell exists in the pinned TF version.
        return tf.contrib.rnn.core_rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    return cells[0]
def ignore_exceptions(func):
    """Decorator: run ``func`` with warnings suppressed; return None on any exception.

    Fix: the wrapper now carries ``functools.wraps`` so the decorated
    function keeps its ``__name__``/``__doc__``/``__wrapped__`` metadata
    (the original clobbered them with ``inner``'s).
    """
    import functools

    @functools.wraps(func)
    def inner(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            try:
                return func(*args, **kwargs)
            except Exception:
                # Deliberate best-effort semantics: any failure yields None.
                return None
    return inner
# NOTE(review): the next three bare names look like stripped decorators
# (e.g. @require_torch / @require_sentencepiece / @require_tokenizers).
# As written they evaluate as plain expressions -- confirm they are
# defined at module level, otherwise this raises NameError at import.
_torch
_sentencepiece
_tokenizers
class LlamaIntegrationTest(unittest.TestCase):
    """Integration tests comparing the slow (sentencepiece) and fast (rust)
    Llama tokenizers on encoding, decoding and serialization."""
    # NOTE(review): missing @classmethod -- presumably stripped with the
    # decorators above; confirm before running under unittest.
    def setUpClass(cls):
        checkpoint_name = 'hf-internal-testing/llama-tokenizer'
        cls.tokenizer: LlamaTokenizer = LlamaTokenizer.from_pretrained(checkpoint_name)
        cls.rust_tokenizer = LlamaTokenizerFast.from_pretrained(checkpoint_name)
        return cls
    # NOTE(review): stripped decorator residue (likely @require_torch).
    _torch
    def integration_tests(self):
        # Sanity-check tokenization of mixed ASCII/non-ASCII input.
        inputs = self.tokenizer(['The following string should be properly encoded: Hello.', 'But ird and ird '], return_tensors='pt')
        self.assertEqual(nested_simplify(inputs), {'input_ids': [[1, 450, 1494, 1347, 881, 367, 6284, 18511, 29901, 15043, 29889], [1, 1205, 29871, 1823, 322, 29871, 31010, 30691, 1678, 1823, 1678, 30718]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]})
    def test_conversion(self):
        """Converting the slow tokenizer must reproduce the fast tokenizer's JSON."""
        self.rust_tokenizer.save_pretrained('./out')
        with tempfile.TemporaryDirectory() as dirname:
            self.rust_tokenizer.save_pretrained(dirname)
            with open(os.path.join(dirname, 'tokenizer.json'), 'r') as f:
                old_serialized = f.read()
        new_tokenizer = convert_slow_tokenizer(self.tokenizer)
        with tempfile.NamedTemporaryFile() as f:
            new_tokenizer.save(f.name)
            new_serialized = open(f.name, 'r').read()
            with open('out_tokenizer.json', 'w') as g:
                g.write(new_serialized)
            self.assertEqual(old_serialized, new_serialized)
    def test_simple_encode_decode(self):
        """Slow and fast tokenizers agree on basic encode/decode round trips."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode('This is a test'), [1, 910, 338, 263, 1243])
        self.assertEqual(rust_tokenizer.encode('This is a test'), [1, 910, 338, 263, 1243])
        self.assertEqual(pyth_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), 'This is a test')
        self.assertEqual(rust_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), 'This is a test')
        # NOTE(review): the literals below appear to have had non-ASCII text
        # stripped (encoding '' should not yield CJK-range token ids) --
        # verify against the upstream test before relying on them.
        self.assertEqual(pyth_tokenizer.encode(''), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392])
        self.assertEqual(rust_tokenizer.encode(''), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392])
        self.assertEqual(pyth_tokenizer.decode([1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True), '')
        self.assertEqual(rust_tokenizer.decode([1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True), '')
        self.assertEqual(pyth_tokenizer.encode('Hi Hello'), [1, 6324, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode('Hi Hello'), [1, 6324, 29871, 15043])
        self.assertEqual(pyth_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), 'Hi Hello')
        self.assertEqual(rust_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), 'Hi Hello')
        self.assertEqual(pyth_tokenizer.encode('Hi Hello'), [1, 6324, 259, 15043])
        self.assertEqual(rust_tokenizer.encode('Hi Hello'), [1, 6324, 259, 15043])
        self.assertEqual(pyth_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), 'Hi Hello')
        self.assertEqual(rust_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), 'Hi Hello')
        self.assertEqual(pyth_tokenizer.encode(''), [1])
        self.assertEqual(rust_tokenizer.encode(''), [1])
        self.assertEqual(pyth_tokenizer.encode(' '), [1, 259])
        self.assertEqual(rust_tokenizer.encode(' '), [1, 259])
        self.assertEqual(pyth_tokenizer.encode(' '), [1, 1678])
        self.assertEqual(rust_tokenizer.encode(' '), [1, 1678])
        self.assertEqual(pyth_tokenizer.encode(' Hello'), [1, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode(' Hello'), [1, 29871, 15043])
    def test_no_differences_showcase(self):
        """Edge cases (empty string, leading spaces, special tokens) match."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode(''), [1])
        self.assertEqual(rust_tokenizer.encode(''), [1])
        self.assertEqual(pyth_tokenizer.encode(' '), [1, 259])
        self.assertEqual(rust_tokenizer.encode(' '), [1, 259])
        self.assertEqual(pyth_tokenizer.encode(' '), [1, 1678])
        self.assertEqual(rust_tokenizer.encode(' '), [1, 1678])
        self.assertEqual(pyth_tokenizer.encode(' Hello'), [1, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode(' Hello'), [1, 29871, 15043])
        self.assertEqual(pyth_tokenizer.encode('<s>'), [1, 1])
        self.assertEqual(rust_tokenizer.encode('<s>'), [1, 1])
    def test_no_differences_decode(self):
        """Decoding individual token ids matches between implementations."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.decode([869]), '.')
        self.assertEqual(rust_tokenizer.decode([869]), '.')
        self.assertEqual(pyth_tokenizer.decode([30112, 869]), ' .')
        self.assertEqual(rust_tokenizer.decode([30112, 869]), ' .')
    def test_no_differences_special_tokens(self):
        """Special-token handling (BOS, <s>) matches between implementations."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode(''), [1])
        self.assertEqual(rust_tokenizer.encode(''), [1])
        self.assertEqual(pyth_tokenizer.encode('<s>'), [1, 1])
        self.assertEqual(rust_tokenizer.encode('<s>'), [1, 1])
    # NOTE(review): bare tuple below looks like the argument list of a
    # stripped @unittest.skipIf decorator; as written it is a no-op
    # expression and the test below always runs -- confirm.
    ((os.getenv('RUN_TOKENIZER_INTEGRATION', '0') == '0'), 'RUN_TOKENIZER_INTEGRATION=1 to run tokenizer integration tests')
    def test_integration_test_xnli(self):
        """Exhaustive agreement check over real datasets (slow; opt-in)."""
        import tqdm
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        dataset = load_dataset('code_x_glue_ct_code_to_text', 'go')
        for item in tqdm.tqdm(dataset['validation']):
            string = item['code']
            encoded1 = pyth_tokenizer.encode(string)
            encoded2 = rust_tokenizer.encode(string)
            self.assertEqual(encoded1, encoded2)
            decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
            decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True)
            self.assertEqual(decoded1, decoded2)
        dataset = load_dataset('xnli', 'all_languages')
        for item in tqdm.tqdm(dataset['train']):
            for string in item['premise'].values():
                encoded1 = pyth_tokenizer.encode(string)
                encoded2 = rust_tokenizer.encode(string)
                self.assertEqual(encoded1, encoded2)
                decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
                decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True)
                self.assertEqual(decoded1, decoded2)
def make_target_python(options):
    """Build a TargetPython instance from the parsed CLI options."""
    return TargetPython(
        platform=options.platform,
        py_version_info=options.python_version,
        abi=options.abi,
        implementation=options.implementation,
    )
def predict():
    """Run the SCARF generative + intensity models over a labels table.

    Writes either a single pickled file of binned spectra or one JSON
    "tree" file per spectrum, depending on the ``binned_out`` flag.
    Relies on CLI args from ``get_args()`` and several project modules
    (``common``, ``scarf_model``, ``pl``).
    """
    args = get_args()
    kwargs = args.__dict__
    save_dir = kwargs['save_dir']
    common.setup_logger(save_dir, log_name='scarf_pred_binned.log', debug=kwargs['debug'])
    pl.utilities.seed.seed_everything(kwargs.get('seed'))
    # Record the full argument set next to the outputs for reproducibility.
    yaml_args = yaml.dump(kwargs)
    logging.info(f'''
{yaml_args}''')
    with open((Path(save_dir) / 'args.yaml'), 'w') as fp:
        fp.write(yaml_args)
    data_dir = Path('')
    if (kwargs.get('dataset_name') is not None):
        dataset_name = kwargs['dataset_name']
        data_dir = (Path('data/spec_datasets') / dataset_name)
    labels = (data_dir / kwargs['dataset_labels'])
    df = pd.read_csv(labels, sep='\t')
    if kwargs['debug']:
        df = df[:10]
    if (kwargs['subset_datasets'] != 'none'):
        # Restrict predictions to the train or test fold of a split file.
        splits = pd.read_csv(((data_dir / 'splits') / kwargs['split_name']), sep='\t')
        folds = set(splits.keys())
        folds.remove('spec')
        # Assumes exactly one fold column besides 'spec' -- TODO confirm.
        fold_name = list(folds)[0]
        if (kwargs['subset_datasets'] == 'train_only'):
            names = splits[(splits[fold_name] == 'train')]['spec'].tolist()
        elif (kwargs['subset_datasets'] == 'test_only'):
            names = splits[(splits[fold_name] == 'test')]['spec'].tolist()
        elif (kwargs['subset_datasets'] == 'debug_special'):
            # Hard-coded single-spectrum debugging path; overrides the fold.
            names = splits[(splits[fold_name] == 'test')]['spec'].tolist()
            names = names[:5]
            names = ['CCMSLIB']
            kwargs['debug'] = True
        else:
            raise NotImplementedError()
        df = df[df['spec'].isin(names)]
    gen_checkpoint = kwargs['gen_checkpoint']
    inten_checkpoint = kwargs['inten_checkpoint']
    torch.set_num_threads(1)
    gen_model_obj = scarf_model.ScarfNet.load_from_checkpoint(gen_checkpoint)
    inten_model_obj = scarf_model.ScarfIntenNet.load_from_checkpoint(inten_checkpoint)
    logging.info(f'Loaded gen / inten models from {gen_checkpoint} & {inten_checkpoint}')
    model = scarf_model.JointModel(gen_model_obj=gen_model_obj, inten_model_obj=inten_model_obj)
    # CPU-only path: the per-row workers below run in parallel processes.
    assert (not kwargs['gpu'])
    (num_bins, upper_limit) = (15000, 1500)
    with torch.no_grad():
        model.eval()
        model.freeze()
        gpu = kwargs['gpu']
        device = ('cuda' if gpu else 'cpu')
        model.to(device)
        binned_out = kwargs['binned_out']
        def single_predict_mol(entry):
            # Predict one spectrum; returns a dict of arrays (binned) or
            # formula-level outputs (unbinned).
            torch.set_num_threads(1)
            smi = entry['smiles']
            full_output = model.predict_mol(smi, threshold=kwargs['threshold'], device=device, max_nodes=kwargs['max_nodes'], adduct=entry['ionization'], binned_out=binned_out)
            if binned_out:
                output_spec = full_output['spec']
                best_inds = None
                if kwargs['sparse_out']:
                    # Keep only the top-k bins, stored as (index, value) pairs.
                    sparse_k = kwargs['sparse_k']
                    best_inds = np.argsort(output_spec, (- 1))[::(- 1)][:sparse_k]
                    best_intens = np.take_along_axis(output_spec, best_inds, (- 1))
                    output_spec = np.stack([best_inds, best_intens], (- 1))
                update_dict = {}
                for (param_k, param_v) in full_output.items():
                    if (param_k in ['spec', 'forms', 'masses']):
                        continue
                    if kwargs['sparse_out']:
                        # Sparsify auxiliary arrays with the same top-k indices.
                        best_params = np.take_along_axis(param_v, best_inds, (- 1))
                        param_v = np.stack([best_inds, best_params], (- 1))
                    update_dict[param_k] = param_v
                out = {'preds': output_spec}
                out.update(update_dict)
            else:
                out = {'preds': full_output['spec'], 'form_masses': full_output['masses'], 'forms': full_output['forms']}
            return out
        all_rows = [j for (_, j) in df.iterrows()]
        preds = []
        if (kwargs['debug'] or (kwargs['batch_size'] == 1)):
            # Serial path for debugging / batch size 1.
            for j in all_rows:
                pred = single_predict_mol(j)
                preds.append(pred)
        else:
            preds = common.chunked_parallel(all_rows, single_predict_mol, max_cpu=kwargs['batch_size'])
        if binned_out:
            # Stack all per-spectrum arrays and pickle one combined file.
            output_keys = set(preds[0].keys())
            update_dict = {}
            for k in output_keys:
                temp = [i[k] for i in preds]
                update_dict[k] = np.stack(temp, 0)
            spec_names_ar = [i['spec'] for i in all_rows]
            smiles_ar = np.array([i['smiles'] for i in all_rows])
            inchikeys = [common.inchikey_from_smiles(i) for i in smiles_ar]
            output = {'smiles': smiles_ar, 'ikeys': inchikeys, 'spec_names': spec_names_ar, 'num_bins': num_bins, 'upper_limit': upper_limit, 'sparse_out': kwargs['sparse_out']}
            output.update(update_dict)
            out_file = (Path(kwargs['save_dir']) / 'binned_preds.p')
            with open(out_file, 'wb') as fp:
                pickle.dump(output, fp)
        else:
            # One JSON prediction tree per spectrum.
            for (pred_obj, row) in zip(preds, all_rows):
                mz = pred_obj['form_masses']
                intens = [float(i) for i in pred_obj['preds']]
                cand_form = row['formula']
                smiles = row['smiles']
                form_list = pred_obj['forms']
                spec_name = row['spec']
                ionization = row['ionization']
                # Shift neutral formula masses by the adduct mass.
                mass_shift = common.ion2mass[ionization]
                mz_shifted = (mass_shift + np.array(mz)).tolist()
                tbl = {'mz': mz, 'ms2_inten': intens, 'rel_inten': intens, 'mono_mass': mz_shifted, 'formula_mass_no_adduct': mz, 'mass_diff': ([0] * len(mz)), 'formula': form_list, 'ions': ([ionization] * len(mz))}
                new_form = {'cand_form': cand_form, 'spec_name': spec_name, 'cand_ion': ionization, 'output_tbl': tbl, 'smiles': smiles}
                save_path = (Path(kwargs['save_dir']) / 'tree_preds_inten')
                save_path.mkdir(exist_ok=True)
                out_file = (save_path / f'pred_{spec_name}.json')
                with open(out_file, 'w') as fp:
                    json.dump(new_form, fp, indent=2)
class TD2020LearnAPI(TFPluginAPI):
    """UE4 TensorFlow plugin API: serves MCTS-recommended RTS actions.

    Loads a trained network once in ``onSetup`` and answers per-turn
    JSON queries with the best (x, y, action) for the requesting player.
    """
    def __init__(self):
        self.owning_player = None          # player id from the last JSON request
        self.initial_board_config = None   # decoded actor list from the last request
        self.setup = False                 # guards onJsonInput until onSetup ran
        self.g = None                      # RTSGame instance
        self.graph_var = None              # TF graph holding the loaded net
        self.session_var = None            # TF session bound to graph_var
        self.mcts = None                   # MCTS search over the loaded net
    def onSetup(self):
        # Build a dedicated graph/session so the checkpoint's variables stay
        # isolated from any other TF state in the host process.
        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session()
            with session.as_default():
                current_directory = os.path.join(os.path.dirname(__file__), 'temp/')
                self.g = RTSGame()
                n1 = NNet(self.g, OneHotEncoder())
                n1.load_checkpoint(current_directory, 'best.pth.tar')
                args = dotdict({'numMCTSSims': 2, 'cpuct': 1.0})
                self.mcts = MCTS(self.g, n1, args)
                self.graph_var = graph
                self.session_var = session
        self.setup = True
    def onJsonInput(self, jsonInput):
        """Decode the board from JSON and return the MCTS-recommended action."""
        if (not self.setup):
            return
        encoded_actors = jsonInput['data']
        initial_board_config = []
        for encoded_actor in encoded_actors:
            initial_board_config.append(dotdict({'x': encoded_actor['x'], 'y': encoded_actor['y'], 'player': encoded_actor['player'], 'a_type': encoded_actor['actorType'], 'health': encoded_actor['health'], 'carry': encoded_actor['carry'], 'gold': encoded_actor['money'], 'timeout': encoded_actor['remaining']}))
        self.initial_board_config = initial_board_config
        self.owning_player = jsonInput['player']
        # Re-enter the graph/session created in onSetup for inference.
        with self.graph_var.as_default():
            with self.session_var.as_default():
                self.g.setInitBoard(self.initial_board_config)
                b = self.g.getInitBoard()
                def n1p(board):
                    # temp=0 -> greedy: pick the most-visited action.
                    return np.argmax(self.mcts.getActionProb(board, temp=0))
                canonical_board = self.g.getCanonicalForm(b, self.owning_player)
                recommended_act = n1p(canonical_board)
                # Flat action index -> (y, x, action) on the square board.
                (y, x, action_index) = np.unravel_index(recommended_act, [b.shape[0], b.shape[0], NUM_ACTS])
                act = {'x': str(x), 'y': str(y), 'action': ACTS_REV[action_index]}
                print(('Printing recommended action ' + str(act)))
                return act
    def onBeginTraining(self):
        pass
    def run(self, args):
        pass
    def close(self, args):
        """Release the TF session and reset all cached state."""
        print('Closing Get Action')
        if self.session_var:
            self.session_var.close()
        self.owning_player = None
        self.initial_board_config = None
        self.setup = False
        self.g = None
        self.graph_var = None
        self.session_var = None
        self.mcts = None
def test_raises_on_non_square_input():
    """maximum_flow must reject a non-square (2x3) adjacency matrix."""
    # Build the graph OUTSIDE the raises-block so only maximum_flow itself
    # may raise the expected ValueError (the old version would also pass if
    # csr_matrix construction raised).
    graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 1)
class UpsamplingBilinear2d(Upsample):
    """Bilinear 2D upsampling with ``align_corners=True``.

    Deprecated thin wrapper around :class:`Upsample`; prefer
    ``nn.functional.interpolate``.
    """

    def __init__(self, size=None, scale_factor=None):
        super().__init__(size, scale_factor, mode='bilinear', align_corners=True)

    def forward(self, input):
        warnings.warn('nn.UpsamplingBilinear2d is deprecated. Use nn.functional.interpolate instead.')
        return super().forward(input)
class KR_type_C(KirillovReshetikhinGenericCrystal):
    """Kirillov-Reshetikhin crystal of type C, realized inside an ambient
    type-A(2) crystal via +/- diagrams.

    NOTE(review): the bare ``_method`` lines below look like stripped
    ``@cached_method`` decorators; as written they are plain name
    expressions -- confirm ``_method`` is defined at module level.
    """
    def classical_decomposition(self):
        # Classical shapes: r x s rectangle with horizontal dominoes removed.
        return CrystalOfTableaux(self.cartan_type().classical(), shapes=horizontal_dominoes_removed(self.r(), self.s()))
    def ambient_crystal(self):
        # Ambient crystal of twisted type A_{2n+1}^{(2)}.
        return KashiwaraNakashimaTableaux(['A', ((2 * self.cartan_type().classical().rank()) + 1), 2], self.r(), self.s())
    _method
    def ambient_dict_pm_diagrams(self):
        """Map inner shapes to the +/- diagrams of the ambient crystal."""
        ulist = []
        s = self.s()
        r = self.r()
        m = (s // 2)
        for i in range((m + 1)):
            for la in IntegerVectors((m - i), min_length=r, max_length=r):
                ulist.append(PMDiagram(([[j, j] for j in la] + [[((s - (2 * m)) + (2 * i))]])))
        return {x.inner_shape(): x for x in ulist}
    _method
    def ambient_highest_weight_dict(self):
        """Map inner shapes to highest-weight vectors of the ambient crystal."""
        A = self.ambient_dict_pm_diagrams()
        ambient = self.ambient_crystal()
        return {key: ambient.retract(ambient.from_pm_diagram_to_highest_weight_vector(A[key])) for key in A}
    _method
    def highest_weight_dict(self):
        """Map classical shapes to this crystal's module generators."""
        return {x.lift().to_tableau().shape(): x for x in self.module_generators}
    _method
    def to_ambient_crystal(self):
        """Crystal morphism into the ambient crystal (indices shifted by +1)."""
        hwd = self.highest_weight_dict()
        ahwd = self.ambient_highest_weight_dict()
        pdict = {hwd[key]: ahwd[key] for key in hwd}
        classical = self.cartan_type().classical()
        return self.crystal_morphism(pdict, index_set=classical.index_set(), automorphism=(lambda i: (i + 1)), cartan_type=classical, check=False)
    _method
    def from_ambient_crystal(self):
        """Retraction from the ambient crystal (indices shifted by -1)."""
        hwd = self.highest_weight_dict()
        ahwd = self.ambient_highest_weight_dict()
        pdict_inv = {ahwd[key]: hwd[key] for key in hwd}
        ind = [(j + 1) for j in self.cartan_type().classical().index_set()]
        return AmbientRetractMap(self, self.ambient_crystal(), pdict_inv, index_set=ind, automorphism=(lambda i: (i - 1)))
class InceptionC(nn.Module):
    """Inception-v3 'C' block: parallel 1x1, factorized 7x7, double 7x7 and
    pooled branches, concatenated along channels (outputs 4 * 192 channels)."""

    def __init__(self, in_channels, channels_7x7, conv_block=None):
        super(InceptionC, self).__init__()
        conv_block = BasicConv2d if conv_block is None else conv_block
        c7 = channels_7x7
        self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
        # 7x7 factorized into a 1x7 followed by a 7x1 convolution.
        self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
        # Double 7x7: two alternating (7,1)/(1,7) pairs.
        self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch_pool = conv_block(in_channels, 192, kernel_size=1)

    def _forward(self, x):
        b1x1 = self.branch1x1(x)
        b7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        b7x7dbl = x
        for layer in (self.branch7x7dbl_1, self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            b7x7dbl = layer(b7x7dbl)
        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return [b1x1, b7x7, b7x7dbl, pooled]

    def forward(self, x):
        return torch.cat(self._forward(x), 1)
class ActivationHessianTraceBasicModelTest(BaseHessianTraceBasicModelTest):
    """Hessian-trace approximation test: activation mode, per-tensor granularity."""

    def __init__(self, unit_test):
        super().__init__(unit_test)
        self.val_batch_size = 1

    def run_test(self, seed=0):
        graph, pytorch_impl = self._setup()
        hessian_service = hessian_common.HessianInfoService(graph=graph, representative_dataset=self.representative_data_gen, fw_impl=pytorch_impl)
        # Only nodes that actually carry weights serve as interest points.
        weighted_nodes = [node for node in graph.get_topo_sorted_nodes() if len(node.weights) > 0]
        for node in weighted_nodes:
            self.test_hessian_trace_approx(hessian_service, interest_point=node, granularity=hessian_common.HessianInfoGranularity.PER_TENSOR, mode=hessian_common.HessianMode.ACTIVATION)
def load_config(config_file=None):
    """Load the configuration, optionally merging a named YAML file.

    Args:
        config_file: base name (without extension) of a YAML file under
            ``lib/configs``; None keeps the defaults.

    Returns:
        (config, msg) -- the config object and a human-readable summary.

    Exits the process with status 1 when the named file does not exist.
    """
    # NOTE(review): this aliases (and merge_from_file mutates) the shared
    # ``base_config`` object; use ``base_config.clone()`` if callers need
    # isolated configs -- confirm against the config library in use.
    config = base_config
    if (config_file is not None):
        config_file_path = os.path.join('lib', 'configs', f'{config_file}.yaml')
        if os.path.isfile(config_file_path):
            config.merge_from_file(config_file_path)
            msg = f"Merged config from '{config_file_path}'"
        else:
            # Fix: the old call was print(..., level='critical'), which raises
            # TypeError (print has no ``level`` kwarg) instead of reporting the
            # problem; also exit non-zero since this is a fatal error.
            print(f"Cannot open the specified yaml config file '{config_file_path}'")
            exit(1)
    else:
        msg = 'No yaml config file is specified. Using default config.'
    return (config, msg)
def last_k(tokens, k):
    """Return the final ``k`` elements of ``tokens`` as a tuple.

    Raises ValueError when ``k`` is negative or exceeds ``len(tokens)``.
    """
    n = len(tokens)
    if k < 0 or k > n:
        raise ValueError('k must be between 0 and len(tokens) = {}, got: {}'.format(n, k))
    return tuple(tokens[n - k:])
class TestCatalogue_Star(TestCase):
    """Smoke tests: star attributes are readable for every catalogue entry."""

    def test_magV(self):
        for star in exocat.stars:
            _ = star.magV

    def test_T(self):
        for star in exocat.stars:
            _ = star.T

    def test_calcTemperature(self):
        for star in exocat.stars:
            star.calcTemperature()
def isProjective(heads):
    """Return True iff the dependency tree described by ``heads`` is projective.

    ``heads[i]`` is the head index of token ``i + 1`` (0 means root);
    entries with negative heads are ignored.
    """
    arcs = [(head, dep) for dep, head in enumerate(heads, 1) if head >= 0]
    for idx, (hi, di) in enumerate(arcs):
        li, ri = sorted((hi, di))
        for hj, dj in arcs[idx + 1:]:
            lj, rj = sorted((hj, dj))
            # A head falling strictly inside another arc's span while the
            # arcs share an endpoint, or properly interleaving spans,
            # violates projectivity.
            if li <= hj <= ri and hi == dj:
                return False
            if lj <= hi <= rj and hj == di:
                return False
            if (li < lj < ri or li < rj < ri) and (li - lj) * (ri - rj) > 0:
                return False
    return True
# NOTE(review): the two lines below look like stripped decorators
# (likely @pytest.mark.parametrize and @test_utils.test); the leading-dot
# line is a syntax error as written -- restore the decorators before use.
.parametrize('digits_bits', [23, 24])
_utils.test(require=ti.extension.quant)
def test_quant_float_precision(digits_bits):
    """A custom quant float with 24 fraction bits matches float32 exactly;
    23 bits loses the implicit digit and is only approximately equal."""
    qflt = ti.types.quant.float(exp=8, frac=digits_bits)
    x = ti.field(dtype=qflt)
    bitpack = ti.BitpackedFields(max_num_bits=32)
    bitpack.place(x)
    ti.root.place(bitpack)
    # pi, and pi scaled far outside the small-exponent range.
    tests = [np.float32(np.pi), np.float32((np.pi * (1 << 100)))]
    for v in tests:
        x[None] = v
        if (digits_bits == 24):
            # 24 fraction bits reproduce IEEE float32 exactly.
            assert (x[None] == v)
        else:
            # 23 bits: not exact, but within ~2^-23 relative error.
            assert (x[None] != v)
            assert (x[None] == pytest.approx(v, rel=3e-07))
def get_midi_info(pm):
    """Summarize a PrettyMIDI object: first beat time, time signature, tempo.

    ``time_signature`` is reported only when there is exactly one change;
    likewise ``tempo`` only when there is exactly one tempo event.
    """
    changes = pm.time_signature_changes
    if changes:
        # Sort in place so the earliest change defines the first beat.
        changes.sort(key=lambda c: c.time)
        first_beat_time = changes[0].time
    else:
        first_beat_time = pm.estimate_beat_start()
    tc_times, tempi = pm.get_tempo_changes()
    time_sign = None
    if len(changes) == 1:
        time_sign = '{}/{}'.format(changes[0].numerator, changes[0].denominator)
    return {
        'first_beat_time': first_beat_time,
        'num_time_signature_change': len(changes),
        'time_signature': time_sign,
        'tempo': (tempi[0] if len(tc_times) == 1 else None),
    }
def parse_nested_args(d_cmd_cfg):
    """Expand dotted keys into nested dictionaries.

    Example: {'a.b.c': 1} -> {'a': {'b': {'c': 1}}}.
    """
    nested = {}
    for dotted_key, value in d_cmd_cfg.items():
        *parents, leaf = dotted_key.split('.')
        node = nested
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
class PythonCapiFunctionNode(ExprNode):
    """Expression node for a C-API function referenced by name and cname.

    Optionally pulls in a utility-code block when its result is generated.
    """

    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code=None):
        ExprNode.__init__(self, pos, name=py_name, cname=cname, type=func_type, utility_code=utility_code)

    def analyse_types(self, env):
        # The function type is fixed at construction; nothing to analyse.
        return self

    def generate_result_code(self, code):
        if not self.utility_code:
            return
        code.globalstate.use_utility_code(self.utility_code)

    def calculate_result_code(self):
        return self.cname
class Runner(object):
    """Base class for multi-agent RL experiment runners.

    Wires together environments, policy, trainer and replay buffer from a
    config dict, and provides save/restore and logging helpers.  Subclasses
    implement the rollout loop (``run``/``warmup``/``collect``/``insert``).
    """
    def __init__(self, config):
        self.all_args = config['all_args']
        self.envs = config['envs']
        self.eval_envs = config['eval_envs']
        self.device = config['device']
        self.num_agents = config['num_agents']
        if config.__contains__('render_envs'):
            self.render_envs = config['render_envs']
        # Mirror frequently-used CLI arguments as attributes.
        self.env_name = self.all_args.env_name
        self.algorithm_name = self.all_args.algorithm_name
        self.experiment_name = self.all_args.experiment_name
        self.use_centralized_V = self.all_args.use_centralized_V
        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
        self.num_env_steps = self.all_args.num_env_steps
        self.episode_length = self.all_args.episode_length
        self.n_rollout_threads = self.all_args.n_rollout_threads
        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
        self.n_render_rollout_threads = self.all_args.n_render_rollout_threads
        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay
        self.hidden_size = self.all_args.hidden_size
        self.use_wandb = self.all_args.use_wandb
        self.use_single_network = self.all_args.use_single_network
        self.use_render = self.all_args.use_render
        self.recurrent_N = self.all_args.recurrent_N
        self.save_interval = self.all_args.save_interval
        self.use_eval = self.all_args.use_eval
        self.eval_interval = self.all_args.eval_interval
        self.log_interval = self.all_args.log_interval
        self.model_dir = self.all_args.model_dir
        # Output directories: gifs for render mode, wandb dir when enabled,
        # otherwise a local run dir with tensorboard logs and model files.
        if self.use_render:
            self.run_dir = config['run_dir']
            self.gif_dir = str((self.run_dir / 'gifs'))
            if (not os.path.exists(self.gif_dir)):
                os.makedirs(self.gif_dir)
        elif self.use_wandb:
            self.save_dir = str(wandb.run.dir)
            self.run_dir = str(wandb.run.dir)
        else:
            self.run_dir = config['run_dir']
            self.log_dir = str((self.run_dir / 'logs'))
            if (not os.path.exists(self.log_dir)):
                os.makedirs(self.log_dir)
            self.writter = SummaryWriter(self.log_dir)
            self.save_dir = str((self.run_dir / 'models'))
            if (not os.path.exists(self.save_dir)):
                os.makedirs(self.save_dir)
        (TrainAlgo, Policy) = make_trainer_policy_cls(self.algorithm_name, use_single_network=self.use_single_network)
        share_observation_space = (self.envs.share_observation_space[0] if self.use_centralized_V else self.envs.observation_space[0])
        # 'ft'-style (fine-tuned/rule-based) algorithms carry no learned policy.
        if ('ft' not in self.algorithm_name):
            self.policy = Policy(self.all_args, self.envs.observation_space[0], share_observation_space, self.envs.action_space[0], device=self.device)
            # Persist the policy constructor arguments so the policy can be
            # rebuilt elsewhere (e.g. for evaluation workers).
            self.policy_config = (self.all_args, self.envs.observation_space[0], share_observation_space, self.envs.action_space[0])
            policy_config_path = os.path.join(self.run_dir, 'policy_config.pkl')
            pickle.dump(self.policy_config, open(policy_config_path, 'wb'))
            print(f'Pickle dump policy config at {policy_config_path}')
            if (self.model_dir is not None):
                self.restore()
            self.trainer = TrainAlgo(self.all_args, self.policy, device=self.device)
            if (self.algorithm_name != 'population'):
                self.buffer = SharedReplayBuffer(self.all_args, self.num_agents, self.envs.observation_space[0], share_observation_space, self.envs.action_space[0])
    def run(self):
        raise NotImplementedError
    def warmup(self):
        raise NotImplementedError
    def collect(self, step):
        raise NotImplementedError
    def insert(self, data):
        raise NotImplementedError
    # NOTE(review): bare '_grad()' below looks like a stripped
    # @torch.no_grad() decorator -- confirm before relying on gradient-free
    # execution of compute().
    _grad()
    def compute(self):
        """Bootstrap value estimates for the last step and compute returns."""
        self.trainer.prep_rollout()
        next_values = self.trainer.policy.get_values(np.concatenate(self.buffer.share_obs[(- 1)]), np.concatenate(self.buffer.rnn_states_critic[(- 1)]), np.concatenate(self.buffer.masks[(- 1)]))
        next_values = np.array(np.split(_t2n(next_values), self.n_rollout_threads))
        self.buffer.compute_returns(next_values, self.trainer.value_normalizer)
    def train(self):
        """Run one training update over the buffer and return its metrics."""
        self.trainer.prep_training()
        train_infos = self.trainer.train(self.buffer)
        self.buffer.after_update()
        self.log_system()
        return train_infos
    def save(self):
        """Save model weights (single network, or actor + critic) to save_dir."""
        if self.use_single_network:
            policy_model = self.trainer.policy.model
            torch.save(policy_model.state_dict(), (str(self.save_dir) + '/model.pt'))
        else:
            policy_actor = self.trainer.policy.actor
            torch.save(policy_actor.state_dict(), (str(self.save_dir) + '/actor.pt'))
            policy_critic = self.trainer.policy.critic
            torch.save(policy_critic.state_dict(), (str(self.save_dir) + '/critic.pt'))
    def restore(self):
        """Load model weights from model_dir (critic skipped in render/eval mode)."""
        if self.use_single_network:
            policy_model_state_dict = torch.load((str(self.model_dir) + '/model.pt'), map_location=self.device)
            self.policy.model.load_state_dict(policy_model_state_dict)
        else:
            policy_actor_state_dict = torch.load((str(self.model_dir) + '/actor.pt'), map_location=self.device)
            self.policy.actor.load_state_dict(policy_actor_state_dict)
            if (not (self.all_args.use_render or self.all_args.use_eval)):
                policy_critic_state_dict = torch.load((str(self.model_dir) + '/critic.pt'), map_location=self.device)
                self.policy.critic.load_state_dict(policy_critic_state_dict)
    def log_train(self, train_infos, total_num_steps):
        """Log scalar training metrics to wandb or tensorboard."""
        for (k, v) in train_infos.items():
            if self.use_wandb:
                wandb.log({k: v}, step=total_num_steps)
            else:
                self.writter.add_scalars(k, {k: v}, total_num_steps)
    def log_env(self, env_infos, total_num_steps):
        """Log mean per-key environment statistics (skipping empty lists)."""
        for (k, v) in env_infos.items():
            if (len(v) > 0):
                if self.use_wandb:
                    wandb.log({k: np.mean(v)}, step=total_num_steps)
                else:
                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)
    def log_system(self):
        """Alert via Slack when host memory usage exceeds 95%.

        NOTE(review): ``webhook_url`` must be a module-level global -- confirm.
        """
        mem = psutil.virtual_memory()
        total_mem = (((float(mem.total) / 1024) / 1024) / 1024)
        used_mem = (((float(mem.used) / 1024) / 1024) / 1024)
        if ((used_mem / total_mem) > 0.95):
            slack = slackweb.Slack(url=webhook_url)
            host_name = socket.gethostname()
            slack.notify(text='Host {}: occupied memory is *{:.2f}*%!'.format(host_name, ((used_mem / total_mem) * 100)))
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize a single Danish CVR value into a one-element list.

    Returns [np.nan] for null-like values; for invalid values returns the
    original (errors='ignore'), np.nan (other modes), or raises
    (errors='raise'); otherwise returns the compacted CVR.
    """
    val = str(val)
    result: Any = []
    if (val in NULL_VALUES):
        return [np.nan]
    if (not validate_dk_cvr(val)):
        if (errors == 'raise'):
            raise ValueError(f'Unable to parse value {val}')
        error_result = (val if (errors == 'ignore') else np.nan)
        return [error_result]
    # NOTE(review): 'compact' and 'standard' currently produce identical
    # output; confirm whether 'standard' should apply distinct formatting.
    if (output_format in {'compact', 'standard'}):
        result = ([cvr.compact(val)] + result)
    return result
def setup_buckets(src_region, dest_region, n_files=1, file_size_mb=1):
    """Create source/destination buckets and seed the source with random files.

    Regions are 'provider:zone' strings.  Returns
    (src_bucket_name, dest_bucket_name, src_prefix, dest_prefix).
    """

    def bucket_name_for(provider, zone):
        # Azure names embed the zone in the account/container path; other
        # providers get a short uuid suffix.
        if provider == 'azure':
            return f"skyplanetest{zone}/{str(uuid.uuid4()).replace('-', '')}"
        return f'skyplane-integration-{zone}-{str(uuid.uuid4())[:8]}'

    src_provider, src_zone = src_region.split(':')
    dest_provider, dest_zone = dest_region.split(':')
    src_bucket_name = bucket_name_for(src_provider, src_zone)
    dest_bucket_name = bucket_name_for(dest_provider, dest_zone)
    logger.debug(f'creating buckets {src_bucket_name} and {dest_bucket_name}')
    src_interface = ObjectStoreInterface.create(src_region, src_bucket_name)
    dest_interface = ObjectStoreInterface.create(dest_region, dest_bucket_name)
    src_interface.create_bucket(src_zone)
    dest_interface.create_bucket(dest_zone)
    src_prefix = f'src_{uuid.uuid4()}'
    dest_prefix = f'dest_{uuid.uuid4()}'
    # Upload the same random payload n_files times under the source prefix.
    with tempfile.NamedTemporaryFile() as tmp:
        fpath = tmp.name
        with open(fpath, 'wb+') as f:
            f.write(os.urandom(int(file_size_mb * MB)))
        for i in range(n_files):
            src_interface.upload_object(fpath, f'{src_prefix}/{i}', mime_type='text/plain')
    return (src_bucket_name, dest_bucket_name, src_prefix, dest_prefix)
def test_offline_dataset():
    """Smoke-test producer/consumer pipe setup and teardown on OfflineDataset."""
    if not ray.is_initialized():
        ray.init()
    server = OfflineDataset(table_capacity=10000)
    server.start()
    producer_name, _producer_queue = server.start_producer_pipe(name='test_offline_dataset')
    consumer_name, _consumer_queue = server.start_consumer_pipe(name='test_offline_dataset', batch_size=64)
    # Tear down: consumer first, then producer, then the ray runtime.
    server.end_consumer_pipe(name=consumer_name)
    server.end_producer_pipe(name=producer_name)
    ray.shutdown()
def dla60x(cfg, pretrained=None, **kwargs):
    """Build a DLA-60X model; optionally load pretrained 'dla60x' weights."""
    # NOTE: mutates the shared BottleneckX class attribute before construction.
    BottleneckX.expansion = 2
    levels = [1, 1, 1, 2, 3, 1]
    channels = [16, 32, 128, 256, 512, 1024]
    model = DLA(cfg, levels, channels, block=BottleneckX, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla60x')
    return model
class ExperimentBaseRunner(ABC):
    """Drives one experiment end to end.

    Starts every simulator in dependency order, waits for the ones flagged
    ``wait_terminate()``, then terminates everything, removes sockets and
    collects per-simulator results into an ``ExpOutput``.
    """

    def __init__(self, exp: Experiment, env: ExpEnv, verbose: bool) -> None:
        self.exp = exp
        self.env = env
        self.verbose = verbose
        self.out = ExpOutput(exp)
        # (simulator, its running component) for everything actually launched
        self.running: tp.List[tp.Tuple[(Simulator, SimpleComponent)]] = []
        # (executor, socket path) pairs removed during cleanup
        self.sockets: tp.List[tp.Tuple[(Executor, str)]] = []
        # components whose termination wait_for_sims() blocks on
        self.wait_sims: tp.List[Component] = []

    def sim_executor(self, sim: Simulator) -> Executor:
        """Return the executor that runs *sim* (implemented by subclasses)."""
        pass

    def sim_graph(self) -> tp.Dict[(Simulator, tp.Set[Simulator])]:
        """Build the dependency graph: simulator -> set of simulators it needs."""
        sims = self.exp.all_simulators()
        graph = {}
        for sim in sims:
            deps = (sim.dependencies() + sim.extra_deps)
            graph[sim] = set()
            for d in deps:
                graph[sim].add(d)
        return graph

    async def start_sim(self, sim: Simulator) -> None:
        """Start one simulator and wait until it is considered ready.

        Registers its sockets for cleanup, optionally waits for its socket
        files to appear, and honours its start delay.
        """
        name = sim.full_name()
        if self.verbose:
            print(f'{self.exp.name}: starting {name}')
        run_cmd = sim.run_cmd(self.env)
        if (run_cmd is None):
            # Nothing to run for this simulator; treat it as started.
            if self.verbose:
                print(f'{self.exp.name}: started dummy {name}')
            return
        executor = self.sim_executor(sim)
        # canfail=True: a failing simulator should not abort the run here.
        sc = executor.create_component(name, shlex.split(run_cmd), verbose=self.verbose, canfail=True)
        (await sc.start())
        self.running.append((sim, sc))
        for s in sim.sockets_cleanup(self.env):
            self.sockets.append((executor, s))
        wait_socks = sim.sockets_wait(self.env)
        if wait_socks:
            if self.verbose:
                print(f'{self.exp.name}: waiting for sockets {name}')
            (await executor.await_files(wait_socks, verbose=self.verbose))
        delay = sim.start_delay()
        if (delay > 0):
            (await asyncio.sleep(delay))
        if sim.wait_terminate():
            self.wait_sims.append(sc)
        if self.verbose:
            print(f'{self.exp.name}: started {name}')

    async def before_wait(self) -> None:
        """Hook: runs after all simulators started, before waiting (no-op)."""
        pass

    async def before_cleanup(self) -> None:
        """Hook: runs before simulators are terminated (no-op)."""
        pass

    async def after_cleanup(self) -> None:
        """Hook: runs after components/sockets were cleaned up (no-op)."""
        pass

    async def prepare(self) -> None:
        """Stage config tarballs on every host and run simulator prep commands."""
        copies = []
        for host in self.exp.hosts:
            path = self.env.cfgtar_path(host)
            if self.verbose:
                print('preparing config tar:', path)
            host.node_config.make_tar(path)
            executor = self.sim_executor(host)
            task = asyncio.create_task(executor.send_file(path, self.verbose))
            copies.append(task)
        (await asyncio.wait(copies))
        sims = []
        for sim in self.exp.all_simulators():
            prep_cmds = list(sim.prep_cmds(self.env))
            executor = self.sim_executor(sim)
            task = asyncio.create_task(executor.run_cmdlist(('prepare_' + self.exp.name), prep_cmds, verbose=self.verbose))
            sims.append(task)
        (await asyncio.wait(sims))

    async def wait_for_sims(self) -> None:
        """Block until every simulator flagged wait_terminate() has exited."""
        if self.verbose:
            print(f'{self.exp.name}: waiting for hosts to terminate')
        for sc in self.wait_sims:
            (await sc.wait())

    async def terminate_collect_sims(self) -> ExpOutput:
        """Terminate all running simulators, clean up, and return the output.

        Awaits are shielded so cleanup completes even if the surrounding
        task is cancelled.
        """
        self.out.set_end()
        if self.verbose:
            print(f'{self.exp.name}: cleaning up')
        (await self.before_cleanup())
        scs = []
        for (_, sc) in self.running:
            # int_term_kill: presumably escalates signals until exit -- TODO confirm
            scs.append(asyncio.create_task(sc.int_term_kill()))
        (await asyncio.shield(asyncio.wait(scs)))
        for (_, sc) in self.running:
            (await asyncio.shield(sc.wait()))
        scs = []
        for (executor, sock) in self.sockets:
            scs.append(asyncio.create_task(executor.rmtree(sock)))
        if scs:
            (await asyncio.shield(asyncio.wait(scs)))
        for (sim, sc) in self.running:
            self.out.add_sim(sim, sc)
        (await asyncio.shield(self.after_cleanup()))
        return self.out

    async def run(self) -> ExpOutput:
        """Run the experiment: start simulators topologically, wait, clean up."""
        try:
            self.out.set_start()
            graph = self.sim_graph()
            ts = graphlib.TopologicalSorter(graph)
            ts.prepare()
            while ts.is_active():
                # Launch, in parallel, every simulator whose deps are satisfied.
                starting = []
                sims = []
                for sim in ts.get_ready():
                    starting.append(asyncio.create_task(self.start_sim(sim)))
                    sims.append(sim)
                (await asyncio.wait(starting))
                for sim in sims:
                    ts.done(sim)
            (await self.before_wait())
            (await self.wait_for_sims())
        except asyncio.CancelledError:
            if self.verbose:
                print(f'{self.exp.name}: interrupted')
            self.out.set_interrupted()
        except:  # NOTE(review): bare except also catches BaseExceptions -- confirm intended
            self.out.set_failed()
            traceback.print_exc()
        # Cleanup must finish even under repeated cancellation, hence the
        # shield-and-retry loop: the shielded task keeps running across
        # CancelledError deliveries to this coroutine.
        terminate_collect_task = asyncio.create_task(self.terminate_collect_sims())
        while True:
            try:
                return (await asyncio.shield(terminate_collect_task))
            except asyncio.CancelledError:
                pass
class CheckpointManager(object):
    """Saves and loads all blobs of a single node to/from a checkpoint DB.

    The set of blobs to checkpoint is discovered (or read back from an
    existing checkpoint) by the task returned from :meth:`init`.  Save and
    load tasks are timed so their duration can be reported through the
    optional metadata handler.
    """

    # Name of the external-input blob holding the checkpointed blob names.
    BLOB_NAMES = 'blob_names'

    def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
        self._db_prefix = db_prefix
        self._node_name = node_name
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        self._net = core.Net('!!checkpoint_mngr')
        self._blob_names = self._net.AddExternalInput(self.BLOB_NAMES)
        self._names_output = None
        self._path_prefix = None
        self._path_type = None
        self._current_db_name = None
        self._current_checkpoint_duration = None

    def init(self, nodes=None, retrieve_from_epoch=None, path_prefix=None, path_type=None):
        """Initialize the checkpoint manager.

        Determines all blobs that need to be saved or loads from a checkpoint.

        Args:
            nodes: An array of nodes where this checkpoint manager is running.
                Should only contain a single node.
            retrieve_from_epoch: Set to a number to load blobs from this epoch.
            path_prefix: Used to construct db name or path where checkpoint
                files are stored.
            path_type: Indicate the type of path where checkpoint files are
                stored.
        """
        # NOTE: was a stray bare string in the class body; moved here as the
        # docstring of the method it documents.
        assert ((nodes is None) or (len(nodes) == 1)), 'CheckpointManager only supports single node.'
        with Task(outputs=[self._blob_names]) as task:
            if (retrieve_from_epoch is None):
                ops.GetAllBlobNames([], self._blob_names, include_shared=False)
            else:
                full_db_name = db_name(retrieve_from_epoch, self._node_name, self._db_prefix, path_prefix)
                db_type = (path_type or self._db_type)
                logger.info(('Initializing checkpoints from = %s' % full_db_name))
                ops.Load([], self._blob_names, db=full_db_name, db_type=db_type, absolute_path=True, keep_device=True)
        self._names_output = task.outputs()[0]
        return task

    def blob_list(self):
        """Return the checkpointed blob names (init task must have run)."""
        assert self._names_output
        return self._names_output.fetch().tolist()

    def _timed_task(self, cp_op_name, add_op):
        """Wrap *add_op* in a Task that times it; duration stored for stats."""
        with Task(name=cp_op_name) as task:
            with ops.task_init():
                timer = ops.TimerBegin([], counter_name=self._node_name)
            add_op()
            with ops.task_exit():
                time_span_blob = ops.TimerGetAndEnd(timer)
            self._current_checkpoint_duration = final_output(time_span_blob)
        return task

    def collect_checkpoint_stats(self, stats):
        """Record the last checkpoint op's duration into *stats* (by db name)."""
        if (self._current_db_name and self._current_checkpoint_duration):
            stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
        else:
            logger.info('Failed to collect checkpoint stats: {}'.format(self._current_db_name))

    def load(self, epoch, path_prefix=None, path_type=None):
        """Return a timed task loading all blobs from the given epoch's DB."""
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix, path_prefix)
        db_type = (path_type or self._db_type)
        logger.info(('Loading checkpoints from = %s' % self._current_db_name))

        def add_op():
            ops.Load([], self.blob_list(), db=self._current_db_name, db_type=db_type, absolute_path=True, keep_device=True)
        return self._timed_task('checkpoint_load', add_op)

    def load_blobs_from_checkpoint(self, blob_names, epoch):
        """Return a timed task loading only *blob_names* (incomplete allowed)."""
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info(('Load from %s' % self._current_db_name))

        def add_op():
            ops.Load([], blob_names, db=self._current_db_name, db_type=self._db_type, absolute_path=True, allow_incomplete=True)
        return self._timed_task('checkpoint_partial_load', add_op)

    def check_db_exists(self, epoch):
        """Return a task whose output says whether the epoch's DB exists."""
        logger.info(('Check existence of %s' % db_name(epoch, self._node_name, self._db_prefix)))
        with Task() as task:
            existence = ops.Const(False)
            ops.DBExists([], [existence], db_name=db_name(epoch, self._node_name, self._db_prefix), db_type=self._db_type, absolute_path=True)
            task.add_output(existence)
        return task

    def report_checkpoint_stats(self, action_name):
        """Forward collected checkpoint timings to the metadata handler."""
        all_stats = {}
        self.collect_checkpoint_stats(all_stats)
        if self._metadata_handler:
            self._metadata_handler.report(action_name, all_stats)

    def save(self, epoch):
        """Return a timed task saving all blobs to the given epoch's DB."""
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info(('Saving to %s' % self._current_db_name))

        def add_op():
            ops.Save(self.blob_list(), [], db=self._current_db_name, db_type=self._db_type, absolute_path=True)
        return self._timed_task('checkpoint_save', add_op)

    def write_checkpoint_metadata(self, epoch):
        """Record *epoch* via the metadata handler, if one is configured."""
        if (self._metadata_handler is not None):
            self._metadata_handler.write(epoch=epoch)

    def get_resume_from_epoch_id(self, user_epoch=None):
        """Return the epoch to resume from (handler-provided, else *user_epoch*)."""
        last_epoch = user_epoch
        if (self._metadata_handler is not None):
            last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
        return last_epoch

    def set_params(self, nodes, path_prefix=None, path_type=None):
        """Propagate path settings to this manager and its metadata handler."""
        if path_prefix:
            self._path_prefix = path_prefix
        if path_type:
            self._path_type = path_type
        if self._metadata_handler:
            self._metadata_handler.set_params(db_prefix=self._db_prefix, db_type=self._db_type, node_names=[str(self._node_name)], path_prefix=self._path_prefix, path_type=self._path_type)

    def cp_accessible(self, epoch=None):
        """Whether the checkpoint for *epoch* is accessible (True if no handler)."""
        if (self._metadata_handler is not None):
            return self._metadata_handler.cp_accessible(epoch)
        else:
            return True
def save_model(state, checkpoint, filename='checkpoint.pth.tar'):
    """Serialize *state* into *checkpoint* dir, prefixing the file name with the epoch."""
    target = os.path.join(checkpoint, f"epoch{state['epoch']}{filename}")
    torch.save(state, target)
def register_Ns3SimpleRefCount__Ns3WifiInformationElement_Ns3Empty_Ns3DefaultDeleter__lt__ns3WifiInformationElement__gt___methods(root_module, cls):
    # Registers the default and copy constructors for the
    # SimpleRefCount<WifiInformationElement, ...> template instantiation.
    # NOTE(review): appears to be auto-generated pybindgen binding code --
    # edit the generator, not this file, if changes are needed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter< ns3::WifiInformationElement > > const &', 'o')])
    return
def argparser(is_train=True):
    """Parse CLI options, load the dataset splits and build the model.

    Returns:
        (config, model, dataset_train, dataset_test)
    """
    def str2bool(v):
        # Only the literal 'true' (case-insensitive) counts as True.
        return (v.lower() == 'true')
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', type=str2bool, default=False)
    parser.add_argument('--batch_size', type=int, default=8, help='the mini-batch size')
    parser.add_argument('--prefix', type=str, default='default', help='a nickname for the training')
    parser.add_argument('--dataset', type=str, default='car', choices=['car', 'chair', 'kitti', 'synthia'], help='you can add your own dataset here')
    parser.add_argument('--num_input', type=int, default=2, help='the number of source images')
    parser.add_argument('--train_dir', type=str, default=None, help='load the latest checkpoint from a directory')
    parser.add_argument('--checkpoint', type=str, default=None, help='load all the parameters including the flow and pixel modules and the discriminator')
    parser.add_argument('--checkpoint_p', type=str, default=None, help='load the parameters of the pixel module')
    parser.add_argument('--checkpoint_f', type=str, default=None, help='load the parameters of the flow module')
    parser.add_argument('--checkpoint_g', type=str, default=None, help='load the parameters of both the flow and pixel module')
    parser.add_argument('--checkpoint_d', type=str, default=None, help='load the parameters of the discriminator')
    parser.add_argument('--log_step', type=int, default=10, help='the frequency of outputing log info')
    parser.add_argument('--ckpt_save_step', type=int, default=5000, help='the frequency of saving a checkpoint')
    parser.add_argument('--test_sample_step', type=int, default=100, help='the frequency of performing testing inference during training')
    parser.add_argument('--write_summary_step', type=int, default=100, help='the frequency of writing TensorBoard summaries')
    parser.add_argument('--max_steps', type=int, default=1000000, help='the max training iterations')
    parser.add_argument('--learning_rate_p', type=float, default=5e-05, help='the learning rate of the pixel module')
    parser.add_argument('--learning_rate_f', type=float, default=0.0001, help='the learning rate of the flow module')
    parser.add_argument('--learning_rate_d', type=float, default=0.0001, help='the learning rate of the discriminator')
    # BUGFIX: was type=int with a float default (0.01); an int parser would
    # truncate any user-supplied fractional weight.
    parser.add_argument('--local_confidence_weight', type=float, default=0.01, help='the weight of the confidence prediction objective')
    parser.add_argument('--num_res_block_pixel', type=int, default=0, help='the number of residual block in the bottleneck of the pixel module')
    parser.add_argument('--num_res_block_flow', type=int, default=4, help='the number of residual block in the bottleneck of the flow module')
    parser.add_argument('--num_dis_conv_layer', type=int, default=5, help='the number of convolutional layers of the discriminator')
    parser.add_argument('--num_conv_layer', type=int, default=5, help='the number of convolutional layers of the encoder of both the flow and pixel modules')
    parser.add_argument('--num_convlstm_block', type=int, default=2, help='the number of residual ConvLSTM block of the pixel module')
    parser.add_argument('--num_convlstm_scale', type=int, default=3, help='how many innermost layers of the pixel module have a residual ConvLSTM connection')
    parser.add_argument('--norm_type', type=str, default='None', choices=['batch', 'instance', 'None'], help='the type of normalization')
    parser.add_argument('--gan_type', type=str, default='ls', choices=['ls', 'normal'], help='the type of GAN losses such as LS-GAN, WGAN, etc')
    parser.add_argument('--gan_start_step', type=int, default=300000, help='start to optimize the GAN loss when the model is stable')
    parser.add_argument('--update_rate', type=int, default=1, help='update G more frequently than D')
    parser.add_argument('--num_scale', type=int, default=1, help='the number of multi-scale flow prediction (1 means without multi-scale prediction)')
    parser.add_argument('--moving_weight', type=str, default='uniform', choices=['uniform', 'shift', 'step'], help='gradually learn each scale from coarse to fine')
    parser.add_argument('--max_eval_steps', type=int, default=500, help='max steps of randomly sampling testing tuple.do not need to specify this when a data list is given')
    parser.add_argument('--data_id_list', type=str, default=None, help='specify a list of data point that you want to evaluate')
    parser.add_argument('--loss', type=str2bool, default=True, help='report the loss')
    parser.add_argument('--write_summary', type=str2bool, default=False, help='write the summary of this evaluation as a text file')
    parser.add_argument('--plot_image', type=str2bool, default=False, help='rendered predicted images')
    parser.add_argument('--quiet', type=str2bool, default=False)
    parser.add_argument('--summary_file', type=str, default='report.txt', help='the path to the summary file')
    parser.add_argument('--output_dir', type=str, help='the output directory of plotted images')
    config = parser.parse_args()
    # Pick the dataset loader lazily so only the needed module is imported.
    if (config.dataset in ['car', 'chair']):
        config.dataset_type = 'object'
        import datasets.object_loader as dataset
    elif (config.dataset in ['kitti', 'synthia']):
        config.dataset_type = 'scene'
        import datasets.scene_loader as dataset
    (dataset_train, dataset_test) = dataset.create_default_splits(config.num_input, config.dataset)
    # data_info packs the image and pose shapes for the model to consume.
    (image, pose) = dataset_train.get_data(dataset_train.ids[0])
    config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(pose.shape)])
    model = Model(config, debug_information=config.debug, is_train=is_train)
    return (config, model, dataset_train, dataset_test)
def is_package_installed_and_updated(package: str) -> bool:
    """Return True when *package* is installed at its latest remote version.

    Falls back to a plain installed check when the package is missing from
    the local listing.
    """
    try:
        pkginfo = list_packages(local=True)[package]
        return pkginfo.installed_version == pkginfo.remote_version
    except KeyError:
        # Not present in the local package listing.
        return is_package_installed(package)
class Sequencer(object):
    """A small dependency graph with topological ordering utilities.

    Nodes may be added explicitly or implicitly via predecessor/successor
    edges.  Supports ordered step listing, Tarjan strongly-connected
    components and Graphviz dot output.
    """

    def __init__(self):
        self._preds = {}   # successor -> set of predecessors
        self._succs = {}   # predecessor -> set of successors
        self._nodes = set()  # nodes added without edges

    def add_node(self, node):
        """Register *node* even if it participates in no edges."""
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """Forget *node*; with edges=True also drop all its edges."""
        if (node in self._nodes):
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Drop now-empty adjacency entries.
            for (k, v) in list(self._preds.items()):
                if (not v):
                    del self._preds[k]
            for (k, v) in list(self._succs.items()):
                if (not v):
                    del self._succs[k]

    def add(self, pred, succ):
        """Add the edge pred -> succ (self-loops are disallowed)."""
        assert (pred != succ)
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """Remove the edge pred -> succ; ValueError if it does not exist."""
        assert (pred != succ)
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError(('%r not a successor of anything' % succ))
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError(('%r not a successor of %r' % (succ, pred)))

    def is_step(self, step):
        """True when *step* is known to this sequencer (node or edge endpoint)."""
        return ((step in self._preds) or (step in self._succs) or (step in self._nodes))

    def get_steps(self, final):
        """Return the steps leading to *final* in dependency order.

        Revisited steps are pushed later (i.e. earlier in the returned
        order) so prerequisites always precede their dependents.
        """
        if (not self.is_step(final)):
            raise ValueError(('Unknown: %r' % final))
        result = []
        seen = set()
        # BFS over predecessors; an index walk replaces the former
        # quadratic list.pop(0) without changing visit order.
        todo = [final]
        cursor = 0
        while cursor < len(todo):
            step = todo[cursor]
            cursor += 1
            if (step in seen):
                if (step != final):
                    # Move a re-reached step to the end (earlier after reversal).
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                todo.extend(self._preds.get(step, ()))
        return reversed(result)

    def strong_connections(self):
        """Return strongly connected components (Tarjan, recursive)."""
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs

        def strongconnect(node):
            # Standard Tarjan bookkeeping.
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            try:
                successors = graph[node]
            except KeyError:
                # Node has no outgoing edges (narrowed from a broad Exception).
                successors = []
            for successor in successors:
                if (successor not in lowlinks):
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node], lowlinks[successor])
                elif (successor in stack):
                    lowlinks[node] = min(lowlinks[node], index[successor])
            if (lowlinks[node] == index[node]):
                # node is the root of a component: pop it off the stack.
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if (successor == node):
                        break
                result.append(tuple(connected_component))

        for node in graph:
            if (node not in lowlinks):
                strongconnect(node)
        return result

    def dot(self):
        """Render the graph as Graphviz dot source."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append((' %s -> %s;' % (pred, succ)))
        for node in self._nodes:
            result.append((' %s;' % node))
        result.append('}')
        return '\n'.join(result)
def main():
    """Read JSONL annotations, build labeled task instances, and pickle them.

    Input path comes from ``args.data_file`` and the output path from
    ``args.save_file`` (module-level ``args``).
    """
    logging.info(f'Reading annotations from {args.data_file} file...')
    dataset = read_jsonl_datafile(args.data_file)
    logging.info(f'Total annotations:{len(dataset)}')
    logging.info(f'Creating labeled data instances from annotations...')
    # NOTE(review): debug print of the first record's keys (crashes on an empty
    # dataset) -- looks like a leftover; confirm before removing.
    print(dataset[0].keys())
    (task_instances_dict, tag_statistics, question_keys_and_tags) = make_instances_from_dataset(dataset)
    logging.info(f'Saving all the instances, statistics and labels in {args.save_file}')
    save_in_pickle((task_instances_dict, tag_statistics, question_keys_and_tags), args.save_file)
def transpose_network(nn):
    """Rewrite every NCHW Conv in *nn* to NHWC, inserting layout transposes.

    Each matching Conv gets NCHW2NHWC ops in front of its first two inputs
    and NHWC2NCHW ops behind its outputs; afterwards redundant transposes
    on shared tensors are merged so each tensor is converted once.
    """
    incoming = {}   # original output tensor -> its NHWC counterpart
    outgoing = defaultdict((lambda : []))  # input tensor -> inserted NCHW2NHWC ops
    dfg = nn.dataFlow
    # Snapshot the node list: the graph is mutated while iterating.
    orig_nodes = [x for x in nn.nodes]
    for node in orig_nodes:
        if (node.isOperator() and (node.name == 'Conv')):
            arg_dict = utils.ArgsToDict(node.annotation.operator_def.arg)
            # Skip Convs that already declare a non-NCHW order.
            if (('order' in arg_dict) and (arg_dict['order'] != 'NCHW')):
                continue
            inputs = [x for x in node.inputs]
            assert (len(inputs) >= 2), 'Conv operator should have two inputs'
            outputs = [x for x in node.outputs]
            assert (len(outputs) >= 1), 'Conv operator should have an output'
            # Detach the Conv before rebuilding its neighborhood.
            for inp in inputs:
                nn.deleteEdge(inp, node)
            for outp in outputs:
                nn.deleteEdge(node, outp)
            # Only the first two inputs are transposed; any further inputs
            # (e.g. a bias) are reconnected unchanged.
            for idx in range(2):
                new_inp = nn.createUniqueDataNode(inputs[idx].name)
                transp = dfg.createNode(ng.NeuralNetOperator('NCHW2NHWC'))
                nn.createEdge(inputs[idx], transp)
                nn.createEdge(transp, new_inp)
                outgoing[inputs[idx]].append(transp)
                inputs[idx] = new_inp
            for idx in range(len(outputs)):
                new_outp = nn.createUniqueDataNode(outputs[idx].name)
                transp = dfg.createNode(ng.NeuralNetOperator('NHWC2NCHW'))
                nn.createEdge(transp, outputs[idx])
                nn.createEdge(new_outp, transp)
                incoming[outputs[idx]] = new_outp
                outputs[idx] = new_outp
            # Recreate the Conv with order=NHWC and wire it back in.
            arg_dict['order'] = 'NHWC'
            new_node = nn.createNode(core.CreateOperator('Conv', [], [], **arg_dict))
            for inp in inputs:
                nn.createEdge(inp, new_node)
            for outp in outputs:
                nn.createEdge(new_node, outp)
            nn.deleteNode(node)
    # Deduplicate: all consumers of a tensor share a single NHWC copy.
    for orig_tensor in outgoing:
        if (orig_tensor in incoming):
            # The tensor is already produced in NHWC by a rewritten Conv.
            new_tensor = incoming[orig_tensor]
        else:
            # Keep the first inserted transpose; retarget users of the rest.
            out_ops = outgoing[orig_tensor]
            new_tensor = out_ops[0].outputs[0]
            outgoing[orig_tensor] = out_ops[1:]
        for opnode in outgoing[orig_tensor]:
            for out in opnode.outputs:
                nn.replaceAllUsesWith(out, new_tensor)
                nn.deleteNode(out)
            nn.deleteNode(opnode)
class StandardPermutations_descents(StandardPermutations_n_abstract):
    """Standard permutations of ``n`` whose descent set is exactly ``d``."""

    # NOTE(review): upstream this is usually a @staticmethod; the decorator may
    # have been lost in extraction -- confirm against the original file.
    def __classcall_private__(cls, d, n):
        """Normalize the descent set to a sorted tuple before construction."""
        return super().__classcall__(cls, tuple(sorted(d)), n)

    def __init__(self, d, n):
        StandardPermutations_n_abstract.__init__(self, n)
        self._d = d  # sorted tuple of descent positions

    def _repr_(self):
        return ('Standard permutations of %s with descents %s' % (self.n, list(self._d)))

    def cardinality(self):
        """Count permutations of n with exactly this descent set.

        Iterates a prefix-sum operator ``m`` (ascent) and a suffix-sum
        operator ``d`` (descent) over a count vector, one position at a
        time, and sums the final vector.
        """
        def m(l):
            # Prefix sums with a leading 0 (length grows by one).
            s = 0
            partial_sums = [0]
            for i in l:
                s += i
                partial_sums.append(s)
            return partial_sums
        def d(l):
            # Suffix sums: reverse, prefix-sum, reverse back.
            return m(reversed(l))[::(- 1)]
        one = ZZ.one()
        if (not self._d):
            # Only the identity permutation has an empty descent set.
            return one
        # l_ops[i] == 1 -> ascent at position i, 0 -> descent.
        l_ops = ([1] * (self.n - 1))
        for i in self._d:
            l_ops[i] = 0
        l = [one]
        for op in reversed(l_ops):
            if op:
                l = m(l)
            else:
                l = d(l)
        return sum(l)

    def first(self):
        """First element of this set (via descents_composition_first)."""
        return descents_composition_first(Composition(descents=(self._d, self.n)))

    def last(self):
        """Last element of this set (via descents_composition_last)."""
        return descents_composition_last(Composition(descents=(self._d, self.n)))

    def __iter__(self):
        return iter(descents_composition_list(Composition(descents=(self._d, self.n))))
def download_language_builtin_entities(language, *pip_args):
    """Download the resources for *language* plus all its gazetteer entities."""
    from builtins import str
    from snips_nlu_parsers import get_supported_gazetteer_entities
    from snips_nlu import __about__
    from snips_nlu.cli.download import download_from_resource_name
    from snips_nlu.cli.utils import check_resources_alias, get_compatibility, get_json
    # Language resources first, then each supported gazetteer entity.
    download_from_resource_name(language, pip_args, verbose=False)
    shortcuts = get_json(__about__.__shortcuts__, 'Resource shortcuts')
    for entity in get_supported_gazetteer_entities(str(language)):
        check_resources_alias(entity, shortcuts)
        compatibility = get_compatibility()
        alias = entity.lower()
        full_name = shortcuts.get(alias, alias)
        _download_and_link_entity(full_name, entity, language, compatibility, pip_args)
# FIX: the decorator had lost its '@pytest.mark' prefix (extraction artifact),
# leaving a bare '.parametrize(...)' expression; pytest is already used in
# this file (pytest.approx), so the mark is in scope.
@pytest.mark.parametrize('result', [True, False])
def test_mutation_change_call_success(constructor_mock, result, default_test_case):
    """_mutation_change must report whatever the factory's change_random_call
    returned, and must not reset the statement's distance when the statement's
    own mutate() reports no change."""
    factory = MagicMock(tf.TestFactory)
    factory.change_random_call.return_value = result
    chromosome = tcc.TestCaseChromosome(default_test_case, test_factory=factory)
    const0 = ConstructorStatement(default_test_case, constructor_mock)
    const0.ret_val.distance = 5
    default_test_case.add_statement(const0)
    # Force the change branch (next_float == 0.0 always below threshold).
    with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
        float_mock.return_value = 0.0
        with mock.patch.object(const0, 'mutate') as mutate_mock:
            mutate_mock.return_value = False
            assert (chromosome._mutation_change() == result)
            mutate_mock.assert_called_once()
    assert (const0.ret_val.distance == 5)
class LocalsDictItemNode(DictItemNode):
    # Dict item node (presumably for a locals() snapshot, per the name):
    # entries whose value type cannot be coerced to a Python object are
    # excluded by nulling their value.
    def analyse_types(self, env):
        """Analyse key and value; coerce the key (and the value when possible)
        to Python objects, otherwise drop the value by setting it to None."""
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        if self.value.type.can_coerce_to_pyobject(env):
            self.value = self.value.coerce_to_pyobject(env)
        else:
            # Non-coercible value: mark this item for omission.
            self.value = None
        return self
def build_norm_layer(cfg, num_features, postfix=''):
    """Build a normalization layer from a config dict.

    Args:
        cfg: dict with a 'type' key naming an entry in NORM_LAYERS; the
            remaining keys (plus an optional 'requires_grad') are forwarded
            to the layer constructor.
        num_features: channel count for the layer.
        postfix: appended to the abbreviated layer name.

    Returns:
        (name, layer) tuple.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    opts = dict(cfg)
    layer_type = opts.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')
    layer_cls = NORM_LAYERS.get(layer_type)
    abbr = infer_abbr(layer_cls)
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)
    # Pop requires_grad before the remaining options reach the constructor.
    requires_grad = opts.pop('requires_grad', True)
    opts.setdefault('eps', 1e-05)
    if layer_type == 'GN':
        # GroupNorm takes num_channels and mandates num_groups.
        assert 'num_groups' in opts
        layer = layer_cls(num_channels=num_features, **opts)
    else:
        layer = layer_cls(num_features, **opts)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    for param in layer.parameters():
        param.requires_grad = requires_grad
    return (name, layer)
def load_url(url, model_dir='./pretrained', map_location=torch.device('cpu')):
    """Fetch a checkpoint from *url*, caching it in *model_dir*, and load it."""
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    cached_file = os.path.join(model_dir, url.rsplit('/', 1)[-1])
    if not os.path.exists(cached_file):
        # Cache miss: download once; later calls reuse the local copy.
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    return torch.load(cached_file, map_location=map_location)
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """Fill X[1..q] by repeatedly applying the expm-multiply step over q
    sub-intervals of width h (variant 0 of the interval core).

    Returns (X, 0); the second element appears to be a status/extra-steps
    counter matching the other core variants -- TODO confirm.
    """
    if (norm_info.onenorm() == 0):
        # Zero operator: trivial Pade/scaling parameters.
        (m_star, s) = (0, 1)
    else:
        # Parameter selection uses the operator scaled by 1/q.
        norm_info.set_scale((1.0 / q))
        (m_star, s) = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)
    for k in range(q):
        X[(k + 1)] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return (X, 0)
_decorator(0)
# NOTE(review): the line above looks like a decorator that lost its '@' and
# module prefix during extraction; as written it is a bare call statement.
def get_status(html):
    """Extract an integer status from the page's left pane.

    Parses the third <strong> element's text; returns 0 when the pane is
    empty or parsing fails.
    """
    cont = public.get_left(html)
    if (cont == ''):
        return 0
    soup = BeautifulSoup(cont, 'lxml')
    try:
        return int(soup.find_all('strong')[2].get_text())
    except Exception:
        # Missing element or non-numeric text: treat as status 0.
        return 0
def camera_ray_from_pixel_with_jacobians(self: sf.CameraCal, pixel: sf.V2, epsilon: sf.Scalar) -> T.Tuple[(sf.V3, sf.Scalar, sf.M, sf.M)]:
    """Backproject *pixel* to a camera ray and return it with its jacobians.

    Returns:
        (point, is_valid,
         jacobian of point w.r.t. the calibration parameters,
         jacobian of point w.r.t. the pixel)
    """
    (point, is_valid) = self.camera_ray_from_pixel(pixel, epsilon)
    point_D_cal = point.jacobian(self.parameters())
    point_D_pixel = point.jacobian(pixel)
    return (point, is_valid, point_D_cal, point_D_pixel)
def generate_bsb(dims, reduce_dim, warp_reduce_dim, libname, reps=1):
    """Generate, compile and link a CUDA benchmark library for BackwardScaleBias.

    Emits one extern-"C" timing function per (input, d_input, d_output)
    layout permutation into temp.cu, then builds it into *libname* with nvcc.
    Skips everything when *libname* already exists.
    """
    if os.path.exists(libname):
        return
    # Total element count across all dims; the dim that is neither reduced
    # nor warp-reduced sizes the scale/bias vectors.
    size = reduce((lambda x, y: (x * y)), dims.values())
    for d in dims:
        if ((d != reduce_dim) and (d != warp_reduce_dim)):
            non_reduce_dim = d
    non_reduce_size = dims[non_reduce_dim]
    dims_declaration = '\n'.join([('struct %s { enum { value = %d }; };' % (d, dims[d])) for d in dims])
    temp_source = ('\n #include "blocks.cuh"\n \n #include <chrono>\n \n ' + dims_declaration)
    # One benchmark entry point per combination of layouts of the three tensors.
    for dims_permutation_in in itertools.permutations(dims):
        for dims_permutation_din in itertools.permutations(dims):
            for dims_permutation_dout in itertools.permutations(dims):
                in_label = ''.join(dims_permutation_in)
                din_label = ''.join(dims_permutation_din)
                dout_label = ''.join(dims_permutation_dout)
                in_layout = ', '.join(dims_permutation_in)
                din_layout = ', '.join(dims_permutation_din)
                dout_layout = ', '.join(dims_permutation_dout)
                layouts_declaration = ('\n using lIN = metal::list<%s>;\n using lDIN = metal::list<%s>;\n using lDOUT = metal::list<%s>;\n ' % (in_layout, din_layout, dout_layout))
                func_name = ('temp_%s_%s_%s' % (in_label, din_label, dout_label))
                temp_source += '\n extern "C" {{\n double {func_name}(half* IN, half* SCALE, half* DOUT, half* DIN, half* DSCALE, half* DBIAS) {{\n \n half* gIN = nullptr;\n half* gSCALE = nullptr;\n half* gDOUT = nullptr;\n half* gDIN = nullptr;\n half* gDSCALE = nullptr;\n half* gDBIAS = nullptr;\n \n CHECK(cudaMalloc(&gIN, {size} * sizeof(half)));\n CHECK(cudaMemcpy(gIN, IN, {size} * sizeof(half), cudaMemcpyHostToDevice));\n \n CHECK(cudaMalloc(&gSCALE, {non_reduce_size} * sizeof(half)));\n CHECK(cudaMemcpy(gSCALE, SCALE, {non_reduce_size} * sizeof(half), cudaMemcpyHostToDevice));\n \n CHECK(cudaMalloc(&gDOUT, {size} * sizeof(half)));\n CHECK(cudaMemcpy(gDOUT, DOUT, {size} * sizeof(half), cudaMemcpyHostToDevice));\n \n \n CHECK(cudaMalloc(&gDIN, {size} * sizeof(half)));\n CHECK(cudaMalloc(&gDSCALE, {non_reduce_size} * sizeof(half)));\n CHECK(cudaMalloc(&gDBIAS, {non_reduce_size} * sizeof(half)));\n\n {layouts_declaration}\n \n typedef std::chrono::high_resolution_clock Clock;\n auto t1 = Clock::now();\n for (int i = 0; i < {reps}; i++) {{\n BackwardScaleBias<lIN, lDIN, lDOUT, {reduce_dim}, {warp_reduce_dim}>::run(gIN, gSCALE, gDOUT, gDIN, gDSCALE, gDBIAS, (cudaStream_t)0);\n CHECK(cudaStreamSynchronize(0));\n }}\n auto t2 = Clock::now();\n \n CHECK(cudaMemcpy(DIN, gDIN, {size} * sizeof(half), cudaMemcpyDeviceToHost));\n CHECK(cudaMemcpy(DSCALE, gDSCALE, {non_reduce_size} * sizeof(half), cudaMemcpyDeviceToHost));\n CHECK(cudaMemcpy(DBIAS, gDBIAS, {non_reduce_size} * sizeof(half), cudaMemcpyDeviceToHost));\n \n CHECK(cudaFree(gIN));\n CHECK(cudaFree(gSCALE));\n CHECK(cudaFree(gDOUT));\n CHECK(cudaFree(gDIN));\n CHECK(cudaFree(gDSCALE));\n CHECK(cudaFree(gDBIAS));\n \n return std::chrono::duration<double, std::micro>(t2 - t1).count() / {reps};\n }}\n }}\n '.format(layouts_declaration=layouts_declaration, func_name=func_name, size=size, reduce_dim=reduce_dim, warp_reduce_dim=warp_reduce_dim, reps=reps, non_reduce_size=non_reduce_size)
    # Write the accumulated source once, then compile and link via nvcc.
    with open('temp.cu', 'w') as f:
        f.write(temp_source)
    subprocess.run('nvcc -O3 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -c --compiler-options -fPIC temp.cu -o temp.o'.split(' '))
    subprocess.run('nvcc -shared -o {libname} temp.o'.format(libname=libname).split(' '))
# NOTE(review): likely a taichi function (probably decorated @ti.func upstream;
# the decorator may have been stripped during extraction).
def sense(phase, pos, ang):
    # Sample the grid SENSE_DIST ahead of `pos` along heading `ang`,
    # wrapping toroidally via the modulo with GRID_SIZE.
    p = (pos + (ti.Vector([ti.cos(ang), ti.sin(ang)]) * SENSE_DIST))
    return grid[(phase, (p.cast(int) % GRID_SIZE))]
def test_clip_action(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm):
    """clip_action keeps in-range actions untouched and clamps out-of-range ones.

    Relies on module-level fixture arrays (upper/lower_*_normalized_action and
    the *_to_be_clipped/_clipped pair); the jp_full space is checked through a
    denormalize round-trip with approximate equality.
    NOTE(review): the as_jt_full fixture is requested but never used here.
    """
    assert (as_default.clip_action(upper_99_normalized_action) == upper_99_normalized_action).all()
    assert (as_default.clip_action(lower_99_normalized_action) == lower_99_normalized_action).all()
    assert (as_default.clip_action(upper_100_normalized_action) == upper_100_normalized_action).all()
    assert (as_default.clip_action(lower_100_normalized_action) == lower_100_normalized_action).all()
    assert (as_default.clip_action(upper_101_normalized_action) == upper_100_normalized_action).all()
    assert (as_default.clip_action(lower_101_normalized_action) == lower_100_normalized_action).all()
    assert (as_default.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all()
    assert (as_jt_norm.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all()
    assert (as_jp_norm.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all()
    assert (as_jp_full.clip_action(as_jp_full.denormalize_action(normalized_action_to_be_clipped)) == pytest.approx(as_jp_full.denormalize_action(normalized_action_clipped)))
# FIX: the decorator had lost its '@hydra.main' prefix (extraction artifact),
# leaving a bare, syntactically invalid '(config_path=..., config_name=...)'.
@hydra.main(config_path='./conf', config_name='config')
def main(config: DictConfig) -> None:
    """Train an AnomalyTransformer according to the hydra *config*."""
    set_seed(config.train.state.seed)
    logger.info(OmegaConf.to_yaml(config, resolve=True))
    logger.info(f'Using the model: {config.model.name}')
    (train_data, val_data) = get_data(config)
    # BUGFIX: previously iterated over the undefined name `train_features`;
    # the labeled training examples live in `train_data`.
    config.data.num_class = len(set([x['labels'] for x in train_data]))
    print(f'num_class: {config.data.num_class}')
    if (not config.debug):
        timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
        run_name = f'{config.train.wandb.run_name}_{config.model.model}_{config.data.name}_{timestamp}'
        wandb.init(entity=config.train.wandb_entity, project=config.train.wandb_project, config=dict(config), name=run_name)
        if (not config.train.pt):
            # NOTE(review): when `pt` is falsy this embeds that falsy value in
            # the new path (e.g. 'None/<run_name>') -- confirm intent.
            config.train.pt = f'{config.train.pt}/{run_name}'
    model = AnomalyTransformer(config)
    model.to(config.device)
    train(config, model, train_data, val_data)
class ProperlyShapedPointEstimateModelMixin(PointEstimateActorModelMixin):
    def forward_pass_actor(self):
        """Build the actor forward graph.

        Normalized input -> 600 (tanh) -> 200 (linear bottleneck) ->
        600 (tanh) -> input_dim (linear); the result is stored in
        self.prediction.
        """
        with tf.variable_scope('NormalizeNetworkInput'):
            self._create_normalized_network_input()
        with tf.variable_scope('ForwardGraph'):
            h = self.normalized_network_input
            h = slim.fully_connected(h, 600, activation_fn=tf.nn.tanh)
            # Bottleneck layer is linear (no activation).
            h = slim.fully_connected(h, 200, activation_fn=None)
            h = slim.fully_connected(h, 600, activation_fn=tf.nn.tanh)
            h = slim.fully_connected(h, self.input_dim, activation_fn=None)
        self.prediction = h
def protoge_config():
    """Return the default DDPG config with the 'protoge' hyperparameters applied."""
    config = default_ddpg_config()
    overrides = {
        'gamma': 0.98,
        'actor_lr': 0.001,
        'critic_lr': 0.001,
        'actor_weight_decay': 0.0,
        'action_l2_regularization': 0.1,
        'target_network_update_freq': 40,
        'target_network_update_frac': 0.05,
        'optimize_every': 1,
        'batch_size': 2000,
        'warm_up': 2500,
        'initial_explore': 5000,
        'replay_size': 1000000,
        'clip_target_range': ((- 50.0), 0.0),
        'action_noise': 0.1,
        'eexplore': 0.1,
        'go_eexplore': 0.1,
        'go_reset_percent': 0.0,
        'her': 'rfaab_1_4_3_1_1',
        'grad_value_clipping': 5.0,
    }
    for key, value in overrides.items():
        setattr(config, key, value)
    return config
class Shift(nn.Module):
    """Module applying a CUDA shift op (``_shift_cuda``) along one dimension.

    Args:
        kernel_size: shift window size; must be odd.  A size of 1 is the
            identity and short-circuits the CUDA kernel entirely.
        dim: dimension to shift along; only 2 or 3 are supported.

    Raises:
        ValueError: if ``dim`` is not 2/3 or ``kernel_size`` is even.
    """

    def __init__(self, kernel_size, dim):
        super(Shift, self).__init__()
        # Bug fix: validate with explicit raises instead of `assert`, which
        # is silently stripped under `python -O`.
        if dim not in (2, 3):
            raise ValueError('dim must be 2 or 3')
        if kernel_size % 2 != 1:
            raise ValueError('kernel_size must be odd')
        self.kernel_size = kernel_size
        self.dim = dim

    def forward(self, x):
        # kernel_size == 1 is a no-op; avoid launching the CUDA kernel.
        if self.kernel_size == 1:
            return x
        out = _shift_cuda(x, self.kernel_size, self.dim)
        return out
def register_Ns3BasicEnergySourceHelper_methods(root_module, cls):
    """Register the ns3::BasicEnergySourceHelper API on the pybindgen
    wrapper *cls* (API-scan-generated binding code; do not hand-edit the
    signatures without re-checking the C++ headers).
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::BasicEnergySourceHelper const &', 'arg0')])
    # Default constructor.
    cls.add_constructor([])
    # void Set(std::string name, const AttributeValue &v) [virtual]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')], is_virtual=True)
    # Ptr<EnergySource> DoInstall(Ptr<Node>) const — private virtual hook.
    cls.add_method('DoInstall', 'ns3::Ptr< ns3::EnergySource >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='private', is_virtual=True)
    return
def _expm(A, use_exact_onenorm):
    """Matrix exponential via Pade approximation with scaling and squaring.

    Implements the Al-Mohy & Higham (2009) algorithm: accept a low-order
    Pade approximant (3/5/7/9) when the norms of powers of A are small
    enough, otherwise scale A by 2**-s, apply the order-13 approximant,
    and square the result s times.

    Parameters
    ----------
    A : array_like or sparse matrix
        Square matrix; list/tuple/np.matrix inputs are converted.
    use_exact_onenorm : bool or 'auto'
        Compute exact 1-norms of matrix powers instead of estimates;
        'auto' chooses exact for matrices smaller than 200x200.

    Returns
    -------
    expm(A), wrapped back in the input's container class for sparse inputs.
    """
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if (len(A.shape) != 2) or (A.shape[0] != A.shape[1]):
        raise ValueError('expected a square matrix')
    # Degenerate 0x0 case: the exponential is the empty matrix.
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out
    # Scalar (1x1) case: exp of the single entry.
    if A.shape == (1, 1):
        out = [[np.exp(A[(0, 0)])]]
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return np.array(out)
    # Exact (e.g. integer) dtypes cannot hold the result; promote to float.
    if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A))
            and (not np.issubdtype(A.dtype, np.inexact))):
        A = A.astype(float)
    # Upper-triangular input lets the squaring phase fix the diagonal
    # exactly (Code Fragment 2.1 of the paper).
    structure = (UPPER_TRIANGULAR if _is_upper_triangular(A) else None)
    if use_exact_onenorm == 'auto':
        use_exact_onenorm = (A.shape[0] < 200)
    h = _ExpmPadeHelper(A, structure=structure, use_exact_onenorm=use_exact_onenorm)
    # Bug fix: the theta_m acceptance thresholds below were truncated to
    # `0.`, `0.`, `0.` and `2.`, which made the Pade-3/5/7 branches
    # unreachable (the eta values are norms, hence >= 0).  Restored the
    # standard constants from Al-Mohy & Higham (2009), Table 3.1, as used
    # by SciPy's expm.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if (eta_1 < 1.495585217958292e-002) and (_ell(h.A, 3) == 0):
        (U, V) = h.pade3()
        return _solve_P_Q(U, V, structure=structure)
    eta_2 = max(h.d4_tight, h.d6_loose)
    if (eta_2 < 2.539398330063230e-001) and (_ell(h.A, 5) == 0):
        (U, V) = h.pade5()
        return _solve_P_Q(U, V, structure=structure)
    eta_3 = max(h.d6_tight, h.d8_loose)
    if (eta_3 < 9.504178996162932e-001) and (_ell(h.A, 7) == 0):
        (U, V) = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if (eta_3 < 2.097847961257068e+000) and (_ell(h.A, 9) == 0):
        (U, V) = h.pade9()
        return _solve_P_Q(U, V, structure=structure)
    # Order-13 path: choose the smallest s with 2**-s * eta_5 <= theta_13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25
    if eta_5 == 0:
        s = 0  # essentially nilpotent: no scaling needed
    else:
        s = max(int(np.ceil(np.log2((eta_5 / theta_13)))), 0)
    s = (s + _ell(((2 ** (- s)) * h.A), 13))
    (U, V) = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Squaring with an exact diagonal recurrence for triangular input.
        X = _fragment_2_1(X, h.A, s)
    else:
        # Undo the scaling: expm(A) = expm(A / 2**s) ** (2**s).
        for i in range(s):
            X = X.dot(X)
    return X
class AdditiveSemigroups(CategoryWithAxiom_singleton):
    """The category of additive semigroups: additive magmas whose ``+``
    is associative."""
    _base_category_class_and_axiom = (AdditiveMagmas, 'AdditiveAssociative')
    # Lazily-resolved axiom subcategories.
    AdditiveCommutative = LazyImport('sage.categories.commutative_additive_semigroups', 'CommutativeAdditiveSemigroups', at_startup=True)
    AdditiveUnital = LazyImport('sage.categories.additive_monoids', 'AdditiveMonoids', at_startup=True)

    class ParentMethods():
        def _test_additive_associativity(self, **options):
            """Check ``(x+y)+z == x+(y+z)`` on a sample of element triples."""
            tester = self._tester(**options)
            S = tester.some_elements()
            from sage.misc.misc import some_tuples
            for (x, y, z) in some_tuples(S, 3, tester._max_runs):
                tester.assertEqual(((x + y) + z), (x + (y + z)))

    class Homsets(HomsetsCategory):
        def extra_super_categories(self):
            # Homsets of additive semigroups are additive semigroups.
            return [AdditiveSemigroups()]

    class CartesianProducts(CartesianProductsCategory):
        def extra_super_categories(self):
            # Cartesian products of additive semigroups are additive semigroups.
            return [AdditiveSemigroups()]

    class Algebras(AlgebrasCategory):
        def extra_super_categories(self):
            # An additive-semigroup algebra is in particular a semigroup
            # (multiplicatively, via the additive structure of the basis).
            from sage.categories.semigroups import Semigroups
            return [Semigroups()]

        class ParentMethods():
            # Bug fix: the original had a stray bare `_method` expression
            # here (a NameError at class-definition time) — evidently a
            # mangled `@cached_method` decorator; restored.  Assumes
            # `cached_method` is imported at module level (from
            # sage.misc.cachefunc) — TODO confirm.
            @cached_method
            def algebra_generators(self):
                return self.basis().keys().additive_semigroup_generators().map(self.monomial)

            def product_on_basis(self, g1, g2):
                # Product of basis elements is indexed by the sum of indices.
                return self.monomial((g1 + g2))
def main(args):
    """Train an ESimCSE model on the unsupervised CNSD-STS split and
    evaluate on the test split.

    Expects *args* to carry: data_path, pretrain_model_path, max_length,
    q_size, dup_rate, batch_size, pooler, dropout, lr, save_path.
    """
    args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Only the unsupervised train file and the test file are consumed; the
    # supervised train/dev paths built in the original were unused locals.
    train_path_unsp = args.data_path + 'cnsd-sts-train_unsup.txt'
    test_path_sp = args.data_path + 'cnsd-sts-test.txt'
    test_data_source = load_sts_data(test_path_sp)
    tokenizer = BertTokenizer.from_pretrained(args.pretrain_model_path)
    train_data_source = load_sts_data_unsup(train_path_unsp)
    train_sents = [data[0] for data in train_data_source]
    train_dataset = TrainDataset(train_sents)
    train_call_func = CollateFunc(tokenizer, max_len=args.max_length, q_size=args.q_size, dup_rate=args.dup_rate)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=12, collate_fn=train_call_func)
    test_dataset = TestDataset(test_data_source, tokenizer, max_len=args.max_length)
    # NOTE(review): shuffling the *test* loader is unusual; kept as-is since
    # aggregate evaluation should be order-independent — confirm.
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=12)
    # Input validation: raise instead of assert (asserts vanish under -O).
    if args.pooler not in ('cls', 'pooler', 'last-avg', 'first-last-avg'):
        raise ValueError(f'unsupported pooler: {args.pooler!r}')
    model = ESimcseModel(pretrained_model=args.pretrain_model_path, pooling=args.pooler, dropout=args.dropout).to(args.device)
    momentum_encoder = MomentumEncoder(args.pretrain_model_path, args.pooler).to(args.device)
    ESimCSELoss = MultiNegativeRankingLoss()
    esimcse_loss = ESimCSELoss.multi_negative_ranking_loss
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    train(model, momentum_encoder, train_dataloader, test_dataloader, optimizer, esimcse_loss, args.device, args.save_path)
def select_free_cuda():
    """Return the index of the CUDA device with the most free memory.

    Queries ``nvidia-smi`` directly via :mod:`subprocess`.  The original
    implementation shelled out through ``os.system`` with grep, leaked an
    open file handle, and left its temp file behind if parsing failed.

    Returns:
        numpy integer index of the GPU with maximal free memory.

    Raises:
        FileNotFoundError: if ``nvidia-smi`` is not on PATH.
        subprocess.CalledProcessError: if ``nvidia-smi`` exits non-zero.
    """
    import subprocess  # local import keeps the module's top-level imports untouched
    completed = subprocess.run(
        ['nvidia-smi', '--query-gpu=memory.free', '--format=csv,noheader,nounits'],
        capture_output=True, text=True, check=True,
    )
    # One integer (MiB free) per line, one line per GPU.
    memory_gpu = [int(token) for token in completed.stdout.split()]
    return np.argmax(memory_gpu)
class ConvBnReLU2d(ConvBn2d):
    """QAT module fusing Conv2d + BatchNorm2d + ReLU: applies the fused
    conv/bn forward from :class:`ConvBn2d`, then ReLU."""
    # Float module classes this QAT module corresponds to / is swapped from.
    _FLOAT_MODULE = nni.ConvBnReLU2d
    _FLOAT_CONV_MODULE = nn.Conv2d
    _FLOAT_BN_MODULE = nn.BatchNorm2d
    _FLOAT_RELU_MODULE = nn.ReLU

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):
        super(ConvBnReLU2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig)

    def forward(self, input):
        # Fused conv+bn forward, then ReLU.
        return F.relu(ConvBn2d._forward(self, input))

    # Bug fix: `from_float` takes `cls` and calls super(..., cls) but was
    # missing the @classmethod decorator, so ConvBnReLU2d.from_float(mod)
    # would have bound `mod` as `cls`.
    @classmethod
    def from_float(cls, mod):
        return super(ConvBnReLU2d, cls).from_float(mod)
def load_spm(path: str) -> sentencepiece.SentencePieceProcessor:
    """Load and return a SentencePiece processor from the model file at *path*."""
    processor = sentencepiece.SentencePieceProcessor()
    processor.Load(str(path))
    return processor
def unpublishMySelf():
    """Delete this device's Device and Stream context entities from the
    broker, using the module-level ``profile`` and ``brokerURL`` globals."""
    global profile, brokerURL
    # (id prefix, entity type) for the two entities registered by this device.
    targets = (
        ('Device', profile['type']),
        ('Stream', 'RainObservation'),
    )
    for prefix, entity_type in targets:
        ctx_obj = {
            'entityId': {
                'id': f"{prefix}.{profile['type']}.{profile['id']}",
                'type': entity_type,
                'isPattern': False,
            }
        }
        deleteContext(brokerURL, ctx_obj)
def test_analyze_with_all_actions_as_list():
    """Run DeepFace.analyze with all four actions and check every result
    carries the four dominant-attribute fields."""
    img = 'dataset/img4.jpg'
    demography_objs = DeepFace.analyze(img, actions=['age', 'gender', 'race', 'emotion'], silent=True)
    # (log label, result key) for each analyzed attribute.
    attributes = (
        ('Age', 'age'),
        ('Gender', 'dominant_gender'),
        ('Race', 'dominant_race'),
        ('Emotion', 'dominant_emotion'),
    )
    for demography in demography_objs:
        logger.debug(f'Demography: {demography}')
        values = {label: demography[key] for label, key in attributes}
        for label, value in values.items():
            logger.debug(f'{label}: {value}')
        for _, key in attributes:
            assert (demography.get(key) is not None)
    logger.info(' test analyze for all actions as array done')
def getter_setter_test():
    """Build a Pynguin test case for the setter/getter fixture.

    Parses a seed test case (instantiate SetterGetter, define an int, call
    ``getter``), then appends a ``setter`` method call wired to the
    previously created instance and integer, and returns the test case.
    """
    cluster = generate_test_cluster('tests.fixtures.linecoverage.setter_getter')
    transformer = AstToTestCaseTransformer(cluster, False, EmptyConstantProvider())
    # Seed statements: setter_getter_0 = SetterGetter(); int_0 = 3360; getter().
    transformer.visit(ast.parse('def test_case_0():\n    setter_getter_0 = module_0.SetterGetter()\n    int_0 = 3360\n    int_1 = setter_getter_0.getter()\n'))
    tc = transformer.testcases[0]
    # Append setter(new_value=int_0) on the instance from statement 0,
    # with a hand-built GenericMethod/InferredSignature description.
    tc.add_statement(MethodStatement(tc, GenericMethod(cluster.type_system.to_type_info(SetterGetter), SetterGetter.setter, InferredSignature(signature=inspect.signature(SetterGetter.setter), original_parameters={'new_attribute': cluster.type_system.convert_type_hint(int)}, original_return_type=cluster.type_system.convert_type_hint(None), type_system=cluster.type_system)), tc.statements[0].ret_val, {'new_value': tc.statements[1].ret_val}))
    return tc
def main(env_name='Acrobot-v1', n_episodes=1000, actor_lr=0.001, critic_lr=0.01, gamma=0.98, gae_lambda=0.95, epsilon=0.2, jax_seed=42):
    """Train a PPO agent on a 1-D-observation gym environment, then plot
    and save the per-episode reward curve as ``ppo_<env_name>.png``."""
    env = gym.make(env_name)
    # Only flat (1-D) observation spaces are supported.
    assert (len(env.observation_space.shape) == 1)
    agent = PPOAgent(state_dim=env.observation_space.shape[0], action_dim=env.action_space.n, actor_lr=actor_lr, critic_lr=critic_lr, gamma=gamma, gae_lambda=gae_lambda, epsilon=epsilon, jax_seed=jax_seed)
    episode_rewards = []
    for episode_idx in range(n_episodes):
        (state, info) = env.reset()
        transitions = []
        sum_rewards = 0.0
        done = False
        while not done:
            action = agent.predict_action(state=state)
            (next_state, reward, terminated, truncated, info) = env.step(action)
            sum_rewards += reward
            # `done` in the stored transition reflects true termination only,
            # not time-limit truncation.
            transitions.append(Transition(state=state, action=action, next_state=next_state, reward=reward, done=int(terminated)))
            state = next_state
            done = terminated or truncated
        print(f'Episode {episode_idx}: reward = {sum_rewards}')
        episode_rewards.append(sum_rewards)
        # Push the finished episode and run 10 optimization epochs.
        agent.update(transitions=transitions)
        agent.train(n_epochs=10)
    env.close()
    plt.plot(np.arange(len(episode_rewards)), episode_rewards, label='ppo')
    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title(env_name)
    plt.legend()
    plt.savefig(f'ppo_{env_name}.png')
    plt.show()
def get_sequence_check_dna(f):
    """Read sequences from FASTA handle *f*, validating each against the
    DNA ALPHABET; return the list of sequences or raise ValueError on the
    first offending character."""
    sequences = []
    for entry in read_fasta_yield(f):
        check = is_under_alphabet(entry.seq, ALPHABET)
        # is_under_alphabet returns True on success, else the bad character.
        if check is not True:
            raise ValueError(' '.join(['Sorry, sequence', str(entry.no), 'has character', str(check), '(The character must be A, C, G or T)']))
        sequences.append(entry.seq)
    return sequences
def generator_loss(disc_outputs):
    """Least-squares GAN generator loss.

    For each discriminator output ``dg`` computes ``mean((1 - dg)**2)``;
    returns the total loss and the per-discriminator losses.
    """
    gen_losses = [torch.mean((1 - dg) ** 2) for dg in disc_outputs]
    total = sum(gen_losses, 0)
    return (total, gen_losses)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.