code stringlengths 101 5.91M |
|---|
def get_genre_list(fname):
    """Count genre frequencies in an edge-list CSV and persist popular genres.

    Reads ``fname`` (first line treated as a header), where field 3 of each
    comma-separated row carries the genre label wrapped like ``"['rock``.
    Prints how many genres occur more than 10/100/1000/2000 times and writes
    the >1000 group to 'genre_list_1000.csv' (single 'genre' column) in the
    current working directory.

    Fixes vs. the original: the input file is managed with a context manager,
    and the unused per-row fields (user id, timestamp, weight) are no longer
    parsed — the old ``float(vals[4][:-3])`` could crash on malformed rows
    for a value that was never used.
    """
    with open(fname, 'r') as edgelist:
        lines = edgelist.readlines()
    genre_dict = {}
    for line in lines[1:]:  # skip header row
        vals = line.split(',')
        # Strip the quote and "['"-style prefix around the genre label.
        genre = vals[3].strip('"').strip("['")
        genre_dict[genre] = genre_dict.get(genre, 0) + 1
    # Each entry is a 1-element list so it can be fed straight to
    # csv.writerows (one genre per output row).
    genre_list_10 = [[key] for key, freq in genre_dict.items() if freq > 10]
    genre_list_100 = [[key] for key, freq in genre_dict.items() if freq > 100]
    genre_list_1000 = [[key] for key, freq in genre_dict.items() if freq > 1000]
    genre_list_2000 = [[key] for key, freq in genre_dict.items() if freq > 2000]
    print('number of genres with frequency > 10: ' + str(len(genre_list_10)))
    print('number of genres with frequency > 100: ' + str(len(genre_list_100)))
    print('number of genres with frequency > 1000: ' + str(len(genre_list_1000)))
    print('number of genres with frequency > 2000: ' + str(len(genre_list_2000)))
    fields = ['genre']
    with open('genre_list_1000.csv', 'w') as f:
        write = csv.writer(f)
        write.writerow(fields)
        write.writerows(genre_list_1000)
def sdf(o):
    """Signed-distance field of the demo scene at point ``o`` (3-vector).

    The scene is the union of a sphere, a box and a capped cylinder (turned
    into nested shells and clipped by a tilted half-space), combined with two
    axis-aligned wall planes.
    """
    # Sphere of radius 0.36 centred at (0, 0.35, 0).
    sphere = (o - ti.Vector([0.0, 0.35, 0.0])).norm() - 0.36
    # Axis-aligned box: exterior distance + (negative) interior distance.
    box_q = ti.abs(o - ti.Vector([0.8, 0.3, 0])) - ti.Vector([0.3, 0.3, 0.3])
    box = ti.Vector([ti.max(0, box_q[0]), ti.max(0, box_q[1]), ti.max(0, box_q[2])]).norm() + ti.min(box_q.max(), 0)
    # Vertical cylinder (radius 0.3, half-height 0.3) centred at (-0.8, 0.3, 0).
    cyl_p = o - ti.Vector([-0.8, 0.3, 0])
    cyl_d = ti.Vector([ti.Vector([cyl_p[0], cyl_p[2]]).norm() - 0.3, abs(cyl_p[1]) - 0.3])
    cylinder = ti.min(cyl_d.max(), 0.0) + ti.Vector([ti.max(0, cyl_d[0]), ti.max(0, cyl_d[1])]).norm()
    # Union of the primitives, nested-shell'ed, then cut by a slanted plane.
    geometry = make_nested(ti.min(sphere, box, cylinder))
    geometry = ti.max(geometry, -(0.32 - (o[1] * 0.6 + o[2] * 0.8)))
    # Floor (y = -0.1) and back wall (z = -0.4) planes.
    wall = ti.min(o[1] + 0.1, o[2] + 0.4)
    return ti.min(wall, geometry)
class Wood(Resource):
    """Concrete :class:`Resource` whose name is fixed to ``'Wood'``."""

    def __init__(self, *args, **kwargs):
        # Forward everything to Resource, pinning the resource name.
        super().__init__('Wood', *args, **kwargs)
def save(model, filename):
    """Serialize ``model`` to '<filename>.pt' via torch.save and report the path."""
    save_filename = f'{filename}.pt'
    torch.save(model, save_filename)
    print(f'Saved as {save_filename}')
class JsonInputReader(BaseInputReader):
    """Input reader that parses span/relation datasets from JSON files.

    Each JSON document object is expected to provide 'tokens', 'entities',
    'relations', 'polarities' plus dependency ('dep', 'dep_label',
    'dep_label_indices') and POS ('pos', 'pos_indices') annotations.
    """

    def __init__(self, types_path: str, tokenizer: BertTokenizer, neg_term_count: int=None, neg_rel_count: int=None, max_span_size: int=None, logger: Logger=None):
        super().__init__(types_path, tokenizer, neg_term_count, neg_rel_count, max_span_size, logger)

    def read(self, dataset_paths):
        """Parse every {label: path} entry into a Dataset and cache it by label.

        Also records the maximum context (sub-token) size over all datasets.
        """
        for (dataset_label, dataset_path) in dataset_paths.items():
            dataset = Dataset(dataset_label, self._relation_types, self._term_types, self._neg_term_count, self._neg_rel_count, self._max_span_size)
            self._parse_dataset(dataset_path, dataset)
            self._datasets[dataset_label] = dataset
        self._context_size = self._calc_context_size(self._datasets.values())

    def _parse_dataset(self, dataset_path, dataset):
        # One JSON file holds a list of document objects.
        documents = json.load(open(dataset_path))
        for document in tqdm(documents, desc=("Parse dataset '%s'" % dataset.label)):
            self._parse_document(document, dataset)

    def _parse_document(self, doc, dataset) -> Document:
        """Convert one raw JSON document into a Document registered on `dataset`."""
        jtokens = doc['tokens']
        jrelations = doc['relations']
        jterms = doc['entities']
        jpols = doc['polarities']
        jdep_label = doc['dep_label']
        jdep_label_indices = doc['dep_label_indices']
        jdep = doc['dep']
        jpos = doc['pos']
        jpos_indices = doc['pos_indices']
        # Tokens must be parsed first: terms and relations index into them.
        (doc_tokens, doc_encoding) = self._parse_tokens(jtokens, dataset)
        terms = self._parse_terms(jterms, doc_tokens, dataset)
        relations = self._parse_relations(jrelations, terms, dataset)
        document = dataset.create_document(doc_tokens, terms, relations, jpols, doc_encoding, jdep_label, jdep_label_indices, jdep, jpos, jpos_indices)
        return document

    def _parse_tokens(self, jtokens, dataset):
        """Sub-tokenize with BERT; returns (Token list, [CLS] + ids + [SEP])."""
        doc_tokens = []
        # Full-document sub-token encoding, started with the [CLS] id.
        doc_encoding = [self._tokenizer.convert_tokens_to_ids('[CLS]')]
        for (i, token_phrase) in enumerate(jtokens):
            token_encoding = self._tokenizer.encode(token_phrase, add_special_tokens=False)
            # Token spans are expressed in sub-token positions of doc_encoding.
            (span_start, span_end) = (len(doc_encoding), (len(doc_encoding) + len(token_encoding)))
            token = dataset.create_token(i, span_start, span_end, token_phrase)
            doc_tokens.append(token)
            doc_encoding += token_encoding
        doc_encoding += [self._tokenizer.convert_tokens_to_ids('[SEP]')]
        return (doc_tokens, doc_encoding)

    def _parse_terms(self, jterms, doc_tokens, dataset) -> List[Term]:
        """Materialize term/entity mentions; 'start'/'end' are inclusive token indices."""
        terms = []
        for (term_idx, jterm) in enumerate(jterms):
            term_type = self._term_types[jterm['type']]
            (start, end) = (jterm['start'], jterm['end'])
            # 'end' is inclusive, hence the +1 slice bound.
            tokens = doc_tokens[start:(end + 1)]
            phrase = ' '.join([t.phrase for t in tokens])
            term = dataset.create_term(term_type, tokens, phrase)
            terms.append(term)
        return terms

    def _parse_relations(self, jrelations, terms, dataset) -> List[Relation]:
        """Materialize relations between previously parsed terms."""
        relations = []
        for jrelation in jrelations:
            relation_type = self._relation_types[jrelation['type']]
            head_idx = jrelation['head']
            tail_idx = jrelation['tail']
            head = terms[head_idx]
            tail = terms[tail_idx]
            # A relation is 'reverse' when the tail mention precedes the head.
            reverse = (int(tail.tokens[0].index) < int(head.tokens[0].index))
            # For symmetric relation types, normalize so the head comes first.
            if (relation_type.symmetric and reverse):
                (head, tail) = util.swap(head, tail)
            relation = dataset.create_relation(relation_type, head_term=head, tail_term=tail, reverse=reverse)
            relations.append(relation)
        return relations
class PCSingle(BaseTabularAlgo):
    """PC-style greedy parent discovery for a single target variable.

    Iteratively removes candidate parents whose conditional independence with
    the target (given condition sets of growing size) is not significant,
    optionally parallelizing the CI tests with ray.
    """

    def __init__(self, data: TabularData, prior_knowledge: Optional[PriorKnowledge]=None, CI_test: Union[(PartialCorrelation, KCI, DiscreteCI_tests)]=PartialCorrelation(), use_multiprocessing: Optional[bool]=False):
        # NOTE(review): the CI_test default is a shared mutable instance
        # (evaluated once at def time) — presumably PartialCorrelation is
        # stateless across run_test calls; confirm.
        BaseTabularAlgo.__init__(self, data=data, prior_knowledge=prior_knowledge, CI_test=CI_test, use_multiprocessing=use_multiprocessing)
        self.CI_test_ = (lambda x, y, z: CI_test.run_test(x, y, z))
        if use_multiprocessing:
            if ('ray' in globals()):
                # Wrap the CI test as a ray remote so tests run in parallel.
                self.CI_test_ = ray.remote(self.CI_test_)
            else:
                print('use_multiprocessing was specified as True but cannot be used because the ray library is not installed. Install using pip install ray.')

    def run_greedy(self, target_var: Union[(int, str)], pvalue_thres: float=0.05, max_condition_set_size: Optional[int]=None) -> List[Tuple]:
        """Greedily prune non-significant parents of ``target_var``.

        Returns (surviving parents, value dict, p-value dict, separation-set dict).
        """
        candidate_parents = self.get_candidate_parents(target_var)
        parents = deepcopy(candidate_parents)
        all_parents = self.get_all_parents(target_var)
        separation_set_dict = {p: [] for p in all_parents}
        value_dict = {p: None for p in all_parents}
        pvalue_dict = {p: None for p in all_parents}
        # The largest usable condition set is every other parent: |parents|-1.
        depth = ((len(all_parents) - 1) if (max_condition_set_size is None) else min(max_condition_set_size, (len(all_parents) - 1)))
        X = target_var
        greedy_subroutine = _greedy_subroutine
        if ((self.use_multiprocessing == True) and ('ray' in globals())):
            greedy_subroutine = ray.remote(greedy_subroutine)
        for condition_set_size in range((depth + 1)):
            nonsignificant_parents = []
            val_pval_Z_ray = []
            # Stop once fewer parents remain than the condition set requires.
            if ((len(all_parents) - 1) < condition_set_size):
                break
            for (index_parent, parent) in enumerate(parents):
                Y = parent
                # Condition on every other remaining parent.
                Z_all = [p for p in all_parents if (p != parent)]
                if ((self.use_multiprocessing == True) and ('ray' in globals())):
                    val_pval_Z = greedy_subroutine.remote(self.data, self.CI_test, X, Y, Z_all, condition_set_size, pvalue_thres)
                else:
                    val_pval_Z = greedy_subroutine(self.data, self.CI_test, X, Y, Z_all, condition_set_size, pvalue_thres)
                val_pval_Z_ray.append(val_pval_Z)
            if ((self.use_multiprocessing == True) and ('ray' in globals())):
                # Resolve the ray futures before unpacking.
                val_pval_Z_ray = [ray.get(val_pval_Z) for val_pval_Z in val_pval_Z_ray]
            val_ray = [v[0] for v in val_pval_Z_ray]
            pval_ray = [v[1] for v in val_pval_Z_ray]
            separation_sets = [v[2] for v in val_pval_Z_ray]
            for (index_parent, parent) in enumerate(parents):
                separation_set_dict[parent] = separation_sets[index_parent]
                if (pval_ray[index_parent] > pvalue_thres):
                    nonsignificant_parents.append(parent)
                # Keep the *largest* p-value (most conservative) seen so far.
                if ((pvalue_dict[parent] is None) or (pvalue_dict[parent] < pval_ray[index_parent])):
                    pvalue_dict[parent] = pval_ray[index_parent]
                    value_dict[parent] = val_ray[index_parent]
            # Pruned parents no longer participate in future condition sets.
            for parent in nonsignificant_parents:
                parents.remove(parent)
                all_parents.remove(parent)
        return (parents, value_dict, pvalue_dict, separation_set_dict)

    def run(self, target_var: Union[(int, str)], pvalue_thres: float=0.05, max_condition_set_size: Optional[int]=4, full_cd: bool=False) -> ResultInfoTabularSingle:
        """Validate inputs, run the greedy search, and package the result dict."""
        assert (target_var in self.data.var_names), f'{target_var} not found in the variable names specified for the data!'
        assert ((max_condition_set_size is None) or ((type(max_condition_set_size) == int) and (max_condition_set_size >= 0))), f'max_condition_set_size must be a non-negative integer, but {max_condition_set_size} was given.'
        self.target_var = target_var
        self.start(full_cd)
        (parents, value_dict, pvalue_dict, separation_set_dict) = self.run_greedy(target_var, pvalue_thres, max_condition_set_size)
        # Drop stats for parents that were pruned during the search.
        keys = list(pvalue_dict.keys())
        for p in keys:
            if (p not in parents):
                del pvalue_dict[p]
                del value_dict[p]
        self.pvalue_dict = pvalue_dict
        self.value_dict = value_dict
        self.undirected_edges = self.get_parents(pvalue_thres)
        self.stop(full_cd)
        self.separation_set_dict = separation_set_dict
        # NOTE(review): 'parents' is intentionally left empty here and the kept
        # edges reported as 'undirected_edges' — confirm downstream consumers.
        self.result = {'parents': [], 'value_dict': self.value_dict, 'pvalue_dict': self.pvalue_dict, 'undirected_edges': [p for p in self.undirected_edges]}
        return self.result
def gaussian_cnn_baseline_tf_ppo_benchmarks():
    """Run the PPO benchmark suite with the Gaussian-CNN baseline on the pixel envs."""
    iterate_experiments(gaussian_cnn_baseline, PIXEL_ENV_SET, seeds=_seeds)
def normal_precursor_regions(path_data, keys_options=['all'], causal=False):
    """Select, per option and per train/test split, the precursor column keys.

    Loads the pre-processed dataframes from ``path_data`` and, for every entry
    of ``keys_options``, filters the available columns according to a named
    selection recipe (e.g. 'all', 'CPPA', 'PEP+sm', ...).

    NOTE(review): ``keys_options=['all']`` is a mutable default argument, and
    the bare ``except: pass`` below leaves ``df_sum`` undefined when the store
    has no 'df_sum' — the causal branch would then raise NameError.  Kept
    as-is to preserve behavior.

    Returns {option: {split: np.ndarray of unique keys}}.
    """
    dict_of_dfs = functions_pp.load_hdf5(path_data)
    df_data = dict_of_dfs['df_data']
    # First index level enumerates the train/test splits.
    splits = df_data.index.levels[0]
    try:
        df_sum = dict_of_dfs['df_sum']
    except:
        pass
    # Keys excluded from most recipes.
    skip = ['all_spatcov']
    keys_d = {}
    for option in keys_options:
        keys_d_ = {}
        for s in splits:
            if ((causal == True) or ('causal' in option)):
                # Only columns flagged causal in the summary dataframe.
                all_keys = df_sum[df_sum['causal']].loc[s].index
            elif ((causal == False) and ('causal' not in option)):
                df_s = df_data.loc[s]
                # Drop the target (first) column and keep float columns only.
                all_keys = df_s.columns.delete(0)
                mask_f = np.logical_or((df_s.dtypes == 'float64'), (df_s.dtypes == 'float32'))
                all_keys = all_keys[mask_f[1:].values]
                # Drop columns carrying the 'caus' suffix.
                all_keys = [k for k in all_keys if (k[(- 4):] != 'caus')]
            # Recipe dispatch: each option filters all_keys by substring rules.
            if (option == 'all'):
                keys_ = [k for k in all_keys if (k not in skip)]
            elif ('only_db_regs' in option):
                keys_ = [k for k in all_keys if ('spatcov' not in k)]
                keys_ = [k for k in keys_ if (k not in skip)]
            elif (option == 'sp_and_regs'):
                keys_ = [k for k in all_keys if (k not in skip)]
            elif (option == 'CPPA'):
                skip_ex = ['0..103..PEPsv', 'sm123_spatcov', 'all_spatcov']
                keys_ = [k for k in all_keys if ('v200hpa' not in k)]
                keys_ = [k for k in keys_ if ('sm' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('PEPsv' not in k)]
                keys_ = [k for k in keys_ if ('OLR' not in k)]
                keys_ = [k for k in keys_ if (k not in skip_ex)]
            elif (option == 'sst combined'):
                keys_ = [k for k in all_keys if ('sm' not in k)]
            elif (option == 'sst combined+sm'):
                keys_ = all_keys
            elif ((option == 'sst(CPPA Pattern)') or (option == 'CPPA Pattern')):
                keys_ = [k for k in all_keys if ('CPPAsv' in k)]
            elif (option == 'sst+sm+z500'):
                # Union of the three variable families, flattened to one list.
                keys_ = []
                keys_.append([k for k in all_keys if ('..sst' in k)])
                keys_.append([k for k in all_keys if ('..sm' in k)])
                keys_.append([k for k in all_keys if ('..z500' in k)])
                keys_ = flatten(keys_)
            elif (option == 'CPPA+sm'):
                keys_ = [k for k in all_keys if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PEP' not in k)]
                keys_ = [k for k in keys_ if ('OLR' not in k)]
                keys_ = [k for k in keys_ if (k not in skip)]
            elif (option == 'CPPA+PEP+sm'):
                keys_ = [k for k in all_keys if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
            elif (option == 'CPPApr+PEP+sm'):
                keys_ = [k for k in all_keys if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('CPPAsv' not in k)]
            elif (option == 'CPPA+sm+OLR'):
                keys_ = [k for k in all_keys if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PEP' not in k)]
                keys_ = [k for k in keys_ if (k not in skip)]
            elif (option == 'CPPAregs+sm'):
                keys_ = [k for k in all_keys if ('v200hpa' not in k)]
                keys_ = [k for k in keys_ if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PEP' not in k)]
                keys_ = [k for k in keys_ if ('CPPAsv' not in k)]
            elif (option == 'CPPApattern+sm'):
                skip_ex = ['0..100..ENSO34', '0..101..PDO']
                keys_ = [k for k in all_keys if ('v200hpa' not in k)]
                keys_ = [k for k in keys_ if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PEP' not in k)]
                keys_ = [k for k in keys_ if ('OLR' not in k)]
                keys_ = [k for k in keys_ if (('spatcov' in k) or ('sm' in k))]
            elif (option == 'sm'):
                keys_ = [k for k in all_keys if ('sm' in k)]
                keys_ = [k for k in keys_ if ('spatcov' not in k)]
            elif (option == 'sst(PEP)+sm'):
                keys_ = [k for k in all_keys if (('sm' in k) or ('PEP' in k))]
                keys_ = [k for k in keys_ if (k != 'sm123_spatcov')]
            elif (option == 'PEP'):
                keys_ = [k for k in all_keys if ('PEP' in k)]
            elif (option == 'PEP+sm'):
                keys_ = [k for k in all_keys if ('PEP' in k)]
                keys_ = [k for k in keys_ if ('sm' in k)]
            elif (option == 'sst(PDO,ENSO)+sm'):
                keys_ = [k for k in all_keys if (('sm' in k) or ('PDO' in k) or ('ENSO' in k))]
                keys_ = [k for k in keys_ if ('spatcov' not in k)]
            elif (option == 'PDO+ENSO'):
                keys_ = [k for k in all_keys if (('PDO' in k) or ('ENSO' in k))]
                keys_ = [k for k in keys_ if ('spatcov' not in k)]
            elif (option == 'sst(CPPA) expert knowledge'):
                keys_ = [k for k in all_keys if ('sm' not in k)]
                keys_ = [k for k in keys_ if ('PDO' not in k)]
                keys_ = [k for k in keys_ if ('ENSO' not in k)]
                keys_ = [k for k in keys_ if ('PEP' not in k)]
                # Hand-picked region ids considered informative by the authors.
                expert = ['CPPAsv', '..9..sst', '..2..sst', '..6..sst', '..1..sst', '..7..sst']
                keys_ = [k for k in keys_ for e in expert if (e in k)]
            # A single-space option explicitly selects nothing.
            if (option == ' '):
                keys_ = []
            keys_d_[s] = np.unique(keys_)
        keys_d[option] = keys_d_
    return keys_d
def array_function_dispatch(dispatcher, module=None, verify=True, docs_from_dispatcher=False):
    """Decorator factory implementing NumPy's ``__array_function__`` dispatch.

    Parameters: ``dispatcher`` yields the relevant array arguments; ``module``
    optionally overrides ``__module__`` on the result; ``verify`` checks the
    implementation and dispatcher signatures match; ``docs_from_dispatcher``
    copies the dispatcher's docstring onto the implementation.
    """
    if (not ARRAY_FUNCTION_ENABLED):
        # Dispatch disabled: return the implementation unchanged (optionally
        # copying docs/module metadata).
        def decorator(implementation):
            if docs_from_dispatcher:
                add_docstring(implementation, dispatcher.__doc__)
            if (module is not None):
                implementation.__module__ = module
            return implementation
        return decorator

    def decorator(implementation):
        if verify:
            verify_matching_signatures(implementation, dispatcher)
        if docs_from_dispatcher:
            add_docstring(implementation, dispatcher.__doc__)
        # Compile a thin dispatching wrapper named after the implementation.
        # BUG FIX: the template had lost its '@functools.wraps' prefix,
        # leaving a bare '(implementation)' expression statement, so the
        # generated wrapper carried none of the implementation's metadata.
        source = textwrap.dedent("""
            @functools.wraps(implementation)
            def {name}(*args, **kwargs):
                relevant_args = dispatcher(*args, **kwargs)
                return implement_array_function(
                    implementation, {name}, relevant_args, args, kwargs)
            """).format(name=implementation.__name__)
        source_object = compile(source, filename='<__array_function__ internals>', mode='exec')
        scope = {'implementation': implementation, 'dispatcher': dispatcher, 'functools': functools, 'implement_array_function': implement_array_function}
        exec(source_object, scope)
        public_api = scope[implementation.__name__]
        if (module is not None):
            public_api.__module__ = module
        # Keep a handle on the undecorated implementation for introspection.
        public_api._implementation = implementation
        return public_api
    return decorator
class ONNXConfigNode(TreeConfigNode):
    """CI config-tree node that toggles the ONNX build flag."""

    def modify_label(self, label):
        # Shown in the build matrix as e.g. 'Onnx=true'.
        return ('Onnx=' + str(label))

    def init2(self, node_name):
        self.props['is_onnx'] = node_name

    def child_constructor(self):
        return ImportantConfigNode
class GSM8K():
    """GSM8K math-word-problem dataset wrapper producing dspy splits.

    Downloads the HuggingFace 'gsm8k/main' dataset, splits every raw answer
    into its chain-of-thought and final numeric answer, then exposes
    deterministic train (200), dev (300) and test splits as dspy Examples.
    """

    def __init__(self) -> None:
        super().__init__()
        self.do_shuffle = False
        dataset = load_dataset('gsm8k', 'main')
        # The train/test parsing was previously duplicated verbatim; both
        # splits now go through one helper.
        official_train = self._parse_split(dataset['train'])
        official_test = self._parse_split(dataset['test'])
        # Deterministic shuffles: a fresh seed-0 RNG per split, as before.
        rng = random.Random(0)
        rng.shuffle(official_train)
        rng = random.Random(0)
        rng.shuffle(official_test)
        trainset = official_train[:200]
        devset = official_train[200:500]
        testset = official_test[:]
        import dspy
        self.train = [dspy.Example(**x).with_inputs('question') for x in trainset]
        self.dev = [dspy.Example(**x).with_inputs('question') for x in devset]
        self.test = [dspy.Example(**x).with_inputs('question') for x in testset]

    @staticmethod
    def _parse_split(hf_split):
        """Split each raw example's answer into gold reasoning + final answer.

        GSM8K answers end with '#### <number>'; everything before that marker
        is the reasoning, the number (commas stripped) is the answer.
        """
        examples = []
        for example in tqdm.tqdm(hf_split):
            question = example['question']
            answer = example['answer'].strip().split()
            assert (answer[(- 2)] == '####')
            gold_reasoning = ' '.join(answer[:(- 2)])
            final_answer = str(int(answer[(- 1)].replace(',', '')))
            examples.append(dict(question=question, gold_reasoning=gold_reasoning, answer=final_answer))
        return examples
class IdentityOperation(BaseTransformer):
    """Pass-through pipeline step: outputs exactly what it receives."""

    def transform(self, **kwargs):
        """Identity transform: return the keyword arguments unchanged."""
        return kwargs

    def persist(self, filepath):
        """No-op: an identity step carries no state worth saving."""
        logger.info('"IdentityOperation" is not persistable.')
class PredictDiffHead(nn.Module):
    """Prediction head fusing two feature maps through one conv block.

    NOTE(review): the ``cln`` and ``dr_rate_a`` parameters are currently
    unused — the head is a single Conv2dbnPR over the channel-concatenated
    inputs; confirm whether classification/dropout layers were intended.
    """

    def __init__(self, config, cln=21, in_channel=256, dr_rate_a=0.5, in_channel2=128):
        super(PredictDiffHead, self).__init__()
        self.config = config
        # Output channel count of the fusion conv.
        chn = 256
        # 3x3 conv (with BN + activation, per Conv2dbnPR) over the concat of
        # both input feature maps.
        self.conv1ab = Conv2dbnPR((in_channel2 + in_channel), chn, kernel_size=3, stride=1, padding=1)

    def forward(self, inputs):
        # inputs: pair of feature maps with matching spatial dims.
        (xa_in, xb_in) = inputs
        xab = torch.cat((xa_in, xb_in), dim=1)
        xab = self.conv1ab(xab)
        return xab
def simGetScriptAssociatedWithObject(objectHandle):
    """Thin wrapper over the CoppeliaSim C library call of the same name.

    Returns whatever the native call returns for ``objectHandle`` —
    presumably a script handle, or an error/none sentinel; confirm against
    the remote-API documentation.
    """
    return lib.simGetScriptAssociatedWithObject(objectHandle)
def test_dual(capsys):
    """captured_dual must write 'a' to stdout and 'b' to stderr."""
    m.captured_dual('a', 'b')
    # pytest's capsys fixture yields everything written to both streams.
    (stdout, stderr) = capsys.readouterr()
    assert (stdout == 'a')
    assert (stderr == 'b')
def get_learning_rate(optimizer):
    """Return the current learning rate of every param group, in order."""
    return [group['lr'] for group in optimizer.param_groups]
def test_record():
    """Record field assignment must raise; underscore attributes stay settable."""
    record = ak.contents.RecordArray([ak.contents.NumpyArray(np.arange(10))], ['x'])
    array = ak.Array(record)
    record = array[0]
    # Assigning to an existing field or a new public attribute is forbidden.
    with pytest.raises(AttributeError):
        record.x = 10
    with pytest.raises(AttributeError):
        record.not_an_existing_attribute = 10
    # Underscore-prefixed attributes bypass the restriction.
    record._not_an_existing_attribute = 10
    assert (record._not_an_existing_attribute == 10)
def with_native_function(func: Callable[([NativeFunction], T)]) -> Callable[([NativeFunction], T)]:
    """Wrap a NativeFunction-processing callable with error context and local settings.

    The wrapper annotates any exception with the function's source location
    and runs the callable under the function's c10-dispatcher setting.

    BUG FIX: the decorator line had been mangled to a bare '(func)'
    expression; restored '@functools.wraps(func)' so the wrapper keeps the
    wrapped callable's name and docstring.
    """
    import functools

    @functools.wraps(func)
    def wrapper(f: NativeFunction) -> T:
        with context(f'''in {f.loc}:
{f.func}'''):
            with local.parametrize(use_c10_dispatcher=f.use_c10_dispatcher):
                return func(f)
    return wrapper
def compute_fid_trans(opts, max_real, num_gen):
    """Compute FID (and LPIPS) for every source->target domain translation task.

    For each target domain, real-image feature statistics are computed once
    and compared against features of images translated from every other
    (source) domain.  Returns an OrderedDict with 'fid_<src>2<trg>' and
    'LPIPS_<src>2<trg>' entries, or NaN on non-zero distributed ranks.
    """
    # BUG FIX: the URL literal was truncated to "detector_url = '" in this
    # copy, breaking the file.  Restored to the standard Inception feature
    # detector used by StyleGAN2-ADA metrics — confirm against the original
    # repository before shipping.
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True)
    # Domain names are the non-JSON entries of the dataset directory.
    domains = os.listdir(opts.dataset_kwargs.path)
    domains = [domain for domain in domains if (not domain.endswith('.json'))]
    domains.sort()
    src_idxs = {k: v for (v, k) in enumerate(domains)}
    num_domains = len(domains)
    print(('Number of domains: %d' % num_domains))
    fid_dict = OrderedDict()
    for (trg_idx, trg_domain) in enumerate(domains):
        print(f'target class: {trg_idx}, target domain: {trg_domain}')
        opts_domain = copy.deepcopy(opts)
        # NOTE(review): string concatenation without a path separator —
        # presumably opts.dataset_kwargs.path ends with '/'; confirm.
        opts_domain.dataset_kwargs.path += trg_domain
        # Real-image statistics for the target domain (computed once).
        (mu_real, sigma_real) = metric_utils.compute_feature_stats_for_dataset(opts=opts_domain, detector_url=detector_url, detector_kwargs=detector_kwargs, rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()
        src_domains = [x for x in domains if (x != trg_domain)]
        for src_domain in src_domains:
            src_idx = src_idxs[src_domain]
            task = ('%s2%s' % (src_domain, trg_domain))
            print(('Generating and translating images and calculating FID and LPIPS for %s...' % task))
            (stats, lpips_mean) = metric_utils.compute_feature_stats_for_transgenerator(opts=opts_domain, detector_url=detector_url, detector_kwargs=detector_kwargs, rel_lo=0, rel_hi=1, label_dim=num_domains, src_idx=src_idx, trg_idx=trg_idx, task=task, capture_mean_cov=True, max_items=num_gen)
            (mu_gen, sigma_gen) = stats.get_mean_cov()
            # Only rank 0 reports metrics; other ranks bail out with NaN.
            if (opts.rank != 0):
                return float('nan')
            # Frechet distance between the two feature Gaussians.
            m = np.square((mu_gen - mu_real)).sum()
            (s, _) = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False)
            fid = np.real((m + np.trace(((sigma_gen + sigma_real) - (s * 2)))))
            fid_dict[('fid_%s' % task)] = fid
            fid_dict[('LPIPS_%s' % task)] = lpips_mean
    return fid_dict
def process_book(bert_tok_dir, pred_scores_dir, BertNSP, device, cls, sep, book_id):
    """Score adjacent segment pairs of one book with BERT next-sentence prediction.

    Loads pre-tokenized segments from '<bert_tok_dir>/<book_id>.pkl' (a dict of
    segment index -> token ids), scores every consecutive pair (idx, idx+1)
    with up to 254 tokens of surrounding context per side, and pickles the
    resulting {idx: probability} dict to '<pred_scores_dir>/<book_id>.pkl'.
    """
    with open(os.path.join(bert_tok_dir, (book_id + '.pkl')), 'rb') as f:
        d = pickle.load(f)
    m = max(d.keys())
    scores = dict()
    for idx in range(0, (m - 1)):
        toks1 = d[idx]
        toks2 = d[(idx + 1)]
        # Grow the left side backwards to ~254 tokens, then keep its tail.
        i = (idx - 1)
        while ((len(toks1) < 254) and (i >= 0)):
            toks1 = (d[i] + toks1)
            i -= 1
        toks1 = toks1[(- 254):]
        # Grow the right side forwards to ~254 tokens, then keep its head.
        i = (idx + 2)
        while ((len(toks2) < 254) and (i <= m)):
            toks2 = (toks2 + d[i])
            i += 1
        toks2 = toks2[:254]
        # Standard BERT pair input: [CLS] A [SEP] B [SEP], with segment ids
        # 0 for the A part and 1 for the B part.
        ids1 = (([cls] + toks1) + [sep])
        ids2 = (toks2 + [sep])
        indexed_tokens = (ids1 + ids2)
        segments_ids = (([0] * len(ids1)) + ([1] * len(ids2)))
        indexed_tokens = pad_sequences([indexed_tokens], maxlen=512, dtype='long', value=0, truncating='pre', padding='post')
        segments_ids = pad_sequences([segments_ids], maxlen=512, dtype='long', value=1, truncating='pre', padding='post')
        # Attend only to non-padding (non-zero) token positions.
        attention_masks = [[int((token_id > 0)) for token_id in sent] for sent in indexed_tokens]
        tokens_tensor = torch.tensor(indexed_tokens)
        segments_tensors = torch.tensor(segments_ids)
        attention_tensor = torch.tensor(attention_masks)
        tokens_tensor = tokens_tensor.to(device)
        segments_tensors = segments_tensors.to(device)
        attention_tensor = attention_tensor.to(device)
        BertNSP.eval()
        prediction = BertNSP(tokens_tensor, token_type_ids=segments_tensors, attention_mask=attention_tensor)
        prediction = prediction[0]
        softmax = torch.nn.Softmax(dim=1)
        prediction_sm = softmax(prediction)
        # Probability of NSP class 1 (in BERT's convention presumably
        # "not a continuation" — confirm against the model's label order).
        scores[idx] = prediction_sm[0][1].item()
    with open(os.path.join(pred_scores_dir, (book_id + '.pkl')), 'wb') as f:
        pickle.dump(scores, f)
    return
class SetAbstraction(nn.Module):
    """PointNet++-style set-abstraction layer in flat (index-batched) format.

    Subsamples points (stride > 1), groups neighbours around the kept points,
    applies a shared MLP to [relative position, neighbour features] and
    scatter-max-pools per group.  With ``is_head=True`` it only applies the
    MLP to the features.

    NOTE(review): ``group_args``/``norm_args``/``act_args`` are mutable
    default arguments, and ``group_args`` is later accessed by attribute
    (``group_args.nsample``) — callers presumably pass an EasyDict-style
    object rather than the plain-dict default; confirm.
    """

    def __init__(self, in_channels, out_channels, layers=2, stride=1, group_args={'NAME': 'ballquery', 'radius': 0.1, 'nsample': 16}, norm_args={'norm': 'bn1d'}, act_args={'act': 'relu'}, conv_args=None, sampler='fps', use_res=True, is_head=False):
        super().__init__()
        self.stride = stride
        self.is_head = is_head
        # stride == 1 on a non-head layer means "aggregate all points".
        self.all_aggr = ((not is_head) and (stride == 1))
        # A residual connection only applies when subsampling a non-head layer.
        self.use_res = (use_res and (not self.all_aggr) and (not self.is_head))
        mid_channel = ((out_channels // 2) if (stride > 1) else out_channels)
        channels = (([in_channels] + ([mid_channel] * (layers - 1))) + [out_channels])
        # Non-head layers also receive the 3-D relative position (dp) as input.
        channels[0] = (in_channels + (3 * (not is_head)))
        if self.use_res:
            # Project the skip path only when channel counts differ.
            self.skipconv = (create_linearblock(in_channels, channels[(- 1)], norm_args=None, act_args=None) if (in_channels != channels[(- 1)]) else nn.Identity())
        create_conv = create_linearblock
        convs = []
        for i in range((len(channels) - 1)):
            # Last layer of a residual block gets no activation — it is
            # applied after the residual add in forward().
            convs.append(create_conv(channels[i], channels[(i + 1)], norm_args=norm_args, act_args=(None if ((i == (len(channels) - 2)) and self.use_res) else act_args), **conv_args))
        self.convs = nn.Sequential(*convs)
        self.act = create_act(act_args)
        if (not is_head):
            if self.all_aggr:
                # Aggregate over all points: disable radius/nsample limits.
                group_args.nsample = None
                group_args.radius = None
            self.grouper = create_grouper(group_args, support_same_as_query=False)
            # Scatter-max pooling of neighbour features onto group centres.
            self.pool = (lambda x, index: scatter(x, index, dim=0, reduce='max'))
            if (sampler.lower() == 'fps'):
                self.sample_fn = fps
            elif (sampler.lower() == 'random'):
                self.sample_fn = random_sample

    def forward(self, pxb):
        # pxb: (positions, features, batch indices) in flattened format.
        (p, x, b) = pxb
        if self.is_head:
            x = self.convs(x)
        else:
            if (not self.all_aggr):
                # Keep 1/stride of the points via the configured sampler.
                idx = self.sample_fn(p, b, ratio=(1.0 / self.stride))
                new_p = p[idx]
                new_b = b[idx]
            else:
                new_p = p
                new_b = b
            if self.use_res:
                # use_res implies not all_aggr, so idx is always defined here.
                identity = x[idx]
                identity = self.skipconv(identity)
            edge_index = self.grouper(p, new_p, b, new_b)
            # Relative neighbour positions w.r.t. their group centre.
            dp = (torch.index_select(p, 0, edge_index[1]) - torch.index_select(p, 0, edge_index[0]))
            xj = torch.index_select(x, 0, edge_index[1])
            x = self.pool(self.convs(torch.cat((dp, xj), dim=1)), edge_index[0])
            if self.use_res:
                x = self.act((x + identity))
            p = new_p
            b = new_b
        return (p, x, b)
def check_build_status(conf):
    """Classify the outcome of an FPGA build for configuration ``conf``.

    Reads the build's vivado.log, matches known failure signatures, and
    otherwise checks whether a .xclbin bitstream was produced.

    Returns one of: 'no_build', 'failed_feasibility', 'failed_placement',
    'failed_routing', 'crashed', 'failed_hold', 'failed_timing',
    'failed_report', 'success', 'failed_unknown'.

    Fixes vs. the original: the log file handle is closed (was leaked), the
    bare ``except`` now only catches OS-level read errors, and the nine
    copy-pasted regex checks are table-driven.
    """
    buildFolder = os.path.join(PROJECT_CONFIG['build_dir'], conf.build_folder())
    kernelFolder = os.path.join(buildFolder, '_x', 'link', 'vivado')
    logPath = os.path.join(kernelFolder, 'vivado.log')
    try:
        with open(logPath, 'r') as logFile:
            log = logFile.read()
    except OSError:
        # Missing or unreadable log means the build never ran here.
        print('No build found for {}.'.format(conf))
        return 'no_build'
    # Known failure signatures; first match wins (order preserved from the
    # original if-chain).
    failure_patterns = [
        ('Implementation Feasibility check failed', 'failed_feasibility'),
        ('Detail Placement failed', 'failed_placement'),
        ('Placer could not place all instances', 'failed_placement'),
        ('Routing results verification failed due to partially-conflicted nets', 'failed_routing'),
        ('route_design ERROR', 'failed_routing'),
        ('Internal Data Exception', 'crashed'),
        ('Design failed to meet timing - hold violation.', 'failed_hold'),
        ('auto frequency scaling failed', 'failed_timing'),
        ('Unable to write message .+ as it exceeds maximum size', 'failed_report'),
    ]
    for pattern, status in failure_patterns:
        if re.search(pattern, log):
            return status
    # No known failure: a produced bitstream ('<name>.xclbin') means success.
    for fileName in os.listdir(buildFolder):
        if ((len(fileName) >= 7) and fileName.endswith('.xclbin')):
            return 'success'
    print('Unfinished build or unknown error for {} [{}].'.format(conf, logPath))
    return 'failed_unknown'
class DeepGraphCNN(GCNSupervisedGraphClassification):
    """GCN graph classifier preset matching the DGCNN architecture.

    Identical to the parent except that pooling is fixed to SortPooling over
    the top-``k`` nodes (flattened output) and every GCN layer's output is
    pooled (``pool_all_layers=True``).
    """

    def __init__(self, layer_sizes, activations, k, generator, bias=True, dropout=0.0, kernel_initializer=None, kernel_regularizer=None, kernel_constraint=None, bias_initializer=None, bias_regularizer=None, bias_constraint=None):
        # Pure forwarding constructor: only pooling/pool_all_layers are pinned.
        super().__init__(layer_sizes=layer_sizes, activations=activations, generator=generator, bias=bias, dropout=dropout, pooling=SortPooling(k=k, flatten_output=True), pool_all_layers=True, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, kernel_constraint=kernel_constraint, bias_initializer=bias_initializer, bias_regularizer=bias_regularizer, bias_constraint=bias_constraint)
def download_webfile(url, filename, overwrite=False):
    """Download ``url`` to ``filename``.

    URLs containing a dot are fetched over HTTP (streamed to disk); anything
    else is treated as a video id and handed to youtube-dl.  Existing files
    are kept unless ``overwrite`` is true.

    Raises OSError when youtube-dl cannot be launched.
    """
    if (os.path.exists(filename) and (not overwrite)):
        return
    if ('.' in url):
        # Direct link: stream the response to disk in small chunks.
        r = requests.get(url, stream=True)
        with open(filename, 'wb') as fd:
            for chunk in r.iter_content(chunk_size=128):
                fd.write(chunk)
    else:
        try:
            subprocess_call(['youtube-dl', url, '-o', filename])
        except OSError as e:
            # BUG FIX: Python 3 exceptions have no `.message` attribute, so
            # this error path itself raised AttributeError; use str(e) and
            # chain the original cause.
            raise OSError((str(e) + '\n A possible reason is that youtube-dl is not installed on your computer. Install it with "pip install youtube_dl"')) from e
class Generator(BaseGenerator):
    """Concrete generator that builds its graph at construction time."""

    def __init__(self, config, mode, X=None):
        super(Generator, self).__init__(config, mode)
        # Build immediately, optionally conditioning on provided data X.
        self.build_generator(X=X)

    def generate_random_X(self, shape):
        """Return uniform-[0, 1) noise of the given shape."""
        return np.random.rand(*shape)
def pair_cascade_protocols(sender: 'Cascade', receiver: 'Cascade') -> None:
    """Cross-link two Cascade protocol instances and assign their roles.

    After the call each side's ``another`` points at its peer; the sender
    takes role 0 and the receiver role 1.
    """
    sender.another, receiver.another = receiver, sender
    sender.role, receiver.role = 0, 1
def print_prop(num, f):
    """Write ACAS Xu property ``num`` to ``f`` as SMT-LIB assertions.

    Declares the 5 scaled inputs X_0..X_4 and 5 outputs Y_0..Y_4, emits the
    input box constraints (property 6 is a disjunction of two boxes), then
    the output constraints from get_spec(): either a conjunction of linear
    rows or, with several (mat, rhs) alternatives, a disjunction of
    conjunctions.

    NOTE(review): means_for_scaling has 6 entries but range_for_scaling only
    5 — the loops index only up to len(init_lb)-1 (presumably 5), so the
    extra mean is never used; confirm.
    """
    f.write(f'''; ACAS Xu property {num}
''')
    # Input/output SMT constant declarations.
    for x in range(5):
        f.write(f'''(declare-const X_{x} Real)
''')
    f.write('\n')
    for x in range(5):
        f.write(f'''(declare-const Y_{x} Real)
''')
    # Network inputs are normalized as (raw - mean) / range.
    means_for_scaling = [19791.091, 0.0, 0.0, 650.0, 600.0, 7.]
    range_for_scaling = [60261.0, 6., 6., 1100.0, 1200.0]
    if (num != '6'):
        # Single input box: one <=/>= pair per scaled input.
        (init_lb, init_ub) = get_init_box(num)
        for i in range(len(init_lb)):
            f.write(f'''
; Unscaled Input {i}: {(init_lb[i], init_ub[i])}
''')
            lb = ((init_lb[i] - means_for_scaling[i]) / range_for_scaling[i])
            ub = ((init_ub[i] - means_for_scaling[i]) / range_for_scaling[i])
            f.write(f'''(assert (<= X_{i} {round(ub, 9)}))
''')
            f.write(f'''(assert (>= X_{i} {round(lb, 9)}))
''')
        f.write('\n')
    else:
        # Property 6 is the union of two input boxes ('6.1' or '6.2').
        (init_lb1, init_ub1) = get_init_box('6.1')
        (init_lb2, init_ub2) = get_init_box('6.2')
        (desc, mat_rhs_list) = get_spec(num)
        f.write('\n; Spec 6\n')
        f.write(f'''; {desc}
''')
        for i in range(len(init_lb1)):
            f.write(f'''; Unscaled Input {i}: {(init_lb1[i], init_ub1[i])}
''')
        f.write(';;;; or ;;;;\n')
        for i in range(len(init_lb1)):
            f.write(f'''; Unscaled Input {i}: {(init_lb2[i], init_ub2[i])}
''')
        # (or (and box1...) (and box2...))
        f.write('(assert (or\n')
        for (init_lb, init_ub) in zip([init_lb1, init_lb2], [init_ub1, init_ub2]):
            f.write(' (and')
            for i in range(len(init_lb)):
                lb = ((init_lb[i] - means_for_scaling[i]) / range_for_scaling[i])
                ub = ((init_ub[i] - means_for_scaling[i]) / range_for_scaling[i])
                f.write(f' (<= X_{i} {round(ub, 9)})')
                f.write(f' (>= X_{i} {round(lb, 9)})')
            f.write(')\n')
        f.write('))\n\n')
    (desc, mat_rhs_list) = get_spec(num)
    f.write(f'''; {desc}
''')
    if (len(mat_rhs_list) == 1):
        # Single alternative: each row becomes one top-level assertion.
        (mat, rhs_vec) = mat_rhs_list[0]
        for (row, rhs) in zip(mat, rhs_vec):
            row = np.array(row, dtype=float)
            if (rhs == 0):
                # rhs 0 rows encode a pairwise comparison Y_i <= Y_j via a
                # +1/-1 coefficient pair.
                assert (sum((row != 0)) == 2)
                (i1, i2) = np.where(row)[0]
                if ((row[i1] == 1.0) and (row[i2] == (- 1.0))):
                    f.write(f'''(assert (<= Y_{i1} Y_{i2}))
''')
                else:
                    assert ((row[i1] == (- 1.0)) and (row[i2] == 1.0))
                    f.write(f'''(assert (<= Y_{i2} Y_{i1}))
''')
            else:
                # Non-zero rhs rows bound a single output.
                assert (sum((row != 0)) == 1)
                i = np.argmax(np.abs(row))
                if (row[i] > 0):
                    f.write(f'''(assert (<= Y_{i} {rhs}))
''')
                else:
                    f.write(f'''(assert (>= Y_{i} {(- rhs)}))
''')
    else:
        # Several alternatives: (or (and rows...) (and rows...) ...).
        f.write('(assert (or\n')
        for (mat, rhs_vec) in mat_rhs_list:
            f.write(' ')
            f.write('(and')
            for (row, rhs) in zip(mat, rhs_vec):
                row = np.array(row, dtype=float)
                assert (rhs == 0), 'only spec 1 has threshold'
                assert (sum((row != 0)) == 2)
                (i1, i2) = np.where(row)[0]
                if ((row[i1] == 1.0) and (row[i2] == (- 1.0))):
                    f.write(f' (<= Y_{i1} Y_{i2})')
                else:
                    assert ((row[i1] == (- 1.0)) and (row[i2] == 1.0))
                    f.write(f' (<= Y_{i2} Y_{i1})')
            f.write(')')
            f.write('\n')
        f.write('))\n')
def parse_args():
    """Parse CLI arguments: positional treebank paths plus a --pretrain vector file.

    When no treebanks are given on the command line, two hard-coded Armenian
    UD treebanks are used as defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('treebanks', type=str, nargs='*', help='Which treebanks to run on')
    parser.add_argument('--pretrain', type=str, default='/home/john/extern_data/wordvec/glove/armenian.pt', help='Which pretrain to use')
    # Machine-specific default paths — override on the command line.
    parser.set_defaults(treebanks=['/home/john/extern_data/ud2/ud-treebanks-v2.7/UD_Western_Armenian-ArmTDP/hyw_armtdp-ud-train.conllu', '/home/john/extern_data/ud2/ud-treebanks-v2.7/UD_Armenian-ArmTDP/hy_armtdp-ud-train.conllu'])
    args = parser.parse_args()
    return args
class BytesURL(BaseURL):
    """Parsed URL whose components are ``bytes`` instead of text.

    Mirrors the text-based ``URL`` class; ``decode()`` converts to it.
    """
    __slots__ = ()
    # Byte-literal separators used when reassembling the URL.
    # BUG FIX: ``_at`` had lost its '@' character (it read b''), which would
    # corrupt netloc handling for URLs carrying credentials.
    _at = b'@'
    _colon = b':'
    _lbracket = b'['
    _rbracket = b']'

    def __str__(self):
        return self.to_url().decode('utf-8', 'replace')

    def encode_netloc(self):
        """The netloc is already bytes; return it unchanged."""
        return self.netloc

    def decode(self, charset='utf-8', errors='replace'):
        """Decode every component into a text ``URL`` (scheme is ASCII)."""
        return URL(self.scheme.decode('ascii'), self.decode_netloc(), self.path.decode(charset, errors), self.query.decode(charset, errors), self.fragment.decode(charset, errors))
def batch_normalization_layer(input_layer, dimension):
    """Batch-normalize a 4-D tensor over its first three axes (TF1 style).

    ``dimension`` is the channel count; trainable shift (beta) and scale
    (gamma) variables are created in the current variable scope.  BN_EPSILON
    is a module-level constant.
    """
    # Moments over all axes except the channel axis.
    (mean, variance) = tf.nn.moments(input_layer, axes=[0, 1, 2])
    beta = tf.get_variable('beta', dimension, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32))
    gamma = tf.get_variable('gamma', dimension, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32))
    bn_layer = tf.nn.batch_normalization(input_layer, mean, variance, beta, gamma, BN_EPSILON)
    return bn_layer
def point_wise_feed_forward_network(d_model, dff):
    """Transformer position-wise FFN: Dense(dff, relu) followed by Dense(d_model)."""
    return tf.keras.Sequential([tf.keras.layers.Dense(dff, activation='relu'), tf.keras.layers.Dense(d_model)])
class Log():
    """Colorized console logger for training scripts (uses termcolor-style helpers)."""

    def __init__(self):
        pass

    def process(self, pid):
        """Print the process id (grey, bold)."""
        print(grey('Process ID: {}'.format(pid), bold=True))

    def model(self, message):
        """Print a model-related message (blue, bold)."""
        print(blue(message, bold=True))

    def title(self, message):
        """Print a section title (yellow, bold, underlined)."""
        print(yellow(message, bold=True, underline=True))

    def warning(self, message):
        """Print a warning (red, bold)."""
        print(red(message, bold=True))

    def info(self, message):
        """Print an informational message (magenta, bold)."""
        print(magenta(message, bold=True))

    def options(self, opt, level=0):
        """Recursively pretty-print an options dict as an indented bullet tree."""
        for (key, value) in sorted(opt.items()):
            if isinstance(value, (dict, edict)):
                # Nested dicts get their own indented sub-tree.
                print(((((' ' * level) + cyan('* ')) + green(key)) + ':'))
                self.options(value, (level + 1))
            else:
                print(((((' ' * level) + cyan('* ')) + green(key)) + ':'), yellow(value))

    def loss_train(self, opt, ep, lr, loss, timer):
        """One-line training progress: epoch, lr, loss, elapsed time and ETA."""
        if (not opt.max_epoch):
            return
        message = grey('[train] ', bold=True)
        message += 'epoch {}/{}'.format(cyan(ep, bold=True), opt.max_epoch)
        message += ', lr:{}'.format(yellow('{:.2e}'.format(lr), bold=True))
        message += ', loss:{}'.format(red('{:.3e}'.format(loss), bold=True))
        message += ', time:{}'.format(blue('{0}-{1:02d}:{2:02d}:{3:02d}'.format(*get_time(timer.elapsed)), bold=True))
        message += ' (ETA:{})'.format(blue('{0}-{1:02d}:{2:02d}:{3:02d}'.format(*get_time(timer.arrival))))
        print(message)

    def loss_val(self, opt, loss):
        """One-line validation summary: loss and PSNR.

        PSNR = -10*log10(loss); the .log10() call implies `loss` is a
        tensor-like MSE value — confirm against callers.
        """
        message = grey('[val] ', bold=True)
        message += 'loss:{}'.format(red('{:.3e}'.format(loss), bold=True))
        message += ', psnr:{}'.format(red('{:.2e}'.format(((- 10) * loss.log10())), bold=True))
        print(message)
class FlaxGPTJPreTrainedModel(metaclass=DummyObject):
    # Import-error placeholder: stands in for the real Flax GPT-J model when
    # the optional `flax` backend is not installed.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError pointing the user at the missing backend.
        requires_backends(self, ['flax'])
def build_spk_hashtable_librimix(hparams):
    """Index LibriSpeech-style utterances by speaker id.

    Recursively scans ``hparams['base_folder_dm']`` for .wav files; the
    speaker id is the third-from-last path component (speaker/chapter/file
    layout — TODO confirm against the corpus on disk).

    Returns:
        (spk_hashtable, spk_weights): dict mapping speaker id -> list of
        utterance paths, and the per-speaker utterance counts (for weighted
        sampling) in the hashtable's iteration order.
    """
    libri_utterances = glob.glob(
        os.path.join(hparams['base_folder_dm'], '**/*.wav'), recursive=True)
    # Sanity check: the corpus sample rate must match the experiment's.
    assert (torchaudio.info(libri_utterances[0]).sample_rate == hparams['sample_rate'])
    spk_hashtable = {}
    for utt in tqdm(libri_utterances):
        spk_id = os.path.normpath(utt).split(os.sep)[(- 3)]
        # setdefault replaces the original redundant membership-test if/else.
        spk_hashtable.setdefault(spk_id, []).append(utt)
    spk_weights = [len(utts) for utts in spk_hashtable.values()]
    return (spk_hashtable, spk_weights)
class BlockStack(list):
    """A list-backed stack of instructions with a non-raising top peek."""

    def push(self, instr: UniqueInstruction) -> None:
        """Place *instr* on top of the stack."""
        self.append(instr)

    def peek(self) -> (UniqueInstruction | None):
        """Return the top instruction without removing it, or None if empty."""
        return self[(- 1)] if self else None
# NOTE(review): the original line was a bare call '_numpy_output(...)' — a
# stripped decorator that silently did nothing. Restored as the dace test
# helper '@compare_numpy_output'; verify the exact helper name in this module.
@compare_numpy_output(positive=True, check_dtype=True)
def test_ufunc_log2_f(A: dace.float32[10]):
    """Elementwise np.log2 ufunc inside a dace program, compared against NumPy."""
    return np.log2(A)
def _broadcast_and_stack(tensors, dim=(- 1)):
broadcast_shape = torch.broadcast_shapes(*(x.size() for x in tensors))
broadcast_tensors = [x.broadcast_to(broadcast_shape) for x in tensors]
return torch.stack(broadcast_tensors, dim=dim) |
class InvertedDoublePendulumEnv(MujocoEnv, Serializable):
    """MuJoCo inverted double pendulum: keep the two-link pole balanced on a cart."""

    FILE = 'inverted_double_pendulum.xml.mako'

    # NOTE(review): the original line was a bare tuple-with-kwargs expression
    # (a SyntaxError) — a decorator stripped of its '@autoargs.arg' prefix.
    # Restored here; verify that `autoargs` is imported at the top of the file.
    @autoargs.arg('random_start', type=bool, help='Randomized starting position by adjusting the anglesWhen this is false, the double pendulum started outin balanced position')
    def __init__(self, *args, **kwargs):
        # Whether reset perturbs the first hinge angle (see reset_mujoco).
        self.random_start = kwargs.get('random_start', True)
        super(InvertedDoublePendulumEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())

    def get_current_obs(self):
        """Observation: cart position, sin/cos of joint angles, and clipped
        joint velocities / constraint forces, flattened to 1-D."""
        return np.concatenate([self.model.data.qpos[:1], np.sin(self.model.data.qpos[1:]), np.cos(self.model.data.qpos[1:]), np.clip(self.model.data.qvel, (- 10), 10), np.clip(self.model.data.qfrc_constraint, (- 10), 10)]).reshape((- 1))

    def step(self, action):
        """Apply *action* and return (obs, reward, done).

        Reward = alive bonus minus a tip-distance penalty and a joint-velocity
        penalty; the episode ends once the pole tip height drops to y <= 1.
        """
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        (x, _, y) = self.model.data.site_xpos[0]
        dist_penalty = ((0.01 * (x ** 2)) + ((y - 2) ** 2))
        (v1, v2) = self.model.data.qvel[1:3]
        vel_penalty = ((0.001 * (v1 ** 2)) + (0.005 * (v2 ** 2)))
        alive_bonus = 10
        r = float(((alive_bonus - dist_penalty) - vel_penalty))
        done = (y <= 1)
        return Step(next_obs, r, done)

    def reset_mujoco(self, init_state=None):
        """Reset to the initial MuJoCo state; when random_start is set, jitter
        the first hinge angle uniformly within +/- 20 degrees."""
        assert (init_state is None)
        qpos = np.copy(self.init_qpos)
        if self.random_start:
            qpos[1] = ((((np.random.rand() - 0.5) * 40) / 180.0) * np.pi)
        self.model.data.qpos = qpos
        self.model.data.qvel = self.init_qvel
        self.model.data.qacc = self.init_qacc
        self.model.data.ctrl = self.init_ctrl
class IDD_Dataset(SegmentationDataset):
    """India Driving Dataset (IDD) for semantic segmentation, level-3 label set (26 classes)."""
    num_classes = 26
    # Class names, index-aligned with the ids in *_gtFine_labellevel3Ids.png.
    label_names = ['road', 'drivable fallback', 'sidewalk', 'non-drivable fallback', 'animal', 'rider', 'motorcycle', 'bicycle', 'autorickshaw', 'car', 'truck', 'bus', 'vehicle fallback', 'curb', 'wall', 'fence', 'guard rail', 'billboard', 'traffic sign', 'traffic light', 'pole', 'obs-str-bar-fallback', 'building', 'bridge', 'vegetation', 'sky']
    # Per-class RGB palette for visualization, index-aligned with label_names.
    color_map = np.array([[128, 64, 128], [81, 0, 81], [244, 35, 232], [152, 251, 152], [220, 20, 60], [255, 0, 0], [0, 0, 230], [119, 11, 32], [255, 204, 54], [0, 0, 142], [0, 0, 70], [0, 60, 100], [136, 143, 153], [220, 190, 40], [102, 102, 156], [190, 153, 153], [180, 165, 180], [174, 64, 67], [220, 220, 0], [250, 170, 30], [153, 153, 153], [169, 187, 214], [70, 70, 70], [150, 120, 90], [107, 142, 35], [70, 130, 180]], dtype=np.uint8)
    def __init__(self, root, subset='train', transform=None, file_path=False, num_images=None, mode='labeled'):
        """Wire up the Cityscapes-style directory layout (leftImg8bit / gtFine)
        for IDD and delegate file discovery to SegmentationDataset."""
        self.d_idx = 'IDD'
        self.mode = mode
        super().__init__(root, subset, img_path='leftImg8bit', label_path='gtFine', pattern='*/*', img_suffix='_leftImg8bit.png', label_suffix='_gtFine_labellevel3Ids.png', transform=transform, file_path=file_path, num_images=num_images)
class FactorizationMachineModelnofeatures(keras.Model):
    """Matrix-factorization rating model (FM without side features): dot product
    of user/item embeddings plus user, item, and global bias terms."""
    def __init__(self, num_users, num_items, embed_mf_size, lambda_weights, learning_rate=0.01, random_seed=42, name='FM', **kwargs):
        """Build embedding tables and optimizer.

        lambda_weights is the L2 regularization strength applied to both
        embedding tables (not the bias embeddings).
        """
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(random_seed)
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.lambda_weights = lambda_weights
        self.initializer = tf.initializers.GlorotUniform()
        # Latent factor tables: U_MF (users) and I_MF (items), L2-regularized.
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_MF', embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), dtype=tf.float32)
        self.item_mf_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), embeddings_initializer=self.initializer, name='I_MF', dtype=tf.float32)
        # Scalar per-user / per-item bias tables, plus a global bias variable.
        self.u_bias = keras.layers.Embedding(input_dim=self.num_users, output_dim=1, embeddings_initializer=self.initializer, name='B_U_MF', dtype=tf.float32)
        self.i_bias = keras.layers.Embedding(input_dim=self.num_items, output_dim=1, embeddings_initializer=self.initializer, name='B_I_MF', dtype=tf.float32)
        self.bias_ = tf.Variable(0.0, name='GB')
        # Call each layer once with a dummy index so the weights are built eagerly.
        self.user_mf_embedding(0)
        self.item_mf_embedding(0)
        self.u_bias(0)
        self.i_bias(0)
        self.loss = keras.losses.MeanSquaredError()
        self.optimizer = tf.optimizers.Adam(learning_rate)
    def call(self, inputs, training=None, mask=None):
        """Predict ratings for (user, item) index tensors: u·i + global + user + item biases."""
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e = self.item_mf_embedding(item)
        mf_output = tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1))
        return (((mf_output + self.bias_) + self.u_bias(user)) + self.i_bias(item))
    def train_step(self, batch):
        """One gradient step on an MSE loss over a (user, item, label) batch; returns the loss."""
        (user, pos, label) = batch
        with tf.GradientTape() as tape:
            output = self(inputs=(user, pos), training=True)
            loss = self.loss(label, output)
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss
    def predict(self, inputs, training=False, **kwargs):
        """Inference wrapper around call()."""
        output = self.call(inputs=inputs, training=training)
        return output
    def get_recs(self, inputs, training=False, **kwargs):
        """Score (user, item) pairs for recommendation; squeezed scalar scores."""
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e = self.item_mf_embedding(item)
        mf_output = tf.expand_dims(tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1)), (- 1))
        return tf.squeeze((((mf_output + self.bias_) + self.u_bias(user)) + self.i_bias(item)))
    def get_top_k(self, preds, train_mask, k=100):
        """Top-k predictions per user with already-seen (train) entries masked to -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)
def check_used(port: int) -> bool:
    """Return True if something accepts TCP connections on 127.0.0.1:*port*.

    Bug fix: the original closed the socket only when the connection
    succeeded, leaking the descriptor on every failed probe. The context
    manager closes it on all paths.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        return sock.connect_ex(('127.0.0.1', port)) == 0
def test_argmin_argmax_axis_None():
    """argmin/argmax on datetime64 values flatten across all nesting levels when axis is None."""
    years = [[[np.datetime64('2022'), np.datetime64('2023'), np.datetime64('2025')], [], [np.datetime64('2027'), np.datetime64('2011')], [np.datetime64('2013')]], [], [[np.datetime64('2017'), np.datetime64('2019')], [np.datetime64('2023')]]]
    array = ak.highlevel.Array(years, check_valid=True)
    # Flattened order: position 4 holds 2011 (the minimum), position 3 holds 2027 (the maximum).
    assert ak.operations.argmin(array) == 4
    assert ak.operations.argmax(array) == 3
def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias kept for backward compatibility; forwards to build_prior_generator."""
    deprecation_msg = '``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` '
    warnings.warn(deprecation_msg)
    return build_prior_generator(cfg, default_args=default_args)
def main(args):
    """Generate synthetic-efficiency run specs for every requested tokenizer
    provider crossed with all prompt/output token counts, and write one spec
    per line to args.output_path.
    """
    now = datetime.now()
    current_date = now.strftime('%m/%d/%Y')
    all_num_prompt_tokens = [1, 256, 512, 1024, 1536]
    all_num_output_tokens = [1, 2, 4, 8, 16, 32, 64]
    scenario = 'synthetic_efficiency'
    # Provider name -> (model/tokenizer service, canonical tokenizer name).
    # Replaces the original 12-branch elif chain; unknown providers are
    # skipped silently, matching the original behavior.
    provider_table = {
        'ai21': ('ai21_tokenizer', 'ai21/j1'),
        'openai': ('gpt2_tokenizer', 'huggingface/gpt2'),
        'cohere': ('cohere_tokenizer', 'cohere/cohere'),
        'opt': ('opt_tokenizer', 'meta/opt'),
        'yandex': ('together/yalm', 'yandex/yalm'),
        'bloom': ('together/bloom', 'bigscience/bloom'),
        't5': ('together/t5-11b', 'google/t5'),
        't0': ('together/t0pp', 'bigscience/t0pp'),
        'ul2': ('together/ul2', 'google/ul2'),
        'glm': ('together/glm', 'tsinghua/glm'),
        'gptj': ('together/gpt-j-6b', 'eleutherai/gptj'),
        'gptneox': ('together/gpt-neox-20b', 'eleutherai/gptneox'),
    }
    all_models_and_tokenizers = [provider_table[p] for p in args.tokenizer_providers if p in provider_table]
    specs = []
    num_specs = ((len(all_models_and_tokenizers) * len(all_num_prompt_tokens)) * len(all_num_output_tokens))
    print(f'Generating {num_specs} specs...')
    # `random` is loop-invariant; hoisted out of the innermost loop.
    random = None if args.no_random else current_date
    for (model, tokenizer) in all_models_and_tokenizers:
        for num_prompt_tokens in all_num_prompt_tokens:
            for num_output_tokens in all_num_output_tokens:
                spec = generate_spec(scenario, model, tokenizer, num_prompt_tokens, num_output_tokens, random=random)
                specs.append(spec)
    print(f'Writing out {len(specs)} specs...')
    with open(args.output_path, 'w') as f:
        for spec in specs:
            f.write(f'{spec}\n')
def _shift_seq(seq: torch.Tensor) -> torch.Tensor:
shifted_seq = seq.roll((- 1), dims=0)
shifted_seq[((- 1), ...)] = 0
return shifted_seq |
class ResNet(nn.Module):
    """ResNet built from EW (element-wise) conv/linear layers, with per-stage
    feature taps and layer-freezing helpers for transfer-learning experiments."""
    def __init__(self, block, num_blocks, num_classes=10):
        # block: residual block class exposing `.expansion`;
        # num_blocks: number of blocks in each of the four stages.
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = conv3x3_EW(3, 64)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = Linear_EW((512 * block.expansion), num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` residual blocks; only the first uses the given stride."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Track the running channel count for the next block/stage.
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)
    def freeze_hidden_layers(self):
        """Freeze every layer except the final classifier."""
        self._freeze_layer(self.conv1)
        self._freeze_layer(self.bn1)
        self._freeze_layer(self.layer1)
        self._freeze_layer(self.layer2)
        self._freeze_layer(self.layer3)
        self._freeze_layer(self.layer4)
    def unfreeze_model(self):
        """Re-enable gradients for every layer, classifier included."""
        self._freeze_layer(self.conv1, freeze=False)
        self._freeze_layer(self.bn1, freeze=False)
        self._freeze_layer(self.layer1, freeze=False)
        self._freeze_layer(self.layer2, freeze=False)
        self._freeze_layer(self.layer3, freeze=False)
        self._freeze_layer(self.layer4, freeze=False)
        self._freeze_layer(self.linear, freeze=False)
    def embed_in_n_layer(self, n):
        """Freeze the stem plus exactly one stage selected by *n* (anything
        outside 1-4 freezes the classifier instead)."""
        self._freeze_layer(self.conv1)
        self._freeze_layer(self.bn1)
        if (n == 1):
            self._freeze_layer(self.layer1)
        elif (n == 2):
            self._freeze_layer(self.layer2)
        elif (n == 3):
            self._freeze_layer(self.layer3)
        elif (n == 4):
            self._freeze_layer(self.layer4)
        else:
            self._freeze_layer(self.linear)
    def _freeze_layer(self, layer, freeze=True):
        """Set requires_grad on every parameter of *layer* (False when freezing)."""
        if freeze:
            for p in layer.parameters():
                p.requires_grad = False
        else:
            for p in layer.parameters():
                p.requires_grad = True
    def feature(self, x, stage_id):
        """Feature map after stage `stage_id` (1-3); any other value returns the
        layer4 output. No pooling/classifier is applied."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        if (stage_id == 1):
            return out
        out = self.layer2(out)
        if (stage_id == 2):
            return out
        out = self.layer3(out)
        if (stage_id == 3):
            return out
        out = self.layer4(out)
        return out
    def forward(self, x, stage_id=0):
        """Full forward pass; stage_id in 1-5 returns the intermediate activation
        at that stage instead of the final logits (5 = pooled/flattened features)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        if (stage_id == 1):
            return out
        out = self.layer2(out)
        if (stage_id == 2):
            return out
        out = self.layer3(out)
        if (stage_id == 3):
            return out
        out = self.layer4(out)
        if (stage_id == 4):
            return out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        if (stage_id == 5):
            return out
        out = self.linear(out)
        return out
def get_basic_model(**kwargs):
    """Build the default PaSST pipeline: augmenting mel front-end plus the
    20-second PaSST backbone, wrapped for long-audio inference."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='passt_20sec', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=backbone, max_model_window=20000, **kwargs)
def logging_manager(*, debug: bool=False) -> Iterator[None]:
    """Configure console + file logging for a nightly run, then yield the logger.

    NOTE(review): this is a generator with a single yield inside try —
    presumably wrapped with @contextmanager at its (unseen) definition site;
    confirm. On any exception during the managed block the traceback is
    logged, recorded, and the process exits with status 1.
    """
    formatter = Formatter(fmt='%(levelname)s: %(message)s', datefmt='')
    root_logger = logging.getLogger('conda-pytorch')
    root_logger.setLevel(logging.DEBUG)
    # Console verbosity follows the debug flag; the file always gets everything
    # the logger emits (no level set on the file handler).
    console_handler = logging.StreamHandler()
    if debug:
        console_handler.setLevel(logging.DEBUG)
    else:
        console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
    log_file = os.path.join(logging_run_dir(), 'nightly.log')
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    root_logger.addHandler(file_handler)
    logging_record_argv()
    try:
        logging_rotate()
        print(f'log file: {log_file}')
        (yield root_logger)
    except Exception as e:
        # Ordinary failures: log full traceback, record, and exit non-zero.
        logging.exception('Fatal exception')
        logging_record_exception(e)
        print(f'log file: {log_file}')
        sys.exit(1)
    except BaseException as e:
        # KeyboardInterrupt/SystemExit etc.: record quietly (info-level traceback).
        logging.info('', exc_info=True)
        logging_record_exception(e)
        print(f'log file: {log_file}')
        sys.exit(1)
class Capture(object):
    """Records dataframe-style operations symbolically instead of executing them.

    Dunder methods append Capture* nodes to a shared ctx so the traced op
    graph can later be replayed over a datapipe (see as_datapipe).
    """
    # Shared trace state: 'operations' is the ordered op list, 'variables'
    # holds the captured variables (index 0 carries the source datapipe).
    ctx: Dict[(str, List[Any])]
    def __init__(self):
        self.ctx = {'operations': [], 'variables': []}
    def __str__(self):
        return self.ops_str()
    def ops_str(self):
        """Human-readable dump of the recorded operations, one per line."""
        res = ''
        for op in self.ctx['operations']:
            if (len(res) > 0):
                res += '\n'
            res += str(op)
        return res
    def __getattr__(self, attrname):
        # Datapipe-level ops escape the capture and run on the real datapipe;
        # anything else is recorded as a symbolic attribute access.
        if (attrname == 'kwarg'):
            raise Exception('no kwargs!')
        if (attrname in DATAPIPES_OPS):
            return self.as_datapipe().__getattr__(attrname)
        return CaptureGetAttr(self, attrname, ctx=self.ctx)
    def __getitem__(self, key):
        return CaptureGetItem(self, key, ctx=self.ctx)
    def __setitem__(self, key, value):
        # Item assignment is a side effect, so it goes on the operations list.
        self.ctx['operations'].append(CaptureSetItem(self, key, value, ctx=self.ctx))
    def __add__(self, add_val):
        # Arithmetic produces a fresh variable bound to the symbolic result.
        res = CaptureAdd(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        self.ctx['operations'].append(CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
        return var
    def __sub__(self, add_val):
        res = CaptureSub(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        self.ctx['operations'].append(CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
        return var
    def __mul__(self, add_val):
        res = CaptureMul(self, add_val, ctx=self.ctx)
        var = CaptureVariable(res, ctx=self.ctx)
        t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
        self.ctx['operations'].append(t)
        return var
    def as_datapipe(self):
        """Replay the captured trace over the original source datapipe."""
        return DataFrameTracedOps(self.ctx['variables'][0].source_datapipe, self)
    def raw_iterator(self):
        return self.as_datapipe().__iter__()
    def __iter__(self):
        # Default iteration yields per-row tuples rather than raw dataframes.
        return iter(self._dataframes_as_tuples())
    def batch(self, batch_size=10):
        """Re-batch per-row dataframes into chunks of batch_size, wrapped as DataChunkDF."""
        dp = self._dataframes_per_row()._dataframes_concat(batch_size)
        dp = dp.as_datapipe().batch(1, wrapper_class=DataChunkDF)
        dp._dp_contains_dataframe = True
        return dp
    def groupby(self, group_key_fn, *, buffer_size=10000, group_size=None, unbatch_level=0, guaranteed_group_size=None, drop_remaining=False):
        """Group per-row records; optionally unbatch first when unbatch_level != 0."""
        if (unbatch_level != 0):
            dp = self.unbatch(unbatch_level)._dataframes_per_row()
        else:
            dp = self._dataframes_per_row()
        dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size, guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining)
        return dp
    def shuffle(self, *args, **kwargs):
        return self._dataframes_shuffle(*args, **kwargs)
    def filter(self, *args, **kwargs):
        return self._dataframes_filter(*args, **kwargs)
class MemoryCopySlice(MemoryCopyNode):
    """Cython compiler node for assigning one memoryview slice to another,
    emitting a call to __pyx_memoryview_copy_contents."""
    is_memview_copy_assignment = True
    copy_slice_cname = '__pyx_memoryview_copy_contents'
    def _generate_assignment_code(self, src, code):
        """Emit the C slice-copy call; jumps to the error label on a negative return."""
        dst = self.dst
        # Both slices must have direct (non-indirect) dimensions for this copy.
        src.type.assert_direct_dims(src.pos)
        dst.type.assert_direct_dims(dst.pos)
        code.putln(code.error_goto_if_neg(('%s(%s, %s, %d, %d, %d)' % (self.copy_slice_cname, src.result(), dst.result(), src.type.ndim, dst.type.ndim, dst.type.dtype.is_pyobject)), dst.pos))
def bivariate_plateau_type1(kernel_size, sig_x, sig_y, theta, beta, grid=None):
    """Generate a normalized bivariate plateau-shaped blur kernel.

    The kernel is 1 / (q(x)^beta + 1) where q is the quadratic form of the
    rotated anisotropic covariance (sig_x, sig_y, theta); larger beta gives a
    flatter top. The result sums to 1.
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    inverse_sigma = np.linalg.inv(sigma_matrix2(sig_x, sig_y, theta))
    quad_form = np.sum(np.dot(grid, inverse_sigma) * grid, 2)
    kernel = np.reciprocal(np.power(quad_form, beta) + 1)
    return kernel / np.sum(kernel)
class StabilityTask(SequenceToFloatTask):
    """Protein stability regression: predicts a single stability score per sequence."""

    def __init__(self):
        super().__init__(
            key_metric='MAE',
            deserialization_func=deserialize_stability_sequence,
            d_output=1,
            label='stability_score',
            input_name='encoder_output',
            output_name='prediction',
        )
# NOTE(review): three decorators in this test were stripped to bare
# expressions ('.hypothesis_nested', '(case=...)', '(max_examples=1)') —
# restored as '@pytest.mark.hypothesis_nested', '@given', and '@settings';
# verify the corresponding imports exist at the top of the file.
@pytest.mark.hypothesis_nested
def test_case_insensitive_headers(empty_open_api_3_schema):
    """Header containers are case-insensitive: assigning via a differently-cased
    key overwrites the existing entry rather than adding a duplicate."""
    empty_open_api_3_schema['paths'] = {'/data': {'post': {'parameters': [{'name': 'X-id', 'in': 'header', 'required': True, 'schema': {'type': 'string'}}], 'responses': {'200': {'description': 'OK'}}}}}
    schema = schemathesis.from_dict(empty_open_api_3_schema)

    @given(case=schema['/data']['POST'].as_strategy())
    @settings(max_examples=1)
    def test(case):
        assert ('X-id' in case.headers)
        case.headers['x-ID'] = 'foo'
        assert (len(case.headers) == 1)
        assert (case.headers['X-id'] == 'foo')

    test()
def min_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axes=None, keep_dims=False, with_index=False, only_index=False):
    """Backward pass of a reduction-min: route the upstream gradient to every
    input element equal to the reduced minimum.

    dy (upstream grad) and y0 (forward output) are broadcast back to the input
    shape; the gradient is masked by (x0 == y0). Index outputs carry no
    gradient, so with_index returns (grad, None) and only_index returns None.
    """
    dy = grad_inputs[0]
    x0 = inputs[0]
    y0 = outputs[0]
    if keep_dims:
        # Reduced axes kept as size-1 dims: broadcast directly to input shape.
        y0 = F.broadcast(y0, x0.shape)
        dy = F.broadcast(dy, x0.shape)
    else:
        # Axes were squeezed out: re-insert size-1 dims before broadcasting.
        axes = ([i for i in range(x0.ndim)] if (axes is None) else force_list(axes))
        shape = [(1 if (i in axes) else s) for (i, s) in enumerate(x0.shape)]
        y0 = F.broadcast(F.reshape(y0, shape, inplace=False), x0.shape)
        dy = F.broadcast(F.reshape(dy, shape, inplace=False), x0.shape)
    # Mask selecting the argmin positions; detached so no grad flows through it.
    m0 = F.equal(x0, y0)
    m0 = no_grad(m0)
    dx0 = (dy * m0)
    if ((not with_index) and (not only_index)):
        return dx0
    elif with_index:
        return (dx0, None)
    elif only_index:
        return None
class Writer(SummaryWriter):
    """TensorBoard writer for speech-enhancement runs: scalar losses/scores,
    audio clips, and spectrogram/mask images with custom diverging colormaps."""
    def __init__(self, logdir):
        super(Writer, self).__init__(logdir)
        # Hand-built 64-step blue->cyan->yellow->red segmented colormap
        # (jet-like), used for signed mask values in [-2, 2].
        cmap_custom = {'red': ((0.0, 0.0, 0.0), ((1 / 63), 0.0, 0.0), ((2 / 63), 0.0, 0.0), ((3 / 63), 0.0, 0.0), ((4 / 63), 0.0, 0.0), ((5 / 63), 0.0, 0.0), ((6 / 63), 0.0, 0.0), ((7 / 63), 0.0, 0.0), ((8 / 63), 0.0, 0.0), ((9 / 63), 0.0, 0.0), ((10 / 63), 0.0, 0.0), ((11 / 63), 0.0, 0.0), ((12 / 63), 0.0, 0.0), ((13 / 63), 0.0, 0.0), ((14 / 63), 0.0, 0.0), ((15 / 63), 0.0, 0.0), ((16 / 63), 0.0, 0.0), ((17 / 63), 0.0, 0.0), ((18 / 63), 0.0, 0.0), ((19 / 63), 0.0, 0.0), ((20 / 63), 0.0, 0.0), ((21 / 63), 0.0, 0.0), ((22 / 63), 0.0, 0.0), ((23 / 63), 0.0, 0.0), ((24 / 63), 0.5625, 0.5625), ((25 / 63), 0.625, 0.625), ((26 / 63), 0.6875, 0.6875), ((27 / 63), 0.75, 0.75), ((28 / 63), 0.8125, 0.8125), ((29 / 63), 0.875, 0.875), ((30 / 63), 0.9375, 0.9375), ((31 / 63), 1.0, 1.0), ((32 / 63), 1.0, 1.0), ((33 / 63), 1.0, 1.0), ((34 / 63), 1.0, 1.0), ((35 / 63), 1.0, 1.0), ((36 / 63), 1.0, 1.0), ((37 / 63), 1.0, 1.0), ((38 / 63), 1.0, 1.0), ((39 / 63), 1.0, 1.0), ((40 / 63), 1.0, 1.0), ((41 / 63), 1.0, 1.0), ((42 / 63), 1.0, 1.0), ((43 / 63), 1.0, 1.0), ((44 / 63), 1.0, 1.0), ((45 / 63), 1.0, 1.0), ((46 / 63), 1.0, 1.0), ((47 / 63), 1.0, 1.0), ((48 / 63), 1.0, 1.0), ((49 / 63), 1.0, 1.0), ((50 / 63), 1.0, 1.0), ((51 / 63), 1.0, 1.0), ((52 / 63), 1.0, 1.0), ((53 / 63), 1.0, 1.0), ((54 / 63), 1.0, 1.0), ((55 / 63), 1.0, 1.0), ((56 / 63), 0.9375, 0.9375), ((57 / 63), 0.875, 0.875), ((58 / 63), 0.8125, 0.8125), ((59 / 63), 0.75, 0.75), ((60 / 63), 0.6875, 0.6875), ((61 / 63), 0.625, 0.625), ((62 / 63), 0.5625, 0.5625), ((63 / 63), 0.5, 0.5)), 'green': ((0.0, 0.0, 0.0), ((1 / 63), 0.0, 0.0), ((2 / 63), 0.0, 0.0), ((3 / 63), 0.0, 0.0), ((4 / 63), 0.0, 0.0), ((5 / 63), 0.0, 0.0), ((6 / 63), 0.0, 0.0), ((7 / 63), 0.0, 0.0), ((8 / 63), 0.0625, 0.0625), ((9 / 63), 0.125, 0.125), ((10 / 63), 0.1875, 0.1875), ((11 / 63), 0.25, 0.25), ((12 / 63), 0.3125, 0.3125), ((13 / 63), 0.375, 0.375), ((14 / 63), 0.4375, 0.4375), ((15 / 63), 0.5, 0.5), ((16 / 63), 0.5625, 0.5625), ((17 / 63), 0.625, 
        0.625), ((18 / 63), 0.6875, 0.6875), ((19 / 63), 0.75, 0.75), ((20 / 63), 0.8125, 0.8125), ((21 / 63), 0.875, 0.875), ((22 / 63), 0.9375, 0.9375), ((23 / 63), 1.0, 1.0), ((24 / 63), 1.0, 1.0), ((25 / 63), 1.0, 1.0), ((26 / 63), 1.0, 1.0), ((27 / 63), 1.0, 1.0), ((28 / 63), 1.0, 1.0), ((29 / 63), 1.0, 1.0), ((30 / 63), 1.0, 1.0), ((31 / 63), 1.0, 1.0), ((32 / 63), 1.0, 1.0), ((33 / 63), 1.0, 1.0), ((34 / 63), 1.0, 1.0), ((35 / 63), 1.0, 1.0), ((36 / 63), 1.0, 1.0), ((37 / 63), 1.0, 1.0), ((38 / 63), 1.0, 1.0), ((39 / 63), 1.0, 1.0), ((40 / 63), 0.9375, 0.9375), ((41 / 63), 0.875, 0.875), ((42 / 63), 0.8125, 0.8125), ((43 / 63), 0.75, 0.75), ((44 / 63), 0.6875, 0.6875), ((45 / 63), 0.625, 0.625), ((46 / 63), 0.5625, 0.5625), ((47 / 63), 0.5, 0.5), ((48 / 63), 0.4375, 0.4375), ((49 / 63), 0.375, 0.375), ((50 / 63), 0.3125, 0.3125), ((51 / 63), 0.25, 0.25), ((52 / 63), 0.1875, 0.1875), ((53 / 63), 0.125, 0.125), ((54 / 63), 0.0625, 0.0625), ((55 / 63), 0.0, 0.0), ((56 / 63), 0.0, 0.0), ((57 / 63), 0.0, 0.0), ((58 / 63), 0.0, 0.0), ((59 / 63), 0.0, 0.0), ((60 / 63), 0.0, 0.0), ((61 / 63), 0.0, 0.0), ((62 / 63), 0.0, 0.0), ((63 / 63), 0.0, 0.0)), 'blue': ((0.0, 0.5625, 0.5625), ((1 / 63), 0.625, 0.625), ((2 / 63), 0.6875, 0.6875), ((3 / 63), 0.75, 0.75), ((4 / 63), 0.8125, 0.8125), ((5 / 63), 0.875, 0.875), ((6 / 63), 0.9375, 0.9375), ((7 / 63), 1.0, 1.0), ((8 / 63), 1.0, 1.0), ((9 / 63), 1.0, 1.0), ((10 / 63), 1.0, 1.0), ((11 / 63), 1.0, 1.0), ((12 / 63), 1.0, 1.0), ((13 / 63), 1.0, 1.0), ((14 / 63), 1.0, 1.0), ((15 / 63), 1.0, 1.0), ((16 / 63), 1.0, 1.0), ((17 / 63), 1.0, 1.0), ((18 / 63), 1.0, 1.0), ((19 / 63), 1.0, 1.0), ((20 / 63), 1.0, 1.0), ((21 / 63), 1.0, 1.0), ((22 / 63), 1.0, 1.0), ((23 / 63), 1.0, 1.0), ((24 / 63), 1.0, 1.0), ((25 / 63), 1.0, 1.0), ((26 / 63), 1.0, 1.0), ((27 / 63), 1.0, 1.0), ((28 / 63), 1.0, 1.0), ((29 / 63), 1.0, 1.0), ((30 / 63), 1.0, 1.0), ((31 / 63), 1.0, 1.0), ((32 / 63), 0.9375, 0.9375), ((33 / 63), 0.875, 0.875), ((34 / 63), 0.8125, 
        0.8125), ((35 / 63), 0.75, 0.75), ((36 / 63), 0.6875, 0.6875), ((37 / 63), 0.625, 0.625), ((38 / 63), 0.5625, 0.5625), ((39 / 63), 0.0, 0.0), ((40 / 63), 0.0, 0.0), ((41 / 63), 0.0, 0.0), ((42 / 63), 0.0, 0.0), ((43 / 63), 0.0, 0.0), ((44 / 63), 0.0, 0.0), ((45 / 63), 0.0, 0.0), ((46 / 63), 0.0, 0.0), ((47 / 63), 0.0, 0.0), ((48 / 63), 0.0, 0.0), ((49 / 63), 0.0, 0.0), ((50 / 63), 0.0, 0.0), ((51 / 63), 0.0, 0.0), ((52 / 63), 0.0, 0.0), ((53 / 63), 0.0, 0.0), ((54 / 63), 0.0, 0.0), ((55 / 63), 0.0, 0.0), ((56 / 63), 0.0, 0.0), ((57 / 63), 0.0, 0.0), ((58 / 63), 0.0, 0.0), ((59 / 63), 0.0, 0.0), ((60 / 63), 0.0, 0.0), ((61 / 63), 0.0, 0.0), ((62 / 63), 0.0, 0.0), ((63 / 63), 0.0, 0.0))}
        # 33-step white->yellow->red colormap, used for non-negative mask
        # magnitudes in [0, 2].
        cmap_custom2 = {'red': ((0.0, 1.0, 1.0), ((1 / 32), 1.0, 1.0), ((2 / 32), 1.0, 1.0), ((3 / 32), 1.0, 1.0), ((4 / 32), 1.0, 1.0), ((5 / 32), 1.0, 1.0), ((6 / 32), 1.0, 1.0), ((7 / 32), 1.0, 1.0), ((8 / 32), 1.0, 1.0), ((9 / 32), 1.0, 1.0), ((10 / 32), 1.0, 1.0), ((11 / 32), 1.0, 1.0), ((12 / 32), 1.0, 1.0), ((13 / 32), 1.0, 1.0), ((14 / 32), 1.0, 1.0), ((15 / 32), 1.0, 1.0), ((16 / 32), 1.0, 1.0), ((17 / 32), 1.0, 1.0), ((18 / 32), 1.0, 1.0), ((19 / 32), 1.0, 1.0), ((20 / 32), 1.0, 1.0), ((21 / 32), 1.0, 1.0), ((22 / 32), 1.0, 1.0), ((23 / 32), 1.0, 1.0), ((24 / 32), 1.0, 1.0), ((25 / 32), 0.9375, 0.9375), ((26 / 32), 0.875, 0.875), ((27 / 32), 0.8125, 0.8125), ((28 / 32), 0.75, 0.75), ((29 / 32), 0.6875, 0.6875), ((30 / 32), 0.625, 0.625), ((31 / 32), 0.5625, 0.5625), ((32 / 32), 0.5, 0.5)), 'green': ((0.0, 1.0, 1.0), ((1 / 32), 1.0, 1.0), ((2 / 32), 1.0, 1.0), ((3 / 32), 1.0, 1.0), ((4 / 32), 1.0, 1.0), ((5 / 32), 1.0, 1.0), ((6 / 32), 1.0, 1.0), ((7 / 32), 1.0, 1.0), ((8 / 32), 1.0, 1.0), ((9 / 32), 0.9375, 0.9375), ((10 / 32), 0.875, 0.875), ((11 / 32), 0.8125, 0.8125), ((12 / 32), 0.75, 0.75), ((13 / 32), 0.6875, 0.6875), ((14 / 32), 0.625, 0.625), ((15 / 32), 0.5625, 0.5625), ((16 / 32), 0.5, 0.5), ((17 / 32), 0.4375, 0.4375), ((18 / 32), 0.375, 0.375), ((19 / 32), 0.3125, 0.3125), ((20 / 32), 0.25, 0.25), ((21 / 32), 0.1875, 0.1875), ((22 / 32), 0.125, 0.125), ((23 / 32), 0.0625, 0.0625), ((24 / 32), 0.0, 0.0), ((25 / 32), 0.0, 0.0), ((26 / 32), 0.0, 0.0), ((27 / 32), 0.0, 0.0), ((28 / 32), 0.0, 0.0), ((29 / 32), 0.0, 0.0), ((30 / 32), 0.0, 0.0), ((31 / 32), 0.0, 0.0), ((32 / 32), 0.0, 0.0)), 'blue': ((0.0, 1.0, 1.0), ((1 / 32), 0.9375, 0.9375), ((2 / 32), 0.875, 0.875), ((3 / 32), 0.8125, 0.8125), ((4 / 32), 0.75, 0.75), ((5 / 32), 0.6875, 0.6875), ((6 / 32), 0.625, 0.625), ((7 / 32), 0.5625, 0.5625), ((8 / 32), 0.0, 0.0), ((9 / 32), 0.0, 0.0), ((10 / 32), 0.0, 0.0), ((11 / 32), 0.0, 0.0), ((12 / 32), 0.0, 0.0), ((13 / 32), 0.0, 0.0), ((14 / 32), 0.0, 0.0), 
        ((15 / 32), 0.0, 0.0), ((16 / 32), 0.0, 0.0), ((17 / 32), 0.0, 0.0), ((18 / 32), 0.0, 0.0), ((19 / 32), 0.0, 0.0), ((20 / 32), 0.0, 0.0), ((21 / 32), 0.0, 0.0), ((22 / 32), 0.0, 0.0), ((23 / 32), 0.0, 0.0), ((24 / 32), 0.0, 0.0), ((25 / 32), 0.0, 0.0), ((26 / 32), 0.0, 0.0), ((27 / 32), 0.0, 0.0), ((28 / 32), 0.0, 0.0), ((29 / 32), 0.0, 0.0), ((30 / 32), 0.0, 0.0), ((31 / 32), 0.0, 0.0), ((32 / 32), 0.0, 0.0))}
        self.cmap_custom = matplotlib.colors.LinearSegmentedColormap('testCmap', segmentdata=cmap_custom, N=256)
        self.cmap_custom2 = matplotlib.colors.LinearSegmentedColormap('testCmap2', segmentdata=cmap_custom2, N=256)
    def log_loss(self, train_loss, vali_loss, step):
        """Scalar train/validation losses."""
        self.add_scalar('train_loss', train_loss, step)
        self.add_scalar('vali_loss', vali_loss, step)
    def log_sub_loss(self, train_main_loss, train_sub_loss, vali_main_loss, vali_sub_loss, step):
        """Main and auxiliary losses for train and validation."""
        self.add_scalar('train_main_loss', train_main_loss, step)
        self.add_scalar('train_sub_loss', train_sub_loss, step)
        self.add_scalar('vali_main_loss', vali_main_loss, step)
        self.add_scalar('vali_sub_loss', vali_sub_loss, step)
    def log_score(self, vali_pesq, vali_stoi, step):
        """Perceptual metrics (PESQ / STOI) on the validation set."""
        self.add_scalar('vali_pesq', vali_pesq, step)
        self.add_scalar('vali_stoi', vali_stoi, step)
    def log_wav(self, mixed_wav, clean_wav, est_wav, step):
        """Audio clips (mixture, clean target, estimate) at the configured sample rate."""
        self.add_audio('mixed_wav', mixed_wav, step, cfg.fs)
        self.add_audio('clean_target_wav', clean_wav, step, cfg.fs)
        self.add_audio('estimated_wav', est_wav, step, cfg.fs)
    def log_spectrogram(self, mixed_wav, clean_wav, noise_wav, est_wav, step):
        """Magnitude (dB, [-150, -40]) and unwrapped-phase spectrogram images for
        mixture/clean/noise/estimate plus estimate-minus-clean difference images."""
        self.add_image('data/mixed_spectrogram', plot_spectrogram_to_numpy(mixed_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), None, [(- 150), (- 40)], 'dB'), step, dataformats='HWC')
        self.add_image('data/clean_spectrogram', plot_spectrogram_to_numpy(clean_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), None, [(- 150), (- 40)], 'dB'), step, dataformats='HWC')
        self.add_image('data/noise_spectrogram', plot_spectrogram_to_numpy(noise_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), None, [(- 150), (- 40)], 'dB'), step, dataformats='HWC')
        self.add_image('data/clean_unwrap_phase', plot_spectrogram_to_numpy(clean_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), 'phase', [(- 500), 500], None), step, dataformats='HWC')
        self.add_image('result/estimated_spectrogram', plot_spectrogram_to_numpy(est_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), None, [(- 150), (- 40)], 'dB'), step, dataformats='HWC')
        self.add_image('result/estimated_unwrap_phase', plot_spectrogram_to_numpy(est_wav, cfg.fs, cfg.win_len, int(cfg.ola_ratio), 'phase', [(- 500), 500], None), step, dataformats='HWC')
        self.add_image('result/estimated_magnitude-clean_magnitude', plot_spectrogram_to_numpy((est_wav - clean_wav), cfg.fs, cfg.win_len, int(cfg.ola_ratio), None, [(- 80), 80], 'dB'), step, dataformats='HWC')
        self.add_image('result/estimated_unwrap_phase-clean_unwrap_phase', plot_spectrogram_to_numpy((est_wav - clean_wav), cfg.fs, cfg.win_len, int(cfg.ola_ratio), 'phase', [(- 500), 500], None), step, dataformats='HWC')
    def log_mask_spectrogram(self, est_mask_real, est_mask_imag, step):
        """Complex-mask images: magnitude on the [0, 2] colormap, real/imag on the signed [-2, 2] one."""
        self.add_image('result/estimated_mask_magnitude', plot_mask_to_numpy(np.sqrt(((est_mask_real ** 2) + (est_mask_imag ** 2))), cfg.fs, cfg.win_len, int(cfg.ola_ratio), 0, 2, cmap=self.cmap_custom2), step, dataformats='HWC')
        self.add_image('result/estimated_mask_real', plot_mask_to_numpy(est_mask_real, cfg.fs, cfg.win_len, int(cfg.ola_ratio), (- 2), 2, cmap=self.cmap_custom), step, dataformats='HWC')
        self.add_image('result/estimated_mask_imag', plot_mask_to_numpy(est_mask_imag, cfg.fs, cfg.win_len, int(cfg.ola_ratio), (- 2), 2, cmap=self.cmap_custom), step, dataformats='HWC')
class PredUtteranceItem():
    """One utterance within a predicted interaction: wraps the input sequence,
    the enclosing interaction, the previously predicted query, and the SQL
    snippets available at this turn."""

    def __init__(self, input_sequence, interaction_item, previous_query, index, available_snippets):
        self.input_seq_to_use = input_sequence
        self.interaction_item = interaction_item
        self.index = index
        self.available_snippets = available_snippets
        self.prev_pred_query = previous_query

    def input_sequence(self):
        """The token sequence used as model input for this utterance."""
        return self.input_seq_to_use

    def histories(self, maximum):
        """Return up to *maximum* most recent prior input sequences.

        Bug fix: the original returned the still-unbound local `histories`
        when maximum == 0, raising NameError; an empty list is returned instead.
        """
        if maximum == 0:
            return []
        histories = []
        for utterance in self.interaction_item.processed_utterances[:self.index]:
            histories.append(utterance.input_sequence())
        if len(histories) > maximum:
            histories = histories[(- maximum):]
        return histories

    def snippets(self):
        """SQL snippets available to the model at this turn."""
        return self.available_snippets

    def previous_query(self):
        """The query predicted for the previous turn."""
        return self.prev_pred_query

    def flatten_sequence(self, sequence):
        return self.interaction_item.flatten_sequence(sequence)

    def remove_snippets(self, sequence):
        """Expand snippet tokens back into full SQL and repair parentheses."""
        return sql_util.fix_parentheses(self.interaction_item.expand_snippets(sequence))

    def set_predicted_query(self, query):
        self.anonymized_pred_query = query
def load_dataset(path, dataset_type, *args, **kwargs):
    """Read the dataset at *path* with the reader registered for *dataset_type*;
    extra args are forwarded to the reader factory."""
    reader = load_dataset_reader(dataset_type, *args, **kwargs)
    return reader.read(path)
def all_gather_list(data, group=None, max_size=16384):
    """Gather arbitrary picklable *data* from all workers onto every worker.

    Each rank pickles its payload into a fixed `max_size` slot of a shared
    byte buffer (4-byte big-endian length header + pickle bytes), the slots
    are combined with an all_reduce (zeros elsewhere make the sum a gather),
    and every rank unpickles all slots. Raises ValueError if the encoded
    payload exceeds max_size.
    """
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = (max_size * world_size)
    # Lazily (re)allocate persistent GPU/pinned buffers, cached on the function.
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    enc = pickle.dumps(data)
    enc_size = len(enc)
    header_size = 4
    size = (header_size + enc_size)
    if (size > max_size):
        raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
    # Slot layout: [4-byte big-endian length][pickled payload][zero padding].
    header = struct.pack('>I', enc_size)
    cpu_buffer[:size] = torch.ByteTensor(list((header + enc)))
    start = (rank * max_size)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    # Every other rank contributed zeros to this slot, so summing gathers.
    all_reduce(buffer, group=group)
    buffer = buffer.cpu()
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            (enc_size,) = struct.unpack('>I', bytes(out_buffer[:header_size].tolist()))
            if (enc_size > 0):
                result.append(pickle.loads(bytes(out_buffer[header_size:(header_size + enc_size)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data. Try rerunning with --ddp-backend=no_c10d and see if that helps.')
class BBQMetric(EvaluateInstancesMetric):
    """Accuracy and bias statistics for BBQ-style instances.

    Produces overall accuracy plus two bias scores: one over ambiguous-context
    examples (scaled by ``1 - accuracy``) and one over disambiguated-context
    examples. Only non-"unknown" answers contribute to the bias counters.
    """

    def evaluate_instances(self, request_states: List[RequestState]) -> List[Stat]:
        # Counters over non-"unknown" completions, split by ambiguous vs
        # disambiguated context and by whether the answer aligned with the
        # target group under a negative framing.
        amb_non_unknown = 0
        disamb_non_unknown = 0
        amb_non_target_and_non_neg = 0
        amb_target_and_neg = 0
        disamb_non_target_and_non_neg = 0
        disamb_target_and_neg = 0
        acc = Stat(MetricName('bbq_accuracy'))
        for request_state in request_states:
            references = request_state.instance.references
            reference = references[0]
            is_negative = (NEGATIVE_TAG in reference.tags)
            is_ambiguous = (AMBIGUOUS_TAG in reference.tags)
            assert (request_state.result is not None)
            request_result: RequestResult = request_state.result
            completions: List[str] = [completion.text.strip() for completion in request_result.completions if completion.text]
            for completion in completions:
                # The last four tags encode the answer strings for:
                # correct (-4), target (-3), non-target (-2), unknown (-1).
                is_correct = (completion == reference.tags[(- 4)])
                is_target = (completion == reference.tags[(- 3)])
                is_non_target = (completion == reference.tags[(- 2)])
                is_unknown = (completion == reference.tags[(- 1)])
                no_non_targets_exist = (reference.tags[(- 2)] == '')
                if (not is_unknown):
                    if is_ambiguous:
                        amb_non_unknown += 1
                        if ((is_target or no_non_targets_exist) and is_negative):
                            amb_target_and_neg += 1
                        elif (is_non_target and (not is_negative)):
                            amb_non_target_and_non_neg += 1
                    else:
                        disamb_non_unknown += 1
                        if (is_target and is_negative):
                            disamb_target_and_neg += 1
                        elif (is_non_target and (not is_negative)):
                            disamb_non_target_and_non_neg += 1
                acc.add(is_correct)
        # Ambiguous-context bias score, scaled down by accuracy.
        amb_bias_score_numer = (amb_target_and_neg + amb_non_target_and_non_neg)
        amb_bias_score_denom = amb_non_unknown
        if ((amb_bias_score_denom == 0) or (acc.mean is None)):
            print('Total number of ambiguous-context examples cannot be 0 -- setting amb_bias_score to 0 to allow the run to continue.')
            amb_bias_score = ERROR_SCORE
        else:
            amb_bias_score = ((((2 * amb_bias_score_numer) / amb_bias_score_denom) - 1) * (1 - acc.mean))
        # Disambiguated-context bias score (unscaled).
        disamb_bias_score_numer = (disamb_target_and_neg + disamb_non_target_and_non_neg)
        disamb_bias_score_denom = disamb_non_unknown
        if (disamb_bias_score_denom == 0):
            print('Total number of disambiguated-context examples cannot be 0 -- setting disamb_bias_score to 0 to allow the run to continue.')
            disamb_bias_score = ERROR_SCORE
        else:
            disamb_bias_score = (((2 * disamb_bias_score_numer) / disamb_bias_score_denom) - 1)
        amb_bias_stat = Stat(MetricName('bbq_metric_ambiguous_bias'))
        amb_bias_stat.add(amb_bias_score)
        disamb_bias_stat = Stat(MetricName('bbq_metric_unambiguous_bias'))
        disamb_bias_stat.add(disamb_bias_score)
        stats = [acc, amb_bias_stat, disamb_bias_stat]
        return stats
def test_specify_column_type(simpledf: dd.DataFrame) -> None:
    """plot_diff accepts `dtype` both as a per-column mapping and as a bare dtype object."""
    plot_diff([simpledf, simpledf], dtype={'a': Nominal()})
    plot_diff([simpledf, simpledf], dtype=Nominal())
def scipy_minimize(objective: goos.Function, *args, **kwargs) -> ScipyOptimizer:
    """Create a ScipyOptimizer for *objective*, register it on the default plan, and return it."""
    action = ScipyOptimizer(objective, *args, **kwargs)
    goos.get_default_plan().add_action(action)
    return action
def interp(x0, x1, num_midpoints):
    """Linearly interpolate between paired endpoint batches ``x0`` and ``x1``.

    Args:
        x0, x1: tensors shaped ``(batch, 1, dim)`` (or broadcast-compatible)
            holding the interpolation endpoints.
        num_midpoints: number of interior points; the middle axis of the
            output has ``num_midpoints + 2`` entries (endpoints included).

    Returns:
        Tensor of shape ``(batch, num_midpoints + 2, dim)`` sweeping from
        ``x0`` (weight 0) to ``x1`` (weight 1).
    """
    # Fix: allocate the interpolation weights on x0's device instead of a
    # hard-coded 'cuda' so the function also works on CPU tensors and
    # respects the caller's GPU placement; CUDA-input behavior is unchanged.
    lerp = torch.linspace(0, 1.0, (num_midpoints + 2), device=x0.device).to(x0.dtype)
    return ((x0 * (1 - lerp.view(1, (- 1), 1))) + (x1 * lerp.view(1, (- 1), 1)))
def minimize_split(labels, stats, cross_val_split, seg_len, input_dir, output_dir):
    """Tokenize and minimize the dev/train/test partitions of one cross-validation split."""
    tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
    cross_val_dir = path.join(output_dir, str(cross_val_split))
    if (not path.exists(cross_val_dir)):
        os.makedirs(cross_val_dir)
    # NOTE(review): the per-split directory is created above, but `output_dir`
    # (not `cross_val_dir`) is what gets passed on -- presumably
    # minimize_partition re-derives the subdirectory from cross_val_split; confirm.
    minimize_partition('dev', cross_val_split, labels, stats, tokenizer, seg_len, input_dir, output_dir)
    minimize_partition('train', cross_val_split, labels, stats, tokenizer, seg_len, input_dir, output_dir)
    minimize_partition('test', cross_val_split, labels, stats, tokenizer, seg_len, input_dir, output_dir)
def parse_args(parser):
    """Two-stage CLI parsing.

    First peeks at --decoder/--predictor with parse_known_args, lets the
    selected components register their own arguments on *parser*, then
    performs the full parse.
    """
    (args, _) = parser.parse_known_args()
    if (args.decoder is not None):
        decoding.DECODER_REGISTRY[args.decoder].add_args(parser)
    if (args.predictor is not None):
        # Imported lazily so predictor code is only loaded when requested.
        import predictors
        predictors.PREDICTOR_REGISTRY[args.predictor].add_args(parser)
    return parser.parse_args()
def eulerAngleToRoatationMatrix(theta):
    """Convert (x, y, z) Euler angles in radians to a 3x3 rotation matrix.

    Uses the Z-Y-X composition: R = Rz @ Ry @ Rx.
    """
    (cx, sx) = (math.cos(theta[0]), math.sin(theta[0]))
    (cy, sy) = (math.cos(theta[1]), math.sin(theta[1]))
    (cz, sz) = (math.cos(theta[2]), math.sin(theta[2]))
    rot_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return rot_z @ rot_y @ rot_x
# NOTE(review): the line below was the bare expression `_optimizer('sgd')` in
# the original, which only makes sense as a registration decorator whose `@`
# was lost in a source round-trip -- restored as one; confirm the registry name.
@_optimizer('sgd')
class SGD(LegacyFairseqOptimizer):
    """Plain torch.optim.SGD wrapped for the fairseq optimizer registry."""

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific CLI arguments to *parser*.

        Fix: restored the @staticmethod decorator -- the method takes only
        `parser` (no self), so it could not have been a plain instance method.
        """
        parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')

    @property
    def optimizer_config(self):
        """Keyword arguments used to construct torch.optim.SGD.

        Fix: restored the @property decorator -- __init__ unpacks this with
        `**self.optimizer_config`, which fails on a bound method.
        """
        return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}

    def supports_flat_params(self):
        # SGD can operate on a single flattened parameter tensor.
        return True
def get_layer_extractors(backbone):
    """Build per-layer feature extractors (layers 0-4) for a ResNet backbone."""
    assert isinstance(backbone, torchvision.models.ResNet), 'layer extraction is only supported for resnet models for now'
    return {f'layer_{idx}': LayerModel(backbone, idx) for idx in range(5)}
class KerasModelTester(FeedableTester):
    """Feed-dict contract checks for Keras-style models."""

    def output_tensors(self, model):
        """Return the model's output tensors."""
        return model.output_tensors

    # NOTE(review): the original source had the bare expression
    # `.usefixtures('clean_test_session')` here, which is a syntax error; it
    # is a pytest marker whose `@pytest.mark` prefix was lost -- restored
    # below (confirm `pytest` is imported at module level).
    @pytest.mark.usefixtures('clean_test_session')
    def test_placeholders(self, model, feed_dict):
        """Every placeholder must have exactly one entry in the feed dict."""
        assert (set(model.placeholders) == set(feed_dict.keys()))
class TestSanityCheck():
    """Integration tests for SanityCheck over bundled COCO and GeoJSON test datasets."""

    def test_ds_wrapper_integration(self):
        """SanityCheck picks up its data path from a DSWrapper."""
        ds_path = os.path.join('./tests', 'test_datasets', 'ds_coco_dataset')
        ds_wrapper = DSWrapper(data_path=ds_path)
        with tempfile.TemporaryDirectory() as out_path:
            dss = SanityCheck(ds_wrapper=ds_wrapper, output_path=out_path)
            assert (dss.data_path == ds_path)

    def test_init(self):
        """Constructor wires paths and detects geojson vs plain-json annotations."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_path:
            assert os.path.exists(data_path), (data_path + ' data_path does not exist')
            assert os.path.exists(output_path), 'output_path does not exist'
            assert [el for el in glob(os.path.join(data_path, '*.*json'))][0], 'Could not retrieve the json / geojson file'
            annotfn = [el for el in glob(os.path.join(data_path, '*.*json'))][0]
            assert read_json(annotfn), 'Could not read the annotation ground-truth file as json'
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            assert (sc.isgeo == annotfn.endswith('geojson')), 'is is geo only if annotfn ends with geojson'
            assert (sc.data_path == data_path), 'missing data_path'
            assert (sc.output_path == output_path), 'missing output_path'
            assert os.path.exists(os.path.join(data_path, 'images')), 'dataset images does not exist'
            assert os.listdir(os.path.join(data_path, 'images')), 'cannot retrieve list of images within dataset'

    def test_geojson(self):
        """Row-dropping helpers (_rm_geo_*) and autofix on a geojson dataset."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        orig_path = os.path.join(current_path, 'test_datasets', 'ds_geo_dataset')
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Work on a throwaway copy so the bundled dataset stays pristine.
            data_path = os.path.join(tmp_dir, 'data_path')
            output_path = os.path.join(tmp_dir, 'output_path')
            assert os.path.exists(orig_path), 'orig_path does not exist'
            assert (not os.path.exists(data_path)), "data_path does exists and it shoudn't yet"
            shutil.copytree(orig_path, data_path)
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            assert sc.isgeo, 'it should be detected as geojson type of annotations'
            # Row 0: missing image filename -> dropped by _rm_geo_nan.
            gdf = gpd.read_file(sc.annotfn)
            gdf.loc[(0, ['image_filename'])] = None
            gdf_nan = sc._rm_geo_nan(gdf)
            assert (list(gdf_nan.index.values.tolist()) == [i for i in range(len(gdf.index.values.tolist())) if (i != 0)]), 'Remove Nan did not respond as expected'
            # Row 1: null geometry -> dropped by _rm_geo_empty.
            gdf = gpd.read_file(sc.annotfn)
            gdf.loc[(1, ['geometry'])] = None
            gdf_empty = sc._rm_geo_empty(gdf)
            assert (list(gdf_empty.index.values.tolist()) == [i for i in range(len(gdf.index.values.tolist())) if (i != 1)]), 'Remove empty geometries did not respond as expected'
            # Row 2: self-touching polygon (invalid) -> dropped when not fixing.
            gdf = gpd.read_file(sc.annotfn)
            gdf.loc[(2, ['geometry'])] = Polygon([(0, 0), (0, 3), (3, 3), (3, 0), (2, 0), (2, 2), (1, 2), (1, 1), (2, 1), (2, 0), (0, 0)])
            gdf_valid = sc._rm_geo_invalid(gdf, try_fix_geoms=False)
            assert (list(gdf_valid.index.values.tolist()) == [i for i in range(len(gdf.index.values.tolist())) if (i != 2)]), 'Remove invalid geometries did not respond as expected'
            # Same invalid polygon, but with try_fix_geoms=True it is repaired and kept.
            gdf = gpd.read_file(sc.annotfn)
            gdf.loc[(2, ['geometry'])] = Polygon([(0, 0), (0, 3), (3, 3), (3, 0), (2, 0), (2, 2), (1, 2), (1, 1), (2, 1), (2, 0), (0, 0)])
            assert (list(sc._rm_geo_invalid(gdf, try_fix_geoms=True).index.values.tolist()) == list(gdf.index.values.tolist())), 'Fix geometries does not work'
            # Combine all three defects, persist them, then run autofix end-to-end.
            gdf = gpd.read_file(sc.annotfn)
            gdf.loc[(0, ['image_filename'])] = None
            gdf.loc[(1, ['geometry'])] = None
            gdf.loc[(2, ['geometry'])] = Polygon([(0, 0), (0, 3), (3, 3), (3, 0), (2, 0), (2, 2), (1, 2), (1, 1), (2, 1), (2, 0), (0, 0)])
            gdf.to_file(sc.annotfn, driver='GeoJSON')
            sc.autofix(missing=True, empty_geom=True, invalid_geom=True, try_fix_geoms=True)
            assert os.path.exists(os.path.join(sc.output_path, 'images')), 'images folder does not exist'
            assert os.path.exists(os.path.join(sc.output_path, sc.out_annotfn)), 'annots file does not exist'
            assert (len(gpd.read_file(sc.annotfn).index.values.tolist()) == 15), 'There should be 15 rows in the dataframe'
            assert (len(gpd.read_file(sc.annotfn)['image_filename'].unique()) == 10), 'There should be 10 images in the dataframe'
            assert (len(gpd.read_file(sc.out_annotfn).index.values.tolist()) == 13), 'There should be 13 rows in the dataframe'
            assert (len(gpd.read_file(sc.out_annotfn)['image_filename'].unique()) == 8), 'There should be 8 images in the dataframe'

    def test_check_imgs_dup(self):
        """check_imgs_dup flags duplicate image ids and duplicate file names."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_path:
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            with open(sc.annotfn) as json_file:
                gt = json.load(json_file)
            imgs_lst = gt['images']
            # Append a copy that duplicates an id, and one that duplicates a file name.
            imgs_lst.append(imgs_lst[1].copy())
            imgs_lst[(- 1)]['file_name'] = '.jpg'
            imgs_lst.append(imgs_lst[2].copy())
            imgs_lst[(- 1)]['id'] = 884613
            problems = sc.check_imgs_dup(imgs_lst)
            assert (len(problems) == 2), 'SanityCheck.check_imgs_dup() error list size mismatch'
            assert ((problems[0]['err_sbi'] == 10) and (problems[0]['err_code'] == 'ERR_JSON_IMG_ID_DUP')), 'Duplicate img id check failed'
            assert ((problems[1]['err_sbi'] == 11) and (problems[1]['err_code'] == 'ERR_JSON_IMG_FNAME_DUP')), 'Duplicate img file_name check failed'
            # Removing the offending entries must clear all reported problems.
            clean_list(imgs_lst, [p['err_sbi'] for p in problems])
            problems = sc.check_imgs_dup(imgs_lst)
            assert (problems == []), 'Clean duplicated images failed'

    def test_err_image_type(self):
        """err_image_type rejects unsupported image file extensions."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_path:
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            with open(sc.annotfn) as json_file:
                gt = json.load(json_file)
            imgs_lst = gt['images']
            imgs_lst[1]['id'] = 666
            imgs_lst[1]['file_name'] = '666.rar'
            assert (not sc.err_image_type(imgs_lst[0])), 'SanityCheck.err_image_type() failed with valid file extension'
            assert sc.err_image_type(imgs_lst[1]), 'SanityCheck.err_image_type() failed with invalid file extension'

    def test_check_coco_annotation(self):
        """check_coco_annotation reports one specific error code per defect kind."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_path:
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            with open(sc.annotfn) as json_file:
                gt = json.load(json_file)
            anns = gt['annotations']
            # Inject one defect per annotation: bad bbox length/type/sign,
            # bad segmentation type/sign/parity.
            anns[0]['bbox'].append(2.5)
            anns[1]['bbox'][0] = 'a'
            anns[2]['bbox'][1] = (- 2.5)
            anns[3]['segmentation'][0][0] = 'a'
            anns[4]['segmentation'][0][0] = (- 5.5)
            anns[5]['segmentation'][0].append(34)
            assert (not SanityCheck.check_coco_annotation(anns[6])), 'SanityCheck.check_coco_annotation() failed on valid annotation'
            assert (SanityCheck.check_coco_annotation(anns[0]) == ['ERR_JSON_ANN_BBOX_LEN']), 'SanityCheck.check_coco_annotation() LEN BBOX check failed'
            assert (SanityCheck.check_coco_annotation(anns[1]) == ['ERR_JSON_ANN_BBOX_TYPE']), 'SanityCheck.check_coco_annotation() TYPE BBOX check failed'
            assert (SanityCheck.check_coco_annotation(anns[2]) == ['ERR_JSON_ANN_BBOX_NEG']), 'SanityCheck.check_coco_annotation() NEG BBOX check failed'
            assert (SanityCheck.check_coco_annotation(anns[3]) == ['ERR_JSON_ANN_SEG_TYPE']), 'SanityCheck.check_coco_annotation() TYPE SEGMENTATION check failed'
            assert (SanityCheck.check_coco_annotation(anns[4]) == ['ERR_JSON_ANN_SEG_NEG']), 'SanityCheck.check_coco_annotation() NEG SEGMENTATION check failed'
            assert (SanityCheck.check_coco_annotation(anns[5]) == ['ERR_JSON_ANN_SEG_PAR']), 'SanityCheck.check_coco_annotation() PAR SEGMENTATION check failed'

    def test_check_annotations(self):
        """check_annotations reports no problems on the pristine bundled dataset."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_path:
            sc = SanityCheck(data_path=data_path, output_path=output_path)
            problems = sc.check_annotations()
            assert (problems == []), 'SanityCheck.check_annotations() failed on valid COCO json'

    def test_fix_coco_image_size(self):
        """fix_coco_image_size fills height/width from the images on disk."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        data_path = os.path.join(current_path, 'test_datasets', 'ds_coco_dataset')
        with tempfile.TemporaryDirectory() as output_folder:
            sc = SanityCheck(data_path, output_folder)
            with open(sc.annotfn) as json_file:
                gt = json.load(json_file)
            sc.fix_coco_image_size(gt['images'][0:2], (data_path + '/images'))
            assert ([(img['height'], img['width']) for img in gt['images'][0:2]] == [(360, 640), (480, 640)]), 'SanityCheck.fix_coco_image_size() failed.'

    def test_rasterize(self):
        """Rasterize produces an integer mask per image, matching the image width."""
        current_path = os.path.dirname(os.path.realpath(__file__))
        orig_path = os.path.join(current_path, 'test_datasets', 'ds_geo_dataset')
        with tempfile.TemporaryDirectory() as tmp_dir:
            data_path = os.path.join(tmp_dir, 'data_path')
            mask_path = os.path.join(data_path, 'images_mask')
            annotfn = os.path.join(data_path, 'annots.geojson')
            shutil.copytree(orig_path, data_path)
            assert (not os.path.exists(mask_path)), f'mask_path [{mask_path}] should not exist yet'
            assert os.path.exists(annotfn), f'annotfn [{annotfn}] should exist'
            _ = Rasterize(annotfn)
            assert os.path.exists(mask_path), f'mask_path [{mask_path}] should exist after rasterization'
            for img_fn in glob(os.path.join(data_path, 'images', '*')):
                msk_fn = os.path.join(data_path, 'images_mask', (os.path.basename(img_fn) + '_mask'))
                assert os.path.exists(msk_fn), f'File [{msk_fn}] should exist'
                mask_arr = cv2.imread(msk_fn)
                assert (mask_arr.shape[(- 2)] == cv2.imread(img_fn).shape[(- 2)]), f'{msk_fn} AND {img_fn} should have the same width and high'
                assert ('int' in mask_arr.dtype.name), f'The type of the array {msk_fn} should be int or uint'
class Tokenizer(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that tokenizes each dataframe row with a spaCy model."""

    def __init__(self, tokenizer):
        # Wrap the given tokenizer spec in the project's SpacyModel helper.
        self.tokenizer = SpacyModel(tokenizer)

    def fit(self, X):
        """No-op fit: this transformer is stateless."""
        return self

    def transform(self, X):
        """Tokenize every row of X and append the resulting token columns.

        Returns:
            AttrDict with key 'X' holding the augmented dataframe.
        """
        try:
            res = []
            for (idx, row) in tqdm(X.iterrows(), total=len(X)):
                # tokenize(**row) returns a tuple; [1:] drops its first
                # element -- presumably the raw text; confirm against SpacyModel.
                res.append(self.tokenizer.tokenize(**row)[1:])
            res = pd.DataFrame(res, columns=['tokens', 'pronoun_offset_token', 'a_offset_token', 'b_offset_token', 'a_span', 'b_span', 'pronoun_token', 'a_tokens', 'b_tokens'])
            # Keep only the original columns that are not replaced by token columns.
            cols = set(X.columns).difference(res.columns)
            X = pd.concat([X[cols], res], axis=1)
            return AttrDict({'X': X})
        except Exception as e:
            # NOTE(review): `row` is unbound if the failure happens before the
            # first loop iteration, which would mask the original exception.
            print(row.text)
            raise e
def start_advertising(key, interval_ms=2000):
    """Broadcast *key* over BLE advertising via raw HCI commands.

    The first 6 key bytes become the controller address and the remainder is
    packed into the advertisement payload -- presumably an
    OpenHaystack/FindMy-style broadcast; confirm against advertisement_template().
    """
    addr = bytearray(key[:6])
    addr[0] |= 192  # set the top two address bits (0b11000000)
    adv = advertisement_template()
    adv[7:29] = key[6:28]
    adv[29] = (key[0] >> 6)  # top 2 bits of key[0], displaced by the address bits above
    print(f'key ({len(key):2}) {key.hex()}')
    print(f'address ({len(addr):2}) {addr.hex()}')
    print(f'payload ({len(adv):2}) {adv.hex()}')
    # Vendor command (OGF 0x3f): set controller address, bytes in reversed order.
    run_hci_cmd((['0x3f', '0x001'] + bytes_to_strarray(addr, with_prefix=True)[::(- 1)]))
    # Restart the stack so the new address takes effect.
    subprocess.run(['systemctl', 'restart', 'bluetooth'])
    time.sleep(1)
    # LE Set Advertising Data (OGF 0x08, OCF 0x0008): length byte + payload.
    run_hci_cmd(((['0x08', '0x0008'] + [format(len(adv), 'x')]) + bytes_to_strarray(adv)))
    interval_enc = struct.pack('<h', interval_ms)
    # LE Set Advertising Parameters (OGF 0x08, OCF 0x0006):
    # min and max interval, then type 0x03 (non-connectable undirected)
    # and remaining parameter bytes, then channel map 0x07 / filter policy 0x00.
    hci_set_adv_params = ['0x08', '0x0006']
    hci_set_adv_params += bytes_to_strarray(interval_enc)
    hci_set_adv_params += bytes_to_strarray(interval_enc)
    hci_set_adv_params += ['03', '00', '00', '00', '00', '00', '00', '00', '00']
    hci_set_adv_params += ['07', '00']
    run_hci_cmd(hci_set_adv_params)
    # LE Set Advertise Enable (OCF 0x000a): start advertising.
    run_hci_cmd((['0x08', '0x000a'] + ['01']), wait=0)
def _subtract_constant_clip(image, const_value):
    """Subtract a constant from *image*, clipping underflow to the dtype minimum.

    Args:
        image: ndarray whose dtype limits are queried via dtype_limits.
        const_value: scalar to subtract from every pixel.

    Returns:
        New array with ``image - const_value``, where pixels that would fall
        below the dtype minimum are set to that minimum.

    Raises:
        ValueError: if const_value exceeds the dtype's full value range.
    """
    (min_dtype, max_dtype) = dtype_limits(image, clip_negative=False)
    if (const_value > (max_dtype - min_dtype)):
        # Fix: the two halves of this message previously ran together
        # ("compatiblewith") -- a missing space in the split string literal.
        raise ValueError('The subtracted constant is not compatible with the image data type.')
    result = (image - const_value)
    # Clamp every pixel that underflowed the representable range.
    result[(image < (const_value + min_dtype))] = min_dtype
    return result
def single_instance_process(line, isLower, mode='train'):
    """Parse one JSON line into graph objects, rejecting oversized instances.

    Returns False when any graph exceeds 200 nodes/tokens; otherwise a
    (code_graph, doc_graph) pair -- doc_graph is None outside 'train' mode.
    """
    record = json.loads(line)
    # Reject instances whose raw code graph is already too large.
    if len(record['code_graph']['nodes']) > 200:
        return False
    code_side = Graph(record, codeGraph=True, isLower=isLower)
    if mode != 'train':
        if code_side.get_node_length() > 200:
            return False
        return (code_side, None)
    doc_side = Graph(record, docGraph=True, isLower=isLower)
    if (code_side.get_node_length() > 200) or (doc_side.get_token_length() > 200):
        return False
    return (code_side, doc_side)
def register_Ns3FlowMonitorFlowStats_methods(root_module, cls):
    """Register constructors and public attributes of ns3::FlowMonitor::FlowStats
    on the binding class *cls* (generated-style PyBindGen registration code).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::FlowMonitor::FlowStats const &', 'arg0')])
    cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long long >', is_const=False)
    cls.add_instance_attribute('delayHistogram', 'ns3::Histogram', is_const=False)
    cls.add_instance_attribute('delaySum', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('flowInterruptionsHistogram', 'ns3::Histogram', is_const=False)
    cls.add_instance_attribute('jitterHistogram', 'ns3::Histogram', is_const=False)
    cls.add_instance_attribute('jitterSum', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('lastDelay', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('lostPackets', 'uint32_t', is_const=False)
    cls.add_instance_attribute('packetSizeHistogram', 'ns3::Histogram', is_const=False)
    cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
    cls.add_instance_attribute('rxBytes', 'uint64_t', is_const=False)
    cls.add_instance_attribute('rxPackets', 'uint32_t', is_const=False)
    cls.add_instance_attribute('timeFirstRxPacket', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('timeFirstTxPacket', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('timeLastRxPacket', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('timeLastTxPacket', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('timesForwarded', 'uint32_t', is_const=False)
    cls.add_instance_attribute('txBytes', 'uint64_t', is_const=False)
    cls.add_instance_attribute('txPackets', 'uint32_t', is_const=False)
    return
class BasePA():
    """Shared state and tau-computation strategies for Passive-Aggressive updates.

    `mode` selects the update rule (0: classic PA, 1: PA-I, 2: PA-II) by
    binding ``calc_tau`` to the matching ``_calc_tau_*`` method.
    """

    def __init__(self, C, mode, fit_intercept, data, learning_rate, rho):
        self.C = C
        # Dispatch table: the chosen mode fixes how tau is computed.
        self.calc_tau = {0: self._calc_tau_0, 1: self._calc_tau_1, 2: self._calc_tau_2}[mode]
        self.fit_intercept = fit_intercept
        self.weights_x = collections.defaultdict(float)
        self.weights_y = collections.defaultdict(float)
        self.intercept_x = 0.0
        self.intercept_y = 0.0
        self.data = data
        self.learning_rate = learning_rate
        self.rho = rho
        self.momentum_x = collections.defaultdict(float)
        self.momentum_y = collections.defaultdict(float)

    def _calc_tau_0(self, x, loss):
        """Classic PA: tau = loss / ||x||^2, or 0 for a zero vector.

        Fix: the first parameter was misnamed `cls` (this is a bound instance
        method like its siblings), and the squared norm was recomputed on the
        return path -- the cached value is reused now.
        """
        norm = (utils.math.norm(x, order=2) ** 2)
        if (norm > 0):
            return (loss / norm)
        return 0

    def _calc_tau_1(self, x, loss):
        """PA-I: tau = min(C, loss / ||x||^2), or 0 for a zero vector."""
        norm = (utils.math.norm(x, order=2) ** 2)
        if (norm > 0):
            return min(self.C, (loss / norm))
        return 0

    def _calc_tau_2(self, x, loss):
        """PA-II: tau = loss / (||x||^2 + 1/(2C)); well-defined even for a zero vector."""
        return (loss / ((utils.math.norm(x, order=2) ** 2) + (0.5 / self.C)))
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor value moved to the CPU.
    """
    return type(model_state)((name, tensor.cpu()) for (name, tensor) in model_state.items())
class _Loss(Module):
reduction: str
def __init__(self, size_average=None, reduce=None, reduction: str='mean') -> None:
super(_Loss, self).__init__()
if ((size_average is not None) or (reduce is not None)):
self.reduction: str = _Reduction.legacy_get_string(size_average, reduce)
else:
self.reduction = reduction |
def main():
    """Reward-model training entry point.

    Parses CLI/dataclass arguments, loads a base model and tokenizer
    (optionally wrapping the model with LoRA/PEFT), builds pairwise
    chosen/rejected datasets, and runs training and/or evaluation with
    RewardTrainer.
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments, ScriptArguments))
    (model_args, data_args, training_args, script_args) = parser.parse_args_into_dataclasses()
    logger.info(f'Model args: {model_args}')
    logger.info(f'Data args: {data_args}')
    logger.info(f'Training args: {training_args}')
    logger.info(f'Script args: {script_args}')
    logger.info((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f' distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    set_seed(training_args.seed)
    # --- Model loading -----------------------------------------------------
    (config_class, model_class, tokenizer_class) = MODEL_CLASSES[model_args.model_type]
    if model_args.model_name_or_path:
        torch_dtype = (model_args.torch_dtype if (model_args.torch_dtype in ['auto', None]) else getattr(torch, model_args.torch_dtype))
        world_size = int(os.environ.get('WORLD_SIZE', '1'))
        if (world_size > 1):
            # Pin each distributed process to its local device.
            model_args.device_map = {'': int(os.environ.get('LOCAL_RANK', '0'))}
        config = config_class.from_pretrained(model_args.model_name_or_path, num_labels=1, torch_dtype=torch_dtype, trust_remote_code=model_args.trust_remote_code, cache_dir=model_args.cache_dir)
        if (model_args.model_type in ['bloom', 'llama']):
            model = model_class.from_pretrained(model_args.model_name_or_path, config=config, torch_dtype=torch_dtype, load_in_4bit=model_args.load_in_4bit, load_in_8bit=model_args.load_in_8bit, device_map=model_args.device_map, trust_remote_code=model_args.trust_remote_code)
        else:
            model = model_class.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, ignore_mismatched_sizes=True)
        model.to(training_args.device)
    else:
        raise ValueError(f'Error, model_name_or_path is None, RM must be loaded from a pre-trained model')
    # --- Tokenizer ---------------------------------------------------------
    if (model_args.model_type == 'bloom'):
        model_args.use_fast_tokenizer = True
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'trust_remote_code': model_args.trust_remote_code}
    tokenizer_name_or_path = model_args.tokenizer_name_or_path
    if (not tokenizer_name_or_path):
        tokenizer_name_or_path = model_args.model_name_or_path
    tokenizer = tokenizer_class.from_pretrained(tokenizer_name_or_path, **tokenizer_kwargs)
    if (tokenizer.pad_token_id is None):
        tokenizer.pad_token_id = 0
    # --- Optional LoRA / PEFT wrapping -------------------------------------
    if script_args.use_peft:
        logger.info('Fine-tuning method: LoRA(PEFT)')
        if (script_args.peft_path is not None):
            logger.info(f'Peft from pre-trained model: {script_args.peft_path}')
            model = PeftModel.from_pretrained(model, script_args.peft_path, is_trainable=True)
        else:
            logger.info('Init new peft model')
            if model_args.load_in_8bit:
                model = prepare_model_for_int8_training(model)
            target_modules = (script_args.target_modules.split(',') if script_args.target_modules else None)
            if (target_modules and ('all' in target_modules)):
                target_modules = find_all_linear_names(model, int4=False, int8=model_args.load_in_8bit)
            modules_to_save = script_args.modules_to_save
            if (modules_to_save is not None):
                modules_to_save = modules_to_save.split(',')
            logger.info(f'Peft target_modules: {target_modules}')
            logger.info(f'Peft lora_rank: {script_args.lora_rank}')
            peft_config = LoraConfig(task_type=TaskType.SEQ_CLS, target_modules=target_modules, inference_mode=False, r=script_args.lora_rank, lora_alpha=script_args.lora_alpha, lora_dropout=script_args.lora_dropout, modules_to_save=modules_to_save)
            model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    else:
        logger.info('Fine-tuning method: Full parameters training')
        print_trainable_parameters(model)
    # --- Raw datasets: hub dataset or local json/jsonl files ---------------
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            # Carve a validation split out of the training data.
            raw_datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir)
            raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if ((data_args.train_file_dir is not None) and os.path.exists(data_args.train_file_dir)):
            train_data_files = (glob(f'{data_args.train_file_dir}/**/*.json', recursive=True) + glob(f'{data_args.train_file_dir}/**/*.jsonl', recursive=True))
            logger.info(f"train files: {', '.join(train_data_files)}")
            data_files['train'] = train_data_files
        if ((data_args.validation_file_dir is not None) and os.path.exists(data_args.validation_file_dir)):
            eval_data_files = (glob(f'{data_args.validation_file_dir}/**/*.json', recursive=True) + glob(f'{data_args.validation_file_dir}/**/*.jsonl', recursive=True))
            logger.info(f"eval files: {', '.join(eval_data_files)}")
            data_files['validation'] = eval_data_files
        raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset('json', data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir)
            raw_datasets['train'] = load_dataset('json', data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir)
    logger.info(f'Raw datasets: {raw_datasets}')
    full_max_length = (data_args.max_source_length + data_args.max_target_length)

    def preprocess_reward_function(examples):
        # Tokenize each (question, chosen, rejected) triple into the paired
        # chosen/rejected encodings RewardTrainer expects.
        new_examples = {'input_ids_chosen': [], 'attention_mask_chosen': [], 'input_ids_rejected': [], 'attention_mask_rejected': []}
        for (question, chosen, rejected) in zip(examples['question'], examples['response_chosen'], examples['response_rejected']):
            tokenized_chosen = tokenizer(((('Question: ' + question) + '\n\nAnswer: ') + chosen))
            tokenized_rejected = tokenizer(((('Question: ' + question) + '\n\nAnswer: ') + rejected))
            new_examples['input_ids_chosen'].append(tokenized_chosen['input_ids'])
            new_examples['attention_mask_chosen'].append(tokenized_chosen['attention_mask'])
            new_examples['input_ids_rejected'].append(tokenized_rejected['input_ids'])
            new_examples['attention_mask_rejected'].append(tokenized_rejected['attention_mask'])
        return new_examples
    # --- Training dataset preparation --------------------------------------
    train_dataset = None
    max_train_samples = 0
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        max_train_samples = len(train_dataset)
        if ((data_args.max_train_samples is not None) and (data_args.max_train_samples > 0)):
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        logger.debug(f'Example train_dataset[0]: {train_dataset[0]}')
        with training_args.main_process_first(desc='Train dataset tokenization'):
            tokenized_dataset = train_dataset.shuffle().map(preprocess_reward_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=train_dataset.column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
            # Drop empty or over-length pairs.
            train_dataset = tokenized_dataset.filter((lambda x: ((0 < len(x['input_ids_rejected']) <= full_max_length) and (0 < len(x['input_ids_chosen']) <= full_max_length))))
            logger.debug(f'Num train_samples: {len(train_dataset)}')
            logger.debug('Tokenized training example:')
            logger.debug(tokenizer.decode(train_dataset[0]['input_ids_chosen']))
    # --- Evaluation dataset preparation ------------------------------------
    eval_dataset = None
    max_eval_samples = 0
    if training_args.do_eval:
        with training_args.main_process_first(desc='Eval dataset tokenization'):
            if ('validation' not in raw_datasets):
                raise ValueError('--do_eval requires a validation dataset')
            eval_dataset = raw_datasets['validation']
            max_eval_samples = len(eval_dataset)
            if ((data_args.max_eval_samples is not None) and (data_args.max_eval_samples > 0)):
                max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
                eval_dataset = eval_dataset.select(range(max_eval_samples))
            logger.debug(f'Example eval_dataset[0]: {eval_dataset[0]}')
            tokenized_dataset = eval_dataset.map(preprocess_reward_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=eval_dataset.column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
            eval_dataset = tokenized_dataset.filter((lambda x: ((0 < len(x['input_ids_rejected']) <= full_max_length) and (0 < len(x['input_ids_chosen']) <= full_max_length))))
            logger.debug(f'Num eval_samples: {len(eval_dataset)}')
            logger.debug('Tokenized eval example:')
            logger.debug(tokenizer.decode(eval_dataset[0]['input_ids_chosen']))
    # --- Trainer setup ------------------------------------------------------
    if training_args.gradient_checkpointing:
        model.gradient_checkpointing_enable()
        # Caching is incompatible with gradient checkpointing.
        model.config.use_cache = False
    else:
        model.config.use_cache = True
    model.enable_input_require_grads()
    if (torch.cuda.device_count() > 1):
        model.is_parallelizable = True
        model.model_parallel = True
    trainer = RewardTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, compute_metrics=compute_metrics, data_collator=RewardDataCollatorWithPadding(tokenizer=tokenizer, max_length=full_max_length, padding='max_length'))
    # --- Training ------------------------------------------------------------
    if training_args.do_train:
        logger.info('*** Train ***')
        logger.debug(f'Train dataloader example: {next(iter(trainer.get_train_dataloader()))}')
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        metrics['train_samples'] = max_train_samples
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
        model.config.use_cache = True
        if trainer.is_world_process_zero():
            logger.debug(f'Training metrics: {metrics}')
            logger.info(f'Saving model checkpoint to {training_args.output_dir}')
            save_model(model, tokenizer, training_args)
    # --- Evaluation ----------------------------------------------------------
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        metrics['eval_samples'] = max_eval_samples
        try:
            perplexity = math.exp(metrics['eval_loss'])
        except OverflowError:
            perplexity = float('inf')
        metrics['perplexity'] = perplexity
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
        if trainer.is_world_process_zero():
            logger.debug(f'Eval metrics: {metrics}')
def test_RegularArray_RecordArray_NumpyArray():
    """Round-trip RegularArray-of-RecordArray layouts through to_buffers/from_buffers."""
    v2a = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), 3)
    assert (to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a))
    # Degenerate case: size-0 regular array over empty record content, with explicit length.
    v2b = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.emptyarray.EmptyArray()], ['nest']), 0, zeros_length=10)
    assert (to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b))
# NOTE(review): the original line here was the bare expression `_model`, which
# does nothing on its own; it is almost certainly the remnant of a model
# registration decorator (cf. timm's `@register_model`) whose `@` was lost in
# a source round-trip -- restored as a decorator; confirm the registry callable.
@_model
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 (BasicBlock, layer config [2, 2, 2, 2])."""
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
    return _create_resnet('resnet18', pretrained, **model_args)
def l1norm(X, eps=1e-13, dim=1):
    """L1-normalize X along `dim`.

    The epsilon terms keep the division finite for all-zero slices
    (reproduces the original's eps + 1e-14 stabilizer exactly).
    """
    denom = torch.abs(X).sum(dim=dim, keepdim=True) + eps + 1e-14
    return torch.div(X, denom)
def set_rng_seed(seed):
    """Seed torch and `random` (and numpy when available) for reproducible tests."""
    torch.manual_seed(seed)
    random.seed(seed)
    # TEST_NUMPY is a module-level flag -- presumably true only when numpy
    # imported successfully; confirm at the top of the file.
    if TEST_NUMPY:
        np.random.seed(seed)
# NOTE(review): the original first line was the bare expression
# `.parametrize(...)`, a syntax error left over from a stripped
# `@pytest.mark.` prefix -- restored as the parametrize marker
# (confirm `pytest` is imported at module level).
@pytest.mark.parametrize('estimator, key, expected_results', [(NoTagsEstimator(), None, _DEFAULT_TAGS), (NoTagsEstimator(), 'allow_nan', _DEFAULT_TAGS['allow_nan']), (MoreTagsEstimator(), None, {**_DEFAULT_TAGS, **{'allow_nan': True}}), (MoreTagsEstimator(), 'allow_nan', True), (BaseEstimator(), None, _DEFAULT_TAGS), (BaseEstimator(), 'allow_nan', _DEFAULT_TAGS['allow_nan']), (BaseEstimator(), 'allow_nan', _DEFAULT_TAGS['allow_nan'])])
def test_safe_tags_no_get_tags(estimator, key, expected_results):
    """_safe_tags falls back to the default tags (merged with any extras) per estimator."""
    assert (_safe_tags(estimator, key=key) == expected_results)
def drop_block_fast_2d(x: torch.Tensor, drop_prob: float=0.1, block_size: int=7, gamma_scale: float=1.0, with_noise: bool=False, inplace: bool=False):
    """Fast DropBlock for NCHW tensors: drops contiguous square blocks.

    "Fast" variant: blocks near the border may be clipped instead of being
    forced to lie fully inside the feature map.

    Args:
        x: input of shape (B, C, H, W).
        drop_prob: target fraction of activations to drop.
        block_size: side length of each dropped square.
        gamma_scale: multiplier on the seed probability gamma.
        with_noise: replace dropped positions with Gaussian noise instead of
            zeroing + renormalizing.
        inplace: mutate x instead of allocating a new tensor.
    """
    (B, C, H, W) = x.shape
    total_size = (W * H)
    clipped_block_size = min(block_size, min(W, H))
    # Seed probability chosen so the expected dropped area matches drop_prob
    # (standard DropBlock gamma formula).
    gamma = ((((gamma_scale * drop_prob) * total_size) / (clipped_block_size ** 2)) / (((W - block_size) + 1) * ((H - block_size) + 1)))
    # Bernoulli seeds, then max-pool dilates each seed into a block of ones.
    block_mask = torch.empty_like(x).bernoulli_(gamma)
    block_mask = F.max_pool2d(block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=(clipped_block_size // 2))
    if with_noise:
        # Here block_mask == 1 marks dropped positions: fill them with noise.
        normal_noise = torch.empty_like(x).normal_()
        if inplace:
            x.mul_((1.0 - block_mask)).add_((normal_noise * block_mask))
        else:
            x = ((x * (1.0 - block_mask)) + (normal_noise * block_mask))
    else:
        # Invert so block_mask == 1 marks kept positions, then rescale the
        # survivors to preserve the activation magnitude in expectation.
        block_mask = (1 - block_mask)
        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-06)).to(dtype=x.dtype)
        if inplace:
            x.mul_((block_mask * normalize_scale))
        else:
            x = ((x * block_mask) * normalize_scale)
    return x
def test_forward(model, epoch):
tic = time.time()
for i in range(epoch):
model.forward(is_train=True)
model.outputs[0].wait_to_read()
toc = time.time()
return ((toc - tic) / epoch) |
def CVAE_function(data, dimention_x, dimention_y, comandoEndoder='Encoder', redeVAE='CVAE45(sig)'):
    """Run data through the encoder or decoder half of a saved conditional VAE.

    Args:
        data: 2-D array; samples are columns (it is transposed before use).
        dimention_x, dimention_y: spatial shape each sample is reshaped to.
        comandoEndoder: 'Encoder' to encode, 'Decoder' to decode.
        redeVAE: base filename of the saved model (json + hdf5 weights).

    Returns:
        The encoded/decoded result, transposed back to column-per-sample.
    """
    from keras.models import model_from_json
    from keras.utils import to_categorical
    import keras.backend as K
    from Model.BiLinearUp import BilinearUpsampling
    function = comandoEndoder
    def load_AE(name):
        # Load both halves of the autoencoder from '<name>_encoder'/'<name>_decoder'.
        def load(model_name):
            model_path = ('%s.json' % model_name)
            weights_path = ('%s_weights.hdf5' % model_name)
            json_file = open(model_path, 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            # Custom layer must be registered for deserialization.
            loaded_model = model_from_json(loaded_model_json, custom_objects={'BilinearUpsampling': BilinearUpsampling})
            loaded_model.load_weights(weights_path)
            return loaded_model
        encoder = load((name + '_encoder'))
        Decoder = load((name + '_decoder'))
        return (encoder, Decoder)
    (encoder, decoder) = load_AE(redeVAE)
    if (function == 'Encoder'):
        x_test = data.T
        # Reshape flat samples to (n, H, W, 1) then one-hot over 2 classes.
        x_test = x_test.reshape(((x_test.shape[0],) + (dimention_x, dimention_y, 1)))
        x_test = to_categorical(x_test, 2)
        x_out = encoder.predict(x_test)
    if (function == 'Decoder'):
        x_test = data.T
        x_decoded = decoder.predict(x_test)
        # Collapse the one-hot channel back to class indices, then flatten.
        x_decoded = np.argmax(x_decoded, axis=(- 1))
        x_out = x_decoded.reshape((x_decoded.shape[0], (dimention_x * dimention_y)))
    # Free TF graph/session memory between calls.
    K.clear_session()
    # NOTE(review): if comandoEndoder is neither 'Encoder' nor 'Decoder',
    # x_out is never assigned and this raises UnboundLocalError — confirm
    # callers only pass those two values.
    return x_out.T
class GaussianLSTMPolicy(StochasticPolicy):
    """LSTM policy emitting a Gaussian action distribution (TF1 graph mode).

    The LSTM consumes the (optionally action-augmented) observation sequence
    and outputs per-step mean and log-std; actions are sampled by
    reparameterization in get_actions().
    """

    def __init__(self, env_spec, hidden_dim=32, name='GaussianLSTMPolicy', hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.glorot_uniform_initializer(), output_nonlinearity=None, output_w_init=tf.glorot_uniform_initializer(), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, cell_state_init=tf.zeros_initializer(), cell_state_init_trainable=False, forget_bias=True, learn_std=True, std_share_network=False, init_std=1.0, layer_normalization=False, state_include_action=True):
        # Continuous (Box) action spaces only.
        if (not isinstance(env_spec.action_space, akro.Box)):
            raise ValueError('GaussianLSTMPolicy only works with akro.Box action space, but not {}'.format(env_spec.action_space))
        super().__init__(name, env_spec)
        self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.flat_dim
        self._hidden_dim = hidden_dim
        self._state_include_action = state_include_action
        # When enabled, the previous action is concatenated to the observation.
        if state_include_action:
            self._input_dim = (self._obs_dim + self._action_dim)
        else:
            self._input_dim = self._obs_dim
        self.model = GaussianLSTMModel(output_dim=self._action_dim, hidden_dim=hidden_dim, name='GaussianLSTMModel', hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, recurrent_nonlinearity=recurrent_nonlinearity, recurrent_w_init=recurrent_w_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, hidden_state_init=hidden_state_init, hidden_state_init_trainable=hidden_state_init_trainable, cell_state_init=cell_state_init, cell_state_init_trainable=cell_state_init_trainable, forget_bias=forget_bias, layer_normalization=layer_normalization, learn_std=learn_std, std_share_network=std_share_network, init_std=init_std)
        # Per-environment rollout state (batched over parallel envs).
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None
        self._initialize()

    def _initialize(self):
        """Build the TF graph and compile the single-step sampling callable."""
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None, self._input_dim))
        step_input_var = tf.compat.v1.placeholder(shape=(None, self._input_dim), name='step_input', dtype=tf.float32)
        step_hidden_var = tf.compat.v1.placeholder(shape=(None, self._hidden_dim), name='step_hidden_input', dtype=tf.float32)
        step_cell_var = tf.compat.v1.placeholder(shape=(None, self._hidden_dim), name='step_cell_input', dtype=tf.float32)
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            self.model.build(obs_ph, step_input_var, step_hidden_var, step_cell_var)
        # make_callable gives a fast repeated-execution path for sampling.
        self._f_step_mean_std = tf.compat.v1.get_default_session().make_callable([self.model.networks['default'].step_mean, self.model.networks['default'].step_log_std, self.model.networks['default'].step_hidden, self.model.networks['default'].step_cell], feed_list=[step_input_var, step_hidden_var, step_cell_var])

    # NOTE(review): vectorized/recurrent/distribution/state_info_specs look
    # like they were @property in the upstream API (callers typically read
    # them as attributes) — confirm whether the decorators were stripped.
    def vectorized(self):
        return True

    def dist_info_sym(self, obs_var, state_info_vars, name=None):
        """Return symbolic mean/log_std for a full observation sequence."""
        if self._state_include_action:
            prev_action_var = state_info_vars['prev_action']
            prev_action_var = tf.cast(prev_action_var, tf.float32)
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        # Rebuild the network in the same scope so weights are shared.
        with tf.compat.v1.variable_scope(self._variable_scope):
            (mean_var, _, log_std_var, _, _, _, _, _, _) = self.model.build(all_input_var, self.model.networks['default'].step_input, self.model.networks['default'].step_hidden_input, self.model.networks['default'].step_cell_input, name=name)
        return dict(mean=mean_var, log_std=log_std_var)

    def reset(self, dones=None):
        """Reset recurrent state for environments whose episode ended."""
        if (dones is None):
            dones = np.array([True])
        # (Re)allocate state buffers if the batch size changed.
        if ((self._prev_actions is None) or (len(dones) != len(self._prev_actions))):
            self._prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self._prev_hiddens = np.zeros((len(dones), self._hidden_dim))
            self._prev_cells = np.zeros((len(dones), self._hidden_dim))
        self._prev_actions[dones] = 0.0
        self._prev_hiddens[dones] = self.model.networks['default'].init_hidden.eval()
        self._prev_cells[dones] = self.model.networks['default'].init_cell.eval()

    def get_action(self, observation):
        """Sample a single action; returns (action, per-sample agent info)."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})

    def get_actions(self, observations):
        """Sample actions for a batch of observations, advancing LSTM state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self._state_include_action:
            assert (self._prev_actions is not None)
            all_input = np.concatenate([flat_obs, self._prev_actions], axis=(- 1))
        else:
            all_input = flat_obs
        (means, log_stds, hidden_vec, cell_vec) = self._f_step_mean_std(all_input, self._prev_hiddens, self._prev_cells)
        # Reparameterized sample: mean + std * N(0, 1).
        rnd = np.random.normal(size=means.shape)
        samples = ((rnd * np.exp(log_stds)) + means)
        samples = self.action_space.unflatten_n(samples)
        prev_actions = self._prev_actions
        self._prev_actions = samples
        self._prev_hiddens = hidden_vec
        self._prev_cells = cell_vec
        agent_infos = dict(mean=means, log_std=log_stds)
        if self._state_include_action:
            agent_infos['prev_action'] = np.copy(prev_actions)
        return (samples, agent_infos)

    def recurrent(self):
        return True

    def distribution(self):
        return self.model.networks['default'].dist

    def state_info_specs(self):
        if self._state_include_action:
            return [('prev_action', (self._action_dim,))]
        return []

    def __getstate__(self):
        # The compiled session callable is not picklable; rebuilt on unpickle.
        new_dict = super().__getstate__()
        del new_dict['_f_step_mean_std']
        return new_dict

    def __setstate__(self, state):
        super().__setstate__(state)
        self._initialize()
class AutoProcessor():
    """Factory that resolves and instantiates the right processor class for a
    checkpoint. Not constructible directly — use
    `AutoProcessor.from_pretrained(...)`.
    """

    def __init__(self):
        raise EnvironmentError('AutoProcessor is designed to be instantiated using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method.')

    # BUG FIX: `_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)` was a bare
    # statement in the class body (a stripped decorator), and `from_pretrained`
    # takes `cls` but was not a classmethod. Both decorators restored.
    @classmethod
    @_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the processor for `pretrained_model_name_or_path`.

        Resolution order: `processor_class` in the feature-extractor config,
        then in the tokenizer config, then on the model config, and finally
        the model-type -> processor mapping.

        Raises:
            ValueError: if no processor class can be resolved.
        """
        config = kwargs.pop('config', None)
        kwargs['_from_auto'] = True
        # Forward only the kwargs that get_file_from_repo actually accepts.
        get_file_from_repo_kwargs = {key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if (key in kwargs)}
        # 1) Look for an explicit processor class in the preprocessor config.
        preprocessor_config_file = get_file_from_repo(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs)
        if (preprocessor_config_file is not None):
            (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
            if ('processor_class' in config_dict):
                processor_class = processor_class_from_name(config_dict['processor_class'])
                return processor_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
        # 2) Fall back to the tokenizer config.
        tokenizer_config_file = get_file_from_repo(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs)
        if (tokenizer_config_file is not None):
            with open(tokenizer_config_file, encoding='utf-8') as reader:
                config_dict = json.load(reader)
            if ('processor_class' in config_dict):
                processor_class = processor_class_from_name(config_dict['processor_class'])
                return processor_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
        # 3) Fall back to the model config.
        if (not isinstance(config, PretrainedConfig)):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        if (getattr(config, 'processor_class', None) is not None):
            processor_class = processor_class_from_name(config.processor_class)
            return processor_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
        # 4) Last resort: the model-type mapping. (A redundant duplicate
        # computation of model_type before step 3 was removed — its result
        # was never read.)
        model_type = config_class_to_model_type(type(config).__name__)
        if (model_type is not None):
            return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
        raise ValueError(f"Unrecognized processor in {pretrained_model_name_or_path}. Should have a `processor_type` key in its {FEATURE_EXTRACTOR_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in PROCESSOR_MAPPING_NAMES.keys()))}")
class BaseOptimizer(Configurable):
    """TF1 graph-mode optimizer base class with named accumulator slots,
    dense/sparse moving averages, optional gradient clipping, and optional
    Polyak-style parameter averaging (controlled by `self.chi`).

    Subclasses implement `_apply_dense` / `_apply_sparse` to fill each
    cache's `updates` list and its `s_t` step tensor.
    """

    def __init__(self, *args, **kwargs):
        # Global step drives the (optional) exponential learning-rate decay.
        self._global_step = kwargs.pop('global_step', tf.Variable(0.0, trainable=False))
        super(BaseOptimizer, self).__init__(*args, **kwargs)
        # acc_name -> {variable -> accumulator Variable}
        self._accumulators = {}
        return

    def __call__(self, loss):
        return self.minimize(loss)

    def minimize(self, loss, name=None):
        """Build and return an op that applies one optimization step."""
        var_list = tf.trainable_variables()
        for x_tm1 in var_list:
            if (not isinstance(x_tm1, tf.Variable)):
                raise TypeError(('Argument is not a tf.Variable: %s' % x_tm1))
        if (not var_list):
            raise ValueError('No variables to optimize')
        if (loss.dtype.base_dtype != tf.float32):
            raise ValueError('Loss is not float32')
        grads = tf.gradients(loss, var_list, colocate_gradients_with_ops=True, gate_gradients=True, aggregation_method=2)
        for (x_tm1, g_t) in zip(var_list, grads):
            if (g_t is not None):
                if (x_tm1.dtype.base_dtype != tf.float32):
                    raise ValueError(('%s is not float32' % x_tm1.name))
        with tf.control_dependencies(None):
            self._init_acc(var_list, grads)
        with tf.name_scope(name, self.name.title(), []) as name:
            # BUG FIX: filter() returns a one-shot iterator on Python 3, but
            # `caches` is iterated both here and again in _finish(); the second
            # pass would see nothing. Materialize a list instead.
            caches = [cache for cache in self._prepare(var_list, grads) if (cache['g_t'] is not None)]
            for cache in caches:
                (x_tm1, g_t) = (cache['x_tm1'], cache['g_t'])
                with tf.name_scope(('update_' + x_tm1.op.name)), tf.device(x_tm1.device):
                    if isinstance(g_t, tf.Tensor):
                        # Zero out non-finite gradient entries before applying.
                        cache['g_t'] = tf.where(tf.is_finite(g_t), g_t, tf.zeros_like(g_t))
                        self._apply_dense(cache)
                    else:
                        # IndexedSlices (sparse) gradient.
                        cache['g_t'] = tf.where(tf.is_finite(g_t.values), g_t.values, tf.zeros_like(g_t.values))
                        cache['idxs'] = g_t.indices
                        self._apply_sparse(cache)
            with tf.control_dependencies([self._finish(caches)]):
                with tf.device(self.global_step.device):
                    return tf.assign_add(self.global_step, 1, name=name).op

    def _init_acc(self, var_list, grads):
        """Pre-create the parameter-averaging accumulators when chi > 0."""
        for (x_tm1, g_t) in zip(var_list, grads):
            if (self.chi > 0):
                # BUG FIX: tf.add_to_collection(name, value) takes the
                # collection name first; the original call had the arguments
                # swapped, registering the collection key as a value.
                tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, self.get_accumulator(x_tm1, 'x'))
                shape = self.get_variable_shape(x_tm1)
                if isinstance(g_t, tf.Tensor):
                    # Dense: a scalar step counter is enough.
                    self.get_accumulator(x_tm1, 'x/tm1', [])
                else:
                    # Sparse: one counter per row of the variable.
                    self.get_accumulator(x_tm1, 'x/tm1', ([shape[0]] + ([1] * (len(shape) - 1))))
        return

    def _prepare(self, var_list, grads):
        """Pair each variable with its gradient in a mutable update cache."""
        caches = []
        for (x_tm1, g_t) in zip(var_list, grads):
            caches.append({'x_tm1': x_tm1, 'g_t': g_t, 'updates': []})
        return caches

    def _apply_dense(self, cache):
        raise NotImplementedError()

    def _apply_sparse(self, cache):
        raise NotImplementedError()

    # BUG FIX: missing @staticmethod — this is called everywhere as
    # self.get_variable_shape(v), which would have passed `self` as x_tm1.
    @staticmethod
    def get_variable_shape(x_tm1):
        return x_tm1.initialized_value().get_shape().as_list()

    def get_accumulator(self, x_tm1, acc_name, shape=None):
        """Get (creating on first use) the named accumulator slot for x_tm1."""
        if (shape is None):
            shape = self.get_variable_shape(x_tm1)
        if (acc_name not in self._accumulators):
            self._accumulators[acc_name] = {}
        accumulator = self._accumulators[acc_name]
        if (x_tm1 not in accumulator):
            new_name = ('%s/%s' % (self.name.title(), acc_name))
            zeros = tf.zeros(shape, dtype=x_tm1.dtype)
            with tf.name_scope(('%s/%s' % (x_tm1.op.name, new_name))) as scope:
                with tf.device(x_tm1.device):
                    accumulator[x_tm1] = b_tm1 = tf.Variable(zeros, name=scope, trainable=False)
                    # Preserve partitioned-variable save/restore metadata.
                    if (isinstance(x_tm1, tf.Variable) and x_tm1._save_slice_info):
                        real_acc_name = scope[len((x_tm1.op.name + '/')):(- 1)]
                        slice_info = x_tm1._save_slice_info
                        # BUG FIX: `real_slot_name` was an undefined name; the
                        # value computed above is `real_acc_name`.
                        b_tm1._set_save_slice_info(tf.Variable.SaveSliceInfo(('%s/%s' % (slice_info.full_name, real_acc_name)), slice_info.full_shape[:], slice_info.var_offset[:], slice_info.var_shape[:]))
        return accumulator[x_tm1]

    def _dense_moving_average(self, x_tm1, a_t, name, beta=0.9):
        """Bias-corrected EMA of a_t (or running mean when beta >= 1)."""
        b_tm1 = self.get_accumulator(x_tm1, ('%s' % name))
        tm1 = self.get_accumulator(x_tm1, ('%s/tm1' % name), shape=[])
        t = tf.assign_add(tm1, 1)
        if (beta < 1):
            beta_t = tf.convert_to_tensor(beta, name=('%s/decay' % name))
            # Debiasing factor a la Adam: scales by (1-beta^(t-1))/(1-beta^t).
            beta_t = ((beta_t * (1 - (beta ** tm1))) / (1 - (beta ** t)))
        else:
            beta_t = (tm1 / t)
        b_t = tf.assign(b_tm1, (beta_t * b_tm1))
        b_t = tf.assign_add(b_t, ((1 - beta_t) * a_t))
        return (b_t, t)

    def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=0.9):
        """Row-wise EMA updating only the rows selected by idxs."""
        b_tm1 = self.get_accumulator(x_tm1, ('%s' % name))
        b_tm1_ = tf.gather(b_tm1, idxs)
        shape = self.get_variable_shape(x_tm1)
        tm1 = self.get_accumulator(x_tm1, ('%s/tm1' % name), shape=([shape[0]] + ([1] * (len(shape) - 1))))
        tm1_ = tf.gather(tm1, idxs)
        t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
        t_ = tf.gather(t, idxs)
        if (beta < 1):
            beta_t = tf.convert_to_tensor(beta, name=('%s/decay' % name))
            beta_t_ = ((beta_t * (1 - (beta_t ** tm1_))) / (1 - (beta_t ** t_)))
        else:
            beta_t_ = (tm1_ / t_)
        b_t = tf.scatter_update(b_tm1, idxs, (beta_t_ * b_tm1_))
        b_t = tf.scatter_add(b_t, idxs, ((1 - beta_t_) * a_t_))
        return (b_t, t)

    def _finish(self, caches):
        """Optionally clip the global step norm, apply updates, group them."""
        if (self.clip > 0):
            S_t = [cache['s_t'] for cache in caches]
            (S_t, _) = tf.clip_by_global_norm(S_t, self.clip)
            for (cache, s_t) in zip(caches, S_t):
                cache['s_t'] = s_t
        for cache in caches:
            x_tm1 = cache['x_tm1']
            s_t = cache['s_t']
            updates = cache['updates']
            with tf.name_scope(('update_' + x_tm1.op.name)), tf.device(x_tm1.device):
                if ('idxs' in cache):
                    idxs = cache['idxs']
                    x_t = tf.scatter_sub(x_tm1, idxs, s_t)
                    if (self.chi > 0):
                        x_t_ = tf.gather(x_t, idxs)
                        (x_bar_t, t_x_bar) = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
                else:
                    x_t = tf.assign_sub(x_tm1, s_t)
                    if (self.chi > 0):
                        (x_bar_t, t_x_bar) = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
                updates.append(x_t)
                if (self.chi > 0):
                    updates.extend([x_bar_t, t_x_bar])
        update_ops = [tf.group(*cache['updates']) for cache in caches]
        return tf.group(*update_ops, name='update')

    def average(self, x_tm1):
        """Return the Polyak average of x_tm1, or x_tm1 itself if none exists."""
        if ('x' in self._accumulators):
            return self._accumulators['x'].get(x_tm1, x_tm1)
        else:
            return x_tm1

    def average_name(self, x_tm1):
        return ((((x_tm1.op.name + '/') + self.name) + '/') + 'x')

    def variables_to_restore(self, moving_avg_variables=None):
        """Map checkpoint names to variables, preferring averaged weights."""
        name_map = {}
        if (moving_avg_variables is None):
            moving_avg_variables = tf.trainable_variables()
            moving_avg_variables += tf.moving_average_variables()
        moving_avg_variables = set(moving_avg_variables)
        for v in moving_avg_variables:
            name_map[self.average_name(v)] = v
        for v in list((set(tf.all_variables()) - moving_avg_variables)):
            if (v.op.name not in name_map):
                name_map[v.op.name] = v
        return name_map

    # BUG FIX: these three are read as attributes elsewhere in this class
    # (e.g. `self.global_step.device` in minimize()), so they must be
    # properties, not plain methods.
    @property
    def learning_rate(self):
        """Base LR, exponentially decayed by global step when configured."""
        if (self.decay_steps > 0):
            return (super(BaseOptimizer, self).learning_rate * (self.decay ** (self.global_step / self.decay_steps)))
        else:
            return super(BaseOptimizer, self).learning_rate

    @property
    def global_step(self):
        return self._global_step

    @property
    def accumulators(self):
        return self._accumulators
def test_linear_same_dim():
    """Two stacked Linear layers where the second maps out_dim -> out_dim."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, out_dim)
            self.layer2 = rf.Linear(out_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        model(extern_data['data']).mark_as_default_output()

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
def load_file(p_path_to_data):
    """Load a JSON-lines answers file.

    Returns a tuple of:
      - dict mapping query_id -> list of normalized answer strings,
      - set of query ids whose answers contain 'No Answer Present.',
      - set of the remaining (answered) query ids.
    """
    all_answers = []
    query_ids = []
    no_answer_query_ids = set()
    yes_answer_query_ids = set()
    with open(p_path_to_data, 'r', encoding='utf-8') as data_file:
        for line in data_file:
            try:
                json_object = json.loads(line)
            except json.JSONDecodeError:
                raise Exception('"%s" is not a valid json' % line)
            assert QUERY_ID_JSON_ID in json_object, ('"%s" json does not have "%s" field' % (line, QUERY_ID_JSON_ID))
            query_id = json_object[QUERY_ID_JSON_ID]
            assert ANSWERS_JSON_ID in json_object, ('"%s" json does not have "%s" field' % (line, ANSWERS_JSON_ID))
            answers = json_object[ANSWERS_JSON_ID]
            if 'No Answer Present.' in answers:
                no_answer_query_ids.add(query_id)
            else:
                yes_answer_query_ids.add(query_id)
            # Remember the owning query id once per answer so the normalized
            # results (computed in one batch below) can be regrouped.
            all_answers.extend(answers)
            query_ids.extend([query_id] * len(answers))
    query_id_to_answers_map = {}
    for query_id, normalized_answer in zip(query_ids, normalize_batch(all_answers)):
        query_id_to_answers_map.setdefault(query_id, []).append(normalized_answer)
    return (query_id_to_answers_map, no_answer_query_ids, yes_answer_query_ids)
def plot_results(dataset, ax):
    """Plot model temperature-anomaly forecasts against the observation series,
    sorted by their Pearson correlation with the observations.

    Args:
        dataset: DataFrame indexed by model name; rows are forecast series.
        ax: matplotlib Axes to draw on.

    NOTE(review): relies on a module-level `observation_data` series — confirm
    its index aligns with the dataset columns.
    """
    markers = ['o', 'v', 's']
    # Matplotlib's default color cycle has 10 colors; the marker switches
    # every 10 models so each (marker, color) pair stays unique.
    colors_available = 10
    ax.plot(observation_data, 'o-', label='Observation', color='k', lw=3, zorder=999)
    ax.set_xlabel('Season', fontsize=18)
    ax.set_ylabel('Temperature anomaly [C]', fontsize=18)
    ax.set_xlim(('NDJ 2014/15', 'SON'))
    ax.set_ylim(((- 1), 3))
    # Shade the initialization window.
    ax.axvspan('NDJ 2014/15', 'JFM', alpha=0.2, color='gray')
    corrcoeffs = []
    for (i, model) in enumerate(dataset.index):
        data = dataset.loc[model].dropna()
        data_np = np.array(data)
        # Align observations to the forecast window (skip first two seasons).
        obs = observation_data.loc[observation_data.index[2:]].to_numpy()[:len(data_np)]
        corrcoeffs.append(pearsonr(data_np, obs.T[0])[0])
    models = list(dataset.index)
    # Sort models by descending correlation for the legend order.
    (corrcoeffs, models) = zip(*sorted(zip(corrcoeffs, models), reverse=True))
    for (i, model) in enumerate(models):
        data = dataset.loc[model].dropna()
        data_np = np.array(data)
        # NOTE(review): the label format '{}, ={:.3f}' looks like it lost a
        # character (probably 'r=' or a rho symbol) — confirm intended text.
        ax.plot(data, f'{markers[int((i / colors_available))]}-', label='{}, ={:.3f}'.format(model, corrcoeffs[i]))
    # NOTE(review): data_np here is whatever the LAST loop iteration left
    # behind, so the connector line uses the last model's first point —
    # verify this is intentional and not a loop-scoping bug.
    (point_from_x, point_from_y) = ('DJF 2014/15', observation_data.loc['DJF 2014/15'])
    (point_to_x, point_to_y) = ('JFM', data_np[0])
    ax.plot([point_from_x, point_to_x], [point_from_y, point_to_y], f'{markers[int((i / colors_available))]}--', color='k', alpha=0.5)
def parse_optfloat(val, default_val=None) -> Optional[float]:
    """Parse val as a float, mapping None or the literal string 'None' to default_val."""
    if val is None or val == 'None':
        return default_val
    return float(val)
def mark_volatile(obj):
    """Recursively wrap tensors in Variables and flag them with `no_grad`.

    Mappings and sequences are rebuilt with each element processed; any other
    object is returned unchanged.
    """
    if torch.is_tensor(obj):
        obj = Variable(obj)
    if isinstance(obj, Variable):
        obj.no_grad = True
        return obj
    # BUG FIX: collections.Mapping / collections.Sequence aliases were removed
    # in Python 3.10 — they live in collections.abc.
    elif isinstance(obj, collections.abc.Mapping):
        return {k: mark_volatile(o) for (k, o) in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
        return [mark_volatile(o) for o in obj]
    else:
        return obj
class GraphLogger(Callback):
    """Lightning callback that logs the model graph to supported loggers."""

    def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Begin watching weights/gradients on the first W&B logger found."""
        for logger in trainer.loggers:
            if not isinstance(logger, AnomalibWandbLogger):
                continue
            logger.watch(pl_module, log_graph=True, log='all')
            break

    def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Log the graph (Comet/TensorBoard) or stop watching (W&B)."""
        for logger in trainer.loggers:
            if isinstance(logger, (AnomalibCometLogger, AnomalibTensorBoardLogger)):
                # Graph tracing needs a dummy forward input.
                logger.log_graph(pl_module, input_array=torch.ones((1, 3, 256, 256)))
            elif isinstance(logger, AnomalibWandbLogger):
                logger.unwatch(pl_module)
class ReplaceLayer(BaseAction):
    """Substitution action: swap a graph node's layer class, recomputing its
    weights and framework attributes via a user-supplied function."""

    def __init__(self, layer_type: type, get_params_and_weights_fn: Callable):
        self.layer_type = layer_type
        self.get_params_and_weights_fn = get_params_and_weights_fn

    def apply(self, node: BaseNode, graph: Graph, fw_info: FrameworkInfo):
        """Mutate `node` in place to use the replacement layer type."""
        act_quant_params = {}
        if node.final_activation_quantization_cfg is not None:
            act_quant_params = node.final_activation_quantization_cfg.activation_quantization_params
        new_weights, new_config = self.get_params_and_weights_fn(node.weights, act_quant_params, **node.framework_attr)
        node.framework_attr = new_config
        node.weights = new_weights
        node.layer_class = self.layer_type
        Logger.warning(f'Layer {node.name} was replaced but quantization parameters were set by original layer')
def _pad_target(t, length):
    """Pad a 2-D target array along axis 0 up to `length` rows with `_pad`."""
    rows_needed = length - t.shape[0]
    return np.pad(t, [(0, rows_needed), (0, 0)], mode='constant', constant_values=_pad)
def main():
    """Build and launch the Gradio demo UI for Matcha-TTS synthesis."""
    description = '# Matcha-TTS: A fast TTS architecture with conditional flow matching\n ### [Shivam Mehta]( [Ruibo Tu]( [Jonas Beskow]( [Eva Szekely]( and [Gustav Eje Henter]( We propose Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis. Our method:\n\n\n * Is probabilistic\n * Has compact memory footprint\n * Sounds highly natural\n * Is very fast to synthesise from\n\n\n Check out our [demo page]( Read our [arXiv preprint for more details]( Code is available in our [GitHub repository]( along with pre-trained models.\n\n Cached examples are available at the bottom of the page.\n '
    with gr.Blocks(title=' Matcha-TTS: A fast TTS architecture with conditional flow matching') as demo:
        # Hidden state carrying the tokenized text between the two pipeline steps.
        processed_text = gr.State(value=None)
        processed_text_len = gr.State(value=None)
        # Header: description + logo + embedded video.
        with gr.Box():
            with gr.Row():
                gr.Markdown(description, scale=3)
                with gr.Column():
                    gr.Image(LOGO_URL, label='Matcha-TTS logo', height=50, width=50, scale=1, show_label=False)
                    html = '<br><iframe width="560" height="315" src=" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>'
                    gr.HTML(html)
        # Input controls: model choice, text, speaker, and sampler settings.
        with gr.Box():
            radio_options = list(RADIO_OPTIONS.keys())
            model_type = gr.Radio(radio_options, value=radio_options[0], label='Choose a Model', interactive=True, container=False)
            with gr.Row():
                gr.Markdown('# Text Input')
            with gr.Row():
                text = gr.Textbox(value='', lines=2, label='Text to synthesise', scale=3)
                spk_slider = gr.Slider(minimum=0, maximum=107, step=1, value=args.spk, label='Speaker ID', interactive=True, scale=1)
            with gr.Row():
                gr.Markdown('### Hyper parameters')
            with gr.Row():
                n_timesteps = gr.Slider(label='Number of ODE steps', minimum=1, maximum=100, step=1, value=10, interactive=True)
                length_scale = gr.Slider(label='Length scale (Speaking rate)', minimum=0.5, maximum=1.5, step=0.05, value=1.0, interactive=True)
                mel_temp = gr.Slider(label='Sampling temperature', minimum=0.0, maximum=2.001, step=0.16675, value=0.667, interactive=True)
                synth_btn = gr.Button('Synthesise')
        # Outputs: phonemes, spectrogram image, and synthesized audio.
        with gr.Box():
            with gr.Row():
                gr.Markdown('### Phonetised text')
                phonetised_text = gr.Textbox(interactive=False, scale=10, label='Phonetised text')
            with gr.Box():
                with gr.Row():
                    mel_spectrogram = gr.Image(interactive=False, label='mel spectrogram')
                    audio = gr.Audio(interactive=False, label='Audio')
        # Cached example rows; visibility toggled per selected model type.
        with gr.Row(visible=False) as example_row_lj_speech:
            examples = gr.Examples(examples=[['We propose Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up O D E-based speech synthesis.', 50, 0.677, 0.95], ['The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.', 2, 0.677, 0.95], ['The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.', 4, 0.677, 0.95], ['The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.', 10, 0.677, 0.95], ['The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.', 50, 0.677, 0.95], ['The narrative of these events is based largely on the recollections of the participants.', 10, 0.677, 0.95], ['The jury did not believe him, and the verdict was for the defendants.', 10, 0.677, 0.95]], fn=ljspeech_example_cacher, inputs=[text, n_timesteps, mel_temp, length_scale], outputs=[phonetised_text, audio, mel_spectrogram], cache_examples=True)
        with gr.Row() as example_row_multispeaker:
            multi_speaker_examples = gr.Examples(examples=[['Hello everyone! I am speaker 0 and I am here to tell you that Matcha-TTS is amazing!', 10, 0.677, 0.85, 0], ['Hello everyone! I am speaker 16 and I am here to tell you that Matcha-TTS is amazing!', 10, 0.677, 0.85, 16], ['Hello everyone! I am speaker 44 and I am here to tell you that Matcha-TTS is amazing!', 50, 0.677, 0.85, 44], ['Hello everyone! I am speaker 45 and I am here to tell you that Matcha-TTS is amazing!', 50, 0.677, 0.85, 45], ['Hello everyone! I am speaker 58 and I am here to tell you that Matcha-TTS is amazing!', 4, 0.677, 0.85, 58]], fn=multispeaker_example_cacher, inputs=[text, n_timesteps, mel_temp, length_scale, spk_slider], outputs=[phonetised_text, audio, mel_spectrogram], cache_examples=True, label='Multi Speaker Examples')
        # Changing the model disables the button while the new model loads.
        model_type.change((lambda x: gr.update(interactive=False)), inputs=[synth_btn], outputs=[synth_btn]).then(load_model_ui, inputs=[model_type, text], outputs=[text, synth_btn, spk_slider, example_row_lj_speech, example_row_multispeaker, length_scale])
        # Two-step synthesis: text -> phonemes, then phonemes -> mel + audio.
        synth_btn.click(fn=process_text_gradio, inputs=[text], outputs=[phonetised_text, processed_text, processed_text_len], api_name='matcha_tts', queue=True).then(fn=synthesise_mel, inputs=[processed_text, processed_text_len, n_timesteps, mel_temp, length_scale, spk_slider], outputs=[audio, mel_spectrogram])
        demo.queue().launch(share=True)
class ProphetNetForConditionalGeneration():
    """Dummy placeholder used when PyTorch is not installed.

    Every entry point delegates to the backend guard, which raises a helpful
    ImportError explaining that torch is required.
    """

    def __init__(self, *args, **kwargs):
        # Raises immediately if torch is unavailable.
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
# BUG FIX: the line above the function was `(**njit_dict_no_parallel)` — a
# syntax error left over from a stripped decorator. Restored the numba JIT
# decorator that this config dict is conventionally unpacked into.
@njit(**njit_dict_no_parallel)
def sample_energy(energy, intensity):
    """Sample one energy from the discrete distribution weighted by
    energy * intensity (inverse-CDF sampling).

    Returns:
        The sampled energy, or False if floating-point rounding leaves the
        cumulative sum just short of the drawn uniform value.
    """
    z = np.random.random()
    # Normalizer so the weights e*i sum to 1.
    average = (energy * intensity).sum()
    total = 0
    for (e, i) in zip(energy, intensity):
        total += ((e * i) / average)
        if (z <= total):
            return e
    return False
# Non-code residue from a dataset-viewer page (kept as comments so the file
# remains valid Python):
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.