class DummyArmController():
def __init__(self, _):
self.state = 'idle'
self.gripper_state = 'open'
self.target_ee_pos = None
self.arm_heading = 0
self.queue = Queue()
def disconnect(self):
pass
def execute_command(self, command):
self.queue.put(command)
def stop(self):
pass
def get_controller_data(self):
return {'state': self.state, 'gripper_state': self.gripper_state, 'target_ee_pos': self.target_ee_pos, 'arm_heading': self.arm_heading}
def run(self):
while True:
if self.queue.empty():
self.state = 'idle'
else:
self.state = 'manipulating'
arm_command = self.queue.get()
self.target_ee_pos = arm_command['target_ee_pos']
time.sleep(1)
if (arm_command['primitive_name'] == 'pick'):
self.gripper_state = 'closed'
else:
self.gripper_state = 'open'
self.target_ee_pos = None
time.sleep(0.001) |
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
def test_bert_from_pretrained(self):
for model_name in ['bert-base-cased', 'bert-large-uncased']:
with self.subTest(model_name):
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = FlaxAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, FlaxBertModel)
def test_roberta_from_pretrained(self):
for model_name in ['roberta-base', 'roberta-large']:
with self.subTest(model_name):
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, RobertaConfig)
model = FlaxAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model, FlaxRobertaModel)
def test_bert_jax_jit(self):
for model_name in ['bert-base-cased', 'bert-large-uncased']:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = FlaxBertModel.from_pretrained(model_name)
tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)
@jax.jit
def eval(**kwargs):
return model(**kwargs)
eval(**tokens).block_until_ready()
def test_roberta_jax_jit(self):
for model_name in ['roberta-base', 'roberta-large']:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = FlaxRobertaModel.from_pretrained(model_name)
tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)
@jax.jit
def eval(**kwargs):
return model(**kwargs)
eval(**tokens).block_until_ready()
def test_repo_not_found(self):
with self.assertRaisesRegex(EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
_ = FlaxAutoModel.from_pretrained('bert-base')
def test_revision_not_found(self):
with self.assertRaisesRegex(EnvironmentError, 'aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)'):
_ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
def test_model_file_not_found(self):
with self.assertRaisesRegex(EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack'):
_ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def test_model_from_pt_suggestion(self):
with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
_ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only') |
def evaluate(gold_ud, system_ud, deprel_weights=None):
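# Aligns the gold and system words and scores tokens, sentences, UPOS/XPOS/feats, lemmas, UAS and LAS (CoNLL UD evaluation).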
class Score():
def __init__(self, gold_total, system_total, correct, aligned_total=None):
self.precision = ((correct / system_total) if system_total else 0.0)
self.recall = ((correct / gold_total) if gold_total else 0.0)
self.f1 = (((2 * correct) / (system_total + gold_total)) if (system_total + gold_total) else 0.0)
self.aligned_accuracy = ((correct / aligned_total) if aligned_total else aligned_total)
class AlignmentWord():
def __init__(self, gold_word, system_word):
self.gold_word = gold_word
self.system_word = system_word
self.gold_parent = None
self.system_parent_gold_aligned = None
class Alignment():
def __init__(self, gold_words, system_words):
self.gold_words = gold_words
self.system_words = system_words
self.matched_words = []
self.matched_words_map = {}
def append_aligned_words(self, gold_word, system_word):
self.matched_words.append(AlignmentWord(gold_word, system_word))
self.matched_words_map[system_word] = gold_word
def fill_parents(self):
for words in self.matched_words:
words.gold_parent = (words.gold_word.parent if (words.gold_word.parent is not None) else 0)
words.system_parent_gold_aligned = (self.matched_words_map.get(words.system_word.parent, None) if (words.system_word.parent is not None) else 0)
def lower(text):
if ((sys.version_info < (3, 0)) and isinstance(text, str)):
return text.decode('utf-8').lower()
return text.lower()
def spans_score(gold_spans, system_spans):
(correct, gi, si) = (0, 0, 0)
while ((gi < len(gold_spans)) and (si < len(system_spans))):
if (system_spans[si].start < gold_spans[gi].start):
si += 1
elif (gold_spans[gi].start < system_spans[si].start):
gi += 1
else:
correct += (gold_spans[gi].end == system_spans[si].end)
si += 1
gi += 1
return Score(len(gold_spans), len(system_spans), correct)
def alignment_score(alignment, key_fn, weight_fn=(lambda w: 1)):
(gold, system, aligned, correct) = (0, 0, 0, 0)
for word in alignment.gold_words:
gold += weight_fn(word)
for word in alignment.system_words:
system += weight_fn(word)
for words in alignment.matched_words:
aligned += weight_fn(words.gold_word)
if (key_fn is None):
return Score(gold, system, aligned)
for words in alignment.matched_words:
if (key_fn(words.gold_word, words.gold_parent) == key_fn(words.system_word, words.system_parent_gold_aligned)):
correct += weight_fn(words.gold_word)
return Score(gold, system, correct, aligned)
def beyond_end(words, i, multiword_span_end):
if (i >= len(words)):
return True
if words[i].is_multiword:
return (words[i].span.start >= multiword_span_end)
return (words[i].span.end > multiword_span_end)
def extend_end(word, multiword_span_end):
if (word.is_multiword and (word.span.end > multiword_span_end)):
return word.span.end
return multiword_span_end
def find_multiword_span(gold_words, system_words, gi, si):
if gold_words[gi].is_multiword:
multiword_span_end = gold_words[gi].span.end
if ((not system_words[si].is_multiword) and (system_words[si].span.start < gold_words[gi].span.start)):
si += 1
else:
multiword_span_end = system_words[si].span.end
if ((not gold_words[gi].is_multiword) and (gold_words[gi].span.start < system_words[si].span.start)):
gi += 1
(gs, ss) = (gi, si)
while ((not beyond_end(gold_words, gi, multiword_span_end)) or (not beyond_end(system_words, si, multiword_span_end))):
if ((gi < len(gold_words)) and ((si >= len(system_words)) or (gold_words[gi].span.start <= system_words[si].span.start))):
multiword_span_end = extend_end(gold_words[gi], multiword_span_end)
gi += 1
else:
multiword_span_end = extend_end(system_words[si], multiword_span_end)
si += 1
return (gs, ss, gi, si)
def compute_lcs(gold_words, system_words, gi, si, gs, ss):
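# Dynamic-programming table: lcs[g][s] holds the longest common subsequence length of the remaining gold/system word forms (case-insensitive) inside the multiword span.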
lcs = [([0] * (si - ss)) for i in range((gi - gs))]
for g in reversed(range((gi - gs))):
for s in reversed(range((si - ss))):
if (lower(gold_words[(gs + g)].columns[FORM]) == lower(system_words[(ss + s)].columns[FORM])):
lcs[g][s] = (1 + (lcs[(g + 1)][(s + 1)] if (((g + 1) < (gi - gs)) and ((s + 1) < (si - ss))) else 0))
lcs[g][s] = max(lcs[g][s], (lcs[(g + 1)][s] if ((g + 1) < (gi - gs)) else 0))
lcs[g][s] = max(lcs[g][s], (lcs[g][(s + 1)] if ((s + 1) < (si - ss)) else 0))
return lcs
def align_words(gold_words, system_words):
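# Greedy word alignment: identical single-token spans are matched directly; multiword token spans are matched via the LCS of their word forms.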
alignment = Alignment(gold_words, system_words)
(gi, si) = (0, 0)
while ((gi < len(gold_words)) and (si < len(system_words))):
if (gold_words[gi].is_multiword or system_words[si].is_multiword):
(gs, ss, gi, si) = find_multiword_span(gold_words, system_words, gi, si)
if ((si > ss) and (gi > gs)):
lcs = compute_lcs(gold_words, system_words, gi, si, gs, ss)
(s, g) = (0, 0)
while ((g < (gi - gs)) and (s < (si - ss))):
if (lower(gold_words[(gs + g)].columns[FORM]) == lower(system_words[(ss + s)].columns[FORM])):
alignment.append_aligned_words(gold_words[(gs + g)], system_words[(ss + s)])
g += 1
s += 1
elif (lcs[g][s] == (lcs[(g + 1)][s] if ((g + 1) < (gi - gs)) else 0)):
g += 1
else:
s += 1
elif ((gold_words[gi].span.start, gold_words[gi].span.end) == (system_words[si].span.start, system_words[si].span.end)):
alignment.append_aligned_words(gold_words[gi], system_words[si])
gi += 1
si += 1
elif (gold_words[gi].span.start <= system_words[si].span.start):
gi += 1
else:
si += 1
alignment.fill_parents()
return alignment
if (gold_ud.characters != system_ud.characters):
index = 0
while (gold_ud.characters[index] == system_ud.characters[index]):
index += 1
raise UDError(('The concatenation of tokens in gold file and in system file differ!\n' + "First 20 differing characters in gold file: '{}' and system file: '{}'".format(''.join(gold_ud.characters[index:(index + 20)]), ''.join(system_ud.characters[index:(index + 20)]))))
alignment = align_words(gold_ud.words, system_ud.words)
result = {'Tokens': spans_score(gold_ud.tokens, system_ud.tokens), 'Sentences': spans_score(gold_ud.sentences, system_ud.sentences), 'Words': alignment_score(alignment, None), 'UPOS': alignment_score(alignment, (lambda w, parent: w.columns[UPOS])), 'XPOS': alignment_score(alignment, (lambda w, parent: w.columns[XPOS])), 'Feats': alignment_score(alignment, (lambda w, parent: w.columns[FEATS])), 'AllTags': alignment_score(alignment, (lambda w, parent: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS]))), 'Lemmas': alignment_score(alignment, (lambda w, parent: w.columns[LEMMA])), 'UAS': alignment_score(alignment, (lambda w, parent: parent)), 'LAS': alignment_score(alignment, (lambda w, parent: (parent, w.columns[DEPREL])))}
if (deprel_weights is not None):
def weighted_las(word):
return deprel_weights.get(word.columns[DEPREL], 1.0)
result['WeightedLAS'] = alignment_score(alignment, (lambda w, parent: (parent, w.columns[DEPREL])), weighted_las)
return result |
class TestBoxMode(unittest.TestCase):
def _convert_xy_to_wh(self, x):
return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
def _convert_xywha_to_xyxy(self, x):
return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
def _convert_xywh_to_xywha(self, x):
return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
def test_convert_int_mode(self):
BoxMode.convert([1, 2, 3, 4], 0, 1)
def test_box_convert_list(self):
for tp in [list, tuple]:
box = tp([5.0, 5.0, 10.0, 10.0])
output = self._convert_xy_to_wh(box)
self.assertIsInstance(output, tp)
self.assertIsInstance(output[0], float)
self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0]))
with self.assertRaises(Exception):
self._convert_xy_to_wh([box])
def test_box_convert_array(self):
box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_cpu_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
output = output.numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
@unittest.skipIf((not torch.cuda.is_available()), 'CUDA not available')
def test_box_convert_cuda_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda()
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertEqual(output.device, box.device)
output = output.cpu().numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_xywha_to_xyxy_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20, 0])
output = self._convert_xywha_to_xyxy(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([35, 40, 65, 60]))
with self.assertRaises(Exception):
self._convert_xywha_to_xyxy([box])
def test_box_convert_xywha_to_xyxy_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[50, 50, 30, 20, 0], [50, 50, 30, 20, 90], [1, 1, math.sqrt(2), math.sqrt(2), (- 45)]], dtype=dtype)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywha_to_xyxy_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[50, 50, 30, 20, 0], [50, 50, 30, 20, 90], [1, 1, math.sqrt(2), math.sqrt(2), (- 45)]], dtype=dtype)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywh_to_xywha_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20])
output = self._convert_xywh_to_xywha(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([65, 60, 30, 20, 0]))
with self.assertRaises(Exception):
self._convert_xywh_to_xywha([box])
def test_box_convert_xywh_to_xywha_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [(- 1), (- 1), 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywh_to_xywha_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [(- 1), (- 1), 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_json_serializable(self):
payload = {'box_mode': BoxMode.XYWH_REL}
try:
json.dumps(payload)
except Exception:
self.fail('JSON serialization failed')
def test_json_deserializable(self):
payload = '{"box_mode": 2}'
obj = json.loads(payload)
try:
obj['box_mode'] = BoxMode(obj['box_mode'])
except Exception:
self.fail('JSON deserialization failed') |
def train_new_models(dir, iter, srand, num_jobs, num_archives_processed, num_archives, raw_model_string, egs_dir, apply_deriv_weights, min_deriv_time, max_deriv_time_relative, l2_regularize, xent_regularize, leaky_hmm_coefficient, momentum, max_param_change, shuffle_buffer_size, num_chunk_per_minibatch_str, frame_subsampling_factor, run_opts, train_opts, backstitch_training_scale=0.0, backstitch_training_interval=1, use_multitask_egs=False):
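# Launches one background nnet3-chain-train job per parallel job, each reading a different egs archive with its own frame shift, then waits for all jobs to finish.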
deriv_time_opts = []
if (min_deriv_time is not None):
deriv_time_opts.append('--optimization.min-deriv-time={0}'.format(min_deriv_time))
if (max_deriv_time_relative is not None):
deriv_time_opts.append('--optimization.max-deriv-time-relative={0}'.format(int(max_deriv_time_relative)))
threads = []
verbose_opt = ('--verbose=1' if (((iter % 20) == 0) and (iter > 0)) else '')
for job in range(1, (num_jobs + 1)):
k = ((num_archives_processed + job) - 1)
archive_index = ((k % num_archives) + 1)
frame_shift = ((archive_index + (k // num_archives)) % frame_subsampling_factor)
multitask_egs_opts = common_train_lib.get_multitask_egs_opts(egs_dir, egs_prefix='cegs.', archive_index=archive_index, use_multitask_egs=use_multitask_egs)
scp_or_ark = ('scp' if use_multitask_egs else 'ark')
cache_io_opts = (('--read-cache={dir}/cache.{iter}'.format(dir=dir, iter=iter) if (iter > 0) else '') + (' --write-cache={0}/cache.{1}'.format(dir, (iter + 1)) if (job == 1) else ''))
thread = common_lib.background_command('{command} {train_queue_opt} {dir}/log/train.{iter}.{job}.log nnet3-chain-train {parallel_train_opts} {verbose_opt} --apply-deriv-weights={app_deriv_wts} --l2-regularize={l2} --leaky-hmm-coefficient={leaky} {cache_io_opts} --xent-regularize={xent_reg} {deriv_time_opts} --print-interval=10 --momentum={momentum} --max-param-change={max_param_change} --backstitch-training-scale={backstitch_training_scale} --backstitch-training-interval={backstitch_training_interval} --l2-regularize-factor={l2_regularize_factor} {train_opts} --srand={srand} "{raw_model}" {dir}/den.fst "ark,bg:nnet3-chain-copy-egs {multitask_egs_opts} --frame-shift={fr_shft} {scp_or_ark}:{egs_dir}/cegs.{archive_index}.{scp_or_ark} ark:- | nnet3-chain-shuffle-egs --buffer-size={buf_size} --srand={srand} ark:- ark:- | nnet3-chain-merge-egs --minibatch-size={num_chunk_per_mb} ark:- ark:- |" {dir}/{next_iter}.{job}.raw'.format(command=run_opts.command, train_queue_opt=run_opts.train_queue_opt, dir=dir, iter=iter, srand=(iter + srand), next_iter=(iter + 1), job=job, deriv_time_opts=' '.join(deriv_time_opts), app_deriv_wts=apply_deriv_weights, fr_shft=frame_shift, l2=l2_regularize, train_opts=train_opts, xent_reg=xent_regularize, leaky=leaky_hmm_coefficient, cache_io_opts=cache_io_opts, parallel_train_opts=run_opts.parallel_train_opts, verbose_opt=verbose_opt, momentum=momentum, max_param_change=max_param_change, backstitch_training_scale=backstitch_training_scale, backstitch_training_interval=backstitch_training_interval, l2_regularize_factor=(1.0 / num_jobs), raw_model=raw_model_string, egs_dir=egs_dir, archive_index=archive_index, buf_size=shuffle_buffer_size, num_chunk_per_mb=num_chunk_per_minibatch_str, multitask_egs_opts=multitask_egs_opts, scp_or_ark=scp_or_ark), require_zero_status=True)
threads.append(thread)
for thread in threads:
thread.join() |
def gaussian(window_size, sigma):
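# Normalized 1-D Gaussian kernel of length window_size, centred at window_size // 2.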
gauss = torch.Tensor([exp(((- ((x - (window_size // 2)) ** 2)) / float((2 * (sigma ** 2))))) for x in range(window_size)])
return (gauss / gauss.sum()) |
class Vocabulary():
def __init__(self, excluds_stopwords=True, wordfreq_threshold=10):
self.vocas = []
self.vocas_id = dict()
self.wordfreq = []
self.excluds_stopwords = excluds_stopwords
self.wordfreq_threshold = wordfreq_threshold
def gen_vocabs(self, corpus, prev_voca, prev_lda):
tmp_wf = {}
if ((prev_voca is None) and (prev_lda is None)):
for doc in corpus:
for word in doc:
if ((self.excluds_stopwords and is_stopword(word)) or (len(word) < 3)):
pass
else:
if (word not in tmp_wf):
tmp_wf[word] = 0
tmp_wf[word] += 1
# iterate over a snapshot so low-frequency entries can be deleted while filtering
for (word, freq) in list(tmp_wf.items()):
if (freq < self.wordfreq_threshold):
del tmp_wf[word]
self.vocas = list(tmp_wf.keys())
for (vid, word) in enumerate(self.vocas):
self.vocas_id[word] = vid
self.wordfreq = ([0] * len(self.vocas))
else:
for doc in corpus:
for word in doc:
if ((self.excluds_stopwords and is_stopword(word)) or (len(word) < 3)):
pass
elif (word in prev_voca.vocas_id):
prev_voca.wordfreq[prev_voca.vocas_id[word]] += 1
else:
if (word not in tmp_wf):
tmp_wf[word] = 0
tmp_wf[word] += 1
wordids_to_delete = []
for (wordid, freq) in enumerate(prev_voca.wordfreq):
if (freq < self.wordfreq_threshold):
wordids_to_delete.append(wordid)
for (word, freq) in list(tmp_wf.items()):
if (freq < self.wordfreq_threshold):
del tmp_wf[word]
for (wid, word) in enumerate(prev_voca.vocas):
if (wid not in wordids_to_delete):
self.vocas.append(word)
for word in tmp_wf.keys():
self.vocas.append(word)
for (wid, word) in enumerate(self.vocas):
self.vocas_id[word] = wid
for wordid in sorted(wordids_to_delete, reverse=True):
prev_lda.n_z_t = numpy.delete(prev_lda.n_z_t, wordid, 1)
smooth = numpy.amin(prev_lda.n_z_t)
for i in range(0, len(tmp_wf.keys())):
prev_lda.n_z_t = numpy.append(prev_lda.n_z_t, ([[smooth]] * prev_lda.K), axis=1)
self.wordfreq = ([0] * len(self.vocas))
for (docid, doc) in enumerate(prev_lda.docs):
doc_in_word = [prev_voca.vocas[wordid] for wordid in doc]
new_doc = self.doc_to_ids(doc_in_word)
prev_lda.docs[docid] = new_doc
tmp_wf.clear()
def term_to_id(self, term):
if (term in self.vocas_id):
voca_id = self.vocas_id[term]
return voca_id
else:
return None
def doc_to_ids(self, doc):
ids = []
for term in doc:
term_id = self.term_to_id(term)
if (term_id is not None):
ids.append(term_id)
self.wordfreq[term_id] += 1
if ('close' in dir(doc)):
doc.close()
return ids
def __getitem__(self, v):
return self.vocas[v]
def size(self):
return len(self.vocas)
def is_stopword_id(self, id):
return (self.vocas[id] in stopwords_list) |
class PrefetchOnGPUs(PrefetchDataZMQ):
def __init__(self, ds, gpus, pipedir=None):
self.gpus = gpus
super(PrefetchOnGPUs, self).__init__(ds, len(gpus), pipedir)
def start_processes(self):
with mask_sigint():
for (gpu, proc) in zip(self.gpus, self.procs):
with change_gpu(gpu):
proc.start() |
def write_tf_session_graph(sess, model_name='model.pb', output_name='sr_output'):
graph_def = sess.graph.as_graph_def()
for node in graph_def.node:
node.device = ''
constant_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names=[output_name])
tf.io.write_graph(constant_graph, '.', model_name, as_text=False) |
def n_colors(lowcolor, highcolor, n_colors):
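# Linearly interpolates between lowcolor and highcolor, returning n_colors evenly spaced RGB tuples (endpoints included).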
diff_0 = float((highcolor[0] - lowcolor[0]))
incr_0 = (diff_0 / (n_colors - 1))
diff_1 = float((highcolor[1] - lowcolor[1]))
incr_1 = (diff_1 / (n_colors - 1))
diff_2 = float((highcolor[2] - lowcolor[2]))
incr_2 = (diff_2 / (n_colors - 1))
color_tuples = []
for index in range(n_colors):
new_tuple = ((lowcolor[0] + (index * incr_0)), (lowcolor[1] + (index * incr_1)), (lowcolor[2] + (index * incr_2)))
color_tuples.append(new_tuple)
return color_tuples |
def test_early_stopping_restore_weights_with_state():
wide = Wide(np.unique(X_wide).shape[0], 1)
deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
model = WideDeep(wide=wide, deeptabular=deeptabular)
fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
model_checkpoint = ModelCheckpoint(filepath=fpath, save_best_only=False, max_save=10, min_delta=1000)
early_stopping = EarlyStopping(patience=3, min_delta=1000, restore_best_weights=True)
trainer = Trainer(model, objective='binary', callbacks=[early_stopping, model_checkpoint], verbose=0)
trainer.fit(X_train={'X_wide': X_wide, 'X_tab': X_tab, 'target': target}, X_val={'X_wide': X_wide_val, 'X_tab': X_tab_val, 'target': target_val}, target=target, n_epochs=5, batch_size=16)
new_wide = Wide(np.unique(X_wide).shape[0], 1)
new_deeptabular = TabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[16, 8])
new_model = WideDeep(wide=new_wide, deeptabular=new_deeptabular)
full_best_epoch_path = '_'.join([model_checkpoint.filepath, (str(((early_stopping.stopped_epoch - early_stopping.patience) + 1)) + '.p')])
new_model.load_state_dict(torch.load(full_best_epoch_path))
new_model.to(next(model.parameters()).device)
shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
assert torch.allclose(new_model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight'], model.state_dict()['deeptabular.0.encoder.mlp.dense_layer_1.1.weight']) |
def _fix_wst(ex):
def _fix_span_text(k):
text = ex[(k + '_text')]
index = ex[(k + '_index')]
if (text in ex['text']):
return
if (text in ('Kamenev and Zinoviev', 'Kamenev, Zinoviev, and Stalin')):
return
if ('theyscold' in text):
ex['text'] = ex['text'].replace('theyscold', 'they scold')
ex['span2_index'] = 10
first_word = ex['text'].split()[index]
if first_word[0].islower():
text = (text[0].lower() + text[1:])
else:
text = (text[0].upper() + text[1:])
text = text.rstrip('.')
text = text.replace('\n', ' ')
ex[(k + '_text')] = text
assert (ex[(k + '_text')] in ex['text']), ex
_fix_span_text('span1')
_fix_span_text('span2')
return ex |
def tensors_to_numpy(tensors, dtype=None):
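# Recursively converts (possibly nested) TensorFlow tensors to NumPy arrays, optionally casting to the given dtype.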
if isinstance(dtype, tf.DType):
dtype = dtype.as_numpy_dtype
if isinstance(tensors, (list, tuple)):
return type(tensors)((tensors_to_numpy(tensor, dtype) for tensor in tensors))
elif isinstance(tensors, dict):
return {key: tensors_to_numpy(value, dtype) for (key, value) in tensors.items()}
elif isinstance(tensors, tf.Tensor):
if (dtype is None):
return tensors.numpy()
else:
return tensors.numpy().astype(dtype)
elif (isinstance(tensors, np.ndarray) and (dtype is not None)):
return tensors.astype(dtype)
else:
return tensors |
def apogeeFieldPath(dr=None):
if (dr is None):
dr = _default_dr()
if ((dr == '11') or (dr == '12')):
platename = 'apogeeField.fits'
elif ((int(dr) > 13) & (int(dr) <= 15)):
platename = 'apogee2Field.fits'
elif (int(dr) >= 16):
platename = 'allField.fits'
else:
platename = ('apogeeField_DR%s.fits' % dr)
return os.path.join(apogeeTargetDirPath(dr=dr), platename) |
def parse_subset_size_0to1(dataset_name):
# Each supported dataset name carries a subset-percentage suffix, e.g. 'cifar10' -> 0.10.
for prefix in ['cifar', 'celebaf', 'celebaCr148f', 'mnistf', 'omnif']:
match = re.search((prefix + '([\\d]+)'), dataset_name)
if match:
percent_str = match.group(1)
assert (len(percent_str) >= 2), 'subset percentage suffix must have at least 2 digits'
return (int(percent_str) / (10 ** len(percent_str)))
raise ValueError(dataset_name)
def _graph_network_no_edge_update(graph_tuple):
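# Graph network with the edge update disabled (None); node and global features are passed through unchanged.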
update_node_fn = (lambda n, se, re, g: n)
update_edge_fn = None
update_global_fn = (lambda gn, ge, g: g)
net = nn.GraphNetwork(update_edge_fn, update_node_fn, update_global_fn)
return net(graph_tuple) |
class LSTMModel(Model):
def __init__(self, output_dim, hidden_dim, name=None, hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, cell_state_init=tf.zeros_initializer(), cell_state_init_trainable=False, forget_bias=True, layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_dim = hidden_dim
self._forget_bias = forget_bias
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._cell_state_init = cell_state_init
self._cell_state_init_trainable = cell_state_init_trainable
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._initialize()
def _initialize(self):
self._lstm_cell = tf.keras.layers.LSTMCell(units=self._hidden_dim, activation=self._hidden_nonlinearity, kernel_initializer=self._hidden_w_init, bias_initializer=self._hidden_b_init, recurrent_activation=self._recurrent_nonlinearity, recurrent_initializer=self._recurrent_w_init, unit_forget_bias=self._forget_bias, name='lstm_layer')
self._output_nonlinearity_layer = tf.keras.layers.Dense(units=self._output_dim, activation=self._output_nonlinearity, kernel_initializer=self._output_w_init, bias_initializer=self._output_b_init, name='output_layer')
def network_input_spec(self):
return ['full_input', 'step_input', 'step_hidden_input', 'step_cell_input']
def network_output_spec(self):
return ['all_output', 'step_output', 'step_hidden', 'step_cell', 'init_hidden', 'init_cell']
def _build(self, all_input_var, step_input_var, step_hidden_var, step_cell_var, name=None):
del name
return lstm(name='lstm', lstm_cell=self._lstm_cell, all_input_var=all_input_var, step_input_var=step_input_var, step_hidden_var=step_hidden_var, step_cell_var=step_cell_var, hidden_state_init=self._hidden_state_init, hidden_state_init_trainable=self._hidden_state_init_trainable, cell_state_init=self._cell_state_init, cell_state_init_trainable=self._cell_state_init_trainable, output_nonlinearity_layer=self._output_nonlinearity_layer)
def __getstate__(self):
new_dict = super().__getstate__()
del new_dict['_lstm_cell']
del new_dict['_output_nonlinearity_layer']
return new_dict
def __setstate__(self, state):
super().__setstate__(state)
self._initialize() |
def compute_classification_metric(p: EvalPrediction):
predictions = p.predictions.argmax(axis=1)
references = p.label_ids
metric = accuracy(predictions=predictions, references=references)
metric.update(precision(predictions=predictions, references=references))
metric.update(recall(predictions=predictions, references=references))
metric.update(f1(predictions=predictions, references=references))
return metric |
class Decoder(Generic[Action], ABC):
@property
def action_space(self) -> gym.Space:
raise NotImplementedError
def decode(self, ctx: Context, action: Action) -> List[Tuple[(AgentID, MsgPayload)]]:
raise NotImplementedError
def chain(self, others: Iterable['Decoder']) -> 'ChainedDecoder':
return ChainedDecoder(flatten([self, others]))
def reset(self):
pass
def __repr__(self) -> str:
return repr(self.action_space)
def __str__(self) -> str:
return str(self.action_space) |
def save_videos_grid_pil(videos: List[PIL.Image.Image], path: str, rescale=False, n_rows=4, fps=8):
videos = rearrange(videos, 'b c t h w -> t b c h w')
outputs = []
for x in videos:
x = torchvision.utils.make_grid(x, nrow=n_rows)
x = x.transpose(0, 1).transpose(1, 2).squeeze((- 1))
if rescale:
x = ((x + 1.0) / 2.0)
x = (x * 255).numpy().astype(np.uint8)
outputs.append(x)
os.makedirs(os.path.dirname(path), exist_ok=True)
imageio.mimsave(path, outputs, fps=fps) |
@register_model
def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs):
return _create_resnetv2_bit('resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), layers=[3, 4, 6, 3], width_factor=3, **kwargs) |
def read_images_from_disk2(input_queue, size1=64):
label = input_queue[2]
fn = input_queue[0]
file_contents = tf.read_file(input_queue[0])
file_contents2 = tf.read_file(input_queue[1])
example = tf.image.decode_jpeg(file_contents, channels=3)
example2 = tf.image.decode_jpeg(file_contents2, channels=3)
example = tf.image.resize_images(example, [size1, size1])
example2 = tf.image.resize_images(example2, [size1, size1])
return (example, example2, label, fn) |
def stats(criterion, a, y, mask):
if (mask is not None):
(_, preds) = t.max(a.data, 2)
(batch, sLen, c) = a.size()
loss = criterion(a.view((- 1), c), y.view((- 1)))
m = t.sum(mask)
mask = _sequence_mask(mask, sLen)
acc = (t.sum((mask.data.float() * (y.data == preds).float())) / float(m.data[0]))
else:
(_, preds) = t.max(a.data, 1)
loss = criterion(a, y)
acc = t.mean((y.data == preds).float())
return (loss, acc) |
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
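# Segments lines with LTP and tokenizes them with BERT in batches of 100, then records the positions of '##' sub-tokens that fall inside Chinese words.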
ltp_res = []
for i in range(0, len(lines), 100):
res = ltp_tokenizer.seg(lines[i:(i + 100)])[0]
res = [get_chinese_word(r) for r in res]
ltp_res.extend(res)
assert (len(ltp_res) == len(lines))
bert_res = []
for i in range(0, len(lines), 100):
res = bert_tokenizer(lines[i:(i + 100)], add_special_tokens=True, truncation=True, max_length=512)
bert_res.extend(res['input_ids'])
assert (len(bert_res) == len(lines))
ref_ids = []
for (input_ids, chinese_word) in zip(bert_res, ltp_res):
input_tokens = []
for id in input_ids:
token = bert_tokenizer._convert_id_to_token(id)
input_tokens.append(token)
input_tokens = add_sub_symbol(input_tokens, chinese_word)
ref_id = []
for (i, token) in enumerate(input_tokens):
if (token[:2] == '##'):
clean_token = token[2:]
if ((len(clean_token) == 1) and _is_chinese_char(ord(clean_token))):
ref_id.append(i)
ref_ids.append(ref_id)
assert (len(ref_ids) == len(bert_res))
return ref_ids |
def get_remote_dir_to_local(remote_dir, local_dir, over_write=False):
file_list = get_file_list(remote_dir)
[get_remote_file_to_local(file, os.path.join(local_dir, os.path.basename(file)), over_write=over_write) for file in file_list] |
class BatchInput(collections.namedtuple('BatchInput', ('key_input', 'val_input', 'input_lens', 'target_input', 'target_output', 'output_lens', 'group', 'group_lens', 'group_cnt', 'target_type', 'target_type_lens', 'text', 'slens', 'category'))):
pass |
class Wav2Vec2CTCTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', word_delimiter_token='|', replace_word_delimiter_char=' ', do_lower_case=False, **kwargs):
super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, word_delimiter_token=word_delimiter_token, replace_word_delimiter_char=replace_word_delimiter_char, **kwargs)
self._word_delimiter_token = word_delimiter_token
self.do_lower_case = do_lower_case
self.replace_word_delimiter_char = replace_word_delimiter_char
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for (k, v) in self.encoder.items()}
for token in self.encoder.keys():
if (len(token) > 1):
self.unique_no_split_tokens.append(token)
self._create_trie(self.unique_no_split_tokens)
@property
def word_delimiter_token(self) -> str:
if ((self._word_delimiter_token is None) and self.verbose):
logger.error('Using word_delimiter_token, but it is not set yet.')
return None
return str(self._word_delimiter_token)
@property
def word_delimiter_token_id(self) -> Optional[int]:
if (self._word_delimiter_token is None):
return None
return self.convert_tokens_to_ids(self.word_delimiter_token)
@word_delimiter_token.setter
def word_delimiter_token(self, value):
self._word_delimiter_token = value
@word_delimiter_token_id.setter
def word_delimiter_token_id(self, value):
self._word_delimiter_token = self.convert_tokens_to_ids(value)
@property
def vocab_size(self) -> int:
return len(self.decoder)
def get_vocab(self) -> Dict:
return dict(self.encoder, **self.added_tokens_encoder)
def _tokenize(self, text, **kwargs):
if self.do_lower_case:
text = text.upper()
return list(text.replace(' ', self.word_delimiter_token))
def _convert_token_to_id(self, token: str) -> int:
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index: int) -> str:
result = self.decoder.get(index, self.unk_token)
return result
def convert_tokens_to_string(self, tokens: List[str], group_tokens: bool=True, spaces_between_special_tokens: bool=False, output_char_offsets: bool=False, output_word_offsets: bool=False) -> Dict[(str, Union[(str, float)])]:
if (len(tokens) == 0):
return {'text': '', 'char_offsets': [], 'word_offsets': []}
if group_tokens:
(chars, char_repetitions) = zip(*((token, len(list(group_iter))) for (token, group_iter) in groupby(tokens)))
else:
chars = tokens
char_repetitions = (len(tokens) * [1])
processed_chars = list(filter((lambda char: (char != self.pad_token)), chars))
processed_chars = [(self.replace_word_delimiter_char if (char == self.word_delimiter_token) else char) for char in processed_chars]
char_offsets = word_offsets = None
if (output_char_offsets or output_word_offsets):
char_offsets = self._compute_offsets(char_repetitions, chars, self.pad_token)
if (len(char_offsets) != len(processed_chars)):
raise ValueError(f'`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars} have to be of the same length, but are: `len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}')
for (i, char) in enumerate(processed_chars):
char_offsets[i]['char'] = char
word_offsets = None
if output_word_offsets:
word_offsets = self._get_word_offsets(char_offsets, self.replace_word_delimiter_char)
if (not output_char_offsets):
char_offsets = None
join_char = (' ' if spaces_between_special_tokens else '')
string = join_char.join(processed_chars).strip()
if self.do_lower_case:
string = string.lower()
return {'text': string, 'char_offsets': char_offsets, 'word_offsets': word_offsets}
@staticmethod
def _compute_offsets(char_repetitions: List[int], chars: List[str], ctc_token: int) -> List[Dict[(str, Union[(str, int)])]]:
end_indices = np.asarray(char_repetitions).cumsum()
start_indices = np.concatenate(([0], end_indices[:(- 1)]))
offsets = [{'char': t, 'start_offset': s, 'end_offset': e} for (t, s, e) in zip(chars, start_indices, end_indices)]
offsets = list(filter((lambda offsets: (offsets['char'] != ctc_token)), offsets))
return offsets
@staticmethod
def _get_word_offsets(offsets: Dict[(str, Union[(str, float)])], word_delimiter_char: str=' ') -> Dict[(str, Union[(str, float)])]:
word_offsets = []
last_state = 'SPACE'
word = ''
start_offset = 0
end_offset = 0
for (i, offset) in enumerate(offsets):
char = offset['char']
state = ('SPACE' if (char == word_delimiter_char) else 'WORD')
if (state == last_state):
end_offset = offset['end_offset']
word += char
elif (state == 'SPACE'):
word_offsets.append({'word': word, 'start_offset': start_offset, 'end_offset': end_offset})
else:
start_offset = offset['start_offset']
end_offset = offset['end_offset']
word = char
last_state = state
if (last_state == 'WORD'):
word_offsets.append({'word': word, 'start_offset': start_offset, 'end_offset': end_offset})
return word_offsets
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
if is_split_into_words:
text = (' ' + text)
return (text, kwargs)
def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, group_tokens: bool=True, spaces_between_special_tokens: bool=False, output_word_offsets: Optional[bool]=False, output_char_offsets: Optional[bool]=False) -> str:
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
result = []
for token in filtered_tokens:
if (skip_special_tokens and (token in self.all_special_ids)):
continue
result.append(token)
string_output = self.convert_tokens_to_string(result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, output_word_offsets=output_word_offsets, output_char_offsets=output_char_offsets)
text = string_output['text']
clean_up_tokenization_spaces = (clean_up_tokenization_spaces if (clean_up_tokenization_spaces is not None) else self.clean_up_tokenization_spaces)
if clean_up_tokenization_spaces:
text = self.clean_up_tokenization(text)
if (output_word_offsets or output_char_offsets):
return Wav2Vec2CTCTokenizerOutput(text=text, char_offsets=string_output['char_offsets'], word_offsets=string_output['word_offsets'])
else:
return text
def batch_decode(self, sequences: Union[(List[int], List[List[int]], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, output_char_offsets: bool=False, output_word_offsets: bool=False, **kwargs) -> List[str]:
batch_decoded = [self.decode(seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, output_word_offsets=output_word_offsets, **kwargs) for seq in sequences]
if (output_char_offsets or output_word_offsets):
return Wav2Vec2CTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]})
return batch_decoded
def decode(self, token_ids: Union[(int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, output_char_offsets: bool=False, output_word_offsets: bool=False, **kwargs) -> str:
token_ids = to_py_obj(token_ids)
return self._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, output_word_offsets=output_word_offsets, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
return (vocab_file,)
def _add_tokens(self, new_tokens: Union[(List[str], List[AddedToken])], special_tokens: bool=False) -> int:
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
assert isinstance(token, str)
if ((not special_tokens) and hasattr(self, 'do_lower_case') and self.do_lower_case):
token = token.lower()
if ((token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)) and (token not in tokens_to_add)):
tokens_to_add.append(token)
if self.verbose:
logger.info(f'Adding {token} to the vocabulary')
added_tok_encoder = {tok: (len(self) + i) for (i, tok) in enumerate(tokens_to_add)}
added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
for token in tokens_to_add:
if (len(token) > 1):
self._additional_special_tokens.append(AddedToken(token))
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, token)
self._create_trie(self.unique_no_split_tokens)
return len(tokens_to_add) |
class DataLoader():
max_time_num: int
full_node_list: list
node2idx_dict: dict
node_num: int
has_cuda: bool
def __init__(self, node_list, max_time_num, has_cuda=False):
self.max_time_num = max_time_num
self.full_node_list = node_list
self.node_num = len(self.full_node_list)
self.node2idx_dict = dict(zip(self.full_node_list, np.arange(self.node_num)))
self.has_cuda = has_cuda
def get_date_adj_list(self, origin_base_path, start_idx, duration, sep='\t', normalize=False, row_norm=False, add_eye=False, data_type='tensor'):
assert (data_type in ['tensor', 'matrix'])
date_dir_list = sorted(os.listdir(origin_base_path))
date_adj_list = []
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
original_graph_path = os.path.join(origin_base_path, date_dir_list[i])
spmat = get_sp_adj_mat(original_graph_path, self.full_node_list, sep=sep)
if add_eye:
spmat = (spmat + sp.eye(spmat.shape[0]))
if normalize:
spmat = get_normalized_adj(spmat, row_norm=row_norm)
if (data_type == 'tensor'):
sptensor = sparse_mx_to_torch_sparse_tensor(spmat)
date_adj_list.append((sptensor.cuda() if self.has_cuda else sptensor))
else:
date_adj_list.append(spmat)
return date_adj_list
def get_core_adj_list(self, core_base_path, start_idx, duration, max_core=(- 1)):
date_dir_list = sorted(os.listdir(core_base_path))
time_stamp_num = len(date_dir_list)
assert (start_idx < time_stamp_num)
core_adj_list = []
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
date_dir_path = os.path.join(core_base_path, date_dir_list[i])
f_list = sorted(os.listdir(date_dir_path))
core_file_num = len(f_list)
tmp_adj_list = []
if (max_core == (- 1)):
max_core = core_file_num
f_list = f_list[:max_core]
f_list = f_list[::(- 1)]
spmat_list = []
for (j, f_name) in enumerate(f_list):
spmat = sp.load_npz(os.path.join(date_dir_path, f_name))
spmat_list.append(spmat)
if (j == 0):
spmat = (spmat + sp.eye(spmat.shape[0]))
else:
delta = (spmat - spmat_list[(j - 1)])
if (delta.sum() == 0):
continue
sptensor = sparse_mx_to_torch_sparse_tensor(spmat)
tmp_adj_list.append((sptensor.cuda() if self.has_cuda else sptensor))
core_adj_list.append(tmp_adj_list)
return core_adj_list
def get_node_pair_list(self, walk_pair_base_path, start_idx, duration):
walk_file_list = sorted(os.listdir(walk_pair_base_path))
node_pair_list = []
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
walk_file_path = os.path.join(walk_pair_base_path, walk_file_list[i])
walk_spadj = sp.load_npz(walk_file_path)
neighbor_arr = walk_spadj.tolil().rows
node_pair_list.append(neighbor_arr)
return node_pair_list
def get_node_freq_list(self, node_freq_base_path, start_idx, duration):
freq_file_list = sorted(os.listdir(node_freq_base_path))
node_freq_list = []
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
freq_file_path = os.path.join(node_freq_base_path, freq_file_list[i])
with open(freq_file_path, 'r') as fp:
node_freq_arr = json.load(fp)
node_freq_list.append(node_freq_arr)
return node_freq_list
def get_degree_feature_list(self, origin_base_path, start_idx, duration, sep='\t', init_type='gaussian', std=0.0001):
assert (init_type in ['gaussian', 'adj', 'combine', 'one-hot'])
x_list = []
max_degree = 0
adj_list = []
degree_list = []
date_dir_list = sorted(os.listdir(origin_base_path))
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
original_graph_path = os.path.join(origin_base_path, date_dir_list[i])
adj = get_sp_adj_mat(original_graph_path, self.full_node_list, sep=sep)
adj_list.append(adj)
degrees = adj.sum(axis=1).astype(int)
max_degree = max(max_degree, degrees.max())
degree_list.append(degrees)
input_dim = 0
for (i, degrees) in enumerate(degree_list):
if (init_type == 'gaussian'):
fea_list = []
for degree in degrees:
fea_list.append(np.random.normal(degree, std, (max_degree + 1)))
fea_arr = np.array(fea_list).astype(np.float32)
input_dim = fea_arr.shape[1]
fea_tensor = torch.from_numpy(fea_arr).float()
x_list.append((fea_tensor.cuda() if self.has_cuda else fea_tensor))
elif (init_type == 'adj'):
input_dim = self.node_num
feat_tensor = sparse_mx_to_torch_sparse_tensor(adj_list[i])
x_list.append((feat_tensor.cuda() if self.has_cuda else feat_tensor))
elif (init_type == 'combine'):
fea_list = []
for degree in degrees:
fea_list.append(np.random.normal(degree, std, (max_degree + 1)))
sp_feat = sp.coo_matrix(np.array(fea_list))
sp_feat = sp.hstack((sp_feat, adj_list[i])).astype(np.float32)
input_dim = sp_feat.shape[1]
feat_tensor = sparse_mx_to_torch_sparse_tensor(sp_feat)
x_list.append((feat_tensor.cuda() if self.has_cuda else feat_tensor))
else:
data = np.ones(degrees.shape[0], dtype=int)
row = np.arange(degrees.shape[0])
col = degrees.flatten().A[0]
spmat = sp.csr_matrix((data, (row, col)), shape=(degrees.shape[0], (max_degree + 1)))
sptensor = sparse_mx_to_torch_sparse_tensor(spmat)
x_list.append((sptensor.cuda() if self.has_cuda else sptensor))
input_dim = (max_degree + 1)
return (x_list, input_dim)
def get_feature_list(self, feature_base_path, start_idx, duration, sep='\t', shuffle=False):
if (feature_base_path is None):
x_list = []
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
if shuffle:
node_indices = (np.random.permutation(np.arange(self.node_num)) if shuffle else np.arange(self.node_num))
spmat = sp.coo_matrix((np.ones(self.node_num), (np.arange(self.node_num), node_indices)), shape=(self.node_num, self.node_num))
else:
spmat = sp.eye(self.node_num)
sptensor = sparse_mx_to_torch_sparse_tensor(spmat)
x_list.append((sptensor.cuda() if self.has_cuda else sptensor))
input_dim = self.node_num
else:
feature_file_list = sorted(os.listdir(feature_base_path))
x_list = []
feature_arr_list = []
max_feature_dim = 0
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
feature_file_path = os.path.join(feature_base_path, feature_file_list[i])
df_feature = pd.read_csv(feature_file_path, sep=sep, header=0)
max_feature_dim = max(max_feature_dim, df_feature.shape[1])
feature_arr = df_feature.values
feature_arr_list.append(feature_arr)
for feature_arr in feature_arr_list:
(batch_dim, feature_dim) = feature_arr.shape
expand_feature_arr = np.hstack((feature_arr, np.zeros((batch_dim, (max_feature_dim - feature_dim))))).astype(np.float32)
fea_tensor = torch.from_numpy(expand_feature_arr).float()
x_list.append((fea_tensor.cuda() if self.has_cuda else fea_tensor))
input_dim = max_feature_dim
return (x_list, input_dim)
def get_node_label_list(self, nlabel_base_path, start_idx, duration, sep='\t'):
nlabel_file_list = sorted(os.listdir(nlabel_base_path))
node_label_list = []
label_dict = dict()
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
nlabel_file_path = os.path.join(nlabel_base_path, nlabel_file_list[i])
df_nodes = pd.read_csv(nlabel_file_path, sep=sep, header=0, names=['node', 'label'])
df_nodes['node'] = df_nodes['node'].apply((lambda x: self.node2idx_dict[x]))
unique_labels = df_nodes['label'].unique()
for label in unique_labels:
label_dict[label] = 1
node_labels = torch.from_numpy(df_nodes.values).long()
node_label_list.append((node_labels.cuda() if self.has_cuda else node_labels))
return (node_label_list, len(label_dict.keys()))
def get_edge_label_list(self, elabel_base_path, start_idx, duration, sep='\t'):
elabel_file_list = sorted(os.listdir(elabel_base_path))
edge_label_list = []
label_dict = dict()
for i in range(start_idx, min((start_idx + duration), self.max_time_num)):
elabel_file_path = os.path.join(elabel_base_path, elabel_file_list[i])
df_edges = pd.read_csv(elabel_file_path, sep=sep, header=0, names=['from_id', 'to_id', 'label'])
df_edges[['from_id', 'to_id']] = df_edges[['from_id', 'to_id']].applymap((lambda x: self.node2idx_dict[x]))
unique_labels = df_edges['label'].unique()
for label in unique_labels:
label_dict[label] = 1
edge_labels = torch.from_numpy(df_edges.values).long()
edge_label_list.append((edge_labels.cuda() if self.has_cuda else edge_labels))
return (edge_label_list, len(label_dict.keys())) |
class EnglishSpellingNormalizer():
def __init__(self):
mapping_path = os.path.join(os.path.dirname(__file__), 'english.json')
self.mapping = json.load(open(mapping_path))
def __call__(self, s: str):
return ' '.join((self.mapping.get(word, word) for word in s.split())) |
class FLSampler(Sampler):
def __init__(self, indices_partition: List[List], num_round, data_per_client, client_selection, client_per_round=None):
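# Pre-computes a flat index sequence: every round, each selected client contributes its next data_per_client (reshuffled as needed) sample indices.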
self.sequence = []
num_partition = len(indices_partition)
range_partition = list(range(num_partition))
copy_list_ind = deepcopy(indices_partition)
new_list_ind = [[] for _ in range(num_partition)]
if client_selection:
assert (client_per_round is not None)
assert (client_per_round <= num_partition)
list_pos = ([0] * num_partition)
for rd_idx in range(num_round):
if client_selection:
selected_client_idx = random.sample(range_partition, client_per_round)
else:
selected_client_idx = range_partition
for client_idx in selected_client_idx:
ind = copy_list_ind[client_idx]
pos = list_pos[client_idx]
while (len(new_list_ind[client_idx]) < (pos + data_per_client)):
random.shuffle(ind)
new_list_ind[client_idx].extend(ind)
self.sequence.extend(new_list_ind[client_idx][pos:(pos + data_per_client)])
list_pos[client_idx] = (pos + data_per_client)
def __iter__(self):
return iter(self.sequence)
def __len__(self):
return len(self.sequence) |
def split_ds(ds, split=[0.8, 0.2, 0.0], shuffle=True):
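# Normalizes the split fractions, optionally shuffles the indices, and carries fractional remainders so the split sizes add up to len(ds).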
split_sum = sum(split)
if (split_sum == 0):
raise Exception('Split cannot sum to 0.')
split = np.array(split)
split /= split_sum
ds_len = len(ds)
inds = np.arange(ds_len)
if shuffle:
np.random.shuffle(inds)
start_idx = 0
residual_idx = 0
rtn_ds = ([None] * len(split))
for (i, f) in enumerate(split):
if (f != 0):
proportion = (ds_len * split[i])
residual_idx += (proportion % 1)
split_ = int((int(proportion) + residual_idx))
split_inds = inds[start_idx:(start_idx + max(split_, 1))]
rtn_ds[i] = SplitDataset(ds, split_inds)
start_idx += split_
residual_idx %= 1
return rtn_ds |
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
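# Inception-ResNet 'Block35' unit: three parallel convolution branches are concatenated, projected back to the input depth with a 1x1 conv, scaled, and added to the residual input.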
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
scaled_up = (up * scale)
if (activation_fn == tf.nn.relu6):
scaled_up = tf.clip_by_value(scaled_up, (- 6.0), 6.0)
net += scaled_up
if activation_fn:
net = activation_fn(net)
return net |
class PolySineTX(PolyGenerator):
def help(self):
return 'Used for Hamiltonian simultion for time tau. Error is epsilon'
def generate(self, tau=10.0, epsilon=0.1, return_coef=True, ensure_bounded=True, return_scale=False):
r = scipy.optimize.fsolve((lambda r: ((((np.e * np.abs(tau)) / (2 * r)) ** r) - ((5 / 4) * epsilon))), tau)[0]
print(r)
R = np.floor((r / 2)).astype(int)
R = max(R, 1)
print(f'R={R}')
g = np.polynomial.chebyshev.Chebyshev([0])
for k in range(0, (R + 1)):
gcoef = (2 * scipy.special.jv(((2 * k) + 1), tau))
deg = ((2 * k) + 1)
g += ((((- 1) ** k) * gcoef) * np.polynomial.chebyshev.Chebyshev((([0] * deg) + [1])))
if ensure_bounded:
scale = 0.5
g = (scale * g)
print(f'[PolySineTX] rescaling by {scale}.')
if return_coef:
pcoefs = np.polynomial.chebyshev.cheb2poly(g.coef)
if (ensure_bounded and return_scale):
return (pcoefs, scale)
else:
return pcoefs
return g |
class ImprovementEmitter(EmitterBase):
def __init__(self, archive, x0, sigma0, selection_rule='filter', restart_rule='no_improvement', weight_rule='truncation', bounds=None, batch_size=None, seed=None):
self._rng = np.random.default_rng(seed)
self._batch_size = batch_size
self._x0 = np.array(x0, dtype=archive.dtype)
self._sigma0 = sigma0
EmitterBase.__init__(self, archive, len(self._x0), bounds)
if (selection_rule not in ['mu', 'filter']):
raise ValueError(f'Invalid selection_rule {selection_rule}')
self._selection_rule = selection_rule
if (restart_rule not in ['basic', 'no_improvement']):
raise ValueError(f'Invalid restart_rule {restart_rule}')
self._restart_rule = restart_rule
opt_seed = (None if (seed is None) else self._rng.integers(10000))
self.opt = CMAEvolutionStrategy(sigma0, batch_size, self._solution_dim, weight_rule, opt_seed, self.archive.dtype)
self.opt.reset(self._x0)
self._num_parents = ((self.opt.batch_size // 2) if (selection_rule == 'mu') else None)
self._batch_size = self.opt.batch_size
self._restarts = 0
@property
def x0(self):
return self._x0
@property
def sigma0(self):
return self._sigma0
@property
def batch_size(self):
return self._batch_size
def ask(self, grad_estimate=False):
return self.opt.ask(self.lower_bounds, self.upper_bounds)
def _check_restart(self, num_parents):
if (self._restart_rule == 'no_improvement'):
return (num_parents == 0)
return False
def tell(self, solutions, objective_values, behavior_values, jacobian=None, metadata=None):
ranking_data = []
new_sols = 0
metadata = (itertools.repeat(None) if (metadata is None) else metadata)
for (i, (sol, obj, beh, meta)) in enumerate(zip(solutions, objective_values, behavior_values, metadata)):
(status, value) = self.archive.add(sol, obj, beh, meta)
ranking_data.append((status, value, i))
if (status in (AddStatus.NEW, AddStatus.IMPROVE_EXISTING)):
new_sols += 1
ranking_data.sort(reverse=True)
indices = [d[2] for d in ranking_data]
num_parents = (new_sols if (self._selection_rule == 'filter') else self._num_parents)
self.opt.tell(solutions[indices], num_parents)
if (self.opt.check_stop([value for (status, value, i) in ranking_data]) or self._check_restart(new_sols)):
new_x0 = self.archive.get_random_elite()[0]
self.opt.reset(new_x0)
self._restarts += 1 |
@register_function('ger')
class AutogradGer(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
ctx.save_multiple_for_backward([input, other])
return input.ger(other)
@staticmethod
def backward(ctx, grad_output):
(input, other) = ctx.saved_tensors
return (grad_output.matmul(other), input.matmul(grad_output)) |
class SparseBasicBlock(BasicBlock, spconv.SparseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, conv_cfg=None, norm_cfg=None):
spconv.SparseModule.__init__(self)
BasicBlock.__init__(self, inplanes, planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
def forward(self, x):
identity = x.features
assert (x.features.dim() == 2), f'x.features.dim()={x.features.dim()}'
out = self.conv1(x)
out.features = self.norm1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.norm2(out.features)
if (self.downsample is not None):
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
return out |
def add_noise(input):
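# Draw low-resolution Gaussian noise, upsample it to the image size, and apply a random circular shift so the coarse pattern is not aligned across samples.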
ns = torch.normal(mean=torch.zeros(input.shape[0], input.shape[1], config.noise_res, config.noise_res), std=config.noise_std).to(config.device)
ns = F.interpolate(ns, size=config.image_size, mode='bilinear', align_corners=True)
roll_x = random.choice(range(config.image_size))
roll_y = random.choice(range(config.image_size))
ns = torch.roll(ns, shifts=[roll_x, roll_y], dims=[(- 2), (- 1)])
if (config.modality == 'MRI'):
mask = torch.cat([(inp > inp.min()) for inp in input]).unsqueeze(1)
ns *= mask
if config.center:
ns = ((ns - 0.5) * 2)
res = (input + ns)
return (res, ns) |
def val_epoch(model, val_loader, val_transform, criterion):
if (args.dataset == 'ICBHI'):
TP = [0, 0, 0, 0]
GT = [0, 0, 0, 0]
elif (args.dataset == 'SPRS'):
TP = [0, 0, 0, 0, 0, 0, 0]
GT = [0, 0, 0, 0, 0, 0, 0]
epoch_loss = 0.0
model.eval()
with torch.no_grad():
for (data, target, _) in val_loader:
(data, target) = (data.to(args.device), target.to(args.device))
output = model(val_transform(data))
loss = criterion(output, target)
epoch_loss += loss.item()
(_, labels_predicted) = torch.max(output, dim=1)
for idx in range(len(TP)):
TP[idx] += torch.logical_and((labels_predicted == idx), (target == idx)).sum().item()
GT[idx] += (target == idx).sum().item()
epoch_loss = (epoch_loss / len(val_loader))
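# ICBHI-style metrics: sensitivity over the abnormal classes (indices 1+), specificity on the normal class (index 0), and their mean as the score.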
se = (sum(TP[1:]) / sum(GT[1:]))
sp = (TP[0] / GT[0])
icbhi_score = ((se + sp) / 2)
acc = (sum(TP) / sum(GT))
return (epoch_loss, se, sp, icbhi_score, acc) |
class IGCV3(nn.Module):
def __init__(self, channels, init_block_channels, final_block_channels, in_channels=3, in_size=(224, 224), num_classes=1000):
super(IGCV3, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, stride=2, activation='relu6'))
in_channels = init_block_channels
for (i, channels_per_stage) in enumerate(channels):
stage = nn.Sequential()
for (j, out_channels) in enumerate(channels_per_stage):
stride = (2 if ((j == 0) and (i != 0)) else 1)
expansion = ((i != 0) or (j != 0))
stage.add_module('unit{}'.format((j + 1)), InvResUnit(in_channels=in_channels, out_channels=out_channels, stride=stride, expansion=expansion))
in_channels = out_channels
self.features.add_module('stage{}'.format((i + 1)), stage)
self.features.add_module('final_block', conv1x1_block(in_channels=in_channels, out_channels=final_block_channels, activation='relu6'))
in_channels = final_block_channels
self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
self._init_params()
def _init_params(self):
for (name, module) in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if (module.bias is not None):
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), (- 1))
x = self.output(x)
return x |
@register_model
def resnet26(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['resnet26']
model = ResNet(Bottleneck, [2, 2, 2, 2], num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model |
def test_add_edges_max(g1, g2):
assert (g1.num_e == 2)
g1.add_edges((3, 2), e_weight=0.5, merge_op='max')
assert (g1.num_e == 3)
assert ((2, 3) in g1.e[0])
assert ((3, 2) not in g1.e[0])
assert (g1.A[(3, 2)] == 0.5)
assert (g2.num_e == 3)
g2.add_edges(((1, 2), (1, 3)), e_weight=[0.1, 0.2], merge_op='max')
assert (g2.num_e == 5)
assert ((1, 2) in g2.e[0])
assert (g2.A[(1, 2)] == 0.1)
assert ((2, 1) in g2.e_both_side[0])
assert (g2.A[(2, 1)] == 0.1)
g2.add_edges(((3, 2), (3, 1)), e_weight=[1.1, 2.1], merge_op='max')
assert (g2.num_e == 6)
assert ((2, 3) in g2.e[0])
assert (g2.A[(3, 2)] == 1.1)
assert ((2, 3) in g2.e_both_side[0])
assert (g2.A[(2, 3)] == 1.1)
assert (g2.A[(1, 3)] == 2.1) |
def double_double_cascade_step(dim, embsys, esols, tasks=0):
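# One step of the cascade homotopy in double double precision: takes an embedded system with its witness set and computes start solutions for the next lower dimension.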
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_system
from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_solutions
from phcpy.phcpy2c3 import py2c_dobldobl_cascade_homotopy
from phcpy.phcpy2c3 import py2c_solve_by_dobldobl_homotopy_continuation
from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
from phcpy.phcpy2c3 import py2c_copy_dobldobl_target_solutions_to_container
from phcpy.interface import store_dobldobl_witness_set
from phcpy.interface import load_dobldobl_solutions
store_dobldobl_witness_set(len(embsys), dim, embsys, esols)
py2c_copy_dobldobl_container_to_start_system()
py2c_copy_dobldobl_container_to_start_solutions()
py2c_dobldobl_cascade_homotopy()
py2c_solve_by_dobldobl_homotopy_continuation(tasks)
py2c_solcon_clear_dobldobl_solutions()
py2c_copy_dobldobl_target_solutions_to_container()
return load_dobldobl_solutions() |
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, TCP_module=None):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.bn_data = norm_layer(3, eps=2e-05)
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes, eps=2e-05)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilate=replace_stride_with_dilation[2])
self.bn2 = norm_layer((512 * block.expansion), eps=2e-05)
self.fc = nn.Linear((512 * block.expansion), num_classes)
if (TCP_module is None):
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.TCP = None
else:
self.TCP = TCP_module
for m in self.modules():
if (m == self.TCP):
break
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.bn_data(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = self.relu(x)
if (self.TCP is not None):
x = self.TCP(x)
else:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x) |
def _create_skresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=default_cfgs[variant], **kwargs) |
def get_strategy(load_strategy):
status = True
data = None
if isinstance(load_strategy, str):
try:
with open(load_strategy, 'rb') as fp:
data = fp.read()
except Exception as e:
logger.error(e)
status = False
elif isinstance(load_strategy, bytes):
data = load_strategy
elif isinstance(load_strategy, Strategy):
strategy = load_strategy
elif isinstance(load_strategy, list):
strategy = Strategy()
for item in load_strategy:
if isinstance(item, str):
strategy.add_opt((item, None, None))
elif ((isinstance(item, tuple) or isinstance(item, list)) and (len(item) == 2)):
strategy.add_opt((item[0], item[1], (item[0] in SEMIAUTO_STRATEGIES)))
else:
logger.error('When use list for load_strategy, should be a list of name or (name, config).')
status = False
break
else:
logger.error('load_strategy should be str for file, or bytes for memory, or list of name or (name, config)')
status = False
if (status and (data is not None)):
try:
strategy = pickle.loads(data)
status = isinstance(strategy, Strategy)
except Exception:
logger.error('load_strategy is not in correct strategy format')
status = False
if (status is False):
return (False, None)
return (status, strategy) |
def is_cudnn_snafu(exception):
return (isinstance(exception, RuntimeError) and (len(exception.args) == 1) and ('cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.' in exception.args[0])) |
def _f_p_r_2(l, m, n):
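# l = matched count, m = gold total, n = predicted total; returns an F-score with beta = p / r alongside precision and recall (the 1e-12 terms guard division by zero).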
r = ((l / m) if (m > 0) else 0.0)
p = ((l / n) if (n > 0) else 0.0)
beta = (p / (r + 1e-12))
num = (((1 + (beta ** 2)) * r) * p)
denom = (r + ((beta ** 2) * p))
f = (num / (denom + 1e-12))
return (f, p, r) |
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseDeepLabHead(nn.Module):
def __init__(self, cfg: CfgNode, input_channels: int):
super(DensePoseDeepLabHead, self).__init__()
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON
pad_size = (kernel_size // 2)
n_channels = input_channels
self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels)
self.add_module('ASPP', self.ASPP)
if self.use_nonlocal:
self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True)
self.add_module('NLBlock', self.NLBlock)
for i in range(self.n_stacked_convs):
norm_module = (nn.GroupNorm(32, hidden_dim) if (norm == 'GN') else None)
layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size, bias=(not norm), norm=norm_module)
weight_init.c2_msra_fill(layer)
n_channels = hidden_dim
layer_name = self._get_layer_name(i)
self.add_module(layer_name, layer)
self.n_out_channels = hidden_dim
def forward(self, features):
x0 = features
x = self.ASPP(x0)
if self.use_nonlocal:
x = self.NLBlock(x)
output = x
for i in range(self.n_stacked_convs):
layer_name = self._get_layer_name(i)
x = getattr(self, layer_name)(x)
x = F.relu(x)
output = x
return output
def _get_layer_name(self, i: int):
layer_name = 'body_conv_fcn{}'.format((i + 1))
return layer_name |
class LoggerHook(Hook, metaclass=ABCMeta):
def __init__(self, interval=10, ignore_last=True, reset_flag=False, by_epoch=True):
self.interval = interval
self.ignore_last = ignore_last
self.reset_flag = reset_flag
self.by_epoch = by_epoch
@abstractmethod
def log(self, runner):
pass
@staticmethod
def is_scalar(val, include_np=True, include_torch=True):
if isinstance(val, numbers.Number):
return True
elif (include_np and isinstance(val, np.ndarray) and (val.ndim == 0)):
return True
elif (include_torch and isinstance(val, torch.Tensor) and (len(val) == 1)):
return True
else:
return False
def get_mode(self, runner):
if (runner.mode == 'train'):
if ('time' in runner.log_buffer.output):
mode = 'train'
else:
mode = 'val'
elif (runner.mode == 'val'):
mode = 'val'
else:
raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
return mode
def get_epoch(self, runner):
if (runner.mode == 'train'):
epoch = (runner.epoch + 1)
elif (runner.mode == 'val'):
epoch = runner.epoch
else:
raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
return epoch
def get_iter(self, runner, inner_iter=False):
if (self.by_epoch and inner_iter):
current_iter = (runner.inner_iter + 1)
else:
current_iter = (runner.iter + 1)
return current_iter
def get_lr_tags(self, runner):
tags = {}
lrs = runner.current_lr()
if isinstance(lrs, dict):
for (name, value) in lrs.items():
tags[f'learning_rate/{name}'] = value[0]
else:
tags['learning_rate'] = lrs[0]
return tags
def get_momentum_tags(self, runner):
tags = {}
momentums = runner.current_momentum()
if isinstance(momentums, dict):
for (name, value) in momentums.items():
tags[f'momentum/{name}'] = value[0]
else:
tags['momentum'] = momentums[0]
return tags
def get_loggable_tags(self, runner, allow_scalar=True, allow_text=False, add_mode=True, tags_to_skip=('time', 'data_time')):
tags = {}
for (var, val) in runner.log_buffer.output.items():
if (var in tags_to_skip):
continue
if (self.is_scalar(val) and (not allow_scalar)):
continue
if (isinstance(val, str) and (not allow_text)):
continue
if add_mode:
var = f'{self.get_mode(runner)}/{var}'
tags[var] = val
tags.update(self.get_lr_tags(runner))
tags.update(self.get_momentum_tags(runner))
return tags
def before_run(self, runner):
for hook in runner.hooks[::(- 1)]:
if isinstance(hook, LoggerHook):
hook.reset_flag = True
break
def before_epoch(self, runner):
runner.log_buffer.clear()
def after_train_iter(self, runner):
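# Average the buffered values every `interval` iterations (inner iterations when by_epoch), and at the end of an epoch unless ignore_last is set; log whenever the buffer is ready.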
if (self.by_epoch and self.every_n_inner_iters(runner, self.interval)):
runner.log_buffer.average(self.interval)
elif ((not self.by_epoch) and self.every_n_iters(runner, self.interval)):
runner.log_buffer.average(self.interval)
elif (self.end_of_epoch(runner) and (not self.ignore_last)):
runner.log_buffer.average(self.interval)
if runner.log_buffer.ready:
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
def after_train_epoch(self, runner):
if runner.log_buffer.ready:
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
def after_val_epoch(self, runner):
runner.log_buffer.average()
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output() |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--eval_model', type=str, default='', help='evaluation model path')
parser.add_argument('--data_dir', type=str, default='./data', help='data directory')
args = parser.parse_args()
logging.root.handlers = []
logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(message)s', handlers=[logging.StreamHandler()])
model = define_model()
assert os.path.isfile(args.eval_model), f"No checkpoint found at '{args.eval_model}'"
model = torch.nn.DataParallel(model).cuda()
model_state = torch.load(args.eval_model)
logging.info(f'Loading checkpoint from {args.eval_model}')
model.load_state_dict(model_state['state_dict'], strict=False)
logging.info('Loaded successfully!')
test_loader = loaddata.getTestingData(args, 1)
test(test_loader, model) |
def desirable(tag):
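# Keep (token, POS) pairs whose token opens a structural element ('paragraph', '-', '[') or that are digit-only cardinal numbers (POS tag 'CD').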
return ((tag[0] in ['paragraph', '-', '[']) or ((tag[1] in ['CD']) and tag[0].isdigit())) |
def test_env_supertype_in_env_bad():
with pytest.raises(Exception):
MockEnv(env_supertype={'xxx': 0.0}) |
class DataToTensor():
def __init__(self, dtype=None):
if (dtype is None):
dtype = torch.float
self.dtype = dtype
def __call__(self, data):
return torch.tensor(data, dtype=self.dtype) |
def project_real_images(network_pkl, dataset_name, data_dir, num_images, num_snapshots):
print(('Loading networks from "%s"...' % network_pkl))
(_G, _D, Gs) = pretrained_networks.load_networks(network_pkl)
proj = projector.Projector()
proj.set_network(Gs)
print(('Loading images from "%s"...' % dataset_name))
dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0, repeat=False, shuffle_mb=0)
assert (dataset_obj.shape == Gs.output_shape[1:])
for image_idx in range(num_images):
print(('Projecting image %d/%d ...' % (image_idx, num_images)))
(images, _labels) = dataset_obj.get_minibatch_np(1)
images = misc.adjust_dynamic_range(images, [0, 255], [(- 1), 1])
project_image(proj, targets=images, png_prefix=dnnlib.make_run_dir_path(('image%04d-' % image_idx)), num_snapshots=num_snapshots) |
def _get_train_test_by_character(plays, test_fraction=0.2):
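# Split each character's sound bites into train/test per character; characters with two or fewer examples are skipped entirely.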
skipped_characters = 0
all_train_examples = collections.defaultdict(list)
all_test_examples = collections.defaultdict(list)
def add_examples(example_dict, example_tuple_list):
for (play, character, sound_bite) in example_tuple_list:
example_dict[play_and_character(play, character)].append(sound_bite)
users_and_plays = {}
for (play, characters) in plays:
curr_characters = list(characters.keys())
for c in curr_characters:
users_and_plays[play_and_character(play, c)] = play
for (character, sound_bites) in characters.items():
examples = [(play, character, sound_bite) for sound_bite in sound_bites]
if (len(examples) <= 2):
skipped_characters += 1
continue
train_examples = examples
if (test_fraction > 0):
num_test = max(int((len(examples) * test_fraction)), 1)
train_examples = examples[:(- num_test)]
test_examples = examples[(- num_test):]
assert (len(test_examples) == num_test)
assert (len(train_examples) >= len(test_examples))
add_examples(all_test_examples, test_examples)
add_examples(all_train_examples, train_examples)
return (users_and_plays, all_train_examples, all_test_examples) |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_corpus_path', type=str, required=True, help='Path to corpus, each line separated by tab, and the first element is id.')
parser.add_argument('--input_query_path', type=str, required=True, help='Path to queries, each line separated by tab, and the first element is id.')
parser.add_argument('--input_qrel_path', type=str, required=True, help='Path to TREC-format qrel file.')
parser.add_argument('--input_run_path', type=str, required=True, help='Path to TREC-format run file.')
parser.add_argument('--topk', type=int, required=True, help='Topk passages/docs per query.')
parser.add_argument('--output_corpus_path', type=str, required=True, help='Output path of corpus.')
parser.add_argument('--output_query_path', type=str, required=True, help='Output path to queries.')
parser.add_argument('--output_qrel_path', type=str, required=True, help='Output path to TREC-format qrel file.')
args = parser.parse_args()
os.makedirs(os.path.dirname(args.output_corpus_path), exist_ok=True)
os.makedirs(os.path.dirname(args.output_query_path), exist_ok=True)
os.makedirs(os.path.dirname(args.output_qrel_path), exist_ok=True)
subprocess.check_call(['cp', args.input_qrel_path, args.output_qrel_path])
subprocess.check_call(['cp', args.input_query_path, args.output_query_path])
docids = sample_docs_from_topics(args.output_qrel_path, args.input_run_path, args.topk)
output_corpus(args.input_corpus_path, args.output_corpus_path, docids) |
class IMDBProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(self._read_corpus(os.path.join(data_dir, 'train_tok.csv'), MR=True, clean=False, shuffle=True), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(self._read_corpus(os.path.join(data_dir, 'test_tok.csv'), MR=True, clean=False, shuffle=True), 'dev')
def get_labels(self):
return ['0', '1']
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = ('%s-%s' % (set_type, i))
text_a = line[0]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples |
def plot_final_scores():
font = {'size': 12}
mpl.rc('font', **font)
(fig, ax) = plt.subplots(nrows=1, ncols=1, figsize=(7, 4))
outfiles = [(RESULT_DIR + 'seq2seq_sample_imagenet_%s_iter_20000.json'), (RESULT_DIR + 'seq2seq_teacher_imagenet_%s_iter_5000.json'), (RESULT_DIR + '%s_stop_agent.json'), (RESULT_DIR + '%s_random_agent.json')]
for split in ['val_seen']:
ev = Evaluation([split])
for (i, outfile) in enumerate(outfiles):
(score_summary, scores) = ev.score((outfile % split))
if (i == 1):
method = 'Teacher-forcing'
ax.hist(scores['nav_errors'], bins=range(0, 30, 3), label=method, density=True, histtype='step', linewidth=2.5, color='C1')
elif (i == 0):
method = 'Student-forcing'
ax.hist(scores['nav_errors'], bins=range(0, 30, 3), label=method, alpha=0.7, density=True, color='C0')
elif (i == 2):
method = 'Start locations'
ax.hist(scores['nav_errors'], bins=range(0, 30, 3), label=method, density=True, histtype='step', linewidth=2.5, color='C3')
elif (i == 3):
method = 'Random agent'
ax.hist(scores['nav_errors'], bins=range(0, 30, 3), label=method, density=True, histtype='step', linewidth=2.5, color='C2')
ax.set_title('Val Seen Navigation Error')
ax.set_xlabel('Error (m)')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 0.14])
ax.set_xlim([0, 30])
plt.axvline(x=3, color='black', linestyle='--')
legend = ax.legend(loc='upper right')
plt.tight_layout()
plt.savefig(('%s/val_seen_error.png' % PLOT_DIR))
plt.close(fig) |
class ResultWriter():
extension: str
def __init__(self, output_dir: str):
self.output_dir = output_dir
def __call__(self, result: dict, audio_path: str, options: dict):
audio_basename = os.path.basename(audio_path)
audio_basename = os.path.splitext(audio_basename)[0]
output_path = os.path.join(self.output_dir, ((audio_basename + '.') + self.extension))
with open(output_path, 'w', encoding='utf-8') as f:
self.write_result(result, file=f, options=options)
def write_result(self, result: dict, file: TextIO, options: dict):
raise NotImplementedError |
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, cover=False):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if (loss_scaler is not None):
if cover:
checkpoint_paths = [(output_dir / 'checkpoint.pth')]
else:
checkpoint_paths = [(output_dir / ('checkpoint-%s.pth' % epoch_name))]
for checkpoint_path in checkpoint_paths:
to_save = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args}
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
model.save_checkpoint(save_dir=args.output_dir, tag=('checkpoint-%s' % epoch_name), client_state=client_state) |
def test_pieri_problem(vrblvl=0):
if (vrblvl > 0):
print('making a real problem ...')
planes = real_osculating_planes(2, 2, 0, vrblvl)
pols = make_pieri_system(2, 2, 0, planes, is_real=True, vrblvl=vrblvl)
if (vrblvl > 0):
for pol in pols:
print(pol)
if (vrblvl > 0):
print('making a complex problem ...')
planes = random_complex_matrices((2 * 2), 4, 2)
if (vrblvl > 0):
for (idx, plane) in enumerate(planes):
print('plane', idx, ':')
for row in plane:
print(row)
pols = make_pieri_system(2, 2, 0, planes, is_real=False, vrblvl=vrblvl)
if (vrblvl > 0):
for pol in pols:
print(pol)
return 0 |
class model():
def __init__(self, config, data, test=False):
self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
self.config = config
self.training_opt = self.config['training_opt']
self.memory = self.config['memory']
self.data = data
self.test_mode = test
self.num_gpus = torch.cuda.device_count()
self.do_shuffle = (config['shuffle'] if ('shuffle' in config) else False)
self.logger = Logger(self.training_opt['log_dir'])
self.init_models()
if (('model_dir' in self.config) and (self.config['model_dir'] is not None)):
self.load_model(self.config['model_dir'])
if (not self.test_mode):
print('Using steps for training.')
self.training_data_num = len(self.data['train'].dataset)
self.epoch_steps = int((self.training_data_num / self.training_opt['batch_size']))
print('Initializing model optimizer.')
self.scheduler_params = self.training_opt['scheduler_params']
(self.model_optimizer, self.model_optimizer_scheduler) = self.init_optimizers(self.model_optim_params_list)
self.init_criterions()
if self.memory['init_centroids']:
self.criterions['FeatureLoss'].centroids.data = self.centroids_cal(self.data['train_plain'])
self.log_file = os.path.join(self.training_opt['log_dir'], 'log.txt')
if os.path.isfile(self.log_file):
os.remove(self.log_file)
self.logger.log_cfg(self.config)
else:
if ('KNNClassifier' in self.config['networks']['classifier']['def_file']):
self.load_model()
if (not self.networks['classifier'].initialized):
cfeats = self.get_knncentroids()
print(('===> Saving features to %s' % os.path.join(self.training_opt['log_dir'], 'cfeats.pkl')))
with open(os.path.join(self.training_opt['log_dir'], 'cfeats.pkl'), 'wb') as f:
pickle.dump(cfeats, f)
self.networks['classifier'].update(cfeats)
self.log_file = None
def init_models(self, optimizer=True):
networks_defs = self.config['networks']
self.networks = {}
self.model_optim_params_list = []
print('Using', torch.cuda.device_count(), 'GPUs.')
for (key, val) in networks_defs.items():
def_file = val['def_file']
model_args = val['params']
model_args.update({'test': self.test_mode})
self.networks[key] = source_import(def_file).create_model(**model_args)
if ('KNNClassifier' in type(self.networks[key]).__name__):
self.networks[key] = self.networks[key].cuda()
else:
self.networks[key] = nn.DataParallel(self.networks[key]).cuda()
if (('fix' in val) and val['fix']):
print('Freezing feature weights except for self attention weights (if exist).')
for (param_name, param) in self.networks[key].named_parameters():
if (('selfatt' not in param_name) and ('fc' not in param_name)):
param.requires_grad = False
optim_params = val['optim_params']
self.model_optim_params_list.append({'params': self.networks[key].parameters(), 'lr': optim_params['lr'], 'momentum': optim_params['momentum'], 'weight_decay': optim_params['weight_decay']})
def init_criterions(self):
criterion_defs = self.config['criterions']
self.criterions = {}
self.criterion_weights = {}
for (key, val) in criterion_defs.items():
def_file = val['def_file']
loss_args = list(val['loss_params'].values())
self.criterions[key] = source_import(def_file).create_loss(*loss_args).cuda()
self.criterion_weights[key] = val['weight']
if val['optim_params']:
print('Initializing criterion optimizer.')
optim_params = val['optim_params']
optim_params = [{'params': self.criterions[key].parameters(), 'lr': optim_params['lr'], 'momentum': optim_params['momentum'], 'weight_decay': optim_params['weight_decay']}]
(self.criterion_optimizer, self.criterion_optimizer_scheduler) = self.init_optimizers(optim_params)
else:
self.criterion_optimizer = None
def init_optimizers(self, optim_params):
optimizer = optim.SGD(optim_params)
if self.config['coslr']:
print('===> Using coslr eta_min={}'.format(self.config['endlr']))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, self.training_opt['num_epochs'], eta_min=self.config['endlr'])
else:
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=self.scheduler_params['step_size'], gamma=self.scheduler_params['gamma'])
return (optimizer, scheduler)
def batch_forward(self, inputs, labels=None, centroids=False, feature_ext=False, phase='train'):
(self.features, self.feature_maps) = self.networks['feat_model'](inputs)
if (not feature_ext):
if (phase != 'test'):
if (centroids and ('FeatureLoss' in self.criterions.keys())):
self.centroids = self.criterions['FeatureLoss'].centroids.data
self.centroids = self.centroids.unsqueeze(0).expand(torch.cuda.device_count(), self.centroids.shape[0], self.centroids.shape[1]).reshape((- 1), (torch.cuda.device_count() * self.centroids.shape[0]), self.centroids.shape[1]).squeeze(0)
else:
self.centroids = None
if (self.centroids is not None):
centroids_ = torch.cat(([self.centroids] * self.num_gpus))
else:
centroids_ = self.centroids
(self.logits, self.direct_memory_feature) = self.networks['classifier'](self.features, centroids_)
def batch_backward(self):
self.model_optimizer.zero_grad()
if self.criterion_optimizer:
self.criterion_optimizer.zero_grad()
self.loss.backward()
self.model_optimizer.step()
if self.criterion_optimizer:
self.criterion_optimizer.step()
def batch_loss(self, labels):
self.loss = 0
if ('PerformanceLoss' in self.criterions.keys()):
self.loss_perf = self.criterions['PerformanceLoss'](self.logits, labels)
self.loss_perf *= self.criterion_weights['PerformanceLoss']
self.loss += self.loss_perf
if ('FeatureLoss' in self.criterions.keys()):
self.loss_feat = self.criterions['FeatureLoss'](self.features, labels)
self.loss_feat = (self.loss_feat * self.criterion_weights['FeatureLoss'])
self.loss += self.loss_feat
def shuffle_batch(self, x, y):
index = torch.randperm(x.size(0))
x = x[index]
y = y[index]
return (x, y)
def train(self):
print_str = ['Phase: train']
print_write(print_str, self.log_file)
time.sleep(0.25)
print_write(['Do shuffle??? --- ', self.do_shuffle], self.log_file)
best_model_weights = {}
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
best_acc = 0.0
best_epoch = 0
end_epoch = self.training_opt['num_epochs']
for epoch in range(1, (end_epoch + 1)):
for model in self.networks.values():
model.train()
torch.cuda.empty_cache()
self.model_optimizer_scheduler.step()
if self.criterion_optimizer:
self.criterion_optimizer_scheduler.step()
total_preds = []
total_labels = []
for (step, (inputs, labels, indexes)) in enumerate(self.data['train']):
if (step == self.epoch_steps):
break
if self.do_shuffle:
(inputs, labels) = self.shuffle_batch(inputs, labels)
(inputs, labels) = (inputs.cuda(), labels.cuda())
with torch.set_grad_enabled(True):
self.batch_forward(inputs, labels, centroids=self.memory['centroids'], phase='train')
self.batch_loss(labels)
self.batch_backward()
(_, preds) = torch.max(self.logits, 1)
total_preds.append(torch2numpy(preds))
total_labels.append(torch2numpy(labels))
if ((step % self.training_opt['display_step']) == 0):
minibatch_loss_feat = (self.loss_feat.item() if ('FeatureLoss' in self.criterions.keys()) else None)
minibatch_loss_perf = (self.loss_perf.item() if ('PerformanceLoss' in self.criterions) else None)
minibatch_loss_total = self.loss.item()
minibatch_acc = mic_acc_cal(preds, labels)
print_str = [('Epoch: [%d/%d]' % (epoch, self.training_opt['num_epochs'])), ('Step: %5d' % step), (('Minibatch_loss_feature: %.3f' % minibatch_loss_feat) if minibatch_loss_feat else ''), (('Minibatch_loss_performance: %.3f' % minibatch_loss_perf) if minibatch_loss_perf else ''), ('Minibatch_accuracy_micro: %.3f' % minibatch_acc)]
print_write(print_str, self.log_file)
loss_info = {'Epoch': epoch, 'Step': step, 'Total': minibatch_loss_total, 'CE': minibatch_loss_perf, 'feat': minibatch_loss_feat}
self.logger.log_loss(loss_info)
if hasattr(self.data['train'].sampler, 'update_weights'):
if hasattr(self.data['train'].sampler, 'ptype'):
ptype = self.data['train'].sampler.ptype
else:
ptype = 'score'
ws = get_priority(ptype, self.logits.detach(), labels)
inlist = [indexes.cpu().numpy(), ws]
if (self.training_opt['sampler']['type'] == 'ClassPrioritySampler'):
inlist.append(labels.cpu().numpy())
self.data['train'].sampler.update_weights(*inlist)
if hasattr(self.data['train'].sampler, 'get_weights'):
self.logger.log_ws(epoch, self.data['train'].sampler.get_weights())
if hasattr(self.data['train'].sampler, 'reset_weights'):
self.data['train'].sampler.reset_weights(epoch)
rsls = {'epoch': epoch}
rsls_train = self.eval_with_preds(total_preds, total_labels)
rsls_eval = self.eval(phase='val')
rsls.update(rsls_train)
rsls.update(rsls_eval)
if hasattr(self.data['train'].sampler, 'reset_priority'):
ws = get_priority(self.data['train'].sampler.ptype, self.total_logits.detach(), self.total_labels)
self.data['train'].sampler.reset_priority(ws, self.total_labels.cpu().numpy())
self.logger.log_acc(rsls)
if (self.eval_acc_mic_top1 > best_acc):
best_epoch = epoch
best_acc = self.eval_acc_mic_top1
best_centroids = self.centroids
best_model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
best_model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
print('===> Saving checkpoint')
self.save_latest(epoch)
print()
print('Training Complete.')
print_str = [('Best validation accuracy is %.3f at epoch %d' % (best_acc, best_epoch))]
print_write(print_str, self.log_file)
self.save_model(epoch, best_epoch, best_model_weights, best_acc, centroids=best_centroids)
self.reset_model(best_model_weights)
self.eval(('test' if ('test' in self.data) else 'val'))
print('Done')
def eval_with_preds(self, preds, labels):
n_total = sum([len(p) for p in preds])
(normal_preds, normal_labels) = ([], [])
(mixup_preds, mixup_labels1, mixup_labels2, mixup_ws) = ([], [], [], [])
for (p, l) in zip(preds, labels):
if isinstance(l, tuple):
mixup_preds.append(p)
mixup_labels1.append(l[0])
mixup_labels2.append(l[1])
mixup_ws.append((l[2] * np.ones_like(l[0])))
else:
normal_preds.append(p)
normal_labels.append(l)
rsl = {'train_all': 0.0, 'train_many': 0.0, 'train_median': 0.0, 'train_low': 0.0}
if (len(normal_preds) > 0):
(normal_preds, normal_labels) = list(map(np.concatenate, [normal_preds, normal_labels]))
n_top1 = mic_acc_cal(normal_preds, normal_labels)
(n_top1_many, n_top1_median, n_top1_low) = shot_acc(normal_preds, normal_labels, self.data['train'])
rsl['train_all'] += ((len(normal_preds) / n_total) * n_top1)
rsl['train_many'] += ((len(normal_preds) / n_total) * n_top1_many)
rsl['train_median'] += ((len(normal_preds) / n_total) * n_top1_median)
rsl['train_low'] += ((len(normal_preds) / n_total) * n_top1_low)
if (len(mixup_preds) > 0):
(mixup_preds, mixup_labels, mixup_ws) = list(map(np.concatenate, [(mixup_preds * 2), (mixup_labels1 + mixup_labels2), mixup_ws]))
mixup_ws = np.concatenate([mixup_ws, (1 - mixup_ws)])
n_top1 = weighted_mic_acc_cal(mixup_preds, mixup_labels, mixup_ws)
(n_top1_many, n_top1_median, n_top1_low) = weighted_shot_acc(mixup_preds, mixup_labels, mixup_ws, self.data['train'])
rsl['train_all'] += (((len(mixup_preds) / 2) / n_total) * n_top1)
rsl['train_many'] += (((len(mixup_preds) / 2) / n_total) * n_top1_many)
rsl['train_median'] += (((len(mixup_preds) / 2) / n_total) * n_top1_median)
rsl['train_low'] += (((len(mixup_preds) / 2) / n_total) * n_top1_low)
print_str = [('\n Training acc Top1: %.3f \n' % rsl['train_all']), ('Many_top1: %.3f' % rsl['train_many']), ('Median_top1: %.3f' % rsl['train_median']), ('Low_top1: %.3f' % rsl['train_low']), '\n']
print_write(print_str, self.log_file)
return rsl
def eval(self, phase='val', openset=False, save_feat=False):
print_str = [('Phase: %s' % phase)]
print_write(print_str, self.log_file)
time.sleep(0.25)
if openset:
print(('Under openset test mode. Open threshold is %.1f' % self.training_opt['open_threshold']))
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
self.total_logits = torch.empty((0, self.training_opt['num_classes'])).cuda()
self.total_labels = torch.empty(0, dtype=torch.long).cuda()
self.total_paths = np.empty(0)
get_feat_only = save_feat
(feats_all, labels_all, idxs_all, logits_all) = ([], [], [], [])
featmaps_all = []
for (inputs, labels, paths) in tqdm(self.data[phase]):
(inputs, labels) = (inputs.cuda(), labels.cuda())
with torch.set_grad_enabled(False):
self.batch_forward(inputs, labels, centroids=self.memory['centroids'], phase=phase)
if (not get_feat_only):
self.total_logits = torch.cat((self.total_logits, self.logits))
self.total_labels = torch.cat((self.total_labels, labels))
self.total_paths = np.concatenate((self.total_paths, paths))
if get_feat_only:
logits_all.append(self.logits.cpu().numpy())
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(paths.numpy())
if get_feat_only:
typ = 'feat'
if (phase == 'train_plain'):
name = 'train{}_all.pkl'.format(typ)
elif (phase == 'test'):
name = 'test{}_all.pkl'.format(typ)
elif (phase == 'val'):
name = 'val{}_all.pkl'.format(typ)
fname = os.path.join(self.training_opt['log_dir'], name)
print(('===> Saving feats to ' + fname))
with open(fname, 'wb') as f:
pickle.dump({'feats': np.concatenate(feats_all), 'labels': np.concatenate(labels_all), 'idxs': np.concatenate(idxs_all)}, f, protocol=4)
return
(probs, preds) = F.softmax(self.total_logits.detach(), dim=1).max(dim=1)
if openset:
preds[(probs < self.training_opt['open_threshold'])] = (- 1)
self.openset_acc = mic_acc_cal(preds[(self.total_labels == (- 1))], self.total_labels[(self.total_labels == (- 1))])
print(('\n\nOpenset Accuracy: %.3f' % self.openset_acc))
self.eval_acc_mic_top1 = mic_acc_cal(preds[(self.total_labels != (- 1))], self.total_labels[(self.total_labels != (- 1))])
self.eval_f_measure = F_measure(preds, self.total_labels, openset=openset, theta=self.training_opt['open_threshold'])
(self.many_acc_top1, self.median_acc_top1, self.low_acc_top1, self.cls_accs) = shot_acc(preds[(self.total_labels != (- 1))], self.total_labels[(self.total_labels != (- 1))], self.data['train'], acc_per_cls=True)
print_str = ['\n\n', ('Phase: %s' % phase), '\n\n', ('Evaluation_accuracy_micro_top1: %.3f' % self.eval_acc_mic_top1), '\n', ('Averaged F-measure: %.3f' % self.eval_f_measure), '\n', ('Many_shot_accuracy_top1: %.3f' % self.many_acc_top1), ('Median_shot_accuracy_top1: %.3f' % self.median_acc_top1), ('Low_shot_accuracy_top1: %.3f' % self.low_acc_top1), '\n']
rsl = {(phase + '_all'): self.eval_acc_mic_top1, (phase + '_many'): self.many_acc_top1, (phase + '_median'): self.median_acc_top1, (phase + '_low'): self.low_acc_top1, (phase + '_fscore'): self.eval_f_measure}
if (phase == 'val'):
print_write(print_str, self.log_file)
else:
acc_str = ['{:.1f} \t {:.1f} \t {:.1f} \t {:.1f}'.format((self.many_acc_top1 * 100), (self.median_acc_top1 * 100), (self.low_acc_top1 * 100), (self.eval_acc_mic_top1 * 100))]
if ((self.log_file is not None) and os.path.exists(self.log_file)):
print_write(print_str, self.log_file)
print_write(acc_str, self.log_file)
else:
print(*print_str)
print(*acc_str)
if (phase == 'test'):
with open(os.path.join(self.training_opt['log_dir'], 'cls_accs.pkl'), 'wb') as f:
pickle.dump(self.cls_accs, f)
return rsl
def centroids_cal(self, data, save_all=False):
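# Accumulate per-class feature sums over the data, then divide by class counts to obtain one centroid per class.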
centroids = torch.zeros(self.training_opt['num_classes'], self.training_opt['feature_dim']).cuda()
print('Calculating centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
(feats_all, labels_all, idxs_all) = ([], [], [])
with torch.set_grad_enabled(False):
for (inputs, labels, idxs) in tqdm(data):
(inputs, labels) = (inputs.cuda(), labels.cuda())
self.batch_forward(inputs, feature_ext=True)
for i in range(len(labels)):
label = labels[i]
centroids[label] += self.features[i]
if save_all:
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
idxs_all.append(idxs.numpy())
if save_all:
fname = os.path.join(self.training_opt['log_dir'], 'feats_all.pkl')
with open(fname, 'wb') as f:
pickle.dump({'feats': np.concatenate(feats_all), 'labels': np.concatenate(labels_all), 'idxs': np.concatenate(idxs_all)}, f)
centroids /= torch.tensor(class_count(data)).float().unsqueeze(1).cuda()
return centroids
def get_knncentroids(self):
datakey = 'train_plain'
assert (datakey in self.data)
print('===> Calculating KNN centroids.')
torch.cuda.empty_cache()
for model in self.networks.values():
model.eval()
(feats_all, labels_all) = ([], [])
with torch.set_grad_enabled(False):
for (inputs, labels, idxs) in tqdm(self.data[datakey]):
(inputs, labels) = (inputs.cuda(), labels.cuda())
self.batch_forward(inputs, feature_ext=True)
feats_all.append(self.features.cpu().numpy())
labels_all.append(labels.cpu().numpy())
feats = np.concatenate(feats_all)
labels = np.concatenate(labels_all)
featmean = feats.mean(axis=0)
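# Three centroid variants are returned: raw features (uncs), L2-normalized (l2ncs), and mean-centered + L2-normalized (cl2ncs).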
def get_centroids(feats_, labels_):
centroids = []
for i in np.unique(labels_):
centroids.append(np.mean(feats_[(labels_ == i)], axis=0))
return np.stack(centroids)
un_centers = get_centroids(feats, labels)
l2n_feats = torch.Tensor(feats.copy())
norm_l2n = torch.norm(l2n_feats, 2, 1, keepdim=True)
l2n_feats = (l2n_feats / norm_l2n)
l2n_centers = get_centroids(l2n_feats.numpy(), labels)
cl2n_feats = torch.Tensor(feats.copy())
cl2n_feats = (cl2n_feats - torch.Tensor(featmean))
norm_cl2n = torch.norm(cl2n_feats, 2, 1, keepdim=True)
cl2n_feats = (cl2n_feats / norm_cl2n)
cl2n_centers = get_centroids(cl2n_feats.numpy(), labels)
return {'mean': featmean, 'uncs': un_centers, 'l2ncs': l2n_centers, 'cl2ncs': cl2n_centers}
def reset_model(self, model_state):
for (key, model) in self.networks.items():
weights = model_state[key]
weights = {k: weights[k] for k in weights if (k in model.state_dict())}
model.load_state_dict(weights)
def load_model(self, model_dir=None):
model_dir = (self.training_opt['log_dir'] if (model_dir is None) else model_dir)
if (not model_dir.endswith(tuple(['.pth', '.pth.tar']))):
model_dir = os.path.join(model_dir, 'final_model_checkpoint.pth')
print('Validation on the best model.')
print(('Loading model from %s' % model_dir))
checkpoint = torch.load(model_dir)
model_state = (checkpoint['state_dict_best'] if (not model_dir.endswith('.pth.tar')) else checkpoint['state_dict'])
self.centroids = (checkpoint['centroids'] if ('centroids' in checkpoint) else None)
for (key, model) in self.networks.items():
if ((not self.test_mode) and ('DotProductClassifier' in self.config['networks'][key]['def_file'])):
print('Skipping classifier initialization')
continue
if (not model_dir.endswith('.pth.tar')):
weights = model_state[key]
else:
for k in list(model_state.keys()):
if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
model_state[f"module.{k[len('module.encoder_q.'):]}"] = model_state[k]
del model_state[k]
weights = model_state
print(list(model.state_dict().keys()))
print(f'''TOTAL: {len(list(model.state_dict().keys()))}
======''')
weights = {k: weights[k] for k in weights if (k in model.state_dict())}
print(f'''Pretrained weights found (TOTAL: {len(list(weights.keys()))}):
{weights.keys()}
''')
x = model.state_dict()
x.update(weights)
model.load_state_dict(x)
def save_latest(self, epoch):
model_weights = {}
model_weights['feat_model'] = copy.deepcopy(self.networks['feat_model'].state_dict())
model_weights['classifier'] = copy.deepcopy(self.networks['classifier'].state_dict())
model_states = {'epoch': epoch, 'state_dict': model_weights}
model_dir = os.path.join(self.training_opt['log_dir'], 'latest_model_checkpoint.pth')
torch.save(model_states, model_dir)
def save_model(self, epoch, best_epoch, best_model_weights, best_acc, centroids=None):
model_states = {'epoch': epoch, 'best_epoch': best_epoch, 'state_dict_best': best_model_weights, 'best_acc': best_acc, 'centroids': centroids}
model_dir = os.path.join(self.training_opt['log_dir'], 'final_model_checkpoint.pth')
torch.save(model_states, model_dir)
def output_logits(self, openset=False):
filename = os.path.join(self.training_opt['log_dir'], ('logits_%s' % ('open' if openset else 'close')))
print(('Saving total logits to: %s.npz' % filename))
np.savez(filename, logits=self.total_logits.detach().cpu().numpy(), labels=self.total_labels.detach().cpu().numpy(), paths=self.total_paths) |
class Map(list):
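# A lazy, read-only mapped view: the function is applied to elements of the backing list on access rather than eagerly.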
def __init__(self, function=(lambda x: x), items=None):
self._f = function
self._a = (items if (items is not None) else [])
def items(self):
return self._a
def __repr__(self):
return repr(list(iter(self)))
def __getitem__(self, i):
return self._f(self._a[i])
def __len__(self):
return len(self._a)
def __iter__(self):
i = 0
while (i < len(self._a)):
(yield self._f(self._a[i]))
i += 1 |
def mobilenet_wd4(**kwargs):
return get_mobilenet(version='orig', width_scale=0.25, model_name='mobilenet_wd4', **kwargs) |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', required=True, type=str)
parser.add_argument('-n', '--repeat_times', required=True, type=int)
parser.add_argument('-o', '--output_file', required=False, type=str)
args = parser.parse_args()
stream = (open(args.output_file, 'w') if args.output_file else sys.stdout)
for line in open(args.input_file):
for _ in range(args.repeat_times):
stream.write((_normalize_spaces(line) + '\n')) |
def update_loss_qf(algo, tensors, v, obs_flat, actions_flat, next_obs_flat, dones_flat, rewards_flat, policy):
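# SAC-style critic update: the TD target uses the minimum of the two target Q-networks minus the entropy term alpha * log pi(a'|s'), computed under no_grad so it is detached.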
with torch.no_grad():
alpha = algo.log_alpha.param.exp()
q1_pred = algo.qf1(obs_flat, actions_flat).flatten()
q2_pred = algo.qf2(obs_flat, actions_flat).flatten()
(next_action_dists_flat, *_) = policy(next_obs_flat)
if hasattr(next_action_dists_flat, 'rsample_with_pre_tanh_value'):
(new_next_actions_flat_pre_tanh, new_next_actions_flat) = next_action_dists_flat.rsample_with_pre_tanh_value()
new_next_action_log_probs = next_action_dists_flat.log_prob(new_next_actions_flat, pre_tanh_value=new_next_actions_flat_pre_tanh)
else:
new_next_actions_flat = next_action_dists_flat.rsample()
new_next_actions_flat = _clip_actions(algo, new_next_actions_flat)
new_next_action_log_probs = next_action_dists_flat.log_prob(new_next_actions_flat)
target_q_values = torch.min(algo.target_qf1(next_obs_flat, new_next_actions_flat).flatten(), algo.target_qf2(next_obs_flat, new_next_actions_flat).flatten())
target_q_values = (target_q_values - (alpha * new_next_action_log_probs))
target_q_values = (target_q_values * algo.discount)
with torch.no_grad():
q_target = (rewards_flat + (target_q_values * (1.0 - dones_flat)))
loss_qf1 = (F.mse_loss(q1_pred, q_target) * 0.5)
loss_qf2 = (F.mse_loss(q2_pred, q_target) * 0.5)
tensors.update({'QTargetsMean': q_target.mean(), 'QTdErrsMean': (((q_target - q1_pred).mean() + (q_target - q2_pred).mean()) / 2), 'LossQf1': loss_qf1, 'LossQf2': loss_qf2}) |
def main_MVSA():
global args, best_prec1, use_gpu
args = parser.parse_args()
use_gpu = torch.cuda.is_available()
emb_path = os.path.join(args.data_root_path, 'glove_embedding', 'glove_embedding_{}.pkl'.format(args.text_min_count))
if os.path.exists(emb_path):
print('The glove_embedding has already been built!')
else:
print('Not found the embedding file: {}'.format(emb_path))
data_path = os.path.join(args.data_root_path, 'all_anno_json')
vocab_path = os.path.join(args.data_root_path, 'vocab')
vocab = get_vocab_list(args.data_root_path, args.data_root_path, args.text_min_count)
vocab_size = len(vocab)
opt = {'emb_path': emb_path, 'bidirectional': args.bidirectional, 'hidden_size': args.hidden_size, 'emb_size': args.emb_size, 'num_layers': args.num_layers, 'dropout': args.dropout, 'emb_type': args.emb_type, 'vocab_size': vocab_size, 'stack_num': args.stack_num, 'n_head': args.n_head, 'd_kv': args.d_kv, 'is_regu': args.is_regu}
print('opt')
print(opt['emb_path'])
train_dataset = Tumblr_Dataset(root=args.data_root_path, dataset=args.dataset, text_min_count=args.text_min_count, vocab=None, transform=None, phase='train', object_inp_name='data/glove/object_glove_word2vec.pkl', place_inp_name='data/glove/place_glove_word2vec.pkl')
val_dataset = Tumblr_Dataset(root=args.data_root_path, dataset=args.dataset, text_min_count=args.text_min_count, vocab=None, transform=None, phase='val', object_inp_name='data/glove/object_glove_word2vec.pkl', place_inp_name='data/glove/place_glove_word2vec.pkl')
test_dataset = Tumblr_Dataset(root=args.data_root_path, dataset=args.dataset, text_min_count=args.text_min_count, vocab=None, transform=None, phase='test', object_inp_name='data/glove/object_glove_word2vec.pkl', place_inp_name='data/glove/place_glove_word2vec.pkl')
model = multi_gcn_multihead_att_model(opt=opt, num_labels=args.num_labels, object_num_classes=args.object_num_classes, place_num_classes=args.place_num_classes, object_t=args.object_t_value, place_t=args.place_t_value, data_root_path=args.data_root_path, vocab_root_path=args.data_root_path, text_min_count=args.text_min_count, window_size=args.window_size, ngram=args.ngram, min_cooccurence=args.min_cooccurence, text_dropout=0.5, pretrained=True, object_adj_file='data/adj/tumblr_objects_adj.pkl', place_adj_file='data/adj/tumblr_resnet50_places_adj.pkl', in_channel=300)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.get_config_optim(args.lr, args.lrp), lr=args.lr, weight_decay=args.weight_decay)
state = {'batch_size': args.batch_size, 'image_size': args.image_size, 'max_epochs': args.epochs, 'evaluate': args.evaluate, 'resume': args.resume, 'object_num_classes': args.object_num_classes, 'place_num_classes': args.place_num_classes, 'model_name': args.model_name}
state['save_experiment_result_path'] = os.path.join(args.save_experiment_result_path, args.model_name)
os.makedirs(state['save_experiment_result_path'], exist_ok=True)
state['save_pred_result_path'] = os.path.join(args.save_pred_result_path, args.model_name)
os.makedirs(state['save_pred_result_path'], exist_ok=True)
state['save_model_path'] = os.path.join(args.save_model_path, args.model_name)
os.makedirs(state['save_model_path'], exist_ok=True)
state['workers'] = args.workers
state['epoch_step'] = args.epoch_step
state['lr'] = args.lr
state['text_min_count'] = args.text_min_count
state['ngram'] = args.ngram
state['window_size'] = args.window_size
state['object_t_value'] = args.object_t_value
state['place_t_value'] = args.place_t_value
state['accumulation_steps'] = args.accumulation_steps
state['fp16'] = args.fp16
state['fp16_opt_level'] = args.fp16_opt_level
if args.evaluate:
state['evaluate'] = True
engine = GCNMultiClassEngine(state)
engine.learning(model, criterion, train_dataset, val_dataset, test_dataset, optimizer) |
class ReadSaveImage(object):
def __init__(self):
super(ReadSaveImage, self).__init__()
def check_path(self, fullpath):
(path, filename) = os.path.split(fullpath)
if (not os.path.exists(path)):
os.makedirs(path) |
@route('/savedArticles', methods=['GET'])
def savedArticles():
return render_template('saves.html', endpoint='articles.savedArticles', **genArticleList(db.getSavedArticles)) |
def cached_path(url_or_filename: Union[(str, Path)], cache_dir: Union[(str, Path)]=None) -> str:
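# Resolve the argument to a local file: download-and-cache for http/https/s3 URLs, pass through existing local paths.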
if (cache_dir is None):
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if (parsed.scheme in ('http', 'https', 's3')):
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
return url_or_filename
elif (parsed.scheme == ''):
raise FileNotFoundError('file {} not found'.format(url_or_filename))
else:
raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename)) |
def resume_model(model, cfg, pretrained_path=None):
pretrained_path = (os.path.join(cfg.ckpt_dir, os.path.join(cfg.run_name, '_ckpt_latest.pth')) if (pretrained_path is None) else pretrained_path)
if (not os.path.exists(pretrained_path)):
logging.info(f'[RESUME INFO] no checkpoint file from path {pretrained_path}...')
return (0, 0)
logging.info(f'[RESUME INFO] Loading model weights from {pretrained_path}...')
state_dict = torch.load(pretrained_path, map_location='cpu')
base_ckpt = {k.replace('module.', ''): v for (k, v) in state_dict['model'].items()}
model.load_state_dict(base_ckpt, strict=True)
if ('epoch' in state_dict.keys()):
start_epoch = (state_dict['epoch'] + 1)
else:
start_epoch = 1
if ('best_metrics' in state_dict.keys()):
best_metrics = state_dict['best_metrics']
if (not isinstance(best_metrics, dict)):
best_metrics = best_metrics.state_dict()
else:
best_metrics = None
logging.info(f'[RESUME INFO] resume ckpts {(start_epoch - 1)} epoch( best_metrics = {str(best_metrics):s})')
return (start_epoch, best_metrics) |
@register_model
def regnety_016(pretrained=False, **kwargs):
return _regnet('regnety_016', pretrained, **kwargs) |
class TFPredictor():
def __init__(self, sess, outputs, inputs=None, dataset=None):
if (inputs is None):
(dataset, inputs) = TFPredictor._get_datasets_and_inputs(outputs)
self.sess = sess
self.dataset = dataset
self.inputs = inputs
self.tfnet = TFNet.from_session(sess, self.inputs, outputs)
if (self.dataset.batch_per_thread <= 0):
invalidInputError(False, ('You should set batch_per_thread on TFDataset ' + 'instead of batch_size for prediction'))
@staticmethod
def _get_datasets_and_inputs(outputs):
import tensorflow as tf
all_required_inputs = find_placeholders(outputs)
dataset = tf.get_collection(all_required_inputs[0].name)[0]
inputs = dataset.tensors
_check_the_same(all_required_inputs, inputs)
return (dataset, inputs)
@classmethod
def from_outputs(cls, sess, outputs):
(dataset, inputs) = TFPredictor._get_datasets_and_inputs(outputs)
return cls(sess, outputs, inputs, dataset)
@classmethod
def from_keras(cls, keras_model, dataset):
import tensorflow.keras.backend as K
sess = K.get_session()
outputs = keras_model.outputs
inputs = keras_model.inputs
check_data_compatible(dataset, keras_model, mode='inference')
if isinstance(dataset, TFNdarrayDataset):
dataset = _standarize_feature_dataset(dataset, keras_model)
return cls(sess, outputs, inputs, dataset)
def predict(self):
return self.tfnet.predict(self.dataset.get_prediction_data(), mini_batch=True) |
class FieldEntrySelector(EntrySelector):
_SPEC_DELIM = ','
_TYPE_DELIM = ':'
_RANGE_DELIM = '-'
_EQUAL = '='
_ERROR_PREFIX = 'Invalid field selector specifier'
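# Specifier grammar: comma-separated terms of the form <field>[:<type>]=<value> or <field>[:<type>]=<min>-<max>.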
class _FieldEntryValuePredicate(object):
def __init__(self, name: str, typespec: str, value: str):
import builtins
self.name = name
self.type = (getattr(builtins, typespec) if (typespec is not None) else str)
self.value = value
def __call__(self, entry):
return (entry[self.name] == self.type(self.value))
class _FieldEntryRangePredicate(object):
def __init__(self, name: str, typespec: str, vmin: str, vmax: str):
import builtins
self.name = name
self.type = (getattr(builtins, typespec) if (typespec is not None) else str)
self.vmin = vmin
self.vmax = vmax
def __call__(self, entry):
return ((entry[self.name] >= self.type(self.vmin)) and (entry[self.name] <= self.type(self.vmax)))
def __init__(self, spec: str):
self._predicates = self._parse_specifier_into_predicates(spec)
def __call__(self, entry: Dict[(str, Any)]):
for predicate in self._predicates:
if (not predicate(entry)):
return False
return True
def _parse_specifier_into_predicates(self, spec: str) -> List['_FieldEntryPredicate']:
predicates = []
specs = spec.split(self._SPEC_DELIM)
for subspec in specs:
eq_idx = subspec.find(self._EQUAL)
if (eq_idx > 0):
field_name_with_type = subspec[:eq_idx]
(field_name, field_type) = self._parse_field_name_type(field_name_with_type)
field_value_or_range = subspec[(eq_idx + 1):]
if self._is_range_spec(field_value_or_range):
(vmin, vmax) = self._get_range_spec(field_value_or_range)
predicate = FieldEntrySelector._FieldEntryRangePredicate(field_name, field_type, vmin, vmax)
else:
predicate = FieldEntrySelector._FieldEntryValuePredicate(field_name, field_type, field_value_or_range)
predicates.append(predicate)
elif (eq_idx == 0):
self._parse_error(f'"{subspec}", field name is empty!')
else:
self._parse_error(f'"{subspec}", should have format <field>=<value_or_range>!')
return predicates
def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[(str, Optional[str])]:
type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
if (type_delim_idx > 0):
field_name = field_name_with_type[:type_delim_idx]
field_type = field_name_with_type[(type_delim_idx + 1):]
elif (type_delim_idx == 0):
self._parse_error(f'"{field_name_with_type}", field name is empty!')
else:
field_name = field_name_with_type
field_type = None
return (field_name, field_type)
def _is_range_spec(self, field_value_or_range):
delim_idx = field_value_or_range.find(self._RANGE_DELIM)
return (delim_idx > 0)
def _get_range_spec(self, field_value_or_range):
if self._is_range_spec(field_value_or_range):
delim_idx = field_value_or_range.find(self._RANGE_DELIM)
vmin = field_value_or_range[:delim_idx]
vmax = field_value_or_range[(delim_idx + 1):]
return (vmin, vmax)
else:
self._parse_error('"field_value_or_range", range of values expected!')
def _parse_error(self, msg):
raise ValueError(f'{self._ERROR_PREFIX}: {msg}') |
def copy_dtypes_for_restore(images, force_list=False):
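# Record the dtype(s) of the input images so they can be restored after augmentation:
# a single ndarray yields its dtype (or one dtype per image when force_list=True),
# while a list of images yields a list with each image's dtype.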
if ia.is_np_array(images):
if force_list:
return [images.dtype for _ in sm.xrange(len(images))]
else:
return images.dtype
else:
return [image.dtype for image in images] |
def preprocess_data_to_merge(input_standoff_folder_gold, output_conll_folder_gold, output_conll_file_gold, input_standoff_folder_pred, output_conll_folder_pred, output_conll_file_pred):
anntoconll_wlp.convert_standoff_conll_single_file(input_standoff_folder_gold, output_conll_folder_gold, output_conll_file_gold)
list_of_test_files_stand_off = Read_Files_in_Input_Folder(output_conll_folder_gold)
for file_name in list_of_test_files_stand_off:
file_values = file_name.split('/')
protocol_name = file_values[(- 1)]
conll2standoff.process(file_name, input_standoff_folder_gold)
copy_text_files(input_standoff_folder_gold, input_standoff_folder_pred)
anntoconll_wlp.convert_standoff_conll_single_file(input_standoff_folder_pred, output_conll_folder_pred, output_conll_file_pred) |
class PeriodicCheckpointerWithEval(HookBase):
def __init__(self, eval_period, eval_function, checkpointer, checkpoint_period, max_to_keep=5):
self.eval = hooks.EvalHook(eval_period, eval_function)
self.checkpointer = hooks.PeriodicCheckpointer(checkpointer, checkpoint_period, max_to_keep=max_to_keep)
self.best_ap = 0.0
best_model_path = (checkpointer.save_dir + 'best_model_final.pth.pth')
if os.path.isfile(best_model_path):
best_model = torch.load(best_model_path, map_location=torch.device('cpu'))
try:
self.best_ap = best_model['']
except:
self.best_ap = best_model['AP50']
del best_model
else:
self.best_ap = 0.0
def before_train(self):
self.max_iter = self.trainer.max_iter
self.checkpointer.max_iter = self.trainer.max_iter
def _do_eval(self):
results = self.eval._func()
if results:
assert isinstance(results, dict), 'Eval function must return a dict. Got {} instead.'.format(results)
flattened_results = flatten_results_dict(results)
for (k, v) in flattened_results.items():
try:
v = float(v)
except Exception as e:
raise ValueError("[EvalHook] eval_function should return a nested dict of float. Got '{}: {}' instead.".format(k, v)) from e
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
comm.synchronize()
return results
def after_step(self):
next_iter = (self.trainer.iter + 1)
is_final = (next_iter == self.trainer.max_iter)
if (is_final or ((self.eval._period > 0) and ((next_iter % self.eval._period) == 0))):
results = self._do_eval()
if comm.is_main_process():
try:
dataset = ('VG_val' if ('VG_val' in results.keys()) else 'VG_test')
if (results[dataset]['SG'][''] > self.best_ap):
self.best_ap = results[dataset]['SG']['']
additional_state = {'iteration': self.trainer.iter, '': self.best_ap}
self.checkpointer.checkpointer.save('best_model_final.pth', **additional_state)
except:
current_ap = results['bbox']['AP50']
if (current_ap > self.best_ap):
self.best_ap = current_ap
additional_state = {'iteration': self.trainer.iter, 'AP50': self.best_ap}
self.checkpointer.checkpointer.save('best_model_final.pth', **additional_state)
if comm.is_main_process():
self.checkpointer.step(self.trainer.iter)
comm.synchronize()
def after_train(self):
if ((self.trainer.iter + 1) >= self.trainer.max_iter):
self._do_eval()
del self.eval._func |
def weak_tp(guess_entities, gold_entities):
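# Count "weak" true positives: a prediction counts when it shares the same first field
# (presumably a document/sentence id) and the same label (index 3) as a gold entity and its
# [start, start+length] span overlaps the gold span at either boundary.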
tp = 0
for pred in guess_entities:
for gold in gold_entities:
if ((pred[0] == gold[0]) and ((gold[1] <= pred[1] <= (gold[1] + gold[2])) or (gold[1] <= (pred[1] + pred[2]) <= (gold[1] + gold[2]))) and (pred[3] == gold[3])):
tp += 1
return tp |
class VariableGenomeDecoder(ChannelBasedDecoder):
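# Decodes a genome whose genes each carry a trailing phase-type marker
# (residual, pre-activation residual, or dense) into a sequential model of phases.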
RESIDUAL = 0
PREACT_RESIDUAL = 1
DENSE = 2
def __init__(self, list_genome, channels, repeats=None):
phase_types = [gene.pop() for gene in list_genome]
genome_copy = copy(list_genome)
super().__init__(list_genome, channels, repeats=repeats)
if (self._model is not None):
return
self._types = self.adjust_types(genome_copy, phase_types)
phases = []
for (idx, (gene, (in_channels, out_channels), phase_type)) in enumerate(zip(self._genome, self._channels, self._types)):
if (phase_type == self.RESIDUAL):
phases.append(ResidualPhase(gene, in_channels, out_channels, idx))
elif (phase_type == self.PREACT_RESIDUAL):
phases.append(ResidualPhase(gene, in_channels, out_channels, idx, preact=True))
elif (phase_type == self.DENSE):
phases.append(DensePhase(gene, in_channels, out_channels, idx))
else:
raise NotImplementedError('Phase type corresponding to {} not implemented.'.format(phase_type))
self._model = nn.Sequential(*self.build_layers(phases))
def adjust_types(self, genome, phase_types):
effective_types = []
for (idx, (gene, phase_type)) in enumerate(zip(genome, phase_types)):
if phase_active(gene):
for _ in range(self._repeats[idx]):
effective_types.append(*phase_type)
return effective_types
def get_model(self):
return self._model |
class NLISentenceReader(NLIReader):
def read_sentences(self, filename):
sentences = []
extra = {}
example_ids = []
with open(filename) as f:
for line in tqdm(f, desc='read'):
smap = self.read_line(line)
if (smap is None):
continue
(s1, s2, label) = (smap['s1'], smap['s2'], smap['label'])
example_id = smap['example_id']
skip_s1 = ((self.filter_length > 0) and (len(s1) > self.filter_length))
skip_s2 = ((self.filter_length > 0) and (len(s2) > self.filter_length))
if (not skip_s1):
example_ids.append((example_id + '_1'))
sentences.append(s1)
if (not skip_s2):
example_ids.append((example_id + '_2'))
sentences.append(s2)
extra['example_ids'] = example_ids
return {'sentences': sentences, 'extra': extra} |
class Sparse(Initializer):
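# Sparse initialization: every output column gets a fixed number of non-zero
# Gaussian(0, std) weights (sparsity * n_inputs of them); all other entries stay zero.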
def __init__(self, sparsity=0.1, std=0.01):
self.sparsity = sparsity
self.std = std
def sample(self, shape):
if (len(shape) != 2):
raise RuntimeError('sparse initializer only works with shapes of length 2')
w = floatX(np.zeros(shape))
(n_inputs, n_outputs) = shape
size = int((self.sparsity * n_inputs))
for k in range(n_outputs):
indices = np.arange(n_inputs)
get_rng().shuffle(indices)
indices = indices[:size]
values = floatX(get_rng().normal(0.0, self.std, size=size))
w[(indices, k)] = values
return w |
def prepare_run(args):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
run_name = (args.name or args.model)
log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name, args.slack_url)
return log_dir |
class Synthesizer(Generator):
def __init__(self, params=None, samprate=48000):
self.gtype = 'synth'
self.preset = getattr(presets, self.gtype).load_preset()
self.preset['ranges'] = getattr(presets, self.gtype).load_ranges()
super().__init__(params, samprate)
self.setup_oscillators()
def setup_oscillators(self):
oscdict = self.preset['oscillators']
self.osclist = []
for osc in oscdict.keys():
lvl = oscdict[osc]['level']
det = oscdict[osc]['detune']
phase = oscdict[osc]['phase']
form = oscdict[osc]['form']
snorm = self.samprate
fnorm = (1 + (det / 100.0))
if (phase == 'random'):
# bind the per-oscillator parameters as default arguments so each lambda keeps its own values instead of those of the last loop iteration
oscf = (lambda samp, f, lvl=lvl, form=form, fnorm=fnorm, snorm=snorm: (lvl * getattr(self, form)((samp / snorm), (f * fnorm), np.random.random())))
else:
oscf = (lambda samp, f, lvl=lvl, form=form, fnorm=fnorm, snorm=snorm, phase=phase: (lvl * getattr(self, form)((samp / snorm), (f * fnorm), phase)))
self.osclist.append(oscf)
self.generate = self.combine_oscs
def modify_preset(self, parameters, clear_oscs=True):
if clear_oscs:
super().modify_preset(parameters, ['oscillators'])
else:
super().modify_preset(parameters)
self.setup_oscillators()
def combine_oscs(self, s, f):
tot = 0.0
if isinstance(f, str):
f = notes.parse_note(f)
for osc in self.osclist:
tot += osc(s, f)
return tot
def play(self, mapping):
samprate = self.samprate
audbuff = self.audbuff
params = copy.deepcopy(self.preset)
utils.linear_to_nested_dict_reassign(mapping, params)
nlength = ((params['note_length'] + params['volume_envelope']['R']) * samprate)
sstream = stream.Stream((nlength / samprate), samprate)
samples = sstream.samples
sstream.get_sampfracs()
pindex = np.zeros(samples.size)
if callable(params['pitch_shift']):
pindex += (params['pitch_shift'](sstream.sampfracs) / 12.0)
elif (params['pitch_shift'] != 0):
pindex += (params['pitch_shift'] / 12.0)
if params['pitch_lfo']['use']:
pindex += (self.lfo(samples, sstream.sampfracs, params, 'pitch') / 12.0)
if np.any(pindex):
samples = np.cumsum(pow(2.0, pindex))
values = self.generate(samples, params['note'])
env = self.envelope(sstream.samples, params)
if params['volume_lfo']['use']:
env *= np.clip((1.0 - (self.lfo(sstream.samples, sstream.sampfracs, params, 'volume') * 0.5)), 0, 1)
sstream.values = ((values * utils.const_or_evo(params['volume'], sstream.sampfracs)) * env)
if (params['filter'] == 'on'):
if hasattr(params['cutoff'], '__iter__'):
sstream.bufferize((sstream.length / 4))
else:
sstream.bufferize(0.03)
sstream.filt_sweep(getattr(filters, params['filter_type']), utils.const_or_evo_func(params['cutoff']))
return sstream |
def get_pretraining_cifar10(data_dir):
train_data = CIFAR10Pair(numpy_file=(data_dir + 'train.npz'), class_type=classes, transform=train_transform)
memory_data = CIFAR10Mem(numpy_file=(data_dir + 'train.npz'), class_type=classes, transform=test_transform_cifar10)
test_data = CIFAR10Mem(numpy_file=(data_dir + 'test.npz'), class_type=classes, transform=test_transform_cifar10)
return (train_data, memory_data, test_data) |
def train_nxdo_best_response(br_player: int, scenario_name: str, nxdo_manager_port: int, nxdo_manager_host: str, print_train_results: bool=True, previous_br_checkpoint_path=None):
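# Trains a best-response policy for one player against the current metanash in NXDO:
# claims an active policy slot from the remote manager, builds the (possibly OpenSpiel-based)
# restricted game, trains with RLlib until the stopping condition fires, then submits the
# fixed policy checkpoint back to the manager and waits for both players' policies to be fixed.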
scenario: NXDOScenario = scenario_catalog.get(scenario_name=scenario_name)
if (not isinstance(scenario, NXDOScenario)):
raise TypeError(f'Only instances of {NXDOScenario} can be used here. {scenario.name} is a {type(scenario)}.')
use_openspiel_restricted_game: bool = scenario.use_openspiel_restricted_game
get_restricted_game_custom_model = scenario.get_restricted_game_custom_model
env_class = scenario.env_class
base_env_config = scenario.env_config
trainer_class = scenario.trainer_class_br
policy_classes: Dict[(str, Type[Policy])] = scenario.policy_classes_br
get_trainer_config = scenario.get_trainer_config_br
nxdo_br_get_stopping_condition = scenario.get_stopping_condition_br
should_log_result_fn = scenario.ray_should_log_result_filter
nxdo_metanash_method: str = scenario.xdo_metanash_method
if (nxdo_metanash_method != 'nfsp'):
raise NotImplementedError("Only 'nfsp' is currently supported for the nxdo_metanash_method")
nxdo_manager = RemoteNXDOManagerClient(n_players=2, port=nxdo_manager_port, remote_server_host=nxdo_manager_host)
manager_metadata = nxdo_manager.get_manager_metadata()
results_dir = nxdo_manager.get_log_dir()
br_params = nxdo_manager.claim_new_active_policy_for_player(player=br_player)
(metanash_specs_for_players, delegate_specs_for_players, active_policy_num) = br_params
other_player = (1 - br_player)
br_learner_name = f'policy {active_policy_num} player {br_player}'
def log(message):
print(f'({br_learner_name}): {message}')
def select_policy(agent_id):
if (agent_id == br_player):
return f'best_response'
elif (agent_id == other_player):
return f'metanash'
else:
raise ValueError(f'Unknown agent id: {agent_id}')
restricted_env_config = {'create_env_fn': (lambda : env_class(env_config=base_env_config)), 'raise_if_no_restricted_players': (metanash_specs_for_players is not None)}
tmp_base_env = env_class(env_config=base_env_config)
if use_openspiel_restricted_game:
restricted_game_class = OpenSpielRestrictedGame
else:
restricted_game_class = RestrictedGame
restricted_env_config['use_delegate_policy_exploration'] = scenario.allow_stochastic_best_responses
tmp_env = restricted_game_class(env_config=restricted_env_config)
if ((metanash_specs_for_players is None) or use_openspiel_restricted_game):
other_player_restricted_action_space = tmp_env.base_action_space
metanash_class = policy_classes['best_response']
else:
other_player_restricted_action_space = Discrete(n=len(delegate_specs_for_players[other_player]))
metanash_class = policy_classes['metanash']
print(f'metanash class: {metanash_class}, other_player_restricted_action_space: {other_player_restricted_action_space}')
if ((metanash_specs_for_players is None) and use_openspiel_restricted_game):
other_player_restricted_obs_space = tmp_env.base_observation_space
else:
other_player_restricted_obs_space = tmp_env.observation_space
trainer_config = {'env': restricted_game_class, 'env_config': restricted_env_config, 'gamma': 1.0, 'num_gpus': 0, 'num_workers': 0, 'num_envs_per_worker': 1, 'multiagent': {'policies_to_train': [f'best_response'], 'policies': {f'metanash': (metanash_class, other_player_restricted_obs_space, other_player_restricted_action_space, {'explore': False}), f'metanash_delegate': (policy_classes['best_response'], tmp_env.base_observation_space, tmp_env.base_action_space, {'explore': scenario.allow_stochastic_best_responses}), f'best_response': (policy_classes['best_response'], tmp_env.base_observation_space, tmp_env.base_action_space, {})}, 'policy_mapping_fn': select_policy}}
if ((metanash_specs_for_players is not None) and (get_restricted_game_custom_model is not None)):
trainer_config['multiagent']['policies']['metanash'][3]['model'] = {'custom_model': get_restricted_game_custom_model(tmp_env)}
trainer_config = merge_dicts(trainer_config, get_trainer_config(tmp_base_env))
ray_head_address = manager_metadata['ray_head_address']
init_ray_for_scenario(scenario=scenario, head_address=ray_head_address, logging_level=logging.INFO)
trainer = trainer_class(config=trainer_config, logger_creator=get_trainer_logger_creator(base_dir=results_dir, scenario_name=scenario_name, should_log_result_fn=should_log_result_fn))
def _set_worker_metanash(worker: RolloutWorker):
if (metanash_specs_for_players is not None):
metanash_policy = worker.policy_map['metanash']
metanash_strategy_spec: StrategySpec = metanash_specs_for_players[other_player]
load_pure_strat(policy=metanash_policy, pure_strat_spec=metanash_strategy_spec)
trainer.workers.foreach_worker(_set_worker_metanash)
trainer.weights_cache = {}
if delegate_specs_for_players:
if use_openspiel_restricted_game:
set_restricted_game_conversions_for_all_workers_openspiel(trainer=trainer, tmp_base_env=tmp_base_env, delegate_policy_id='metanash_delegate', agent_id_to_restricted_game_specs={other_player: delegate_specs_for_players[other_player]}, load_policy_spec_fn=load_pure_strat)
else:
set_restricted_game_conversations_for_all_workers(trainer=trainer, delegate_policy_id='metanash_delegate', agent_id_to_restricted_game_specs={other_player: delegate_specs_for_players[other_player]}, load_policy_spec_fn=create_get_pure_strat_cached(cache=trainer.weights_cache))
log(f'got policy {active_policy_num}')
if (previous_br_checkpoint_path is not None):
def _set_br_initial_weights(worker: RolloutWorker):
br_policy = worker.policy_map['best_response']
load_pure_strat(policy=br_policy, checkpoint_path=previous_br_checkpoint_path)
trainer.workers.foreach_worker(_set_br_initial_weights)
stopping_condition: StoppingCondition = nxdo_br_get_stopping_condition()
while True:
train_iter_results = trainer.train()
if print_train_results:
train_iter_results['p2sro_active_policy_num'] = active_policy_num
train_iter_results['best_response_player'] = br_player
if ('hist_stats' in train_iter_results):
del train_iter_results['hist_stats']
if ('td_error' in train_iter_results['info']['learner'][f'best_response']):
del train_iter_results['info']['learner'][f'best_response']['td_error']
log(f'Trainer log dir is {trainer.logdir}')
log(pretty_dict_str(train_iter_results))
total_timesteps_training_br = train_iter_results['timesteps_total']
total_episodes_training_br = train_iter_results['episodes_total']
br_reward_this_iter = train_iter_results['policy_reward_mean'][f'best_response']
if stopping_condition.should_stop_this_iter(latest_trainer_result=train_iter_results):
log('Stopping condition met.')
break
log(f'Training stopped. Setting active policy {active_policy_num} as fixed.')
final_policy_metadata = create_metadata_with_new_checkpoint_for_current_best_response(trainer=trainer, player=br_player, save_dir=checkpoint_dir(trainer=trainer), timesteps_training_br=total_timesteps_training_br, episodes_training_br=total_episodes_training_br, active_policy_num=active_policy_num, average_br_reward=float(br_reward_this_iter))
nxdo_manager.submit_final_br_policy(player=br_player, policy_num=active_policy_num, metadata_dict=final_policy_metadata)
ray.shutdown()
time.sleep(10)
for player_to_wait_on in range(2):
wait_count = 0
while True:
if nxdo_manager.is_policy_fixed(player=player_to_wait_on, policy_num=active_policy_num):
break
if ((wait_count % 10) == 0):
log(f'Waiting for policy {active_policy_num} player {player_to_wait_on} to become fixed')
time.sleep(2.0)
wait_count += 1
return final_policy_metadata['checkpoint_path'] |
def response_function(hgf, response_function_parameters):
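# Binary surprise: the summed negative log-likelihood of the observed binary responses
# under the expected beliefs (expected_mean trajectory of node 1).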
responses = response_function_parameters[0]
beliefs = hgf.node_trajectories[1]['expected_mean']
return jnp.sum(jnp.where(responses, (- jnp.log(beliefs)), (- jnp.log((1.0 - beliefs))))) |
class MfbExpand(nn.Module):
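# MFB expand stage: project image and question features into a shared hidden space,
# broadcast the question over spatial locations when needed, and fuse them by
# element-wise multiplication followed by dropout.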
def __init__(self, img_feat_dim, txt_emb_dim, hidden_dim, dropout):
super(MfbExpand, self).__init__()
self.lc_image = nn.Linear(in_features=img_feat_dim, out_features=hidden_dim)
self.lc_ques = nn.Linear(in_features=txt_emb_dim, out_features=hidden_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, image_feat, question_embed):
image1 = self.lc_image(image_feat)
ques1 = self.lc_ques(question_embed)
if (len(image_feat.data.shape) == 3):
num_location = image_feat.data.size(1)
ques1_expand = torch.unsqueeze(ques1, 1).expand((- 1), num_location, (- 1))
else:
ques1_expand = ques1
joint_feature = (image1 * ques1_expand)
joint_feature = self.dropout(joint_feature)
return joint_feature |
def tokenize_queries(args, tokenizer):
for mode in ['dev']:
query_output = f'{args.output_dir}/queries.{mode}.json'
tokenize_file(tokenizer, f'{args.msmarco_dir}/queries.{mode}.tsv', query_output) |
class TFGPT2LMHeadModel():
def __init__(self, *args, **kwargs):
requires_tf(self)
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
def _merge_a_into_b(a, b):
if (type(a) is not edict):
return
for (k, v) in a.items():
if (k not in b):
raise KeyError('{} is not a valid config key'.format(k))
if (type(b[k]) is not type(v)):
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if (type(v) is edict):
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v |
def random_hyperplane(vars):
cf0 = str(random_complex())
tf0 = cf0.replace('j', '*i')
result = tf0
for var in vars:
cff = str(random_complex())
tcf = cff.replace('j', '*i')
result = ((((result + '+') + tcf) + '*') + var)
return (result + ';') |
class CSL(CLSProcessor):
def __init__(self):
super().__init__(labels_origin=['0', '1'], labels_mapped=['', ''])
def get_examples(self, data_dir, split):
path = os.path.join(data_dir, f'{split}.json')
examples = []
with open(path, encoding='utf8') as f:
for line in f:
example_json = json.loads(line)
example = InputExample(meta={'text': example_json['abst'], 'keywords': ','.join(example_json['keyword']), 'options': self.labels_mapped}, tgt_text=self.get_label(example_json['label']))
examples.append(example)
return examples
def get_templates(self):
return [':{text} :{keywords} :?:'] |
def register_func(func_name, f=None, override=False):
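# Register a Python function as a TVM global function. Usable either as a direct call
# (register_func('name', fn)) or as a decorator (@register_func or @register_func('name')).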
if callable(func_name):
f = func_name
func_name = f.__name__
if (not isinstance(func_name, str)):
raise ValueError('expect string function name')
ioverride = ctypes.c_int(override)
def register(myf):
if (not isinstance(myf, Function)):
myf = convert_to_tvm_func(myf)
check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))
return myf
if f:
return register(f)
return register |
def _test():
import torch
pretrained = False
models = [airnet50_1x64d_r2, airnet50_1x64d_r16, airnet101_1x64d_r2]
for model in models:
net = model(pretrained=pretrained)
net.eval()
weight_count = _calc_width(net)
print('m={}, {}'.format(model.__name__, weight_count))
assert ((model != airnet50_1x64d_r2) or (weight_count == ))
assert ((model != airnet50_1x64d_r16) or (weight_count == ))
assert ((model != airnet101_1x64d_r2) or (weight_count == ))
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000)) |
def CheckArgs(args):
if (args.stats_file == '-'):
args.stats_file_handle = sys.stdin
else:
args.stats_file_handle = open(args.stats_file)
if (args.filter_lexicon != ''):
if (args.filter_lexicon == '-'):
args.filter_lexicon_handle = sys.stdin
else:
args.filter_lexicon_handle = open(args.filter_lexicon)
if (args.out_lexicon == '-'):
args.out_lexicon_handle = sys.stdout
else:
args.out_lexicon_handle = open(args.out_lexicon, 'w')
if (args.set_max_to_one == args.set_sum_to_one):
raise Exception('Cannot have both set-max-to-one and set-sum-to-one as true or false.')
return args |
@registry.register_loss('nll_loss')
class NLLLoss(nn.Module):
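# Negative log-likelihood over soft targets: take the argmax of the target distribution
# as the hard label, compute mean NLL, then rescale by the target dimension (targets.size(1)).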
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
scores = model_output['scores']
targets = sample_list['targets']
(_, idx) = targets.max(dim=1)
loss = F.nll_loss(scores, idx, reduction='mean')
return (loss * targets.size(1)) |
def process(args):
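# End-to-end LibriSpeech preprocessing: download each split, extract 80-dim log mel
# filter bank features, zip them, write per-split TSV manifests, train a SentencePiece
# vocabulary on the training transcripts, emit the config yaml, and clean up raw features.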
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
feature_root = (out_root / 'fbank80')
feature_root.mkdir(exist_ok=True)
for split in SPLITS:
print(f'Fetching split {split}...')
dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True)
print('Extracting log mel filter bank features...')
for (wav, sample_rate, _, spk_id, chapter_no, utt_no) in tqdm(dataset):
sample_id = f'{spk_id}-{chapter_no}-{utt_no}'
extract_fbank_features(wav, sample_rate, (feature_root / f'{sample_id}.npy'))
zip_path = (out_root / 'fbank80.zip')
print('ZIPing features...')
create_zip(feature_root, zip_path)
print('Fetching ZIP manifest...')
zip_manifest = get_zip_manifest(zip_path)
print('Generating manifest...')
train_text = []
for split in SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = LIBRISPEECH(out_root.as_posix(), url=split)
for (wav, sample_rate, utt, spk_id, chapter_no, utt_no) in tqdm(dataset):
sample_id = f'{spk_id}-{chapter_no}-{utt_no}'
manifest['id'].append(sample_id)
manifest['audio'].append(zip_manifest[sample_id])
duration_ms = int(((wav.size(1) / sample_rate) * 1000))
manifest['n_frames'].append(int((1 + ((duration_ms - 25) / 10))))
manifest['tgt_text'].append(utt)
manifest['speaker'].append(spk_id)
save_df_to_tsv(pd.DataFrame.from_dict(manifest), (out_root / f'{split}.tsv'))
if split.startswith('train'):
train_text.extend(manifest['tgt_text'])
vocab_size = ('' if (args.vocab_type == 'char') else str(args.vocab_size))
spm_filename_prefix = f'spm_{args.vocab_type}{vocab_size}'
with NamedTemporaryFile(mode='w') as f:
for t in train_text:
f.write((t + '\n'))
gen_vocab(Path(f.name), (out_root / spm_filename_prefix), args.vocab_type, args.vocab_size)
gen_config_yaml(out_root, (spm_filename_prefix + '.model'), specaugment_policy='ld')
shutil.rmtree(feature_root) |
def normalize(s):
s = s.strip()
s = re.sub('\t', ' ', s)
s = _unicode_normalize('0-9A-Za-z-', s)
def _maketrans(f, t):
return {ord(x): ord(y) for (x, y) in zip(f, t)}
s = re.sub('[]+', '-', s)
s = re.sub('[--]+', '', s)
s = re.sub('[~~]+', '', s)
s = s.translate(_maketrans('!"#$%&\'()*+,-./:;<=>?[]^_`{|}~', '!#$%&()*+,-./:;<=>?[]^_`{|}'))
s = _remove_extra_spaces(s)
s = _unicode_normalize('!#$%&()*+,-./:;<>?[]^_`{|}', s)
s = re.sub('[]', "'", s)
s = re.sub('[]', '"', s)
s = re.sub('[]', '"', s)
return s |
def main():
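# Convert a trained (optionally EMA) checkpoint of the selected model into its deployable
# form, save the converted weights, then evaluate top-1/top-5 accuracy on the ImageNet
# validation set and report FLOPs and parameter counts.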
args = parser.parse_args()
assert (args.dataset == 'imagenet')
args.num_classes = 1000
args.IMAGE_SIZE = 224
if (args.train_url and (not os.path.exists(args.train_url))):
os.makedirs(args.train_url)
model = eval(args.model)(args)
assert (args.convert_from is not None), 'Please give the checkpoint path of the model which is waited to be converted and tested!'
print("=> Converting model from '{}'".format(args.convert_from))
if args.wo_ema_weight:
state_dict = torch.load(args.convert_from)['state_dict']
print('Loading pretrained parameter from state_dict...')
else:
try:
state_dict = torch.load(args.convert_from)['state_dict_ema']
print('Loading pretrained parameter from state_dict_ema...')
except:
state_dict = torch.load(args.convert_from)['state_dict']
print('Loading pretrained parameter from state_dict...')
new_state_dict = OrderedDict()
for (k, v) in state_dict.items():
if k.startswith('module'):
name = k[7:]
new_state_dict[name] = v
else:
new_state_dict[k] = v
for (k, v) in new_state_dict.items():
if (k.split('.')[(- 1)] == '_mask'):
print((torch.sum(torch.flatten(v)) / (((v.size()[0] * v.size()[1]) * v.size()[2]) * v.size()[3])))
model.load_state_dict(new_state_dict)
model = model.cpu()
convert_model(model, args)
converted_checkpoint = {'state_dict': model.state_dict()}
torch.save(converted_checkpoint, os.path.join(args.train_url, 'converted_model_{}.pth.tar'.format(args.model)))
model = nn.DataParallel(model).cuda()
print('=> Converting Completed!')
criterion = nn.CrossEntropyLoss().cuda()
cudnn.benchmark = True
valdir = (args.data_url + 'val/')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256, interpolation=(PIL.Image.BILINEAR if (args.model in ['cdnv2_c', 'converted_cdnv2_c']) else PIL.Image.BICUBIC)), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
(val_acc_top1, val_acc_top5, valid_loss) = ([], [], [])
(val_acc1, val_acc5, val_loss) = validate(val_loader, model, criterion, args)
val_acc_top1.append(val_acc1)
val_acc_top5.append(val_acc5)
valid_loss.append(val_loss)
df = pd.DataFrame({'val_acc_top1': val_acc_top1, 'val_acc_top5': val_acc_top5, 'valid_loss': valid_loss})
if args.train_url:
log_file = os.path.join((args.train_url + 'log.txt'))
with open(log_file, 'w') as f:
df.to_csv(f)
(n_flops, n_params) = measure_model(model, args.IMAGE_SIZE, args.IMAGE_SIZE)
print(('FLOPs: %.2fM, Params: %.2fM' % ((n_flops / 1000000.0), (n_params / 1000000.0))))
if args.train_url:
log_file = os.path.join((args.train_url + 'measure_model.txt'))
with open(log_file, 'w') as f:
f.write(str((n_flops / 1000000.0)))
f.write(str((n_params / 1000000.0)))
f.close()
return |
def main():
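# GLUE fine-tuning with Accelerate, combining optional structured pruning
# (apparently via the Intel Neural Compressor pruning API) and optional
# distillation from a teacher model.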
args = parse_args()
accelerator = Accelerator()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.info(accelerator.state)
logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
if (args.seed is not None):
set_seed(args.seed)
if accelerator.is_main_process:
if args.push_to_hub:
if (args.hub_model_id is None):
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
if (args.task_name is not None):
raw_datasets = load_dataset('glue', args.task_name)
else:
data_files = {}
if (args.train_file is not None):
data_files['train'] = args.train_file
if (args.validation_file is not None):
data_files['validation'] = args.validation_file
extension = (args.train_file if (args.train_file is not None) else args.validation_file).split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files)
if (args.task_name is not None):
is_regression = (args.task_name == 'stsb')
if (not is_regression):
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
if (args.distill_loss_weight > 0):
teacher_path = args.teacher_model_name_or_path
if (teacher_path is None):
teacher_path = args.model_name_or_path
teacher_model = AutoModelForSequenceClassification.from_pretrained(teacher_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
if (args.task_name is not None):
(sentence1_key, sentence2_key) = task_to_keys[args.task_name]
else:
non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
(sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
elif (len(non_label_column_names) >= 2):
(sentence1_key, sentence2_key) = non_label_column_names[:2]
else:
(sentence1_key, sentence2_key) = (non_label_column_names[0], None)
label_to_id = None
if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (args.task_name is not None) and (not is_regression)):
label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
logger.info(f'The configuration of the model provided the following label correspondence: {label_name_to_id}. Using it!')
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
elif (args.task_name is None):
label_to_id = {v: i for (i, v) in enumerate(label_list)}
if (label_to_id is not None):
model.config.label2id = label_to_id
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
elif ((args.task_name is not None) and (not is_regression)):
model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
padding = ('max_length' if args.pad_to_max_length else False)
def preprocess_function(examples):
texts = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
if ('label' in examples):
if (label_to_id is not None):
result['labels'] = [label_to_id[l] for l in examples['label']]
else:
result['labels'] = examples['label']
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names, desc='Running tokenizer on dataset')
train_dataset = processed_datasets['train']
eval_dataset = processed_datasets[('validation_matched' if (args.task_name == 'mnli') else 'validation')]
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
no_decay = ['bias', 'LayerNorm.weight']
no_decay_classifier = ['bias', 'LayerNorm.weight', 'classifier']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay_classifier)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if args.do_prune:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, betas=[0.9, 0.9])
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
if (args.distill_loss_weight > 0):
(teacher_model, model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(teacher_model, model, optimizer, train_dataloader, eval_dataloader)
teacher_model.eval()
else:
(model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
else:
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
if (args.task_name is not None):
metric = load_metric('glue', args.task_name)
else:
metric = load_metric('accuracy')
total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
completed_steps = 0
num_iterations = (len(train_dataset) / total_batch_size)
num_warm = int((args.sparsity_warm_epochs * num_iterations))
total_iterations = int((num_iterations * (args.num_train_epochs - args.cooldown_epochs)))
frequency = (int((((total_iterations - num_warm) + 1) / 40)) if (args.pruning_frequency == (- 1)) else args.pruning_frequency)
pruning_start = num_warm
pruning_end = total_iterations
if (not args.do_prune):
pruning_start = ((num_iterations * args.num_train_epochs) + 1)
pruning_end = pruning_start
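# Structured pruning setup: snip_momentum pruning over Linear layers (classifier, pooler
# and embeddings excluded), warmed up for sparsity_warm_epochs and stopped before the
# cooldown epochs; when do_prune is off, the start step is pushed past training so the pruner never fires.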
pruning_configs = [{'pruning_type': 'snip_momentum', 'pruning_scope': 'global', 'sparsity_decay_type': 'exp', 'excluded_op_names': ['classifier', 'pooler', '.*embeddings*'], 'pruning_op_types': ['Linear'], 'max_sparsity_ratio_per_op': 0.98}]
configs = WeightPruningConfig(pruning_configs, target_sparsity=args.target_sparsity, pattern=args.pruning_pattern, pruning_frequency=frequency, start_step=pruning_start, end_step=pruning_end, pruning_type=args.pruning_type)
pruner = Pruning(configs)
pruner.model = model
pruner.on_train_begin()
for epoch in range(args.num_train_epochs):
model.train()
for (step, batch) in enumerate(train_dataloader):
pruner.on_step_begin(local_step=step)
outputs = model(**batch, output_hidden_states=True)
loss = outputs.loss
loss = (loss / args.gradient_accumulation_steps)
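# Optional distillation: when distill_loss_weight > 0, the task loss is replaced by a
# weighted MSE between the student's and teacher's last hidden states.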
if (args.distill_loss_weight > 0.0):
distill_loss_weight = args.distill_loss_weight
with torch.no_grad():
teacher_outputs = teacher_model(**batch, output_hidden_states=True)
MSELoss = torch.nn.MSELoss().cuda()
loss = (distill_loss_weight * MSELoss(outputs['hidden_states'][(- 1)], teacher_outputs['hidden_states'][(- 1)]))
accelerator.backward(loss)
if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
pruner.on_before_optimizer_step()
optimizer.step()
pruner.on_after_optimizer_step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if (completed_steps >= args.max_train_steps):
break
model.eval()
for (step, batch) in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = (outputs.logits.argmax(dim=(- 1)) if (not is_regression) else outputs.logits.squeeze())
metric.add_batch(predictions=accelerator.gather(predictions), references=accelerator.gather(batch['labels']))
eval_metric = metric.compute()
logger.info(f'epoch {epoch}: {eval_metric}')
if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
accelerator.wait_for_everyone()
accelerator.save_state(args.output_dir)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
config.save_pretrained(args.output_dir)
repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
if (args.output_dir is not None):
accelerator.wait_for_everyone()
file = os.path.join(args.output_dir, f'epoch{epoch}')
accelerator.save_state(file)
if (args.output_dir is not None):
accelerator.wait_for_everyone()
accelerator.save_state(args.output_dir)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
config.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
if (args.task_name == 'mnli'):
eval_dataset = processed_datasets['validation_mismatched']
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
eval_dataloader = accelerator.prepare(eval_dataloader)
model.eval()
for (step, batch) in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=(- 1))
metric.add_batch(predictions=accelerator.gather(predictions), references=accelerator.gather(batch['labels']))
eval_metric = metric.compute()
logger.info(f'mnli-mm: {eval_metric}') |