code stringlengths 101 5.91M |
|---|
def number_double_double_solutions(vrblvl=0):
    """Return the number of solutions stored in double double precision.

    The count is fetched through job 342 of the PHCpack C interface.
    The ``vrblvl`` parameter controls the verbosity of the diagnostics.
    """
    if vrblvl > 0:
        print('in number_double_double_solutions ...')
    phc = get_phcfun()
    apar = pointer(c_int32(0))
    bcnt = pointer(c_int32(0))  # receives the solution count
    cpar = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> number_double_double_solutions calls phc', end='')
    retval = phc(342, apar, bcnt, cpar, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return bcnt[0]
def lightgbm_eval_metric(ml_task, automl_eval_metric):
    """Translate an AutoML metric name into a LightGBM eval metric.

    Returns a ``(lightgbm_metric, custom_eval_metric)`` tuple where
    ``custom_eval_metric`` is the AutoML metric name when LightGBM has
    no built-in equivalent (a custom callback is required), else None.
    """
    if automl_eval_metric == 'user_defined_metric':
        return ('custom', automl_eval_metric)
    metric_name_mapping = {
        BINARY_CLASSIFICATION: {
            'auc': 'auc',
            'logloss': 'binary_logloss',
            'f1': 'custom',
            'average_precision': 'custom',
            'accuracy': 'custom',
        },
        MULTICLASS_CLASSIFICATION: {
            'logloss': 'multi_logloss',
            'f1': 'custom',
            'accuracy': 'custom',
        },
        REGRESSION: {
            'rmse': 'rmse',
            'mse': 'l2',
            'mae': 'l1',
            'mape': 'mape',
            'r2': 'custom',
            'spearman': 'custom',
            'pearson': 'custom',
        },
    }
    metric = metric_name_mapping[ml_task][automl_eval_metric]
    needs_custom = automl_eval_metric in (
        'r2', 'spearman', 'pearson', 'f1', 'average_precision', 'accuracy')
    custom_eval_metric = automl_eval_metric if needs_custom else None
    return (metric, custom_eval_metric)
def build_model(obs_space, action_space, args, device):
    """Construct an A3C model selected by ``args.model``.

    Chooses ``A3C_Single`` when the model name contains 'single' and
    ``A3C_Multi`` when it contains 'multi'.  The model is switched to
    training mode before it is returned.

    Raises:
        ValueError: if ``args.model`` matches neither variant.  The
            original code fell through and hit a NameError on ``model``.
    """
    name = args.model
    if 'single' in name:
        model = A3C_Single(obs_space, action_space, args, device)
    elif 'multi' in name:
        model = A3C_Multi(obs_space, action_space, args, device)
    else:
        raise ValueError(f'unknown model name: {name!r}')
    model.train()
    return model
# Bug fix: the decorator was mangled to ``.xfail('env.PYPY')`` (a syntax
# error); restored to the standard pytest marker form.
@pytest.mark.xfail('env.PYPY')
def test_non_final_final():
    """Subclassing a type exposed as final must raise TypeError."""
    with pytest.raises(TypeError) as exc_info:

        class PyNonFinalFinalChild(m.IsNonFinalFinal):
            pass

    assert str(exc_info.value).endswith('is not an acceptable base type')
def lang_type(filename):
    """Return the language name for a source file, judged by its suffix.

    '.proto' intentionally maps to 'go' (matching the original mapping).
    For an unsupported suffix a message is printed and the process exits.

    Returns:
        str: one of 'Python', 'go', 'shell' or 'cpp'.
    """
    suffix_to_lang = {
        '.py': 'Python',
        '.go': 'go',
        '.proto': 'go',
        '.sh': 'shell',
        '.cc': 'cpp',
        '.h': 'cpp',
        '.hpp': 'cpp',
    }
    for suffix, lang in suffix_to_lang.items():
        if filename.endswith(suffix):
            return lang
    # Bug fix: the original passed filename as a second print() argument
    # after a '%s' placeholder, so the placeholder was never substituted.
    print('Unsupported filetype %s' % filename)
    # NOTE(review): exiting with status 0 on an error looks wrong, but is
    # preserved so existing callers/scripts keep their observed behavior.
    exit(0)
class SBXCrossoverTestCases(unittest.TestCase):
    """Unit tests for the SBX (simulated binary) crossover operator."""

    def test_should_constructor_assign_the_correct_probability_value(self):
        crossover_probability = 0.1
        crossover: SBXCrossover = SBXCrossover(crossover_probability, 2.0)
        self.assertEqual(crossover_probability, crossover.probability)

    def test_should_constructor_assign_the_correct_distribution_index_value(self):
        distribution_index = 10.5
        crossover: SBXCrossover = SBXCrossover(0.1, distribution_index)
        self.assertEqual(distribution_index, crossover.distribution_index)

    def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
        with self.assertRaises(Exception):
            SBXCrossover(1.5, 2.0)

    def test_should_constructor_raise_an_exception_if_the_probability_is_negative(self):
        with self.assertRaises(Exception):
            SBXCrossover((- 0.1), 2.0)

    def test_should_constructor_raise_an_exception_if_the_distribution_index_is_negative(self):
        with self.assertRaises(Exception):
            SBXCrossover(0.1, (- 2.0))

    def test_should_execute_with_an_invalid_solution_list_size_raise_an_exception(self):
        # execute() requires exactly two parent solutions.
        crossover: SBXCrossover = SBXCrossover(0.1, 20.0)
        solution = FloatSolution([1, 2], [2, 4], 2, 2)
        with self.assertRaises(Exception):
            crossover.execute([solution])
        with self.assertRaises(Exception):
            crossover.execute([solution, solution, solution])

    def test_should_execute_return_the_parents_if_the_crossover_probability_is_zero(self):
        # With probability 0.0 the offspring must copy the parents exactly.
        crossover: SBXCrossover = SBXCrossover(0.0, 20.0)
        solution1 = FloatSolution([1, 2], [2, 4], 2, 2)
        solution2 = FloatSolution([1, 2], [2, 4], 2, 2)
        solution1.variables = [1.5, 2.7]
        solution2.variables = [1.7, 3.6]
        offspring = crossover.execute([solution1, solution2])
        self.assertEqual(2, len(offspring))
        self.assertEqual(solution1.variables, offspring[0].variables)
        self.assertEqual(solution2.variables, offspring[1].variables)

    def test_should_execute_work_with_a_solution_subclass_of_float_solution(self):
        # Any FloatSolution subclass must be accepted by the operator.
        class NewFloatSolution(FloatSolution):
            def __init__(self, lower_bound: List[float], upper_bound: List[float], number_of_objectives: int, number_of_constraints: int=0):
                super(NewFloatSolution, self).__init__(lower_bound, upper_bound, number_of_objectives, number_of_constraints)

        solution1 = NewFloatSolution([1, 2], [2, 4], 2, 2)
        solution2 = NewFloatSolution([1, 2], [2, 4], 2, 2)
        solution1.variables = [1.5, 2.7]
        solution2.variables = [1.7, 3.6]
        crossover: SBXCrossover = SBXCrossover(0.0, 20.0)
        offspring = crossover.execute([solution1, solution2])
        self.assertEqual(2, len(offspring))
        self.assertEqual(solution1.variables, offspring[0].variables)
        self.assertEqual(solution2.variables, offspring[1].variables)

    def test_should_execute_produce_valid_solutions_when_crossing_two_single_variable_solutions(self):
        # TODO: placeholder — the single-variable crossover case is untested.
        pass
def save_git_diff_to_file(git_diff_file_path):
    """Write ``git diff --patch HEAD`` of the working tree to a file.

    Args:
        git_diff_file_path: destination path for the patch text.

    Returns:
        int: the git process exit status (0 on success).  The original
        returned None; the richer return value is backward-compatible.
    """
    import subprocess
    # Bug fix: the output file handle was never closed; use a context
    # manager, and subprocess.run instead of the manual Popen/wait pair.
    with open(git_diff_file_path, 'w') as git_diff_file:
        completed = subprocess.run(['git', 'diff', '--patch', 'HEAD'],
                                   stdout=git_diff_file)
    return completed.returncode
class Params():
    """Run configuration for drawing prior samples from a checkpoint."""

    def __init__(self, weight_path):
        self.device = settings.torch_device()
        self.weight_path = weight_path
        self.batch_size = 200
        self.num_batches = 100
        # Derive a unique run name from the checkpoint stem and a timestamp.
        stamp = strftime('%y-%m-%d_%H:%M:%S', gmtime())
        checkpoint_stem = path.splitext(path.basename(self.weight_path))[0]
        self.run_name = f'prior_samples_for_{checkpoint_stem}_done_{stamp}_'
        print(f'Run name is {self.run_name}')
        print(f'Checkpoint name is {self.weight_path}')
def vgg10_w4a4_radioml(target_platform=None):
    """Build the FINN overlay for the 4-bit (w4a4) VGG10 RadioML network."""
    platform = resolve_target_platform(target_platform)
    mode = get_driver_mode()
    # Locate the bitfile matching this model and platform combination.
    bitfile = find_bitfile('vgg10-radioml-w4a4', platform)
    return FINNExampleOverlay(
        bitfile, mode, _radioml_io_shape_dict, fclk_mhz=250.0)
class CNN(models.Sequential):
    """Small convolutional classifier.

    Two conv layers, max pooling and dropout, followed by a dense head
    with a softmax output.  Compiled with RMSprop on categorical
    cross-entropy, tracking accuracy.
    """

    def __init__(self, input_shape, num_classes):
        super().__init__()
        stack = [
            layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Dropout(0.25),
            layers.Flatten(),
            layers.Dense(128, activation='relu'),
            layers.Dropout(0.5),
            layers.Dense(num_classes, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss=keras.losses.categorical_crossentropy, optimizer='rmsprop', metrics=['accuracy'])
class TrexSpoLoader(Loader):
    """Loader for the T-REx SPO (subject/predicate/object) corpus.

    Converts each sentence that carries at least one fully-grounded triple
    into word-level boundary annotations stored in a DataTable.
    """

    def __init__(self, debug=False):
        super().__init__()
        # In debug mode only the first 100 documents are loaded.
        self.debug = debug

    def _load(self, path):
        """Load one JSON split and return a DataTable of (sentence, triple) rows."""
        datas = load_json(path)
        if self.debug:
            datas = datas[0:100]
        dataset = DataTable()
        for data in tqdm(datas):
            text = data['text']
            sentences_boundaries = data['sentences_boundaries']
            words_boundaries = data['words_boundaries']
            triples = data['triples']
            if (not triples):
                continue
            # Number of words consumed by earlier sentences of this document.
            prev_length = 0
            for (i, sentences_boundary) in enumerate(sentences_boundaries):
                # Map document-level character offsets to sentence-level
                # word indices for the current sentence.
                charid2wordid = {}
                sentence = []
                for (j, (start, end)) in enumerate(words_boundaries):
                    if ((start >= sentences_boundary[0]) and (end <= sentences_boundary[1])):
                        if (start == sentences_boundary[0]):
                            # NOTE(review): assumes the word opening the sentence
                            # is exactly the j-th word overall — TODO confirm.
                            assert (j == prev_length)
                        charid2wordid = {**charid2wordid, **{key: (j - prev_length) for key in range(start, (end + 1))}}
                        sentence.append(text[start:end])
                prev_length += len(sentence)
                triples_one_sentence = []
                for triple in triples:
                    if (triple['sentence_id'] != i):
                        continue
                    # Keep only triples whose three members exist and carry
                    # character boundaries.
                    if ((triple['subject'] is not None) and (triple['predicate'] is not None) and (triple['object'] is not None)):
                        (subject, predicate, object) = (triple['subject'], triple['predicate'], triple['object'])
                        if ((subject['boundaries'] is not None) and (predicate['boundaries'] is not None) and (object['boundaries'] is not None)):
                            keys = ['subject', 'predicate', 'object']
                            for key in keys:
                                (start, end) = triple[key]['boundaries']
                                # Rewrite character boundaries as a sorted list
                                # of word indices within the sentence.
                                triple[key]['boundaries'] = sorted(list(set([charid2wordid[charid] for charid in range(start, end)])))
                            triples_one_sentence.append({'subject': triple['subject']['boundaries'], 'predicate': triple['predicate']['boundaries'], 'object': triple['object']['boundaries']})
                if (not triples_one_sentence):
                    continue
                dataset('sentence', sentence)
                dataset('triple', triples_one_sentence)
        return dataset

    def load_all(self, path):
        """Load the train/dev/test splits under ``path`` and return them as a list."""
        train_set = self._load(os.path.join(path, 'train.json'))
        dev_set = self._load(os.path.join(path, 'dev.json'))
        test_set = self._load(os.path.join(path, 'test.json'))
        return [train_set, dev_set, test_set]
def get_systems(user_id):
    """Fetch every system administered by the given user.

    Returns a list of dict rows (system_id, system_name, api_key, active).
    """
    sql = 'SELECT system_id, system_name, api_key, active\n                 FROM systems WHERE admin_user_id = %s'
    with closing(getDb().cursor(dictionary=True)) as cur:
        cur.execute(sql, (user_id,))
        return cur.fetchall()
def double_estimated_distance(vrblvl=0):
    """Return the estimated distance to the nearest solution.

    Computed in standard double precision via job 887 of the PHCpack C
    interface; ``vrblvl`` controls the verbosity of the diagnostics.
    """
    if vrblvl > 0:
        print('in double_estimated_distance ...')
    phc = get_phcfun()
    aprm = pointer(c_int32(0))
    bprm = pointer(c_int32(0))
    dist = pointer(c_double(0.0))  # receives the estimated distance
    level = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> double_estimated_distance calls phc', end='')
    code = phc(887, aprm, bprm, dist, level)
    if vrblvl > 0:
        print(', return value :', code)
        print('the estimated distance :', dist[0])
    return dist[0]
# NOTE(review): the bare string below looks like the argument of a mangled
# class decorator (probably ``@DatasetReader.register('weak_label')``);
# as written it is a harmless no-op expression — confirm against the source.
('weak_label')
class WeakLabelDatasetReader(DatasetReader):
    """Reads pickled, weakly-labelled tagging data into AllenNLP Instances.

    With ``split_sentences=False`` each stored instance is yielded whole
    (its tokens re-wrapped with this reader's indexers).  With
    ``split_sentences=True`` each instance is cut at its stored
    ``sentence_spans`` and one Instance per non-empty sentence is yielded,
    carrying any tags, unary/pairwise marginals and vote masks along.
    """

    def __init__(self, token_indexers: Dict[(str, TokenIndexer)]=None, split_sentences: bool=False) -> None:
        super().__init__(lazy=False)
        self.token_indexers = token_indexers
        # When True, _read() yields one Instance per sentence span.
        self.split_sentences = split_sentences

    def text_to_instance(self, tokens: List[Token]) -> Instance:
        """Wrap ``tokens`` in an Instance with a single 'tokens' TextField."""
        tokens_field = TextField(tokens, self.token_indexers)
        fields = {'tokens': tokens_field}
        return Instance(fields)

    def _read(self, file_path: str) -> Iterator[Instance]:
        """Yield Instances from a pickled list of pre-built instances."""
        with open(file_path, 'rb') as f:
            # NOTE(review): pickle.load executes arbitrary code from the
            # file; the path is assumed to be trusted project data.
            data = pickle.load(f)
        if (not self.split_sentences):
            # Re-wrap the tokens with this reader's indexers, keep documents whole.
            for instance in data:
                tokens = [token for token in instance['tokens']]
                tokens_field = TextField(tokens, self.token_indexers)
                instance.add_field('tokens', tokens_field)
                (yield instance)
        else:
            for instance in data:
                if ('sentence_spans' not in instance):
                    raise ValueError("No sentence spans detected in the dataset you're attempting to read. Did you forget to generate them?")
                tokens_field = instance['tokens']
                # Optional companion fields; None when absent from the instance.
                tags_field = (instance['tags'] if ('tags' in instance) else None)
                unary_marginals_field = (instance['unary_marginals'] if ('unary_marginals' in instance) else None)
                pairwise_marginals_field = (instance['pairwise_marginals'] if ('pairwise_marginals' in instance) else None)
                vote_mask_field = (instance['vote_mask'] if ('vote_mask' in instance) else None)
                tokens = [token for token in tokens_field]
                # NOTE(review): iterates tags_field even when it is None —
                # an instance without 'tags' would raise TypeError here.
                tags = [tag for tag in tags_field]
                (unary_marginals, pairwise_marginals, vote_mask) = [None, None, None]
                # Materialize tensor-backed fields as numpy arrays so they
                # can be sliced per sentence below.
                if unary_marginals_field:
                    unary_marginals = unary_marginals_field.as_tensor(unary_marginals_field.get_padding_lengths()).numpy()
                if pairwise_marginals_field:
                    pairwise_marginals = pairwise_marginals_field.as_tensor(pairwise_marginals_field.get_padding_lengths()).numpy()
                if vote_mask_field:
                    vote_mask = vote_mask_field.as_tensor(vote_mask_field.get_padding_lengths()).numpy()
                sentence_delimiters = instance['sentence_spans'].metadata
                for delimiter in sentence_delimiters:
                    sentence_tokens = tokens[delimiter[0]:delimiter[1]]
                    if (len(sentence_tokens) == 0):
                        continue
                    sentence_tokens_field = TextField(sentence_tokens, self.token_indexers)
                    fields = {'tokens': sentence_tokens_field}
                    if (tags is not None):
                        sentence_tags = tags[delimiter[0]:delimiter[1]]
                        assert (len(sentence_tags) == len(sentence_tokens))
                        fields['tags'] = SequenceLabelField(labels=sentence_tags, sequence_field=sentence_tokens_field)
                    if (unary_marginals is not None):
                        sentence_unary_marginals = unary_marginals[delimiter[0]:delimiter[1]]
                        assert (len(sentence_unary_marginals) == len(sentence_tokens))
                        fields['unary_marginals'] = ArrayField(sentence_unary_marginals)
                    if (pairwise_marginals is not None):
                        sentence_pairwise_marginals = pairwise_marginals[delimiter[0]:delimiter[1]]
                        assert (len(sentence_pairwise_marginals) == len(sentence_tokens))
                        fields['pairwise_marginals'] = ArrayField(sentence_pairwise_marginals)
                    if (vote_mask is not None):
                        sentence_vote_mask = vote_mask[delimiter[0]:delimiter[1]]
                        assert (len(sentence_vote_mask) == len(sentence_tokens))
                        fields['vote_mask'] = ArrayField(sentence_vote_mask)
                    (yield Instance(fields))
class CIFAR100_LT(CIFAR10_LT):
    """Long-tailed CIFAR-100 dataset.

    Overrides the CIFAR-10 archive metadata with the CIFAR-100
    equivalents (the same constants torchvision's CIFAR100 uses).
    """

    base_folder = 'cifar-100-python'
    # Bug fix: the url literal was truncated in this copy (unterminated
    # string); restored to the canonical CIFAR-100 download location.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    filename = 'cifar-100-python.tar.gz'
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # (archive member name, expected md5) pairs for each split.
    train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']]
    test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']]
    meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
    # Number of classes used by the long-tailed sampling schedule.
    cls_num = 100
# Bug fix: the decorator was mangled to ``.parametrize(...)`` (a syntax
# error); restored to the standard pytest marker form.
@pytest.mark.parametrize('time_limit', [3, 4, 5])
def test_rubiks_cube__done(time_limit: int) -> None:
    """The episode must terminate after exactly ``time_limit`` steps."""
    env = RubiksCube(time_limit=time_limit)
    (state, timestep) = env.reset(jax.random.PRNGKey(0))
    action = env.action_spec().generate_value()
    episode_length = 0
    step_fn = jax.jit(env.step)  # jit once, reuse in the loop
    while not timestep.last():
        (state, timestep) = step_fn(state, action)
        episode_length += 1
        if episode_length > 10:
            # Safety net: every parametrized limit is well below 10.
            raise Exception('Entered infinite loop')
    assert episode_length == time_limit
def get_libc_version():
    """Return the libc vendor and version joined by '-', or 'N/A' off Linux."""
    import platform
    if get_platform() != 'linux':
        return 'N/A'
    vendor_and_version = platform.libc_ver()
    return '-'.join(vendor_and_version)
def ncbi_annotators(docs):
    """Apply the NCBI-disease tagging and linking rules to ``docs`` in place.

    Builds dictionary labelling functions from the AutoNER core/full
    dictionaries, applies a collection of heuristic TaggingRules (disease
    suffixes, 'X deficiency/disorder/lesion/syndrome' compounds, body-part
    terms, POS/stop-word/punctuation negatives) and LinkingRules
    (possessives, hyphenation, ELMo similarity, frequent bigrams,
    dictionary phrases), and returns ``docs``.
    """
    # Split the core dictionary: multi-word or longer terms can be matched
    # uncased; very short single tokens only exactly (to avoid false hits).
    dict_core = set()
    dict_core_exact = set()
    with open('../Dependency/AutoNER_dicts/NCBI/dict_core.txt') as f:
        for line in f.readlines():
            line = line.strip().split()
            term = tuple(line[1:])
            if ((len(term) > 1) or (len(term[0]) > 3)):
                dict_core.add(term)
            else:
                dict_core_exact.add(term)
    # Add inherited/hereditary prefixed variants of every core term.
    to_add = set()
    for term in dict_core:
        to_add.add((('inherited',) + term))
        to_add.add((('Inherited',) + term))
        to_add.add((('hereditary',) + term))
        to_add.add((('Hereditary',) + term))
    dict_core |= to_add
    # Drop two gene symbols from the exact dictionary (not disease names).
    dict_core_exact.remove(('WT1',))
    dict_core_exact.remove(('VHL',))
    dict_full = set()
    with open('../Dependency/AutoNER_dicts/NCBI/dict_full.txt') as f:
        for line in f.readlines():
            line = line.strip().split()
            dict_full.add(tuple(line))
    lf = DictionaryMatcher('CoreDictionaryUncased', dict_core, uncased=True, i_label='I')
    lf.apply(docs)
    lf = DictionaryMatcher('CoreDictionaryExact', dict_core_exact, i_label='I')
    lf.apply(docs)

    class CancerLike(TaggingRule):
        # Tag tokens ending in tumour-like suffixes (optionally pluralised).
        def apply_instance(self, instance):
            tokens = [token.text.lower() for token in instance['tokens']]
            labels = (['ABS'] * len(tokens))
            suffixes = ('edema', 'toma', 'coma', 'noma')
            for (i, token) in enumerate(tokens):
                for suffix in suffixes:
                    if (token.endswith(suffix) or token.endswith((suffix + 's'))):
                        labels[i] = 'I'
            return labels
    lf = CancerLike()
    lf.apply(docs)

    class CommonSuffixes(TaggingRule):
        # Medical suffixes that strongly indicate a disease mention.
        suffixes = {'agia', 'cardia', 'trophy', 'toxic', 'itis', 'emia', 'pathy', 'plasia'}

        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range(len(instance['tokens'])):
                for suffix in self.suffixes:
                    if instance['tokens'][i].lemma_.endswith(suffix):
                        labels[i] = 'I'
            return labels
    lf = CommonSuffixes()
    lf.apply(docs)

    class Deficiency(TaggingRule):
        # Tag '<compound>+ deficiency' and 'deficiency of <noun phrase>'.
        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range((len(instance['tokens']) - 1)):
                if ((instance['tokens'][i].dep_ == 'compound') and (instance['tokens'][(i + 1)].lemma_ == 'deficiency')):
                    labels[i] = 'I'
                    labels[(i + 1)] = 'I'
                    # Extend leftwards over the chain of compound modifiers.
                    for j in range((i - 1), (- 1), (- 1)):
                        if (instance['tokens'][j].dep_ == 'compound'):
                            labels[j] = 'I'
                        else:
                            break
            for i in range((len(instance['tokens']) - 2)):
                if ((instance['tokens'][i].lemma_ == 'deficiency') and (instance['tokens'][(i + 1)].lemma_ == 'of')):
                    labels[i] = 'I'
                    labels[(i + 1)] = 'I'
                    # Tag up to and including the first run of nouns after 'of'.
                    nnp_active = False
                    for j in range((i + 2), len(instance['tokens'])):
                        if (instance['tokens'][j].pos_ in ('NOUN', 'PROPN')):
                            if (not nnp_active):
                                nnp_active = True
                        elif nnp_active:
                            break
                        labels[j] = 'I'
            return labels
    lf = Deficiency()
    lf.apply(docs)

    class Disorder(TaggingRule):
        # Tag '<compound>+ disorder' phrases.
        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range((len(instance['tokens']) - 1)):
                if ((instance['tokens'][i].dep_ == 'compound') and (instance['tokens'][(i + 1)].lemma_ == 'disorder')):
                    labels[i] = 'I'
                    labels[(i + 1)] = 'I'
                    for j in range((i - 1), (- 1), (- 1)):
                        if (instance['tokens'][j].dep_ == 'compound'):
                            labels[j] = 'I'
                        else:
                            break
            return labels
    lf = Disorder()
    lf.apply(docs)

    class Lesion(TaggingRule):
        # Tag '<compound>+ lesion' phrases.
        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range((len(instance['tokens']) - 1)):
                if ((instance['tokens'][i].dep_ == 'compound') and (instance['tokens'][(i + 1)].lemma_ == 'lesion')):
                    labels[i] = 'I'
                    labels[(i + 1)] = 'I'
                    for j in range((i - 1), (- 1), (- 1)):
                        if (instance['tokens'][j].dep_ == 'compound'):
                            labels[j] = 'I'
                        else:
                            break
            return labels
    lf = Lesion()
    lf.apply(docs)

    class Syndrome(TaggingRule):
        # Tag '<compound>+ syndrome' phrases.
        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range((len(instance['tokens']) - 1)):
                if ((instance['tokens'][i].dep_ == 'compound') and (instance['tokens'][(i + 1)].lemma_ == 'syndrome')):
                    labels[i] = 'I'
                    labels[(i + 1)] = 'I'
                    for j in range((i - 1), (- 1), (- 1)):
                        if (instance['tokens'][j].dep_ == 'compound'):
                            labels[j] = 'I'
                        else:
                            break
            return labels
    lf = Syndrome()
    lf.apply(docs)

    # Temporarily tag UMLS body-part terms so BodyTerms can build on them.
    terms = []
    with open('../Dependency/umls/umls_body_part.txt', 'r') as f:
        for line in f.readlines():
            terms.append(line.strip().split(' '))
    lf = DictionaryMatcher('TEMP', terms, i_label='TEMP', uncased=True, match_lemmas=True)
    lf.apply(docs)

    class BodyTerms(TaggingRule):
        # '<body part> cancer/damage/...': tag both tokens as disease.
        def apply_instance(self, instance):
            tokens = [token.text.lower() for token in instance['tokens']]
            labels = (['ABS'] * len(tokens))
            terms = {'cancer', 'cancers', 'damage', 'disease', 'diseases', 'pain', 'injury', 'injuries'}
            for i in range(0, (len(tokens) - 1)):
                if (instance['WISER_LABELS']['TEMP'][i] == 'TEMP'):
                    if (tokens[(i + 1)] in terms):
                        labels[i] = 'I'
                        labels[(i + 1)] = 'I'
            return labels
    lf = BodyTerms()
    lf.apply(docs)
    # The TEMP annotations were only needed by BodyTerms; drop them.
    for doc in docs:
        del doc['WISER_LABELS']['TEMP']

    class OtherPOS(TaggingRule):
        # Parts of speech that are (almost) never part of a disease mention.
        other_pos = {'ADP', 'ADV', 'DET', 'VERB'}

        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range(0, len(instance['tokens'])):
                if (instance['tokens'][i].pos_ in self.other_pos):
                    labels[i] = 'O'
            return labels
    lf = OtherPOS()
    lf.apply(docs)

    stop_words = {'a', 'as', 'be', 'but', 'do', 'even', 'for', 'from', 'had', 'has', 'have', 'i', 'in', 'is', 'its', 'just', 'my', 'no', 'not', 'on', 'or', 'that', 'the', 'these', 'this', 'those', 'to', 'very', 'what', 'which', 'who', 'with'}

    class StopWords(TaggingRule):
        # Stop words are labelled 'O' (outside any mention).
        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range(len(instance['tokens'])):
                if (instance['tokens'][i].lemma_ in stop_words):
                    labels[i] = 'O'
            return labels
    lf = StopWords()
    lf.apply(docs)

    class Punctuation(TaggingRule):
        # Punctuation tokens are labelled 'O'.
        other_punc = {'.', ',', '?', '!', ';', ':', '(', ')', '%', '<', '>', '=', '+', '/', '\\'}

        def apply_instance(self, instance):
            labels = (['ABS'] * len(instance['tokens']))
            for i in range(len(instance['tokens'])):
                if (instance['tokens'][i].text in self.other_punc):
                    labels[i] = 'O'
            return labels
    lf = Punctuation()
    lf.apply(docs)

    class PossessivePhrase(LinkingRule):
        # Link tokens adjacent to a possessive "'s".
        def apply_instance(self, instance):
            links = ([0] * len(instance['tokens']))
            for i in range(1, len(instance['tokens'])):
                if ((instance['tokens'][(i - 1)].text == "'s") or (instance['tokens'][i].text == "'s")):
                    links[i] = 1
            return links
    lf = PossessivePhrase()
    lf.apply(docs)

    class HyphenatedPhrase(LinkingRule):
        # Link tokens adjacent to a hyphen.
        def apply_instance(self, instance):
            links = ([0] * len(instance['tokens']))
            for i in range(1, len(instance['tokens'])):
                if ((instance['tokens'][(i - 1)].text == '-') or (instance['tokens'][i].text == '-')):
                    links[i] = 1
            return links
    lf = HyphenatedPhrase()
    lf.apply(docs)

    # Link adjacent tokens whose ELMo embeddings are similar (threshold 0.8).
    lf = ElmoLinkingRule(0.8)
    lf.apply(docs)

    class CommonBigram(LinkingRule):
        # Link bigrams that occur frequently (>= 6 times) in the instance.
        def apply_instance(self, instance):
            links = ([0] * len(instance['tokens']))
            tokens = [token.text.lower() for token in instance['tokens']]
            bigrams = {}
            for i in range(1, len(tokens)):
                bigram = (tokens[(i - 1)], tokens[i])
                if (bigram in bigrams):
                    bigrams[bigram] += 1
                else:
                    bigrams[bigram] = 1
            for i in range(1, len(tokens)):
                bigram = (tokens[(i - 1)], tokens[i])
                count = bigrams[bigram]
                if (count >= 6):
                    links[i] = 1
            return links
    lf = CommonBigram()
    lf.apply(docs)

    class ExtractedPhrase(LinkingRule):
        # Link the interior tokens of any multi-token dictionary phrase.
        def __init__(self, terms):
            # Index the phrases by first token; longest candidates first so
            # greedy matching below prefers the longest phrase.
            self.term_dict = {}
            for term in terms:
                term = [token.lower() for token in term]
                if (term[0] not in self.term_dict):
                    self.term_dict[term[0]] = []
                self.term_dict[term[0]].append(term)
            for first_token in self.term_dict.keys():
                to_sort = self.term_dict[first_token]
                self.term_dict[first_token] = sorted(to_sort, reverse=True, key=(lambda x: len(x)))

        def apply_instance(self, instance):
            tokens = [token.text.lower() for token in instance['tokens']]
            links = ([0] * len(instance['tokens']))
            i = 0
            while (i < len(tokens)):
                if (tokens[i] in self.term_dict):
                    candidates = self.term_dict[tokens[i]]
                    for c in candidates:
                        if ((i + len(c)) <= len(tokens)):
                            equal = True
                            for j in range(len(c)):
                                if (tokens[(i + j)] != c[j]):
                                    equal = False
                                    break
                            if equal:
                                # Link every token of the phrase to the previous one
                                # and skip past the matched span.
                                for j in range((i + 1), (i + len(c))):
                                    links[j] = 1
                                i = ((i + len(c)) - 1)
                                break
                i += 1
            return links
    lf = ExtractedPhrase(dict_full)
    lf.apply(docs)
    return docs
class DiffusionDetDatasetMapper():
    """Detectron2 dataset mapper for DiffusionDet.

    Applies the configured resize/crop augmentation and attaches
    pre-computed candidate boxes (score >= 0.5) loaded once from
    per-split inference JSON files, looked up by image id in __call__.
    """

    def __init__(self, cfg, is_train=True):
        if (cfg.INPUT.CROP.ENABLED and is_train):
            # Resize + random crop pipeline, used half the time in training.
            self.crop_gen = [T.ResizeShortestEdge([400, 500, 600], sample_style='choice'), T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)]
        else:
            self.crop_gen = None
        self.tfm_gens = build_transform_gen(cfg, is_train)
        logging.getLogger(__name__).info('Full TransformGens used in training: {}, crop: {}'.format(str(self.tfm_gens), str(self.crop_gen)))
        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train
        # Pre-computed inference results used as candidate boxes.
        boxes_train = 'ibrahim/Diseasedataset_base_enumeration_m_t_inference_train/inference/coco_instances_results.json'
        boxes_valid = 'ibrahim/Diseasedataset_base_enumeration_m_t_inference_val/inference/coco_instances_results.json'
        self.train_boxes = []
        self.valid_boxes = []
        # NOTE(review): these file handles are never closed.
        f_train = open(boxes_train)
        dict_train = json.load(f_train)
        f_valid = open(boxes_valid)
        dict_valid = json.load(f_valid)
        # Keep only confident detections.
        for inference in dict_train:
            if (inference['score'] >= 0.5):
                self.train_boxes.append(inference)
        for inference in dict_valid:
            if (inference['score'] >= 0.5):
                self.valid_boxes.append(inference)

    def return_boxes_for_current_image(self, i):
        """Return the candidate bboxes recorded for image id ``i``."""
        boxes = []
        if self.is_train:
            for box in self.train_boxes:
                if (box['image_id'] == i):
                    boxes.append(box['bbox'])
        else:
            for box in self.valid_boxes:
                if (box['image_id'] == i):
                    boxes.append(box['bbox'])
        return boxes

    def __call__(self, dataset_dict):
        """Map one detectron2 dataset dict to a model-ready example."""
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified below
        image = utils.read_image(dataset_dict['file_name'], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        if (self.crop_gen is None):
            (image, transforms) = T.apply_transform_gens(self.tfm_gens, image)
        elif (np.random.rand() > 0.5):
            # With probability 0.5, skip the crop even when configured.
            (image, transforms) = T.apply_transform_gens(self.tfm_gens, image)
        else:
            # Insert the crop transforms just before the last tfm_gen.
            (image, transforms) = T.apply_transform_gens(((self.tfm_gens[:(- 1)] + self.crop_gen) + self.tfm_gens[(- 1):]), image)
        image_shape = image.shape[:2]
        # HWC array -> CHW tensor expected by the model.
        dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if (not self.is_train):
            dataset_dict.pop('annotations', None)
            return dataset_dict
        if ('annotations' in dataset_dict):
            for anno in dataset_dict['annotations']:
                # Segmentation and keypoints are not used here.
                anno.pop('segmentation', None)
                anno.pop('keypoints', None)
            usepretrainedboxes = True
            if usepretrainedboxes:
                imageid = dataset_dict['image_id']
                bboxpre = self.return_boxes_for_current_image(imageid)
                # NOTE(review): ``bbox_pre`` is a non-standard argument — this
                # relies on a patched transform_instance_annotations; confirm.
                annos = [utils.transform_instance_annotations(obj, transforms, image_shape, bbox_pre=bboxpre) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
            else:
                annos = [utils.transform_instance_annotations(obj, transforms, image_shape) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
            # The string below is dead (commented-out) code preserved as a
            # no-op literal in the original; kept byte-identical.
            '\n    if usepretrainedboxes:\n    obj =\n    preannos = [\n    utils.transform_instance_annotations(, transforms, image_shape)\n    for obj in dataset_dict.pop("annotations")\n    if obj.get("iscrowd", 0) == 0\n    ]\n    '
            instances = utils.annotations_to_instances(annos, image_shape)
            dataset_dict['instances'] = utils.filter_empty_instances(instances)
        return dataset_dict
class DPTDepthModel(DPT):
    """DPT monocular depth model with an upsampling regression head.

    With ``invert=True`` the network output is treated as inverse depth
    and converted to depth via ``scale``/``shift``; otherwise the raw
    (inverse-depth) prediction is returned.
    """

    def __init__(self, path=None, non_negative=True, scale=1.0, shift=0.0, invert=False, **kwargs):
        # 'features' may be passed through kwargs to the DPT base; default 256.
        features = (kwargs['features'] if ('features' in kwargs) else 256)
        # Conversion parameters read by forward(); set before super().__init__
        # since only the head construction below needs `features`.
        self.scale = scale
        self.shift = shift
        self.invert = invert
        # Regression head: halve channels, 2x upsample, project to a single
        # depth channel; ReLU keeps the output non-negative when requested.
        head = nn.Sequential(nn.Conv2d(features, (features // 2), kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d((features // 2), 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), (nn.ReLU(True) if non_negative else nn.Identity()), nn.Identity())
        super().__init__(head, **kwargs)
        if (path is not None):
            self.load(path)

    def forward(self, x):
        """Return depth (or inverse depth when ``invert`` is False) for ``x``."""
        inv_depth = super().forward(x).squeeze(dim=1)
        if self.invert:
            depth = ((self.scale * inv_depth) + self.shift)
            # Clamp to avoid division by (near-)zero.
            depth[(depth < 1e-08)] = 1e-08
            depth = (1.0 / depth)
            return depth
        else:
            return inv_depth
def convert_document_to_read_ready_string(path_read, path_write, fname_without_suffix: str, grammar, rules=None, max_sent=40, data_name='dm', merge_sz=5, depth=3, topk=10, set_of_del=(1, 2)):
    """Convert one document/abstract JSON pair into a training-ready record.

    Reads ``<fname>.doc.json`` and ``<fname>.abs.json`` from ``path_read``,
    computes deletable spans per sentence (rule based) with ROUGE scores
    against the abstract, plus sentence-level oracles, and writes the
    resulting JSON record to ``<fname>.data`` in ``path_write``.

    Args:
        path_read/path_write: input and output directories.
        fname_without_suffix: basename shared by the .doc/.abs JSON files.
        grammar, merge_sz, depth, set_of_del: unused here; kept for
            interface compatibility with callers.
        rules: rule set forwarded to find_deletable_span_rule_based.
        max_sent: cap on the number of document sentences processed.
        data_name: dataset partition label stored in the record.
        topk: forwarded to comp_oracle_delete_one_unit.

    Raises:
        TypeError: if either input file is missing.

    Bug fix: ``set_of_del`` defaulted to a mutable list ``[1, 2]``; a tuple
    avoids the shared-mutable-default pitfall without changing callers.
    """
    doc_file = os.path.join(path_read, (fname_without_suffix + '.doc.json'))
    abs_file = os.path.join(path_read, (fname_without_suffix + '.abs.json'))
    if (not os.path.isfile(doc_file)) or (not os.path.isfile(abs_file)):
        raise TypeError(f'missing input file for {fname_without_suffix!r}')
    with open(doc_file, 'r') as fd:
        doc_str = fd.read()
    with open(abs_file, 'r') as fd:
        abs_str = fd.read()
    doc_dict = json.loads(doc_str)
    abs_dict = json.loads(abs_str)
    doc_parse = extract_parse(doc_dict)[:max_sent]
    (abs_token, abs_str) = extract_tokens(abs_dict)
    rt_sentences = []
    # Sentinel <SOS> sentence with a baseline (no-op) deletion entry.
    dft = {'sidx': 0, 'eidx': 1, 'node': 'BASELINE', 'rouge': 0, 'selected_idx': []}
    sent_sos_dict = {'token': ['<SOS>'], 'del_span': [dft], 'single_del': [dft], 'single_del_best': dft}
    rt_sentences.append(sent_sos_dict)
    for sent_parse in doc_parse:
        sent_tree = read_single_parse_tree(sent_parse)
        tree_len = len(sent_tree.text)
        rt_del_spans = []
        del_spans = find_deletable_span_rule_based(rules, sent_tree)
        for del_sp in del_spans:
            if (len(del_sp.text) < 2):
                continue
            # Word indices kept after deleting this span.
            full_set = set(range(len(sent_tree.text)))
            selected_set = list((full_set - set(range(del_sp.start_idx, del_sp.end_idx))))
            selected_set.sort()
            text_left = (sent_tree.text[0:del_sp.start_idx] + sent_tree.text[del_sp.end_idx:])
            _txt = ' '.join(text_left)
            # Score the remaining text against the abstract.
            _rouge1 = get_rouge_est_str_4gram(gold=abs_str, pred=_txt)
            if (len(selected_set) >= 2):
                rt_del_spans.append({'sidx': del_sp.start_idx, 'eidx': del_sp.end_idx, 'node': del_sp.tag, 'rouge': _rouge1, 'selected_idx': selected_set})
        # Always include the baseline span that drops only the last token.
        rt_del_spans.append({'sidx': (tree_len - 1), 'eidx': tree_len, 'node': 'BASELINE', 'rouge': get_rouge_est_str_4gram(gold=abs_str, pred=' '.join(sent_tree.text[:(- 1)])), 'selected_idx': list(range((tree_len - 1)))})
        (del_single_units, most_trash_single_unit) = comp_oracle_delete_one_unit(sent_tree, rt_del_spans, topk)
        sent_pack = {'token': sent_tree.text, 'del_span': rt_del_spans, 'single_del': del_single_units, 'single_del_best': most_trash_single_unit}
        rt_sentences.append(sent_pack)
    doc_list = [' '.join(x['token']) for x in rt_sentences]
    sent_ora_json = comp_document_oracle(doc_list, abs_str)
    # Assemble the output record.
    rt = {}
    rt['name'] = fname_without_suffix
    rt['part'] = data_name
    rt['abs'] = abs_str
    rt['abs_list'] = abs_token
    rt['doc'] = ' '.join(doc_list)
    rt['doc_list'] = [x['token'] for x in rt_sentences]
    rt['sentences'] = rt_sentences
    rt['sent_oracle'] = sent_ora_json
    json_rt = json.dumps(rt)
    with open(os.path.join(path_write, (fname_without_suffix + '.data')), 'w') as fd:
        fd.write(json_rt)
class ParallelTextAndSchemaCopyingPipeline(ParallelSchemaCopyingPipeline):
    """Pipeline whose decoder may copy target tokens from both the source
    text and the schema entries."""

    def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
        # Decoder that can copy from schema entries as well as source words.
        return copying_decoder.SchemaAndWordCopyingDecoder(tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token, delimiter=delimiter)

    def _get_copying_data_provider(self, target_files, **kwargs):
        # Data provider aligned with the schema-and-word copying decoder.
        return copying_data_provider.make_schema_and_word_copying_data_provider(self.params['source_files'], target_files, self.params['schema_loc_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)

    def label_keys(self):
        # NOTE(review): ``super(...).label_keys`` is accessed without calling
        # it — this only works if the parent exposes label_keys as a property
        # returning a set; confirm against ParallelSchemaCopyingPipeline.
        keys = super(ParallelTextAndSchemaCopyingPipeline, self).label_keys
        keys.update({'source_copy_indices'})
        return keys
def set_seed(seed):
    """Seed torch (CPU and CUDA), random and numpy; no-op when seed is None."""
    if seed is None:
        return
    # Each library keeps its own RNG state, so the seeding order is irrelevant.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def mnasnet1_3(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier 1.3; optionally loads pretrained weights."""
    model = MNASNet(1.3, **kwargs)
    if not pretrained:
        return model
    _load_pretrained('mnasnet1_3', model, progress)
    return model
def build_model(num_chars, embedding_vector_length, maxlen):
    """Stacked bidirectional-LSTM binary classifier over character sequences.

    Embeds ``maxlen``-long character sequences, passes them through two
    sequence-returning BiLSTMs and one summarizing BiLSTM, and ends in a
    single sigmoid unit.  Compiled with Adam on binary cross-entropy.
    """
    model = Sequential()
    model.add(Embedding(num_chars, embedding_vector_length, input_length=maxlen))
    for units, keep_sequences in ((256, True), (256, True), (128, False)):
        model.add(Bidirectional(LSTM(units, dropout=0.3, recurrent_dropout=0.3, return_sequences=keep_sequences)))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
def test_r2r_vln_dataset():
    """Smoke-test loading and serializing the R2R VLN val-seen split."""
    vln_config = get_config(CFG_TEST)
    if not r2r_vln_dataset.VLNDatasetV1.check_config_paths_exist(vln_config.DATASET):
        pytest.skip('Please download Matterport3D R2R dataset to data folder.')
    dataset = make_dataset(id_dataset=vln_config.DATASET.TYPE, config=vln_config.DATASET)
    assert dataset
    episode_count = len(dataset.episodes)
    assert episode_count == R2R_VAL_SEEN_EPISODES, 'Val Seen split episode number mismatch'
    check_json_serializaiton(dataset)
def efficientnet_b7b(in_size=(600, 600), **kwargs):
    """EfficientNet-B7b (TF-compatible mode, bn_eps=1e-3) at 600x600 input."""
    return get_efficientnet(
        version='b7',
        in_size=in_size,
        tf_mode=True,
        bn_eps=0.001,
        model_name='efficientnet_b7b',
        **kwargs)
def test_builtin_key_type():
    """All builtin names are plain str, whether __builtins__ is a module or a dict."""
    try:
        # Dict form: __builtins__ is the builtins mapping itself.
        keys = __builtins__.keys()
    except AttributeError:
        # Module form: read the names off its __dict__.
        keys = __builtins__.__dict__.keys()
    assert {type(k) for k in keys} == {str}
def standard_pade_coefficients(idx):
    """Return (numerator, denominator) coefficients of Pade approximant ``idx``.

    Coefficients are retrieved in standard double precision from PHCpack
    and returned as two lists of complex numbers, constant term first.
    """
    from phcpy.phcpy2c3 import py2c_padcon_standard_numerator_coefficient
    from phcpy.phcpy2c3 import py2c_padcon_standard_denominator_coefficient
    numdeg = get_degree_of_numerator()
    dendeg = get_degree_of_denominator()

    def fetch(getter, degree):
        # One coefficient per column, from degree 0 up to `degree`.
        coefficients = []
        for col in range(degree + 1):
            (real_part, imag_part) = getter(idx, col, 0)
            coefficients.append(complex(real_part, imag_part))
        return coefficients

    numcfs = fetch(py2c_padcon_standard_numerator_coefficient, numdeg)
    dencfs = fetch(py2c_padcon_standard_denominator_coefficient, dendeg)
    return (numcfs, dencfs)
class LegacySubMobileResnetGenerator(BaseNetwork):
    """Legacy sub-network variant of the mobile ResNet generator.

    Channel counts per stage are taken from ``config['channels']`` (a
    searched sub-network configuration) instead of a fixed ngf multiple.
    """

    def __init__(self, input_nc, output_nc, config, norm_layer=nn.BatchNorm2d, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        assert (n_blocks >= 0)
        super(LegacySubMobileResnetGenerator, self).__init__()
        # InstanceNorm has no affine bias by default, so convs need a bias.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        # Stem: reflection-padded 7x7 conv.
        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, config['channels'][0], kernel_size=7, padding=0, bias=use_bias), norm_layer(config['channels'][0]), nn.ReLU(True)]
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = (2 ** i)
            ic = config['channels'][i]
            oc = config['channels'][(i + 1)]
            # NOTE(review): the conv outputs (oc*mult)*2 channels but the norm
            # is built with (ic*mult)*2 — mismatched whenever ic != oc; this is
            # kept as-is ("Legacy") but should be confirmed against upstream.
            model += [nn.Conv2d((ic * mult), ((oc * mult) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(((ic * mult) * 2)), nn.ReLU(True)]
        mult = (2 ** n_downsampling)
        ic = config['channels'][2]
        # Residual trunk: output channels rotate through the config every 3 blocks.
        for i in range(n_blocks):
            if (len(config['channels']) == 6):
                offset = 0
            else:
                offset = (i // 3)
            if ((i % 3) == 0):
                oc = config['channels'][(offset + 3)]
            else:
                oc = config['channels'][n_downsampling]
            model += [MobileResnetBlock((ic * mult), (oc * mult), padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)]
        # Upsampling channel indices start after the trunk entries.
        if (len(config['channels']) == 6):
            offset = 4
        else:
            offset = 6
        for i in range(n_downsampling):
            oc = config['channels'][(offset + i)]
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose2d((ic * mult), int(((oc * mult) / 2)), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(int(((oc * mult) / 2))), nn.ReLU(True)]
            ic = oc
        # Output head: 7x7 conv to RGB, tanh to [-1, 1].
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ic, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Inputs are clamped into the tanh range before the forward pass.
        input = input.clamp((- 1), 1)
        return self.model(input)
def get_subsequent_mask(seq):
    """Build an additive causal (subsequent-position) attention mask.

    Given a batch of shape (batch, seq_len), returns a float tensor of shape
    (seq_len, seq_len) with 0.0 at allowed positions (j <= i) and -inf above
    the diagonal, suitable for adding to attention scores.
    """
    _, seq_len = seq.size()
    # Lower-triangular True = positions each step may attend to.
    visible = torch.tril(torch.ones(seq_len, seq_len)) == 1
    additive = torch.zeros(seq_len, seq_len)
    additive = additive.masked_fill(~visible, float('-inf'))
    return additive
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Build an mmdetection model from a config and optionally load a checkpoint.

    Args:
        config (str | Path | mmcv.Config): config file path or a loaded Config.
        checkpoint (str, optional): checkpoint path; if None the model keeps its
            fresh initialization.
        device (str): device the model is moved to.
        cfg_options (dict, optional): overrides merged into the config.

    Returns:
        The constructed detector in eval mode, with ``CLASSES`` and ``cfg``
        attributes attached.

    Raises:
        TypeError: if ``config`` is neither a path nor an ``mmcv.Config``.
    """
    if isinstance(config, (str, Path)):
        config = mmcv.Config.fromfile(config)
    elif (not isinstance(config, mmcv.Config)):
        raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
    if (cfg_options is not None):
        config.merge_from_dict(cfg_options)
    # Strip pretrained/init_cfg so building does not pull weights; the
    # checkpoint (if given) is the sole source of weights.
    if ('pretrained' in config.model):
        config.model.pretrained = None
    elif ('init_cfg' in config.model.backbone):
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if (checkpoint is not None):
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        # Prefer class names recorded in the checkpoint meta; else fall back to COCO.
        if ('CLASSES' in checkpoint.get('meta', {})):
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's meta data, use COCO classes by default.")
            model.CLASSES = get_classes('coco')
    # Keep the config on the model for downstream inference helpers.
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class Prediction():
    """Callable wrapper that runs a pretrained wav2vec model on a GPU.

    Calling an instance with a numpy waveform returns the model's two
    outputs (z, c) as numpy arrays with the batch dimension removed.
    """

    def __init__(self, fname, gpu=0):
        self.gpu = gpu
        self.model = PretrainedWav2VecModel(fname).cuda(gpu)

    def __call__(self, x):
        waveform = torch.from_numpy(x).float().cuda(self.gpu)
        batched = waveform.unsqueeze(0)
        with torch.no_grad():
            z, c = self.model(batched)
        z_np = z.squeeze(0).cpu().numpy()
        c_np = c.squeeze(0).cpu().numpy()
        return z_np, c_np
class TestBasicTuningStrategy(unittest.TestCase):
    """Smoke tests for the neural_compressor 'basic' tuning strategy against
    several fake YAML configurations and a fake constant graph."""

    @classmethod
    def setUpClass(cls):
        # Fix: unittest invokes setUpClass/tearDownClass on the CLASS, so they
        # must be classmethods; the original instance-method definitions were
        # called without an instance and failed.
        cls.constant_graph = build_fake_model()
        build_fake_yaml()
        build_fake_yaml2()
        build_fake_yaml3()
        build_fake_yaml4()
        build_fake_yaml_recipe()

    @classmethod
    def tearDownClass(cls):
        # Remove every fixture created in setUpClass.
        os.remove('fake_yaml.yaml')
        os.remove('fake_yaml2.yaml')
        os.remove('fake_yaml3.yaml')
        os.remove('fake_yaml4.yaml')
        os.remove('fake_yaml_recipe.yaml')
        shutil.rmtree('saved', ignore_errors=True)

    def test_run_basic_one_trial(self):
        """One tuning trial, then a second run resumed from the saved snapshot."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer.fit()
        quantizer.conf.usr_cfg.tuning.workspace.resume = 'saved/history.snapshot'
        quantizer.fit()

    def test_run_basic_max_trials(self):
        """Tuning bounded by the max-trials setting of fake_yaml2."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml2.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer.fit()

    def test_run_basic_recipe(self):
        """Tuning driven by the recipe configuration."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml_recipe.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer.fit()

    def test_run_basic_max_trials_multimetric(self):
        """Multi-metric objective (fake_yaml3)."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml3.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer.fit()

    def test_run_basic_max_trials_multimetric_weight(self):
        """Weighted multi-metric objective (fake_yaml4)."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml4.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer.fit()
def _dict_generator(nested_vals):
    """Yield dicts assembled key-by-key from parallel nested_generator streams.

    Iteration stops as soon as any per-key stream is exhausted (the
    StopIteration from next() is caught before it can leave the generator).
    """
    streams = {key: iter(nested_generator(value)) for key, value in nested_vals.items()}
    try:
        while True:
            yield {key: next(stream) for key, stream in streams.items()}
    except StopIteration:
        # Shortest stream ran dry — end the generator cleanly.
        pass
def log_results(results, dataset, main_logger, test=False):
    """Log cross-modal retrieval metrics (text->audio and audio->text) to the
    main logger and push the headline numbers to wandb.

    `results` maps 't2a'/'a2t' to (r1, r5, r10, r50, medr, meanr, mAP10)
    tuples; `test` selects the 'test' vs 'val' wandb prefix.
    """
    pre = 'test' if test else 'val'
    t2a = results['t2a']
    a2t = results['a2t']
    main_logger.info('{}: Caption to audio: r1: {:.2f}, r5: {:.2f}, r10: {:.2f}, r50: {:.2f}, medr: {:.2f}, meanr: {:.2f}, mAP10: {:.3f}'.format(dataset, *t2a))
    main_logger.info('{}: Audio to caption: r1: {:.2f}, r5: {:.2f}, r10: {:.2f}, r50: {:.2f}, medr: {:.2f}, meanr: {:.2f}, mAP10: {:.3f}'.format(dataset, *a2t))
    # mAP10 is stored last in each tuple.
    wandb.log({f'{dataset}:{pre}_t2a/r1': t2a[0], f'{dataset}:{pre}_t2a/r5': t2a[1], f'{dataset}:{pre}_t2a/r10': t2a[2], f'{dataset}:{pre}_t2a/mAP10': t2a[-1]})
    wandb.log({f'{dataset}:{pre}_a2t/r1': a2t[0], f'{dataset}:{pre}_a2t/r5': a2t[1], f'{dataset}:{pre}_a2t/r10': a2t[2], f'{dataset}:{pre}_a2t/mAP10': a2t[-1]})
def main():
    """Pymunk/pygame platformer demo.

    Builds a walled level with interior platforms, a moving platform on a
    looping path, a one-way pass-through ledge and a three-circle player
    body, then runs the game loop (input, physics, drawing) until ESC/Q or
    window close.

    Relies on module-level constants (width, height, dt, fps, JUMP_HEIGHT,
    JUMP_CUTOFF_VELOCITY, PLAYER_VELOCITY, PLAYER_GROUND_ACCEL,
    PLAYER_AIR_ACCEL, FALL_VELOCITY, PLATFORM_SPEED, HEAD_FRICTION) and
    helpers (draw, to_pygame, cpflerpconst).
    """
    pygame.init()
    screen = pygame.display.set_mode((width, height))
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.SysFont('Arial', 16)
    sound = pygame.mixer.Sound('sfx.wav')
    img = pygame.image.load('xmasgirl1.png')
    # Physics world.
    space = pymunk.Space()
    space.gravity = (0, (- 1000))
    # Level boundary: floor (split so three floor pieces can be recolored), walls, ceiling.
    static = [pymunk.Segment(space.static_body, (10, 50), (300, 50), 5), pymunk.Segment(space.static_body, (300, 50), (325, 50), 5), pymunk.Segment(space.static_body, (325, 50), (350, 50), 5), pymunk.Segment(space.static_body, (350, 50), (375, 50), 5), pymunk.Segment(space.static_body, (375, 50), (680, 50), 5), pymunk.Segment(space.static_body, (680, 50), (680, 370), 5), pymunk.Segment(space.static_body, (680, 370), (10, 370), 5), pymunk.Segment(space.static_body, (10, 370), (10, 50), 5)]
    static[1].color = pygame.color.THECOLORS['red']
    static[2].color = pygame.color.THECOLORS['green']
    static[3].color = pygame.color.THECOLORS['red']
    # A rounded corner and the interior platforms/ramps.
    rounded = [pymunk.Segment(space.static_body, (500, 50), (520, 60), 5), pymunk.Segment(space.static_body, (520, 60), (540, 80), 5), pymunk.Segment(space.static_body, (540, 80), (550, 100), 5), pymunk.Segment(space.static_body, (550, 100), (550, 150), 5)]
    platforms = [pymunk.Segment(space.static_body, (170, 50), (270, 150), 5), pymunk.Segment(space.static_body, (400, 150), (450, 150), 5), pymunk.Segment(space.static_body, (400, 200), (450, 200), 5), pymunk.Segment(space.static_body, (220, 200), (300, 200), 5), pymunk.Segment(space.static_body, (50, 250), (200, 250), 5), pymunk.Segment(space.static_body, (10, 370), (50, 250), 5)]
    for s in ((static + platforms) + rounded):
        s.friction = 1.0
        s.group = 1
    space.add(static, (platforms + rounded))
    # Moving platform that loops between three waypoints.
    platform_path = [(650, 100), (600, 200), (650, 300)]
    platform_path_index = 0
    platform_body = pymunk.Body(pymunk.inf, pymunk.inf)
    platform_body.position = (650, 100)
    s = pymunk.Segment(platform_body, ((- 25), 0), (25, 0), 5)
    s.friction = 1.0
    s.group = 1
    s.color = pygame.color.THECOLORS['blue']
    space.add(s)
    # One-way ledge: the collision handler only keeps the contact when the
    # player is falling onto it, so jumping up through it is possible.
    passthrough = pymunk.Segment(space.static_body, (270, 100), (320, 100), 5)
    passthrough.color = pygame.color.THECOLORS['yellow']
    passthrough.friction = 1.0
    passthrough.collision_type = 2
    passthrough.layers = (passthrough.layers ^ 8)
    space.add(passthrough)

    def passthrough_handler(space, arbiter):
        # Collide only while moving downwards.
        if (arbiter.shapes[0].body.velocity.y < 0):
            return True
        else:
            return False
    space.add_collision_handler(1, 2, begin=passthrough_handler)
    # Player: one body (infinite moment => no rotation) with head/feet circles.
    body = pymunk.Body(5, pymunk.inf)
    body.position = (100, 100)
    head = pymunk.Circle(body, 10, (0, 5))
    head2 = pymunk.Circle(body, 10, (0, 13))
    feet = pymunk.Circle(body, 10, (0, (- 5)))
    head.layers = head2.layers = 8
    feet.collision_type = 1
    feet.ignore_draw = head.ignore_draw = head2.ignore_draw = True
    space.add(body, head, feet, head2)
    direction = 1
    remaining_jumps = 2
    landing = {'p': Vec2d.zero(), 'n': 0}
    frame_number = 0
    landed_previous = False
    while running:
        # Scan this body's arbiters for the most upward-facing contact ("ground").
        grounding = {'normal': Vec2d.zero(), 'penetration': Vec2d.zero(), 'impulse': Vec2d.zero(), 'position': Vec2d.zero(), 'body': None}

        def f(arbiter):
            n = (- arbiter.contacts[0].normal)
            if (n.y > grounding['normal'].y):
                grounding['normal'] = n
                grounding['penetration'] = (- arbiter.contacts[0].distance)
                grounding['body'] = arbiter.shapes[1].body
                grounding['impulse'] = arbiter.total_impulse
                grounding['position'] = arbiter.contacts[0].position
        body.each_arbiter(f)
        # "Well grounded" means standing on a surface flat enough to stick to.
        well_grounded = False
        if ((grounding['body'] != None) and (abs((grounding['normal'].x / grounding['normal'].y)) < feet.friction)):
            well_grounded = True
            remaining_jumps = 2
        ground_velocity = Vec2d.zero()
        if well_grounded:
            ground_velocity = grounding['body'].velocity
        # Input handling.
        for event in pygame.event.get():
            if ((event.type == QUIT) or ((event.type == KEYDOWN) and (event.key in [K_ESCAPE, K_q]))):
                running = False
            elif ((event.type == KEYDOWN) and (event.key == K_p)):
                pygame.image.save(screen, 'platformer.png')
            elif ((event.type == KEYDOWN) and (event.key == K_d)):
                feet.ignore_draw = (not feet.ignore_draw)
                head.ignore_draw = (not head.ignore_draw)
                head2.ignore_draw = (not head2.ignore_draw)
            elif ((event.type == KEYDOWN) and (event.key == K_UP)):
                # Jump (with one mid-air double jump).
                if (well_grounded or (remaining_jumps > 0)):
                    jump_v = math.sqrt(((2.0 * JUMP_HEIGHT) * abs(space.gravity.y)))
                    body.velocity.y = (ground_velocity.y + jump_v)
                    remaining_jumps -= 1
            elif ((event.type == KEYUP) and (event.key == K_UP)):
                # Releasing jump early caps upward speed for variable jump height.
                body.velocity.y = min(body.velocity.y, JUMP_CUTOFF_VELOCITY)
        target_vx = 0
        if (body.velocity.x > 0.01):
            direction = 1
        elif (body.velocity.x < (- 0.01)):
            direction = (- 1)
        keys = pygame.key.get_pressed()
        if keys[K_LEFT]:
            direction = (- 1)
            target_vx -= PLAYER_VELOCITY
        if keys[K_RIGHT]:
            direction = 1
            target_vx += PLAYER_VELOCITY
        if keys[K_DOWN]:
            direction = (- 3)
        # Ground movement is driven through feet surface velocity + friction.
        feet.surface_velocity = (target_vx, 0)
        if (grounding['body'] != None):
            feet.friction = ((- PLAYER_GROUND_ACCEL) / space.gravity.y)
            # Fix: was 'head.friciton' (typo), so head friction was never applied.
            head.friction = HEAD_FRICTION
        else:
            (feet.friction, head.friction) = (0, 0)
        # Air control: lerp horizontal velocity toward the target.
        if (grounding['body'] == None):
            body.velocity.x = cpflerpconst(body.velocity.x, (target_vx + ground_velocity.x), (PLAYER_AIR_ACCEL * dt))
        # Terminal fall velocity.
        body.velocity.y = max(body.velocity.y, (- FALL_VELOCITY))
        # Move the platform toward its current waypoint at constant speed.
        destination = platform_path[platform_path_index]
        current = Vec2d(platform_body.position)
        distance = current.get_distance(destination)
        if (distance < PLATFORM_SPEED):
            platform_path_index += 1
            platform_path_index = (platform_path_index % len(platform_path))
            t = 1
        else:
            t = (PLATFORM_SPEED / distance)
        new = current.interpolate_to(destination, t)
        platform_body.position = new
        platform_body.velocity = ((new - current) / dt)
        # Drawing.
        screen.fill(pygame.color.THECOLORS['black'])
        for y in [50, 100, 150, 200, 250, 300]:
            color = pygame.color.THECOLORS['darkgrey']
            pygame.draw.line(screen, color, (10, y), (680, y), 1)
        draw(screen, space)
        # Sprite animation: pick frame row/column from direction and motion state.
        if feet.ignore_draw:
            direction_offset = (48 + ((((1 * direction) + 1) / 2) * 48))
            if ((grounding['body'] != None) and (abs(target_vx) > 1)):
                animation_offset = (32 * ((frame_number / 8) % 4))
            elif (grounding['body'] is None):
                animation_offset = (32 * 1)
            else:
                animation_offset = (32 * 0)
            position = (body.position + ((- 16), 28))
            screen.blit(img, to_pygame(position, screen), (animation_offset, direction_offset, 32, 48))
        # Landing effect: play sound and flash a marker on hard impacts.
        if (((abs(grounding['impulse'].y) / body.mass) > 200) and (not landed_previous)):
            sound.play()
            landing = {'p': grounding['position'], 'n': 5}
            landed_previous = True
        else:
            landed_previous = False
        if (landing['n'] > 0):
            pygame.draw.circle(screen, pygame.color.THECOLORS['yellow'], to_pygame(landing['p'], screen), 5)
            landing['n'] -= 1
        screen.blit(font.render(('fps: ' + str(clock.get_fps())), 1, THECOLORS['white']), (0, 0))
        screen.blit(font.render('Move with Left/Right, jump with Up, press again to double jump', 1, THECOLORS['darkgrey']), (5, (height - 35)))
        screen.blit(font.render('Press D to toggle sprite draw, ESC or Q to quit', 1, THECOLORS['darkgrey']), (5, (height - 20)))
        pygame.display.flip()
        frame_number += 1
        space.step(dt)
        clock.tick(fps)
class parentWrapperPotential():
    """Dispatching base class for wrapper potentials.

    ``__new__`` inspects the dimensionality of the wrapped potential ``pot``
    and dynamically synthesizes a subclass combining the matching wrapper base
    (planar for 2D, full for 3D) with the concrete wrapper class, so a single
    wrapper definition serves both cases.
    """

    def __new__(cls, *args, **kwargs):
        # Re-entrancy guard: when the synthesized subclass below is
        # instantiated, '_init' short-circuits to a plain allocation.
        if kwargs.pop('_init', False):
            return object.__new__(cls)
        pot = kwargs.get('pot', None)
        if (_dim(pot) == 2):
            parentWrapperPotential = planarWrapperPotential
        elif (_dim(pot) == 3):
            parentWrapperPotential = WrapperPotential
        else:
            raise ValueError('WrapperPotentials are only supported in 3D and 2D')
        kwargs['_init'] = True
        # Custom __reduce__ so pickling reconstructs via _new_obj with the
        # original class/arguments instead of the synthesized '_<cls>' type.
        reduce = (lambda self: (_new_obj, (cls, kwargs, args), self.__dict__))
        # Build '_<cls>' deriving from (wrapper base, cls); 'normalize' is
        # masked with an empty property, then the new type is instantiated.
        out = type.__new__(type, ('_%s' % cls.__name__), (parentWrapperPotential, cls), {'normalize': property(), '__reduce__': reduce})(*args, **kwargs)
        kwargs.pop('_init', False)
        # Run the concrete wrapper's __init__ on the synthesized instance.
        cls.__init__(out, *args, **kwargs)
        return out
class HGNN_conv(nn.Module):
def __init__(self, in_ft, out_ft, bias=True):
super(HGNN_conv, self).__init__()
self.weight = Parameter(torch.Tensor(in_ft, out_ft))
if bias:
self.bias = Parameter(torch.Tensor(out_ft))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = (1.0 / math.sqrt(self.weight.size(1)))
self.weight.data.uniform_((- stdv), stdv)
if (self.bias is not None):
self.bias.data.uniform_((- stdv), stdv)
def forward(self, x: torch.Tensor, G: torch.Tensor):
x = x.matmul(self.weight)
if (self.bias is not None):
x = (x + self.bias)
x = G.matmul(x)
return x |
def make_iterable(target, library='torch'):
    """Flatten an arbitrarily nested structure into a flat list.

    Tensors of the selected library are treated as leaves (not iterated into),
    as is any non-iterable object.

    Args:
        target: nested lists/tuples (or a single leaf) to flatten.
        library: 'torch' to treat torch tensors as leaves, anything else for
            tensorflow tensors.

    Returns:
        A flat list of leaves in depth-first order.
    """
    # Fix: import only the library actually requested — the original imported
    # BOTH tensorflow and torch, failing whenever the unused one was absent.
    if library == 'torch':
        import torch
        tensor_checker = torch.is_tensor
    else:
        import tensorflow as tf
        tensor_checker = tf.is_tensor

    def flatten(node):
        # Tensors and non-iterables are leaves; everything else is recursed into.
        if (not hasattr(node, '__iter__')) or tensor_checker(node):
            yield node
        else:
            for item in node:
                yield from flatten(item)
    return list(flatten(target))
class AdvCheckpointHook(CheckpointHook):
    """CheckpointHook variant saving a checkpoint every `interval` epochs."""

    def __init__(self, **kwargs):
        # Fix: was misspelled '__int__', so this initializer was never invoked
        # during construction.
        super(AdvCheckpointHook, self).__init__(**kwargs)

    # NOTE(review): the original class body contained a stray bare '_only'
    # statement here — almost certainly a mangled '@master_only' decorator.
    # Confirm whether after_train_epoch should run only on rank 0 in
    # distributed training and restore the decorator if so.
    def after_train_epoch(self, runner):
        """Save a checkpoint at the end of every `interval`-th epoch."""
        if (not self.every_n_epochs(runner, self.interval)):
            return
        # Default the output directory to the runner's work dir.
        if (not self.out_dir):
            self.out_dir = runner.work_dir
        runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args)
class MeanSigmaMetricLogger(object):
    """Metric logger that tracks both the mean and the standard deviation of
    every metric, by keeping one meter for values and one for squared values
    (sigma^2 = E[v^2] - E[v]^2)."""

    def __init__(self, delimiter='\t', meter_creator=SmoothedValue):
        from src.tools.logger import MetricLogger
        # Fix: honor the meter_creator argument — it was previously ignored
        # and SmoothedValue was hard-coded.
        self.mean_meters = MetricLogger(delimiter=delimiter, meter_creator=meter_creator)
        self.sq_meters = MetricLogger(delimiter=delimiter, meter_creator=meter_creator)

    def update(self, **kwargs):
        """Record each keyword value in the mean meters and its square in the
        squared-value meters."""
        self.mean_meters.update(**kwargs)
        self.sq_meters.update(**{k: (v * v) for k, v in kwargs.items()})

    def get_info(self):
        """Return per-metric summaries: name, global average, median, count
        and the derived standard deviation."""
        key_to_sigma = {}
        for (k, v) in self.mean_meters.meters.items():
            mean = v.global_avg
            mean_square = self.sq_meters.meters[k].global_avg
            sigma = (mean_square - (mean * mean))
            # Clamp tiny negative variances caused by floating-point error
            # before taking the square root.
            sigma = math.sqrt(max(sigma, 0.0))
            key_to_sigma[k] = sigma
        result = []
        for (name, mean_meter) in self.mean_meters.meters.items():
            result.append({'name': name, 'global_avg': mean_meter.global_avg, 'median': mean_meter.median, 'count': mean_meter.count, 'sigma': key_to_sigma[name]})
        return result

    def __str__(self):
        result = self.get_info()
        loss_str = []
        for info in result:
            loss_str.append('{}: {:.4f} ({:.4f}+-{:.4f})'.format(info['name'], info['median'], info['global_avg'], info['sigma']))
        return self.mean_meters.delimiter.join(loss_str)
def _make_np_bool(arr):
    """Coerce `arr` (scalar, list, or ndarray) into a boolean numpy array.

    A bare scalar is wrapped into a 1-element array; an ndarray that is
    already boolean is returned unchanged.
    """
    # Fix: np.bool (deprecated alias) was removed in NumPy 1.24 — use the
    # canonical np.bool_ dtype instead.
    if (not isinstance(arr, list)) and (not isinstance(arr, np.ndarray)):
        arr = np.asarray([arr]).astype(np.bool_)
    elif isinstance(arr, list):
        arr = np.asarray(arr).astype(np.bool_)
    elif (arr.dtype != np.bool_):
        arr = arr.astype(np.bool_)
    return arr
def test_digits_cosine_greedi_ln():
    """GreeDi optimizer with a lazy outer / naive inner optimizer on the
    digits + cosine setup must reproduce the precomputed reference selection.

    Relies on module-level fixtures: X_digits and the
    digits_cosine_greedi_ranking / digits_cosine_greedi_gains arrays.
    """
    model = SaturatedCoverageSelection(100, 'cosine', optimizer='greedi', optimizer_kwds={'optimizer1': 'lazy', 'optimizer2': 'naive'}, random_state=0)
    model.fit(X_digits)
    # Only the first two selections are pinned; later picks may legitimately
    # vary between optimizer configurations.
    assert_array_equal(model.ranking[:2], digits_cosine_greedi_ranking[:2])
    assert_array_almost_equal(model.gains[:2], digits_cosine_greedi_gains[:2], 4)
    # The stored subset must be exactly the selected rows of X_digits.
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class MyUnpickler(pickle.Unpickler):
    """Unpickler that remaps module and class names through PickleMapName
    before resolving them (e.g. to load pickles written under old names)."""

    def find_class(self, module, name):
        mapped_module = PickleMapName(module)
        mapped_name = PickleMapName(name)
        return pickle.Unpickler.find_class(self, mapped_module, mapped_name)
def main(argv):
    """CLI entry point: convert a json annotation file to a label image.

    Usage: [-h] [-t] <input.json> <output image>; -t writes trainIds
    instead of regular ids. Errors are routed through printError.
    """
    use_train_ids = False
    try:
        opts, args = getopt.getopt(argv, 'ht')
    except getopt.GetoptError:
        printError('Invalid arguments')
    for opt, _ in opts:
        if opt == '-h':
            printHelp()
            sys.exit(0)
        elif opt == '-t':
            use_train_ids = True
        else:
            printError("Handling of argument '{}' not implementend".format(opt))
    # Exactly two positional arguments are required.
    if len(args) == 0:
        printError('Missing input json file')
    elif len(args) == 1:
        printError('Missing output image filename')
    elif len(args) > 2:
        printError('Too many arguments')
    in_json = args[0]
    out_img = args[1]
    if use_train_ids:
        json2labelImg(in_json, out_img, 'trainIds')
    else:
        json2labelImg(in_json, out_img)
def make_data_loader(cfg):
    """Build train/val DataLoaders for video re-identification from a config.

    Returns:
        (train_loader, val_loader, num_query, num_classes), where num_query is
        len(dataset.query), used downstream to split the concatenated
        query+gallery validation set.

    Raises:
        NotImplementedError: for any cfg.MODEL.SETTING other than 'video'.
    """
    (train_spatial_transforms, _) = build_transforms_ST(cfg, is_train=True)
    (val_spatial_transforms, val_temporal_transforms) = build_transforms_ST(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS
    if (cfg.MODEL.SETTING == 'video'):
        dataset = init_dataset(cfg.DATASETS.NAMES[0], root=cfg.DATASETS.ROOT_DIR, min_seq_len=cfg.INPUT.MIN_SEQ_LEN, new_eval=cfg.TEST.NEW_EVAL)
    else:
        raise NotImplementedError()
    num_classes = dataset.num_train_pids
    if (cfg.MODEL.SETTING == 'video'):
        train_set = VideoDataset(dataset.train, cfg.INPUT.SEQ_LEN, cfg.INPUT.SAMPLE, train_spatial_transforms, None, mode='train')
    else:
        raise NotImplementedError()
    # 'softmax' sampler -> plain shuffling; otherwise a PK-style identity
    # sampler (worker_init_fn reseeds numpy so workers sample differently).
    if (cfg.DATALOADER.SAMPLER == 'softmax'):
        train_loader = DataLoader(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, shuffle=True, num_workers=num_workers, collate_fn=train_collate_fn)
    else:
        train_loader = DataLoader(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, worker_init_fn=(lambda _: np.random.seed()), sampler=RandomIdentitySampler_alignedreid(dataset.train, cfg.DATALOADER.NUM_INSTANCE), num_workers=num_workers, collate_fn=train_collate_fn, drop_last=True)
    # Validation concatenates query + gallery; the first len(query) entries
    # are the queries.
    if (cfg.MODEL.SETTING == 'video'):
        val_set = VideoDataset((dataset.query + dataset.gallery), cfg.INPUT.SEQ_LEN, cfg.INPUT.SAMPLE, val_spatial_transforms, val_temporal_transforms, mode=cfg.TEST.TEST_MODE)
    else:
        raise NotImplementedError()
    val_loader = DataLoader(val_set, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=False, num_workers=num_workers, collate_fn=val_collate_fn)
    return (train_loader, val_loader, len(dataset.query), num_classes)
class CLIPTextConfig(PretrainedConfig):
    """Configuration class for the CLIP text encoder.

    Stores the architecture hyper-parameters (vocabulary, hidden sizes,
    attention layout, activation, regularization and initializer settings)
    and forwards the special-token ids to PretrainedConfig.
    """

    model_type = 'clip_text_model'

    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Embedding / transformer dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        # Activation / normalization / regularization.
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        # Weight-initialization scaling.
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
class HumanUser(User):
    """Interactive User implementation that reads every reply from stdin."""

    def __init__(self):
        # Fix: was super(User, self).__init__(), which skipped User.__init__
        # entirely and initialized User's parent instead.
        super(HumanUser, self).__init__()

    def _prompt_response(self):
        # Fix: the original definition lacked 'self', so every call through
        # self._prompt_response() raised TypeError.
        # Re-prompt until a non-empty line is entered.
        response = None
        while (not response):
            response = input('USER> ')
        return response

    def init_dialog(self):
        """Open the dialog by asking the human for the first utterance."""
        return self._prompt_response()

    def generate_response(self, utterance):
        """Ignore the system utterance shown elsewhere and prompt the human."""
        return self._prompt_response()
class TestMaskedLM(unittest.TestCase):
    """End-to-end consistency check for MaskedLMTask masking: every <mask>
    position in the model input must hold the original token at the same
    position in the training target."""

    def test_masks_tokens(self):
        with TemporaryDirectory() as dirname:
            # Build a tiny corpus, binarize it, and load it as an mmap dataset.
            raw_file = os.path.join(dirname, 'raw')
            data = make_data(out_file=raw_file)
            vocab = build_vocab(data)
            binarizer = VocabularyDatasetBinarizer(vocab, append_eos=False)
            split = 'train'
            bin_file = os.path.join(dirname, split)
            FileBinarizer.multiprocess_dataset(input_file=raw_file, binarizer=binarizer, dataset_impl='mmap', vocab_size=len(vocab), output_prefix=bin_file)
            # random_token_prob=0 and leave_unmasked_prob=0 mean every masked
            # position is the literal <mask> token — which the check relies on.
            cfg = MaskedLMConfig(data=dirname, seed=42, mask_prob=0.5, random_token_prob=0, leave_unmasked_prob=0)
            task = MaskedLMTask(cfg, binarizer.dict)
            original_dataset = task._load_dataset_split(bin_file, 1, False)
            task.load_dataset(split)
            masked_dataset = task.dataset(split)
            mask_index = task.source_dictionary.index('<mask>')
            iterator = task.get_batch_iterator(dataset=masked_dataset, max_tokens=65536, max_positions=4096).next_epoch_itr(shuffle=False)
            for batch in iterator:
                for sample in range(len(batch)):
                    net_input = batch['net_input']
                    masked_src_tokens = net_input['src_tokens'][sample]
                    masked_src_length = net_input['src_lengths'][sample]
                    masked_tgt_tokens = batch['target'][sample]
                    sample_id = batch['id'][sample]
                    original_tokens = original_dataset[sample_id]
                    # Original tokens at the masked input positions...
                    original_tokens = original_tokens.masked_select((masked_src_tokens[:masked_src_length] == mask_index))
                    # ...must equal the non-pad tokens of the target.
                    masked_tokens = masked_tgt_tokens.masked_select((masked_tgt_tokens != task.source_dictionary.pad()))
                    assert masked_tokens.equal(original_tokens)
def preprocess(tbl):
    """Preprocess a tweet-feature table: fill missing media, cast bool/count
    columns to int, bucket count columns, and integer-encode categorical
    media / tweet-type columns.

    Relies on module-level column lists (bool_cols, count_cols, cat_cols) and
    encoding maps (media_map, type_map).
    """
    tbl = tbl.fillna('', 'present_media')
    tbl = tbl.cast((bool_cols + count_cols), 'int')
    # NOTE(review): the final bin edge '.0' evaluates to 0.0, which is smaller
    # than the preceding edges — presumably an upper bound (e.g. float('inf'))
    # was intended; confirm against the cut_bins implementation.
    tbl = tbl.cut_bins(columns=count_cols, bins=[1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, .0], out_cols=count_cols)
    if ('present_media' in cat_cols):
        # Keep only the first two tab-separated media types, joined by '_'.
        process_media = (lambda x: '_'.join(x.split('\t')[:2]))
        tbl = tbl.apply('present_media', 'present_media', process_media, 'string')
        tbl = tbl.encode_string('present_media', media_map)
    if ('tweet_type' in cat_cols):
        tbl = tbl.encode_string('tweet_type', type_map)
    return tbl
class SemiPrimalDualTrainer(SemiEntropyTrainer):
    """Semi-supervised trainer optimizing a primal-dual Lagrangian: the model
    (primal) and a dual multiplier ``mu`` are updated alternately, with
    ``mu`` driven by its own RAdam optimizer against the class-prior
    marginal constraint."""

    def __init__(self, model: Model, labeled_loader: DataLoader, unlabeled_loader: DataLoader, val_loader: DataLoader, max_epoch: int=100, save_dir: str='base', checkpoint_path: str=None, device='cpu', config: dict=None, max_iter: int=100, prior: Tensor=None, inverse_kl=False, **kwargs) -> None:
        super().__init__(model, labeled_loader, unlabeled_loader, val_loader, max_epoch, save_dir, checkpoint_path, device, config, max_iter, prior, inverse_kl, **kwargs)
        # Dual variable initialized at -1/prior (one multiplier per class).
        self.mu = nn.Parameter(((- 1.0) / self.prior))
        self.mu_optim = RAdam((self.mu,), lr=0.0001, betas=(0.5, 0.999))

    def __init_meters__(self) -> List[Union[(str, List[str])]]:
        """Register an extra 'residual' meter tracking |grad mu|."""
        columns = super().__init_meters__()
        self.METERINTERFACE.register_new_meter('residual', AverageValueMeter())
        columns.append('residual_mean')
        return columns

    def _trainer_specific_loss(self, unlab_img: Tensor, **kwargs) -> Tensor:
        """Primal step loss on unlabeled data: Lagrangian of the marginal
        constraint (mu detached) plus a 0.1-weighted conditional entropy."""
        unlab_img = unlab_img.to(self._device)
        unlabeled_preds = self._model(unlab_img)
        assert simplex(unlabeled_preds, 1)
        # Batch-mean prediction = empirical class marginal.
        marginal = unlabeled_preds.mean(0)
        lagrangian = (self.prior * (((marginal * self.mu.detach()) + 1) + (- self.mu.detach()).log())).sum()
        centropy = self.entropy(unlabeled_preds)
        self.METERINTERFACE['centropy'].add(centropy.item())
        lagrangian += (centropy * 0.1)
        return lagrangian

    def _update_mu(self, unlab_img: Tensor):
        """Dual step: ascend the Lagrangian in mu (predictions detached) and
        log the gradient residual plus the marginal KL to the prior."""
        self.mu_optim.zero_grad()
        unlab_img = unlab_img.to(self._device)
        unlabeled_preds = self._model(unlab_img).detach()
        assert simplex(unlabeled_preds, 1)
        marginal = unlabeled_preds.mean(0)
        # Negated objective: the optimizer minimizes, so this ascends in mu.
        lagrangian = ((- 1) * (self.prior * (((marginal * self.mu) + 1) + (- self.mu).log())).sum())
        lagrangian.backward()
        self.mu_optim.step()
        self.METERINTERFACE['residual'].add(self.mu.grad.abs().sum().item())
        marginal_loss = self.kl_criterion(marginal.unsqueeze(0), self.prior.unsqueeze(0), disable_assert=True)
        self.METERINTERFACE['marginal'].add(marginal_loss.item())

    def _train_loop(self, labeled_loader: DataLoader=None, unlabeled_loader: DataLoader=None, epoch: int=0, mode=ModelMode.TRAIN, *args, **kwargs):
        """One epoch: supervised KL on labeled batches + primal loss on
        unlabeled batches, followed by the dual mu update per iteration."""
        self._model.set_mode(mode)
        _max_iter = tqdm_(range(self.max_iter))
        _max_iter.set_description(f'Training Epoch {epoch}')
        self.METERINTERFACE['lr'].add(self._model.get_lr()[0])
        for (batch_num, (lab_img, lab_gt), (unlab_img, _)) in zip(_max_iter, labeled_loader, unlabeled_loader):
            (lab_img, lab_gt) = (lab_img.to(self._device), lab_gt.to(self._device))
            lab_preds = self._model(lab_img)
            sup_loss = self.kl_criterion(lab_preds, class2one_hot(lab_gt, C=self._model.torchnet.num_classes).float())
            reg_loss = self._trainer_specific_loss(unlab_img)
            self.METERINTERFACE['traloss'].add(sup_loss.item())
            self.METERINTERFACE['traconf'].add(lab_preds.max(1)[1], lab_gt)
            with ZeroGradientBackwardStep((sup_loss + reg_loss), self._model) as total_loss:
                total_loss.backward()
            # Dual update after each primal step.
            self._update_mu(unlab_img)
        report_dict = self._training_report_dict
        _max_iter.set_postfix(report_dict)
        print(f'Training Epoch {epoch}: {nice_dict(report_dict)}')
        self.writer.add_scalar_with_tag('train', report_dict, global_step=epoch)

    # NOTE(review): this is accessed without parentheses above and calls
    # super()._training_report_dict as a property — it likely lost a
    # '@property' decorator in transit; confirm against the parent class.
    def _training_report_dict(self):
        report_dict = super()._training_report_dict
        report_dict.update({'residual': self.METERINTERFACE['residual'].summary()['mean']})
        return report_dict
def _create_dummy_dict_file(dict_file):
    """Write a minimal 4-character dictionary ('0'..'3'), one char per line."""
    characters = [char for char in '0123']
    list_to_file(dict_file, characters)
# NOTE(review): the leading '.parametrize(...)' is a truncated decorator —
# almost certainly '@pytest.mark.parametrize'; restore the lost prefix.
.parametrize(['tree', 'i', 'element', 'expected_tree'], [((jnp.array([3, 6]),), 1, (1,), (jnp.array([3, 1]),)), ({'a': jnp.array([0, 1]), 'b': (jnp.array([(- 1), (- 1)]),)}, 0, {'a': 4, 'b': (2,)}, {'a': jnp.array([4, 1]), 'b': (jnp.array([2, (- 1)]),)})])
def test_tree_add_element(tree: T, i: chex.Numeric, element: T, expected_tree: T) -> None:
    """tree_add_element must write each leaf of `element` into index i of the
    corresponding leaf of `tree`."""
    assert_trees_are_equal(tree_add_element(tree, i, element), expected_tree)
# NOTE(review): the leading '.register(...)' is a truncated decorator (the
# registry object's name was lost in extraction); restore its prefix.
.register('ShuffleNetV2')
class ShuffleNetV2(BaseRecognizer):
    """Recognizer wrapper for a ShuffleNetV2 backbone."""

    def __init__(self, cfg):
        super().__init__(cfg)

    def _init_weights(self, cfg):
        """Load pretrained weights from a local path (preferred) or, if
        enabled, a remote URL map; the classifier head is loaded only when
        the pretrained class count matches the configured one."""
        pretrained_local = cfg.MODEL.RECOGNIZER.PRETRAINED_LOCAL
        pretrained_num_classes = cfg.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES
        num_classes = cfg.MODEL.HEAD.NUM_CLASSES
        load_pretrained_weights(self, cfg.MODEL.BACKBONE.ARCH, weights_path=(None if (pretrained_local == '') else pretrained_local), load_fc=(pretrained_num_classes == num_classes), verbose=True, url_map=(url_map if cfg.MODEL.RECOGNIZER.PRETRAINED_REMOTE else None))
def preprocess_tf(x):
    """Scale an image batch to [-1, 1] (x / 127.5 - 1), preserving shape.

    The flatten to (N*H*W, 3) broadcasts against the (1, 1, 1, 3) constant
    tensors; the result is reshaped back to (-1, H, W, C).
    """
    (batch, height, width, channels) = x.shape
    x = tf.cast(x, tf.float32)
    # Per-channel divisor (127.5) and offset (1.0), shaped for broadcasting.
    mean_tensor = np.asarray([[[[127.5, 127.5, 127.5]]]], dtype=np.float32)
    one_tensor = np.asarray([[[[1.0, 1.0, 1.0]]]], dtype=np.float32)
    x = tf.keras.backend.reshape(x, ((- 1), 3))
    result = ((x / mean_tensor) - one_tensor)
    return tf.keras.backend.reshape(result, ((- 1), height, width, channels))
def adjust_opt(optimizer, epoch):
    """Set the learning rate of every param group by piecewise-linear
    interpolation of the module-level (knots, vals) schedule at `epoch`."""
    new_lr = np.interp(epoch, knots, vals)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def _best_distance(a_feature, pos_features, squared_d_dists, d_max_squared, f_max_squared):
    """Per anchor, return the minimum squared difference between the scaled
    feature distances and the scaled reference distances over all positives
    (reduction along axis 1).

    NOTE(review): uses tf.squared_difference, which in TF2 moved to
    tf.math.squared_difference — confirm the TensorFlow version in use.
    """
    (scaled_d_dists, scaled_f_dists) = _scale_distances(a_feature, pos_features, squared_d_dists, d_max_squared, f_max_squared)
    squared_diffs = tf.squared_difference(scaled_f_dists, scaled_d_dists)
    return tf.reduce_min(squared_diffs, 1)
class MegaForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder (transformers dummy-object pattern): any
    instantiation raises a helpful error unless torch is installed."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises if the 'torch' backend is unavailable.
        requires_backends(self, ['torch'])
def convert_mzml_ipc(source: Path, target: Path, max_charge: int=10, use_old_schema: bool=False, verbose: bool=True) -> None:
    """Convert one mzML file (or a directory of them) into one polars IPC file.

    Only MS2 spectra whose precursor charge is <= max_charge are kept; each
    row holds precursor metadata plus the raw m/z and intensity arrays.

    Args:
        source: an mzML file or a directory containing mzML files.
        target: output IPC path (parent directories are created as needed).
        max_charge: spectra with a larger precursor charge are skipped.
        use_old_schema: emit MaxQuant-style column names instead of the new ones.
        verbose: log progress and show a per-spectrum progress bar.
    """
    schema = {'experiment_name': str, 'evidence_index': int, 'scan_number': int, 'sequence': str, 'modified_sequence': str, 'precursor_mass': float, 'precursor_mz': pl.Float64, 'precursor_charge': int, 'precursor_intensity': pl.Float64, 'retention_time': pl.Float64, 'mz_array': pl.List(pl.Float64), 'intensity_array': pl.List(pl.Float64)}
    if use_old_schema:
        # Legacy (MaxQuant-style) column names, same column order/meaning.
        schema = {'experiment_name': str, 'evidence_index': int, 'scan_number': int, 'Sequence': str, 'Modified sequence': str, 'Mass': float, 'MS/MS m/z': pl.Float64, 'Charge': int, 'precursor_intensity': pl.Float64, 'retention_time': pl.Float64, 'Mass values': pl.List(pl.Float64), 'Intensity': pl.List(pl.Float64)}
    df = pl.DataFrame(schema=schema)
    if source.is_file():
        filenames = [source]
    else:
        filenames = list(source.iterdir())
    for filepath in filenames:
        if (not filepath.suffix.lower().endswith('mzml')):
            logger.info(f'Skipping {filepath}... Not a mzml file...')
            continue
        if verbose:
            logger.info(f'Processing {filepath}...')
        if (not filepath.is_file()):
            if verbose:
                logger.warning('File not found, skipping...')
            continue
        exp = pyopenms.MSExperiment()
        pyopenms.MzMLFile().load(str(filepath), exp)
        evidence_index = 0
        exp_iter = iter(exp)
        if verbose:
            exp_iter = tqdm(exp_iter, total=len(exp.getSpectra()))
        data = []
        for spectrum in exp_iter:
            # Keep MS2 spectra only.
            if (spectrum.getMSLevel() != 2):
                continue
            (mz_array, int_array) = spectrum.get_peaks()
            precursor = spectrum.getPrecursors()[0]
            if (precursor.getCharge() > max_charge):
                continue
            # Scan number: last '=<digits>' group of the native spectrum ID.
            scan_id = int(re.findall('=(\\d+)', spectrum.getNativeID())[(- 1)])
            # Sequence columns stay blank ('' / '__'): spectra are unannotated here.
            data.append([filepath.stem, evidence_index, scan_id, '', ('' if (not use_old_schema) else '__'), precursor.getUnchargedMass(), precursor.getMZ(), precursor.getCharge(), precursor.getIntensity(), spectrum.getRT(), list(mz_array), list(int_array)])
            evidence_index += 1
        # NOTE(review): concatenating per file is O(n_files^2) in copies;
        # collecting frames in a list and concatenating once would scale better.
        df = pl.concat([df, pl.DataFrame(data, schema=schema)])
    Path(target).parent.mkdir(parents=True, exist_ok=True)
    df.write_ipc(target)
class Model():
    """Parameter record for one network's profiling model.

    NOTE(review): all fields are class attributes, so the mutable lists
    (act_popt, powers) are shared by every instance unless reassigned —
    confirm instances always overwrite them before use.
    """
    # Network identifier.
    name = 'alexnet'
    kernels = 29
    baseidle = 0.1
    # Fitted parameters — presumably populated elsewhere; confirm producer.
    act_popt = []
    l2cache = 0
    # Measurement samples — presumably populated elsewhere; confirm producer.
    powers = []
    k_l2 = 1
    tmpl2cache = 0
    transferdata = 1000

    def p(self):
        """Print a one-line summary of the main model parameters."""
        print(self.name, self.kernels, self.baseidle, self.act_popt, self.l2cache, self.k_l2)
class ToyConvNeXt(nn.Module):
    """Tiny ConvNeXt-shaped dummy module: four single-conv 'stages', four
    'downsample' layers, assorted norms and one-element parameters (with
    `lin` frozen). Attribute registration order matches the original so
    parameter/state-dict ordering is unchanged."""

    def __init__(self):
        super().__init__()
        # Four stages, each one 1x1 ConvModule.
        self.stages = nn.ModuleList(
            nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True)) for _ in range(4)
        )
        self.norm0 = nn.BatchNorm2d(2)
        # Single-element token/embedding parameters.
        self.cls_token = nn.Parameter(torch.ones(1))
        self.mask_token = nn.Parameter(torch.ones(1))
        self.pos_embed = nn.Parameter(torch.ones(1))
        self.stem_norm = nn.Parameter(torch.ones(1))
        self.downsample_norm0 = nn.BatchNorm2d(2)
        self.downsample_norm1 = nn.BatchNorm2d(2)
        self.downsample_norm2 = nn.BatchNorm2d(2)
        # Frozen parameter.
        self.lin = nn.Parameter(torch.ones(1))
        self.lin.requires_grad = False
        # Four downsample layers, each one plain 1x1 Conv2d.
        self.downsample_layers = nn.ModuleList(
            nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True)) for _ in range(4)
        )
class _cuda_SO3_mm(torch.autograd.Function):
    """Complex block "matrix multiply" of SO(3)-spectral tensors via generated
    CUDA kernels.

    Shapes (last dim of size 2 holds real/imag parts):
        x: (nspec, nbatch, nfeature_in, 2)
        y: (nspec, nfeature_in, nfeature_out, 2)
        output: (nspec, nbatch, nfeature_out, 2)
    nspec must equal nl * (4*nl^2 - 1) / 3 (enforced by assertion).
    """

    def forward(ctx, x, y):
        """Forward product computed by a kernel configured with conjugated,
        spectrally-transposed y."""
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # Recover the bandwidth nl from nspec = nl*(4*nl^2 - 1)/3 and verify.
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        # Kernel writes the product into 'output' in place.
        cuda_kernel(x, y, output)
        return output

    def backward(ctx, gradz):
        """Gradients w.r.t. x and/or y via two further generated kernels,
        built only for the inputs that actually require grad."""
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # Same bandwidth recovery/check as in forward.
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        if ctx.needs_input_grad[0]:
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build the multi-input Keras model over CDR3 sequences.

    Inputs: per-instance CDR3 features, a per-instance quantity, plus scalar
    age, sample weight and aggregation level (each with batch_shape=[1]).
    Four logit streams (CDR3 alignment, sequence length, abundance, age) are
    each normalized by aggregation statistics, summed, aggregated and
    flattened into the model output.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    level = Input(batch_shape=[1])
    # Zero-valued time steps are treated as padding downstream.
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    # BatchExpand presumably broadcasts the scalar age to match the abundance
    # tensor's batch dimension — confirm against the layer implementation.
    features_age = BatchExpand()([feature_age, features_abundance])
    # Per-instance weights: sample weight scaled by the instance quantity.
    weights_instance = Multiply()([weight, features_quantity])
    # Stream 1: alignment logits over the masked CDR3 features.
    logits_cdr3 = Alignment(num_outputs, filter_size, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    # Stream 2: dense logits from the normalized sequence length.
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    # Stream 3: dense logits from the normalized abundance.
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    # Stream 4: dense logits from the normalized age.
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    # Combine the four streams, aggregate over instances and flatten.
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
# NOTE(review): the next line looks like a stripped '@_registry(...)'
# decorator — confirm against the original source.
_registry(pattern_type='TextEncoder_AttentionReshape')
class TextEncoder_AttentionReshape(Pattern):
    """Graph-fusion pattern that collapses the Shape/Gather/Unsqueeze/Concat
    chain feeding an attention Reshape + MatMulWithBias into two plain
    Reshape nodes with static dst_shape attributes."""
    def __call__(self, model):
        """Apply the fusion to *model* in place and return it."""
        # Declarative match/replace spec consumed by util.pattern_mapping:
        # 'in' lists the node chains to match, 'out' the replacement topology,
        # and the tensor maps describe how inputs/outputs are re-wired.
        pattern_mapping_config = {'TextEncoder_AttentionReshape': [{'patterns': {'in': [[(0, 'Shape'), (1, 'Gather'), (2, 'Unsqueeze'), (9, 'Concat'), (10, 'Reshape'), (11, 'MatMulWithBias')], [(), (3, 'Shape'), (4, 'Gather'), (5, 'Unsqueeze'), (9, 'Concat')], [(), (6, 'Shape'), (7, 'Gather'), (8, 'Unsqueeze'), (9, 'Concat')]], 'out': [[(0, 'Reshape'), (1, 'Reshape'), (1, 'MatMulWithBias')]]}, 'search_mode': 'op_type', 'node_names': {0: 10, 1: 'TextEncoder_AttentionReshape/reshape_to_2D', 2: 11}, 'input_tensors': {0: [[{10: [0]}, {'input_data': [0]}], [[0, 1], 2]], 1: [[{'input_data': [0]}], [[1], 2]], 2: [[{11: [1]}, {11: [2]}], [[1, 2], 3]]}, 'output_tensors': {0: [[{10: [0]}], [[0], 1]], 1: [[], [[], 1]], 2: [[{11: [0]}], [[0], 1]]}, 'returns': [9, 11]}]}
        def _set_attr(node_names, model):
            # First new Reshape keeps three dynamic dims; second flattens to
            # 2-D for the following matmul.
            attr = OrderedDict()
            attr['dst_shape'] = '-1,-1,-1'
            attr['dims'] = '0, 1'
            reshape_node_idx = model.get_node_id(node_names[0])
            model.nodes[reshape_node_idx].attr = attr
            attr_1 = OrderedDict()
            attr_1['dst_shape'] = '-1,-1'
            attr_1['dims'] = 1
            reshape_node_idx = model.get_node_id(node_names[1])
            model.nodes[reshape_node_idx].attr = attr_1
        for i in range(len(pattern_mapping_config['TextEncoder_AttentionReshape'])):
            pattern_dict = pattern_mapping_config['TextEncoder_AttentionReshape'][i]
            (model, new_node_names, ret_old_nodes) = util.pattern_mapping('TextEncoder_AttentionReshape', pattern_dict, model)
            if (len(new_node_names) != 0):
                logger.info('TextEncoder_AttentionReshape mathched...')
                logger.debug('TextEncoder_AttentionReshape = {}'.format(new_node_names))
                for j in range(len(new_node_names)):
                    if (len(ret_old_nodes[j]) == 2):
                        _set_attr(new_node_names[j], model)
                        # Carry the original MatMulWithBias attributes over to
                        # the fused node.
                        assert (ret_old_nodes[j][1].op_type == 'MatMulWithBias')
                        mat_node_idx = model.get_node_id(new_node_names[j][2])
                        model.nodes[mat_node_idx].attr = ret_old_nodes[j][1].attr
        return model
def _get_train_val_test_data(corpus, batch_size):
    """Batchify the train/valid/test splits of *corpus* with the given batch size."""
    splits = (corpus.train, corpus.valid, corpus.test)
    return [_batchify(split, batch_size) for split in splits]
def construct_primitive_prompt(summary, objects):
    """Render the few-shot primitive prompt for *summary* and *objects*.

    Two hard-coded examples are followed by the caller's summary and a
    Python-list-style rendering of the object names.
    """
    primitive_prompt_template = '# Summary: Pick and place clothes, pick and toss snacks.\nobjects = ["granola bar", "hat", "toy car", "Lego brick", "fruit snacks", "shirt"]\npick_and_toss("granola bar")\npick_and_place("hat")\npick_and_place("toy car")\npick_and_place("Lego brick")\npick_and_toss("fruit snacks")\npick_and_place("shirt")\n\n# Summary: Pick and place granola bars, hats, toy cars, and Lego bricks, pick and toss fruit snacks and shirts.\nobjects = ["clothing", "snack"]\npick_and_place("clothing")\npick_and_toss("snack")\n\n# Summary: {summary}\nobjects = {objects_str}'
    quoted = ['"{}"'.format(obj) for obj in objects]
    objects_str = '[{}]'.format(', '.join(quoted))
    return primitive_prompt_template.format(summary=summary, objects_str=objects_str)
class AlexNet(nn.Module):
    """AlexNet backbone with one linear classification head per task.

    forward returns a list with one logit tensor per task in ``args.taskcla``.
    """
    def __init__(self, args):
        super(AlexNet, self).__init__()
        self.taskcla = args.taskcla
        self.features = AlexNetFeature(args)
        # Feature dimensionality is taken from the backbone's last FC layer.
        self.last_dim = self.features.fc2.out_features
        heads = [nn.Linear(self.last_dim, n_classes) for (_, n_classes) in self.taskcla]
        self.classifier = nn.ModuleList(heads)
    def forward(self, x):
        feat = self.features(x)
        # One output per task head, all computed from the shared features.
        return [head(feat) for head in self.classifier]
class TestTensorflowGpu(unittest.TestCase):
    """Checks that the Intel quantizer's GPU path rewrites weight Const nodes
    to HostConst, and that importing neural_compressor does not clobber the
    configured logging levels."""
    # NOTE(review): the URL literal below appears truncated/redacted in this
    # copy of the file (unterminated string) — restore it from the original
    # source before running.
    mb_model_url = '
    pb_path = '/tmp/.neural_compressor/mobilenet_fp32.pb'
    platforms = platform.system().lower()
    if (platforms == 'windows'):
        pb_path = 'C:\\tmp\\.neural_compressor\\mobilenet_fp32.pb'
    # NOTE(review): missing @classmethod — unittest invokes setUpClass on the
    # class, so without the decorator 'cls' would not be bound; the decorator
    # was probably stripped during extraction.
    def setUpClass(cls):
        # Block accidental import of intel_extension_for_pytorch in this test.
        sys.meta_path.insert(0, ForbiddenModules({'intel_extension_for_pytorch'}))
        # Download the FP32 MobileNet model once, per platform.
        if (not os.path.exists(cls.pb_path)):
            if (cls.platforms == 'linux'):
                os.system('mkdir -p /tmp/.neural_compressor && wget {} -O {} '.format(cls.mb_model_url, cls.pb_path))
            elif (cls.platforms == 'windows'):
                os.system('md C:\\tmp\\.neural_compressor && cd C:\\tmp\\.neural_compressor')
                from urllib import request
                request.urlretrieve(cls.mb_model_url)
        # Record logger state so the test can verify importing
        # neural_compressor does not change it.
        cls.log_env = os.environ.get('LOGLEVEL')
        cls.logger_root = logging.getLogger()
        cls.logger_nc = logging.getLogger('neural_compressor')
        cls.logger_root.setLevel(logging.CRITICAL)
        cls.logger_nc.setLevel(logging.DEBUG)
        cls.logger_root_level = cls.logger_root.level
        cls.logger_nc.warning(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}")
        cls.logger_nc.warning(f'Environment variable: LOGLEVEL = {cls.log_env}')
        cls.logger_nc.warning(f'Before importing neural_compressor: {sys.modules[__name__].__file__}-{cls.__name__}, Root_Logger_Level = {cls.logger_root.level}')
        cls.logger_nc.warning(f'Before importing neural_compressor: {sys.modules[__name__].__file__}-{cls.__name__}, NC_Logger_Level = {cls.logger_nc.level}')
        import neural_compressor
        from neural_compressor.adaptor.tensorflow import TensorflowQuery
        # Quantization patterns come from the adaptor's bundled YAML config.
        cls.op_wise_sequences = TensorflowQuery(local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), 'adaptor/tensorflow.yaml')).get_eightbit_patterns()
        cls.logger_nc.warning(f'After importing neural_compressor: {sys.modules[__name__].__file__}-{cls.__name__}, Root_Logger_Level = {cls.logger_root.level}')
        cls.logger_nc.warning(f'After importing neural_compressor: {sys.modules[__name__].__file__}-{cls.__name__}, NC_Logger_Level = {cls.logger_nc.level}')
    def test_tensorflow_gpu_conversion(self):
        """Quantize one conv for the 'gpu' device and check its weight
        min/max inputs were converted to HostConst ops."""
        from neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter import PostHostConstConverter
        from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel
        from neural_compressor.adaptor.tf_utils.util import read_graph
        input_graph_def = read_graph(self.pb_path)
        input_node_names = ['Placeholder']
        output_node_names = ['MobilenetV1/Predictions/Reshape_1']
        # Quantize just this one conv: (per_channel?, algo, fake_quant?, scale)
        # — confirm the exact tuple semantics against QuantizeGraphForIntel.
        op_wise_config = {'MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D': (False, 'minmax', False, 7.0)}
        tf.compat.v1.disable_eager_execution()
        converter = QuantizeGraphForIntel(input_graph_def, input_node_names, output_node_names, op_wise_config, self.op_wise_sequences, 'gpu')
        (converted_pb, _, _) = converter.do_transform()
        hostconst_pb = PostHostConstConverter(converted_pb).do_transformation()
        target_node_name = 'MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Conv2D_eightbit_quantized_conv'
        node_details = {}
        for i in hostconst_pb.node:
            node_details[i.name] = i
        converted_flag = (True if (target_node_name in node_details) else False)
        self.assertEqual(converted_flag, True)
        target_node = node_details[target_node_name]
        # The last two inputs of the quantized conv are the weight min/max.
        weights_min_node = node_details[target_node.input[(- 2)]]
        weights_max_node = node_details[target_node.input[(- 1)]]
        self.assertEqual(weights_max_node.op, 'HostConst')
        self.assertEqual(weights_min_node.op, 'HostConst')
        # Importing neural_compressor must not have altered logger levels.
        self.assertEqual(self.logger_root.level, self.logger_root_level)
        if self.log_env:
            self.assertEqual(logging.getLevelName(self.logger_nc.level), self.log_env)
        else:
            self.assertEqual(self.logger_nc.level, 20)
class LearnedPositionalEmbedding(nn.Embedding):
    """Learned positional embedding with padding-aware position numbering.

    Positions are computed from the token ids via utils.make_positions,
    offset past padding_idx (fairseq convention), then looked up in the
    underlying nn.Embedding table.
    """
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        # Toggled externally when exporting to ONNX; forwarded to
        # utils.make_positions.
        self.onnx_trace = False
    def forward(self, input, incremental_state=None, positions=None):
        """Embed the positions of *input* (token ids, [batch, seq])."""
        assert ((positions is None) or (self.padding_idx is None)), 'If positions is pre-computed then padding_idx should not be set.'
        if (positions is None):
            if (incremental_state is not None):
                # Incremental decoding: only the newest token's position is
                # needed; it equals padding_idx + current sequence length.
                positions = input.data.new(1, 1).fill_(int((self.padding_idx + input.size(1))))
            else:
                positions = utils.make_positions(input.data, self.padding_idx, onnx_trace=self.onnx_trace)
        return super().forward(positions)
    def max_positions(self):
        """Maximum number of supported positions."""
        if (self.padding_idx is not None):
            # Position ids start after padding_idx, so those slots are lost.
            return ((self.num_embeddings - self.padding_idx) - 1)
        else:
            return self.num_embeddings
    def _forward(self, positions):
        # Direct lookup for already-computed position ids.
        return super().forward(positions)
def _is_tpu_tensor(tensor):
    """Return True iff *tensor* is a TF tensor without the outside-compilation
    attribute (i.e. presumably produced inside the TPU computation).

    The try/except is deliberately inverted: get_attr raising ValueError means
    the attribute is ABSENT, so the tensor is treated as a TPU tensor; a
    successful get_attr means the op was marked for outside compilation and
    the function returns False.
    """
    if (not isinstance(tensor, ops.Tensor)):
        return False
    try:
        tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR)
    except ValueError:
        # Attribute absent -> regular TPU-side tensor.
        return True
    else:
        # Attribute present -> explicitly outside compilation.
        return False
def convert_dataset(data_dir, tfrecords_dir, tfrecords_name, redo_matching=True, remove_zeros=True, policy='autopilot'):
    """Convert collected driving datasets under *data_dir* into one TFRecord.

    Frames are matched to control inputs (per *policy*), labels are loaded,
    and each readable image is serialized into
    <tfrecords_dir>/<tfrecords_name>.  Unreadable images are skipped with a
    warning instead of aborting the whole conversion.

    Args:
        data_dir: root directory containing one sub-directory per dataset.
        tfrecords_dir: output directory (created if missing).
        tfrecords_name: output TFRecord file name.
        redo_matching: recompute the frame/control matching even if cached.
        remove_zeros: drop frames whose control inputs are all zero.
        policy: 'autopilot' or 'point_goal_nav'.

    Raises:
        Exception: if *policy* is not one of the supported values.
    """
    print(f'Reading dataset from {data_dir}')
    print(f'TFRecord will be saved at {tfrecords_dir}/{tfrecords_name}')
    # Each policy caches its matched frames under a different file name.
    # NOTE(review): this name is not used later in this function — possibly
    # vestigial, but the policy validation below is load-bearing, so keep it.
    if (policy == 'autopilot'):
        processed_frames_file_name = 'matched_frame_ctrl_cmd_processed.txt'
    elif (policy == 'point_goal_nav'):
        processed_frames_file_name = 'matched_frame_ctrl_goal_processed.txt'
    else:
        raise Exception('Unknown policy')
    datasets = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    print('Number of Datasets Available: ', len(datasets))
    # Maximum timestamp offset tolerated when matching frames to control
    # inputs — unit unclear from here, TODO confirm with associate_frames.
    max_offset = 1000.0
    frames = associate_frames.match_frame_ctrl_input(data_dir, datasets, max_offset, redo_matching=redo_matching, remove_zeros=remove_zeros, policy=policy)
    if (not os.path.exists(tfrecords_dir)):
        os.makedirs(tfrecords_dir)
    samples = load_labels(data_dir, datasets, policy)
    with tf.io.TFRecordWriter(os.path.join(tfrecords_dir, tfrecords_name)) as writer:
        for (image_path, ctrl_input) in samples.items():
            try:
                image = tf.io.decode_jpeg(tf.io.read_file(image_path))
                if (policy == 'autopilot'):
                    example = tfrecord_utils.create_example_autopilot(image, image_path, ctrl_input)
                elif (policy == 'point_goal_nav'):
                    example = tfrecord_utils.create_example_point_goal_nav(image, image_path, ctrl_input)
                writer.write(example.SerializeToString())
            # BUG FIX: the original bare 'except:' swallowed every exception,
            # including KeyboardInterrupt/SystemExit, and hid the real cause.
            except Exception as exc:
                print(f'Oops! Image {image_path} cannot be found. ({exc})')
    print('TFRecord file created successfully.')
def _sgdr_learning_rate(name):
    """Hyperopt prior over SGD learning-rate schedules (invscaling favored)."""
    schedule_choices = [(0.5, 'invscaling'), (0.25, 'optimal'), (0.25, 'constant')]
    return hp.pchoice(name, schedule_choices)
def interactively_kill_instances(instance_killer):
    """Interactive loop: list running instances and kill one/all on request.

    Prompts until the user quits ('q'), interrupts (Ctrl-C / EOF), or no
    instances remain.  'a' kills all listed instances; a 1-based number kills
    that single instance.

    Args:
        instance_killer: object providing get_running_instances() and
            kill_instances(list).
    """
    while True:
        instances = instance_killer.get_running_instances()
        if not instances:
            print('No instances to kill!')
            return
        print('Active instances:')
        for (i, instance) in enumerate(instances):
            print('{}) {}'.format((i + 1), format_instance(instance)))
        try:
            res = input('Enter a number to kill that instance, "a" to kill all, or "q" to exit: ')
        except (KeyboardInterrupt, EOFError):
            return
        res = res.strip()
        if not res:
            # BUG FIX: pressing Enter on an empty line used to raise
            # IndexError on res[0]; just re-prompt instead.
            continue
        if (res[0].lower() == 'q'):
            return
        elif (res[0].lower() == 'a'):
            instance_killer.kill_instances(instances)
        else:
            try:
                index_to_kill = int(res)
            except ValueError:
                print('"{}" is not an integer.'.format(res))
                continue
            if ((index_to_kill < 1) or (index_to_kill > len(instances))):
                # BUG FIX: the message previously reported len(instances) + 1
                # as the upper bound (off by one).
                print('{} is not between 1 and {}.'.format(index_to_kill, len(instances)))
                continue
            instance_to_kill = instances[(index_to_kill - 1)]
            instance_killer.kill_instances([instance_to_kill])
def run_uncertainty(image_folder):
    """Compute voxel- and structure-wise uncertainty maps for each subject.

    Scans *image_folder* for Monte-Carlo prediction samples
    ('<subj>_pred_*.nii.gz'), combines them into a hard and a soft
    prediction, then derives voxelwise and structurewise uncertainty files.
    Subjects that already have a '<subj>_unc-cv.nii.gz' output are skipped.
    """
    # One entry per subject/acquisition, derived from '<subj>_pred*.nii.gz' names.
    subj_acq_lst = [file.name.split('_pred')[0] for file in Path(image_folder).iterdir() if (file.name.endswith('.nii.gz') and ('_pred' in file.name))]
    subj_acq_lst = list(set(subj_acq_lst))
    # Skip subjects whose cross-validation uncertainty output already exists.
    subj_acq_lst = [file for file in subj_acq_lst if (not Path(image_folder, (file + '_unc-cv.nii.gz')).is_file())]
    for subj_acq in tqdm(subj_acq_lst, desc='Uncertainty Computation'):
        fname_pred: Path = Path(image_folder, (subj_acq + '_pred.nii.gz'))
        fname_soft: Path = Path(image_folder, (subj_acq + '_soft.nii.gz'))
        # Individual MC sample files; '_painted'/'_color' variants are
        # visualisations, not samples, so they are excluded.
        fname_pred_lst: List[str] = []
        for file in Path(image_folder).iterdir():
            if (((subj_acq + '_pred_') in file.name) and ('_painted' not in file.name) and ('_color' not in file.name)):
                fname_pred_lst.append(str(file))
        if ((not fname_pred.is_file()) or (not fname_soft.is_file())):
            # Threshold 1/n: a voxel is kept if at least one of the n MC
            # samples predicted it.
            thr = (1.0 / len(fname_pred_lst))
            combine_predictions(fname_pred_lst, str(fname_pred), str(fname_soft), thr=thr)
        fname_unc_vox = Path(image_folder, (subj_acq + '_unc-vox.nii.gz'))
        if (not fname_unc_vox.is_file()):
            voxelwise_uncertainty(fname_pred_lst, str(fname_unc_vox))
        fname_unc_struct = Path(image_folder, (subj_acq + '_unc.nii.gz'))
        if (not Path(image_folder, (subj_acq + '_unc-cv.nii.gz')).is_file()):
            structurewise_uncertainty(fname_pred_lst, str(fname_pred), str(fname_unc_vox), str(fname_unc_struct))
def make_chain():
    """Random walk over the module-level transition table, starting at state 1
    and stopping when the terminal state (states[-1]) is reached."""
    chain = [1]
    terminal = states[(- 1)]
    while chain[(- 1)] != terminal:
        options = transitions[chain[(- 1)]]
        pick = np.random.randint(len(options))
        chain.append(options[pick])
    return chain
# NOTE(review): the next line is probably a stripped '@_legacy_interface'
# decorator — confirm against the original source.
_legacy_interface(weights=('pretrained', EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(*, weights: Optional[EfficientNet_B6_Weights]=None, progress: bool=True, **kwargs: Any) -> EfficientNet:
    """EfficientNet-B6 constructor (width 1.8x, depth 2.6x, dropout 0.5).

    Args:
        weights: optional pretrained weights (validated via verify()).
        progress: show a download progress bar when fetching weights.
        **kwargs: forwarded to the EfficientNet builder.
    """
    weights = EfficientNet_B6_Weights.verify(weights)
    (inverted_residual_setting, last_channel) = _efficientnet_conf('efficientnet_b6', width_mult=1.8, depth_mult=2.6)
    # Non-default BatchNorm eps/momentum are part of the EfficientNet recipe.
    return _efficientnet(inverted_residual_setting, 0.5, last_channel, weights, progress, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01), **kwargs)
def print_dag_chart(dag_file: str, path: str, dag: str):
    """Render an Airflow DAG file to a PNG via graphviz 'dot'.

    Runs '<dag_file> output-dot' and pipes the result through dot, writing
    '<path>/<dag>_pic.png'.

    Args:
        dag_file: path to the DAG definition script.
        path: output directory for the picture.
        dag: DAG name used to build the picture file name.

    Returns:
        The generated picture's file name (not its full path).
    """
    import shlex  # local import: keeps the file's top-level imports untouched
    dag_pic = '{}_{}.png'.format(dag, 'pic')
    out_path = '{}/{}'.format(path, dag_pic)
    # BUG FIX: quote interpolated paths so spaces or shell metacharacters
    # cannot break (or inject into) the os.system shell pipeline.
    cmd = 'python {} output-dot | dot -Tpng -o {}'.format(shlex.quote(dag_file), shlex.quote(out_path))
    os.system(cmd)
    return dag_pic
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Run a DDPG experiment with rlpyt under the given CPU/GPU affinity.

    The base config selected by *config_key* is overridden by the variant
    stored in *log_dir* before building the sampler, algorithm, agent and
    runner; training output is logged under *log_dir*/*run_ID*.
    """
    affinity = affinity_from_code(slot_affinity_code)
    config = configs[config_key]
    # Merge the launch-time variant (read from the log dir) over the base config.
    variant = load_variant(log_dir)
    config = update_config(config, variant)
    # Serial sampling with CPU reset collectors; env built via gym_make.
    sampler = SerialSampler(EnvCls=gym_make, env_kwargs=config['env'], CollectorCls=CpuResetCollector, eval_env_kwargs=config['env'], **config['sampler'])
    algo = DDPG(optim_kwargs=config['optim'], **config['algo'])
    agent = DdpgAgent(**config['agent'])
    runner = MinibatchRlEval(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    name = ('ddpg_' + config['env']['id'])
    with logger_context(log_dir, run_ID, name, config):
        runner.train()
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = (nn.BatchNorm2d(out_planes, eps=1e-05, momentum=0.01, affine=True) if bn else None)
self.relu = (nn.PReLU() if relu else None)
def forward(self, x):
x = self.conv(x)
if (self.bn is not None):
x = self.bn(x)
if (self.relu is not None):
x = self.relu(x)
return x |
def parse_xml(args):
    """Parse one Pascal-VOC style annotation XML into an annotation dict.

    Args:
        args: tuple (xml_path, img_path) — packed as one argument so the
              function can be mapped over a pool of workers.

    Returns:
        dict with 'filename', 'width', 'height' and an 'ann' dict holding
        float32 bboxes / int64 labels, split into regular objects and
        'difficult' objects (the *_ignore arrays).
    """
    (xml_path, img_path) = args
    tree = ET.parse(xml_path)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    bboxes = []
    labels = []
    # Objects flagged 'difficult' are collected separately so evaluation can
    # ignore them.
    bboxes_ignore = []
    labels_ignore = []
    for obj in root.findall('object'):
        name = obj.find('name').text
        # label_ids is a module-level class-name -> id mapping (defined
        # elsewhere in this file).
        label = label_ids[name]
        difficult = int(obj.find('difficult').text)
        bnd_box = obj.find('bndbox')
        bbox = [int(bnd_box.find('xmin').text), int(bnd_box.find('ymin').text), int(bnd_box.find('xmax').text), int(bnd_box.find('ymax').text)]
        if difficult:
            bboxes_ignore.append(bbox)
            labels_ignore.append(label)
        else:
            bboxes.append(bbox)
            labels.append(label)
    if (not bboxes):
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0,))
    else:
        # VOC coordinates are 1-based; subtract 1 for 0-based pixel coords.
        bboxes = (np.array(bboxes, ndmin=2) - 1)
        labels = np.array(labels)
    if (not bboxes_ignore):
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0,))
    else:
        bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
        labels_ignore = np.array(labels_ignore)
    annotation = {'filename': img_path, 'width': w, 'height': h, 'ann': {'bboxes': bboxes.astype(np.float32), 'labels': labels.astype(np.int64), 'bboxes_ignore': bboxes_ignore.astype(np.float32), 'labels_ignore': labels_ignore.astype(np.int64)}}
    return annotation
def compute_in_batches(f, calc_batch_size, *args, n=None):
    """Apply *f* to *args* in slices of *calc_batch_size* along dim 0 and
    concatenate the per-batch results.

    Supports functions returning a tensor or a tuple of tensors; tuple
    entries that are None in every batch stay None in the result.

    Args:
        f: callable applied to each batch of sliced args.
        calc_batch_size: maximum slice length along dimension 0.
        *args: tensors, all sliced identically.
        n: total length; defaults to args[0].size(0).
    """
    if n is None:
        n = args[0].size(0)
    num_batches = -(-n // calc_batch_size)  # ceiling division
    if num_batches == 1:
        # Single batch: no slicing or concatenation needed.
        return f(*args)
    per_batch = []
    for b in range(num_batches):
        lo = b * calc_batch_size
        hi = lo + calc_batch_size
        per_batch.append(f(*(arg[lo:hi] for arg in args)))
    def _cat(chunks, dim=0):
        # All-None tuple slots are propagated as None.
        if chunks[0] is None:
            assert all((chunk is None) for chunk in chunks)
            return None
        return torch.cat(chunks, dim)
    if isinstance(per_batch[0], tuple):
        return tuple(_cat(group, 0) for group in zip(*per_batch))
    return _cat(per_batch, 0)
def iob2bio(iob_labels):
    """Convert IOB1 labels to BIO (IOB2) labels.

    A 'B-' prefix is forced whenever an 'I-' tag opens a new entity: either
    right after an 'O', or after a tag of a different entity type.
    """
    converted = []
    prev = 'O'
    for cur in iob_labels:
        # Equivalent to: (prev O and cur I) or (prev non-O, cur I, types differ)
        opens_entity = (cur[0] == 'I') and ((prev[0] == 'O') or (prev[2:] != cur[2:]))
        converted.append(('B' + cur[1:]) if opens_entity else cur)
        prev = cur
    return converted
def plot_pictures(indexes: list, images=all_images, labels=all_labels):
    """Show the dataset pictures at *indexes* side by side with their labels.

    Args:
        indexes: dataset indices (each must be < 10000) of images to show.
        images: image tensors, CHW layout (default: module-level all_images).
        labels: label ids indexing into labels_names (default: all_labels).
    """
    num_pics = len(indexes)
    (_, axarr) = plt.subplots(1, num_pics)
    # BUG FIX: with a single subplot, plt.subplots returns a bare Axes (not
    # an array), so axarr[idx] raised TypeError; normalize to a 1-D array.
    axarr = np.atleast_1d(axarr)
    for (idx, im_idx) in enumerate(indexes):
        # BUG FIX: the bound applies to the dataset index (im_idx), not the
        # loop position (idx), which the original checked by mistake.
        assert (im_idx < 10000), 'Cannot get such index, there are only 10000'
        # CHW -> HWC for imshow.
        pic = np.rollaxis(images[im_idx].squeeze().numpy(), 0, 3)
        axarr[idx].imshow(pic)
        axarr[idx].set_title(labels_names[labels[im_idx]])
def convert_size(size_bytes: int):
    """Format a byte count as a human-readable string (e.g. '1.5 KB').

    Uses 1024-based units; 0 is special-cased as '0B'.
    """
    if size_bytes == 0:
        return '0B'
    units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return '%s %s' % (scaled, units[exponent])
def bloom_tokenize(ctx: c_void_p, prompt: bytes, bos: bool=False) -> List[int]:
    """Tokenize *prompt* with the native tokenize_api and free the C buffer.

    Args:
        ctx: opaque native context pointer.
        prompt: UTF-8 encoded prompt bytes.
        bos: whether to prepend a beginning-of-sequence token.

    Returns:
        The token ids copied out of the native result buffer.
    """
    count = c_int(0)
    raw = _lib.tokenize_api(ctx, prompt, bos, pointer(count))
    result = [raw[i] for i in range(count.value)]
    # The native side allocated the buffer; release it after copying.
    c_free(raw)
    return result
def load_config(path: Union[(Path, str)]='configs/default.yaml') -> Dict:
    """Read a YAML configuration file and return its contents as a dict."""
    cfg_path = Path(path) if isinstance(path, str) else path
    with cfg_path.open('r', encoding='utf-8') as ymlfile:
        return yaml.safe_load(ymlfile)
def cc(net):
    """Move *net* to the GPU when CUDA is available, else keep it on the CPU."""
    target = 'cuda' if torch.cuda.is_available() else 'cpu'
    return net.to(torch.device(target))
def train_and_eval():
    """Build the dataset, train the model, then evaluate on the test split."""
    dataset = build_dataset(batch_size)
    (train_generator, test_generator, train_size, test_size, input_num, dims_num) = dataset
    print('train_size {}, test_size {}, input_num {}, dims_num {}'.format(train_size, test_size, input_num, dims_num))
    train(train_generator, train_size, input_num, dims_num)
    test(model_dir, test_generator, test_size, input_num, dims_num, batch_size)
def init_device(args, local_rank):
    """Select the compute device for *local_rank* and record the GPU count.

    Also validates that both batch sizes divide evenly across the GPUs.

    Returns:
        (device, n_gpu) tuple.
    """
    global logger
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu', local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    bad_train = (args.batch_size % args.n_gpu) != 0
    bad_val = (args.batch_size_val % args.n_gpu) != 0
    if bad_train or bad_val:
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
def diaresnet20_svhn(num_classes=10, **kwargs):
    """DIA-ResNet-20 for SVHN (20 layers, non-bottleneck blocks)."""
    return get_diaresnet_cifar(blocks=20, bottleneck=False, model_name='diaresnet20_svhn', num_classes=num_classes, **kwargs)
def _sync_variables_ops():
    """Return one check_numerics op per trainable variable (NaN guards)."""
    guard_ops = []
    for v in variables.trainable_variables():
        check = array_ops.check_numerics(v.read_value(), ('Gradient for %s is NaN' % v.name))
        guard_ops.append(check.op)
    return guard_ops
def initialize_weights(shape, name, init_type, gain='1.0', divisor=1.0):
    """Create a TF variable named *name* of *shape*, initialized per *init_type*.

    Supported init_type values: 'random', 'xavier', 'identity', 'varscale',
    'orthogonal'.  For convolution kernels, shape is assumed to be
    (H, W, in_channels, out_channels).

    NOTE(review): an unrecognized init_type falls through all branches and
    returns None — callers must pass a valid value.
    """
    if (init_type == 'random'):
        return tf.get_variable(name, initializer=tf.truncated_normal(shape, stddev=0.1))
    if (init_type == 'xavier'):
        return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer())
    if (init_type == 'identity'):
        # Centre tap of the spatial kernel.
        middle0 = int((shape[0] / 2))
        middle1 = int((shape[1] / 2))
        if (shape[2] == shape[3]):
            # Square channel mapping: exact identity at the spatial centre.
            array = np.zeros(shape, dtype='float32')
            identity = np.eye(shape[2], shape[3])
            array[(middle0, middle1)] = identity
        else:
            # Rectangular mapping: approximate identity that spreads channels
            # onto matching buckets, plus small Gaussian noise.
            m1 = (divisor / shape[2])
            m2 = (divisor / shape[3])
            # NOTE(review): 'eps' is not defined in this function — this line
            # raises NameError unless 'eps' is a module-level global; confirm
            # against the original file.
            sigma = (eps * m2)
            array = np.random.normal(loc=0, scale=sigma, size=shape).astype('float32')
            for i in range(shape[2]):
                for j in range(shape[3]):
                    if (int((i * m1)) == int((j * m2))):
                        array[(middle0, middle1, i, j)] = m2
        return tf.get_variable(name, initializer=array)
    if (init_type == 'varscale'):
        return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.variance_scaling_initializer())
    if (init_type == 'orthogonal'):
        # gain may be the string 'relu' (-> sqrt(2)); any other value maps to 1.0.
        gain = (np.sqrt(2) if (gain == 'relu') else 1.0)
        array = np.zeros(shape, dtype='float32')
        # Orthogonal rows from the SVD of a random matrix, placed only at the
        # centre tap of axis 1.
        random = np.random.normal(0.0, 1.0, (shape[2], shape[3])).astype('float32')
        (u, _, v_t) = np.linalg.svd(random, full_matrices=False)
        middle = int((shape[1] / 2))
        array[(0, middle)] = (gain * v_t)
        return tf.get_variable(name, initializer=array)
class CocoDistEvalMRHook(DistEvalHook):
    """Distributed eval hook computing Miss-Rate (MR) metrics on COCO-style
    pedestrian data after each evaluation interval."""
    def __init__(self, dataset, interval=1, res_types=['bbox']):
        # NOTE(review): mutable default argument ['bbox'] is shared across
        # instances — harmless only as long as res_types is never mutated.
        super().__init__(dataset, interval)
        self.res_types = res_types
    def evaluate(self, runner, results):
        """Dump *results* to JSON, score them with COCOMReval, and write the
        eight MR setups into the runner's log buffer."""
        tmp_file = osp.join(runner.work_dir, 'temp_0')
        result_files = results2json(self.dataset, results, tmp_file)
        cocoGt = self.dataset.coco
        imgIds = cocoGt.getImgIds()
        for res_type in self.res_types:
            assert (res_type in ['bbox', 'vis_bbox'])
            try:
                cocoDt = cocoGt.loadRes(result_files['bbox'])
            except IndexError:
                # loadRes raises IndexError on an empty result list.
                print('No prediction found.')
                break
            # Eight evaluation setups (occlusion/size subsets), one per id_setup.
            metrics = ['MR_Reasonable', 'MR_Small', 'MR_Middle', 'MR_Large', 'MR_Bare', 'MR_Partial', 'MR_Heavy', 'MR_R+HO']
            cocoEval = COCOMReval(cocoGt, cocoDt, res_type)
            cocoEval.params.imgIds = imgIds
            for id_setup in range(0, 8):
                cocoEval.evaluate(id_setup)
                cocoEval.accumulate()
                cocoEval.summarize(id_setup)
                key = '{}'.format(metrics[id_setup])
                val = float('{:.3f}'.format(cocoEval.stats[id_setup]))
                runner.log_buffer.output[key] = val
            # Single-line copy-paste summary of all eight MR values.
            runner.log_buffer.output['{}_MR_copypaste'.format(res_type)] = '{mr[0]:.3f} {mr[1]:.3f} {mr[2]:.3f} {mr[3]:.3f} {mr[4]:.3f} {mr[5]:.3f} {mr[6]:.3f} {mr[7]:.3f} '.format(mr=cocoEval.stats[:8])
        runner.log_buffer.ready = True
        # Clean up the temporary json result file.
        os.remove(result_files['bbox'])
def contract(a, sequence, axis=0, dimension=None):
    """Sum-contract slices of *a* along *axis* according to *sequence*.

    Slice i of *a* along *axis* is accumulated into slice sequence[i] of the
    output, so several input slices may sum into the same output slice.

    Args:
        a: input ndarray.
        sequence: for each input index along *axis*, the output index it
            contributes to.
        axis: axis to contract along (negative values supported).
        dimension: size of the output along *axis*; defaults to
            max(sequence) + 1 and must be at least that large.

    Returns:
        ndarray shaped like *a* except along *axis*, where it has size
        *dimension*.
    """
    shape = np.array(a.shape)
    # Generalized from the original, which only handled axis == -1.
    if (axis < 0):
        axis += a.ndim
    axis_dimension = (np.amax(sequence) + 1)
    if (dimension is None):
        dimension = axis_dimension
    else:
        assert (dimension >= axis_dimension), 'Target dimension too small.'
    shape[axis] = dimension
    out = np.zeros(shape)
    src = [slice(None)] * a.ndim
    dst = [slice(None)] * a.ndim
    for (i, j) in enumerate(sequence):
        dst[axis] = j
        src[axis] = i
        # BUG FIX: index with tuples — indexing with a *list* of slices was
        # deprecated and then removed in NumPy (>= 1.23).
        out[tuple(dst)] += a[tuple(src)]
    return out
def clear_quad_double_track_data(vrblvl=0):
    """Deallocate the quad double tracking data via phc job code 248.

    Args:
        vrblvl: verbosity level; > 0 prints progress messages.

    Returns:
        The return value of the phc call.
    """
    if vrblvl > 0:
        print('in clear_quad_double_track_data ...')
    phc = get_phcfun()
    apar = pointer(c_int32(0))
    bpar = pointer(c_int32(0))
    cpar = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> clear_quad_double_track_data calls phc', end='')
    retval = phc(248, apar, bpar, cpar, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return retval
def generate_weights_batch(n_dim, delta_weight):
    """Enumerate weight vectors on the n_dim simplex at step delta_weight.

    Delegates the recursion to generate_weights_batch_dfs and returns the
    collected vectors as one ndarray.
    """
    collected = []
    generate_weights_batch_dfs(0, n_dim, 0.0, 1.0, delta_weight, [], collected)
    return np.array(collected)
# NOTE(review): the next line is almost certainly a stripped
# '@pytest.mark.parametrize' decorator — confirm against the original file.
.parametrize('kwargs', [dict(embedding_sizes=(10, 10, 10)), dict(embedding_sizes=((10, 3), (10, 2), (10, 1))), dict(x_categoricals=['x1', 'x2', 'x3'], embedding_sizes=dict(x1=(10, 10))), dict(x_categoricals=['x1', 'x2', 'x3'], embedding_sizes=dict(x1=(10, 2), xg1=(10, 3)), categorical_groups=dict(xg1=['x2', 'x3']))])
def test_MultiEmbedding(kwargs):
    """MultiEmbedding returns a dict of per-feature tensors for a dict-style
    embedding spec and a single tensor for a tuple/list-style spec."""
    # 4 samples x 3 categorical features, ids in [0, 10).
    x = torch.randint(0, 10, size=(4, 3))
    embedding = MultiEmbedding(**kwargs)
    assert (embedding.input_size == x.size(1)), 'Input size should be equal to number of features'
    out = embedding(x)
    if isinstance(out, dict):
        # Dict spec -> dict output, one tensor per named feature/group.
        assert isinstance(kwargs['embedding_sizes'], dict)
        for (name, o) in out.items():
            assert (o.size(1) == embedding.output_size[name]), 'Output size should be equal to number of embedding dimensions'
    elif isinstance(out, torch.Tensor):
        # Tuple/list spec -> single tensor; width is the summed embedding dims.
        assert isinstance(kwargs['embedding_sizes'], (tuple, list))
        assert (out.size(1) == embedding.output_size), 'Output size should be equal to number of summed embedding dimensions'
    else:
        raise ValueError(f'Unknown output type {type(out)}')
def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000):
    """Instantiate the MobileNet v1 architecture.

    Args:
        input_shape: optional input shape tuple; inferred/defaulted to a
            224-sized square when omitted.
        alpha: width multiplier controlling the number of filters per layer.
        depth_multiplier: depthwise-convolution channel multiplier.
        dropout: dropout rate before the classification conv (top only).
        include_top: whether to include the classification head.
        weights: None, 'imagenet', or a path to a weights file.
        input_tensor: optional existing Keras tensor to build upon.
        pooling: 'avg'/'max' global pooling applied when include_top is False.
        classes: number of output classes (must be 1000 for the imagenet top).

    Returns:
        A Keras Model.

    Raises:
        ValueError: on invalid weights/alpha/depth_multiplier/shape combos.
    """
    # --- argument validation -------------------------------------------------
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as ImageNet with `include_top` as true, `classes` should be 1000')
    # Pick the default size from the input shape when it matches one of the
    # pretrained resolutions; otherwise fall back to 224.
    if (input_shape is None):
        default_size = 224
    else:
        if (K.image_data_format() == 'channels_first'):
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if (rows in [128, 160, 192, 224]):
            default_size = rows
        else:
            default_size = 224
    input_shape = _obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=K.image_data_format(), require_flatten=include_top, weights=weights)
    if (K.image_data_format() == 'channels_last'):
        (row_axis, col_axis) = (0, 1)
    else:
        (row_axis, col_axis) = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    # Pretrained imagenet weights exist only for specific alpha values,
    # depth_multiplier == 1 and square inputs of the listed sizes.
    if (weights == 'imagenet'):
        if (depth_multiplier != 1):
            raise ValueError('If imagenet weights are being loaded, depth multiplier must be 1')
        if (alpha not in [0.25, 0.5, 0.75, 1.0]):
            raise ValueError('If imagenet weights are being loaded, alpha can be one of`0.25`, `0.50`, `0.75` or `1.0` only.')
        if (rows not in [128, 160, 192, 224]):
            if (rows is None):
                rows = 224
                logging.warning('MobileNet shape is undefined. Weights for input shape (224, 224) will be loaded.')
            else:
                raise ValueError(('If imagenet weights are being loaded, input must have a static square shape (one of (128, 128), (160, 160), (192, 192), or (224, 224)). Input shape provided = %s' % (input_shape,)))
    # MobileNet only supports channels_last; temporarily switch the backend
    # data format and restore it at the end.
    if (K.image_data_format() != 'channels_last'):
        logging.warning('The MobileNet family of models is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None
    if (input_tensor is None):
        img_input = Input(shape=input_shape)
    elif (not K.is_keras_tensor(input_tensor)):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # --- backbone: one standard conv + 13 depthwise-separable blocks ---------
    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
    # --- classification head (or optional global pooling) --------------------
    if include_top:
        if (K.image_data_format() == 'channels_first'):
            shape = (int((1024 * alpha)), 1, 1)
        else:
            shape = (1, 1, int((1024 * alpha)))
        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        # 1x1 conv acts as the fully-connected classifier.
        x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes,), name='reshape_2')(x)
    elif (pooling == 'avg'):
        x = GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = GlobalMaxPooling2D()(x)
    # Track back to the original input tensor if one was supplied.
    if (input_tensor is not None):
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name=('mobilenet_%0.2f_%s' % (alpha, rows)))
    # --- weight loading ------------------------------------------------------
    if (weights == 'imagenet'):
        if (K.image_data_format() == 'channels_first'):
            raise ValueError('Weights for "channels_first" format are not available.')
        if (alpha == 1.0):
            alpha_text = '1_0'
        elif (alpha == 0.75):
            alpha_text = '7_5'
        elif (alpha == 0.5):
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'
        if include_top:
            model_name = ('mobilenet_%s_%d_tf.h5' % (alpha_text, rows))
            weigh_path = (BASE_WEIGHT_PATH + model_name)
            weights_path = get_file(model_name, weigh_path, cache_subdir='models')
        else:
            model_name = ('mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows))
            weigh_path = (BASE_WEIGHT_PATH + model_name)
            weights_path = get_file(model_name, weigh_path, cache_subdir='models')
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    # Restore the caller's backend data format if we changed it above.
    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
def swap_layer_connection(old_layer: Layer, new_layer: Layer) -> None:
    """Rewire the Keras graph so *new_layer* takes over *old_layer*'s inbound
    and outbound connections.

    For every node attached to *old_layer*, an equivalent Node is created for
    *new_layer* (Node construction registers itself with the layers it
    touches), and the stale nodes referencing *old_layer* are removed from
    the neighbouring layers.
    """
    inbound_layers = set()
    for node in old_layer._inbound_nodes:
        # Constructing the Node registers new_layer with the inbound layers.
        Node(new_layer, node.inbound_layers, node.node_indices, node.tensor_indices, node.input_tensors, node.output_tensors, node.input_masks, node.output_masks, node.input_shapes, node.output_shapes)
        # BUG FIX: set.union returns a NEW set and the original discarded the
        # result, so inbound_layers stayed empty and the stale-node cleanup
        # below never ran; update() mutates in place.
        inbound_layers.update(set(node.inbound_layers))
    for layer in inbound_layers:
        # BUG FIX: materialize before removing — filter() is lazy, and
        # removing from the underlying list while iterating it skips entries.
        old_nodes = [n for n in layer._outbound_nodes if (n.outbound_layer == old_layer)]
        for n in old_nodes:
            layer._outbound_nodes.remove(n)
    outbound_layers = set()
    for node in old_layer._outbound_nodes:
        # Replace every occurrence of old_layer in the node's input layers.
        layers = list(node.inbound_layers)
        while (old_layer in layers):
            idx = layers.index(old_layer)
            layers[idx] = new_layer
        Node(node.outbound_layer, layers, node.node_indices, node.tensor_indices, node.input_tensors, node.output_tensors, node.input_masks, node.output_masks, node.input_shapes, node.output_shapes)
        outbound_layers.add(node.outbound_layer)
    for layer in outbound_layers:
        # Same lazy-filter fix as above, for the inbound side.
        old_nodes = [n for n in layer._inbound_nodes if (old_layer in n.inbound_layers)]
        for n in old_nodes:
            layer._inbound_nodes.remove(n)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.