code stringlengths 101 5.91M |
|---|
@register_model
def ssl_resnet50(pretrained=True, **kwargs):
    """Construct a semi-supervised (SSL) pretrained ResNet-50.

    NOTE(review): the bare ``_model`` line in the original is the stripped
    remains of the ``@register_model`` decorator (timm convention); it is
    restored here so the model is registered with the factory.

    Args:
        pretrained: if True, load the pretrained SSL weights.
        **kwargs: forwarded to the ResNet constructor (e.g. num_classes,
            in_chans).

    Returns:
        The constructed ResNet model with ``default_cfg`` attached.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    model.default_cfg = default_cfgs['ssl_resnet50']
    if pretrained:
        # NOTE(review): upstream timm passes default_cfg and uses 1000 as the
        # num_classes fallback here; this variant uses 0 -- confirm intended.
        load_pretrained(model, num_classes=kwargs.get('num_classes', 0), in_chans=kwargs.get('in_chans', 3))
    return model
def preprocess_point_cloud(pcd, voxel_size):
    """Voxel-downsample a point cloud, then estimate normals and FPFH features.

    Returns a (downsampled_cloud, fpfh_features) tuple; search radii are
    derived from the voxel size (2x for normals, 5x for FPFH).
    """
    print(':: Downsample with a voxel size %.3f.' % voxel_size)
    downsampled = pcd.voxel_down_sample(voxel_size)

    normal_radius = voxel_size * 2
    print(':: Estimate normal with search radius %.3f.' % normal_radius)
    downsampled.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=normal_radius, max_nn=30))

    feature_radius = voxel_size * 5
    print(':: Compute FPFH feature with search radius %.3f.' % feature_radius)
    fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        downsampled,
        o3d.geometry.KDTreeSearchParamHybrid(radius=feature_radius, max_nn=100))
    return (downsampled, fpfh)
class PreTrainedTokenizer(object):
    """Base class for tokenizers: special-token handling, vocabulary
    loading/saving, and bookkeeping for tokens added at runtime.

    NOTE(review): the original block had every decorator stripped (bare
    ``_token.setter`` statements that raise NameError at import, property
    bodies without ``@property``, classmethods without ``@classmethod``).
    They are restored here: the methods themselves consume these names
    attribute-style (``self.vocab_size``, ``self.all_special_tokens``,
    ``self.unk_token``), which only works with real properties.
    """

    # Per-subclass metadata describing vocabulary file names, download
    # locations, and per-checkpoint maximum input lengths.
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    max_model_input_sizes = {}
    SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', 'additional_special_tokens']

    @property
    def bos_token(self):
        """Beginning-of-sentence token (logs an error if unset)."""
        if self._bos_token is None:
            logger.error('Using bos_token, but it is not set yet.')
        return self._bos_token

    @property
    def eos_token(self):
        """End-of-sentence token (logs an error if unset)."""
        if self._eos_token is None:
            logger.error('Using eos_token, but it is not set yet.')
        return self._eos_token

    @property
    def unk_token(self):
        """Unknown token (logs an error if unset)."""
        if self._unk_token is None:
            logger.error('Using unk_token, but it is not set yet.')
        return self._unk_token

    @property
    def sep_token(self):
        """Sentence-separator token (logs an error if unset)."""
        if self._sep_token is None:
            logger.error('Using sep_token, but it is not set yet.')
        return self._sep_token

    @property
    def pad_token(self):
        """Padding token (logs an error if unset)."""
        if self._pad_token is None:
            logger.error('Using pad_token, but it is not set yet.')
        return self._pad_token

    @property
    def cls_token(self):
        """Classification token (logs an error if unset)."""
        if self._cls_token is None:
            logger.error('Using cls_token, but it is not set yet.')
        return self._cls_token

    @property
    def mask_token(self):
        """Mask token for masked-LM training (logs an error if unset)."""
        if self._mask_token is None:
            logger.error('Using mask_token, but it is not set yet.')
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """List of extra special tokens (logs an error if unset)."""
        if self._additional_special_tokens is None:
            logger.error('Using additional_special_tokens, but it is not set yet.')
        return self._additional_special_tokens

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    def __init__(self, max_len=None, **kwargs):
        """Initialize token slots and record any special tokens from kwargs.

        Args:
            max_len: maximum sequence length; None means effectively
                unlimited.
            **kwargs: special-token assignments, validated against
                SPECIAL_TOKENS_ATTRIBUTES.
        """
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._additional_special_tokens = []
        # NOTE(review): the original read ``int(.0)`` (i.e. 0), which would
        # make every encoded sequence trigger the too-long warning; upstream
        # uses int(1e12) as an "effectively no limit" sentinel -- restored.
        self.max_len = max_len if max_len is not None else int(1e12)
        self.added_tokens_encoder = {}
        self.added_tokens_decoder = {}
        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == 'additional_special_tokens':
                    assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                else:
                    assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                # Goes through the property setters restored above.
                setattr(self, key, value)

    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        """Instantiate a tokenizer from a shortcut name, directory, or file."""
        return cls._from_pretrained(*inputs, **kwargs)

    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """Resolve vocabulary files (shortcut / dir / url), then build cls."""
        cache_dir = kwargs.pop('cache_dir', None)
        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        if pretrained_model_name_or_path in s3_models:
            # Known shortcut name: pull file urls from the class-level map.
            for file_id, map_list in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
        else:
            # Treat the argument as a local directory or a direct file path.
            logger.info("Model name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
            for file_id, file_name in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    full_file_name = pretrained_model_name_or_path
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            # Auxiliary files (added tokens / special-token map) live next to
            # the vocabulary.
            all_vocab_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE}
            saved_directory = pretrained_model_name_or_path
            if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
                saved_directory = os.path.dirname(saved_directory)
            for file_id, file_name in all_vocab_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if not os.path.exists(full_file_name):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            if all((full_file_name is None) for full_file_name in vocab_files.values()):
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find tokenizer filesat this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
                return None
        try:
            # Resolve urls through the cache; local paths resolve to themselves.
            resolved_vocab_files = {}
            for file_id, file_path in vocab_files.items():
                if file_path is None:
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir)
        except EnvironmentError:
            if pretrained_model_name_or_path in s3_models:
                logger.error("Couldn't reach server to download vocabulary.")
            else:
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find files {} at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, str(vocab_files.keys())))
            return None
        for file_id, file_path in vocab_files.items():
            if file_path == resolved_vocab_files[file_id]:
                logger.info('loading file {}'.format(file_path))
            else:
                logger.info('loading file {} from cache at {}'.format(file_path, resolved_vocab_files[file_id]))
        if pretrained_model_name_or_path in cls.max_model_input_sizes:
            # Clamp max_len to the checkpoint's positional-embedding limit.
            max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if (max_len is not None) and isinstance(max_len, (int, float)):
                # NOTE(review): mangled ``int(.0)`` restored to int(1e12) here
                # as well; with 0 the min() would always pick 0.
                kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
        special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
        for args_name, file_path in resolved_vocab_files.items():
            if args_name not in kwargs:
                kwargs[args_name] = file_path
        if special_tokens_map_file is not None:
            special_tokens_map = json.load(open(special_tokens_map_file, encoding='utf-8'))
            for key, value in special_tokens_map.items():
                if key not in kwargs:
                    kwargs[key] = value
        tokenizer = cls(*inputs, **kwargs)
        if added_tokens_file is not None:
            added_tok_encoder = json.load(open(added_tokens_file, encoding='utf-8'))
            added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)
        return tokenizer

    def save_pretrained(self, save_directory):
        """Save the special-token map, added tokens, and vocabulary files.

        Returns the tuple of written file paths, or None on error.
        """
        if not os.path.isdir(save_directory):
            logger.error('Saving directory ({}) should be a directory'.format(save_directory))
            return
        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            if self.added_tokens_encoder:
                out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
            else:
                out_str = u'{}'
            f.write(out_str)
        vocab_files = self.save_vocabulary(save_directory)
        return vocab_files + (special_tokens_map_file, added_tokens_file)

    def save_vocabulary(self, save_directory):
        """Subclass hook: save the base vocabulary; returns file paths."""
        raise NotImplementedError

    @property
    def vocab_size(self):
        """Subclass hook: size of the base vocabulary (without added tokens)."""
        raise NotImplementedError

    def __len__(self):
        # Base vocabulary plus tokens added at runtime.
        return self.vocab_size + len(self.added_tokens_encoder)

    def add_tokens(self, new_tokens):
        """Add tokens not already in the vocabulary; returns how many."""
        if not new_tokens:
            return 0
        to_add_tokens = []
        for token in new_tokens:
            assert (isinstance(token, str) or (six.PY2 and isinstance(token, unicode)))
            # A token is "new" when it maps to the unk id (and isn't unk itself).
            if (token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)):
                to_add_tokens.append(token)
                logger.info('Adding %s to the vocabulary', token)
        # New ids are appended after the current end of the vocabulary.
        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        return len(to_add_tokens)

    def add_special_tokens(self, special_tokens_dict):
        """Register special tokens (also adding them to the vocabulary).

        Returns the number of tokens actually added.
        """
        if not special_tokens_dict:
            return 0
        added_tokens = 0
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES
            if key == 'additional_special_tokens':
                assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                added_tokens += self.add_tokens(value)
            else:
                assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                added_tokens += self.add_tokens([value])
            logger.info('Assigning %s to the %s key of the tokenizer', value, key)
            setattr(self, key, value)
        return added_tokens

    def tokenize(self, text, **kwargs):
        """Tokenize text, keeping added/special tokens as atomic units."""
        def split_on_tokens(tok_list, text):
            # Recursively split on each protected token; only text that
            # contains none of them reaches the subclass _tokenize.
            if not text:
                return []
            if not tok_list:
                return self._tokenize(text, **kwargs)
            tok = tok_list[0]
            split_text = text.split(tok)
            # Re-insert the split token between pieces; drop the trailing one.
            return sum(((split_on_tokens(tok_list[1:], sub_text.strip()) + [tok]) for sub_text in split_text), [])[:-1]
        added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text

    def _tokenize(self, text, **kwargs):
        """Subclass hook: split plain text into vocabulary tokens."""
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        """Map a token or list of tokens to ids (added vocab checked first)."""
        if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
            return self._convert_token_to_id_with_added_voc(tokens)
        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        if len(ids) > self.max_len:
            logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this model ({} > {}). Running this sequence through the model will result in indexing errors'.format(len(ids), self.max_len))
        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        # Runtime-added tokens shadow the base vocabulary.
        if token in self.added_tokens_encoder:
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        """Subclass hook: base-vocabulary token -> id."""
        raise NotImplementedError

    def encode(self, text, add_special_tokens=False, *sequences):
        """Encode one sentence (or a pair) to ids, optionally with specials."""
        if len(sequences) == 0:
            if add_special_tokens:
                return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
            else:
                return self.convert_tokens_to_ids(self.tokenize(text))
        if len(sequences) > 1:
            logger.warning('Tokenization currently only supports sentence pairs. Ignoring every string following the initial two.')
        first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
        second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(sequences[0])]
        if add_special_tokens:
            return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
        else:
            return (first_sentence_tokens, second_sentence_tokens)

    def add_special_tokens_single_sentence(self, token_ids):
        """Subclass hook: wrap one sentence's ids with model special tokens."""
        raise NotImplementedError

    def add_special_tokens_sentences_pair(self, *token_ids):
        """Subclass hook: wrap a sentence pair's ids with special tokens."""
        raise NotImplementedError

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Map an id or list of ids back to tokens (added vocab first)."""
        if isinstance(ids, int):
            if ids in self.added_tokens_decoder:
                return self.added_tokens_decoder[ids]
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            if (index in self.all_special_ids) and skip_special_tokens:
                continue
            if index in self.added_tokens_decoder:
                tokens.append(self.added_tokens_decoder[index])
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        """Subclass hook: base-vocabulary id -> token."""
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens):
        # NOTE(review): this delegates to convert_ids_to_tokens on what the
        # name says are tokens -- matches the original code; confirm upstream.
        return ' '.join(self.convert_ids_to_tokens(tokens))

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        """Decode ids to text, optionally splitting on sep and cleaning up."""
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        text = self.convert_tokens_to_string(filtered_tokens)
        if (self.sep_token is not None) and (self.sep_token in text):
            # Normalize cls to sep, then split into sentences on sep.
            text = text.replace(self.cls_token, self.sep_token)
            split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self.sep_token)))
            if clean_up_tokenization_spaces:
                clean_text = [self.clean_up_tokenization(text) for text in split_text]
                return clean_text
            else:
                return split_text
        elif clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    @property
    def special_tokens_map(self):
        """Dict of special-token attribute name -> value, for set tokens only."""
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, '_' + attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """Flat, de-duplicated list of every set special-token string."""
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = all_toks + (attr_value if isinstance(attr_value, (list, tuple)) else [attr_value])
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """Ids of every set special token (via the base vocabulary)."""
        all_toks = self.all_special_tokens
        all_ids = list(self._convert_token_to_id(t) for t in all_toks)
        return all_ids

    @staticmethod
    def clean_up_tokenization(out_string):
        """Undo whitespace artifacts of tokenization around punctuation.

        NOTE(review): the original defined this with a bare ``(out_string)``
        signature yet called it as ``self.clean_up_tokenization(text)``,
        which would bind self to out_string; restored as a staticmethod.
        """
        out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J.

    NOTE(review): ``inputs``, ``num_layers``, ``num_attention_heads`` and
    ``default_onnx_opset`` are consumed attribute-style inside this class
    (e.g. ``self.num_attention_heads`` in generate_dummy_inputs), so their
    stripped ``@property`` decorators are restored here.
    """

    def __init__(self, config: PretrainedConfig, task: str = 'default', patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        # GPT-J checkpoints may lack a pad token; default to 0 so dummy
        # inputs can be padded during export.
        if not getattr(self._config, 'pad_token_id', None):
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            # Past key/value inputs come before attention_mask, whose
            # sequence axis then covers past + current tokens.
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        """Number of transformer blocks (from the model config)."""
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        """Number of attention heads (from the model config)."""
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        """Build dummy inputs, adding zero past key/values when use_past."""
        # Skip OnnxConfigWithPast's own override and use the base generator.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                (batch, seqlen) = common_inputs['input_ids'].shape
                # Past length is arbitrary for tracing; +2 keeps it distinct
                # from the current sequence length.
                past_key_values_length = seqlen + 2
                past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads)
                ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask over the (all-visible) past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required by the exported graph."""
        return 13
def actor_net(args, data=None):
    """Build an ActorNet, optionally restoring weights from a state dict.

    Args:
        args: configuration object forwarded to the ActorNet constructor.
        data: optional state dict to load; when None the network keeps its
            freshly initialized weights.

    Returns:
        The constructed (and possibly restored) ActorNet.
    """
    model = ActorNet(args)
    # Bug fix: the original called load_state_dict(data) unconditionally,
    # which raises for the documented default of data=None.
    if data is not None:
        model.load_state_dict(data)
    return model
def functional_pulse(func):
    """Decorator turning a sample-generating function into a SamplePulse factory.

    NOTE(review): the bare ``(func)`` line in the original is the stripped
    remains of ``@functools.wraps(func)``; restored here so the wrapper
    keeps the decorated function's name and docstring.

    The wrapped callable validates that its first argument is a positive
    integer duration, evaluates ``func`` to get the samples, and wraps them
    in a SamplePulse.
    """
    import functools

    @functools.wraps(func)
    def to_pulse(duration, *args, name=None, **kwargs):
        if isinstance(duration, int) and (duration > 0):
            samples = func(duration, *args, **kwargs)
            samples = np.asarray(samples, dtype=np.complex128)
            return SamplePulse(samples=samples, name=name)
        raise PulseError('The first argument must be an integer value representing duration.')
    return to_pulse
def gen_backward():
    """Generate 'lightconv_cuda_backward.cu'.

    Emits an unrolled CUDA backward pass for LightConv, specialized per
    (filter size k, sequence-length bucket, padding) combination.  Short
    sequences get kernels whose block size equals the sequence length;
    longer sequences fall back to generic kernels with block size 32.
    """
    # C++ file header: license banner, includes, function prologue, and the
    # opening switch over filterSize.
    head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "lightconv_cuda.cuh"\n\nstd::vector<at::Tensor> lightconv_cuda_backward(\n at::Tensor gradOutput,\n int padding_l,\n at::Tensor input,\n at::Tensor filters) {\n\n // gradWrtInput\n const int minibatch = input.size(0);\n const int numFeatures = input.size(1);\n const int sequenceLength = input.size(2);\n\n const int numHeads = filters.size(0);\n const int filterSize = filters.size(1);\n\n const dim3 gradBlocks(minibatch, numFeatures);\n const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);\n const dim3 weightGradSecondpassBlocks(numHeads, filterSize);\n\n const int numFiltersInBlock = numFeatures / numHeads;\n\n auto gradInput = at::zeros_like(input);\n auto gradFilters = at::zeros_like(filters);\n\n at::DeviceGuard g(input.device());\n auto stream = at::cuda::getCurrentCUDAStream();\n\n switch(filterSize) {\n'
    # Template fragments stitched together below; {k}/{b_size}/{p}/{seq} are
    # filled via str.format.
    sequence_if = '\n if (sequenceLength <= {seq}) {{\n'
    case_k = '\n case {k}:\n'
    # Gradient w.r.t. the input, dispatched over float/half.
    main_block = '\n if (padding_l == {p}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{\n lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n gradOutput.data<scalar_t>(),\n filters.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n gradInput.data<scalar_t>());\n\n'
    # Two-pass weight gradient for short sequences (per-head temp buffer).
    weight_grad_short = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
    # Two-pass weight gradient for long sequences (per-feature temp buffer).
    weight_grad = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
    bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;\n }\n'
    breakout = '\n break;\n'
    bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;\n'
    con_else = '\n } else\n'
    final_else = '\n {\n switch(filterSize) {\n'
    last_return = '\n }\n return {gradInput, gradFilters};\n}\n'
    # Supported filter sizes, the sequence-length buckets (multiples of 32),
    # and per-filter-size limits: `thresh` caps specialization for small k,
    # `max_mem` caps block size for large k (-1 means no limit applies).
    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
    thresh = [32, 32, 64, 128, 256, (- 1), (- 1), (- 1)]
    max_mem = [(- 1), (- 1), (- 1), (- 1), (- 1), 192, 96, 64]
    with open('lightconv_cuda_backward.cu', 'w') as backward:
        backward.write(head)
        for (k, t, mem) in zip(kernels, thresh, max_mem):
            backward.write(case_k.format(k=k))
            for seq in seqs:
                if (((t == (- 1)) or (seq <= t)) and ((mem == (- 1)) or (seq < mem))):
                    # Specialized path: block size equals this sequence bucket.
                    backward.write(sequence_if.format(seq=seq))
                    # Two supported paddings: "same" (k//2) and causal (k-1).
                    for p in [(k // 2), (k - 1)]:
                        backward.write(main_block.format(k=k, b_size=seq, p=p))
                        backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
                    backward.write(bad_padding)
                else:
                    # Fallback path with a fixed block size of 32; emitted once,
                    # then `break` stops iterating the remaining buckets.
                    for p in [(k // 2), (k - 1)]:
                        backward.write(main_block.format(k=k, b_size=32, p=p))
                        backward.write(weight_grad.format(k=k, b_size=32, p=p))
                    backward.write(bad_padding)
                    backward.write(breakout)
                    break
                backward.write(con_else)
        backward.write(bad_filter)
        backward.write(last_return)
def test_digits_cosine_naive_init():
    """Warm-started naive facility location must reproduce the precomputed
    ranking and gains for the remaining (non-initial) selections."""
    warm_start = digits_cosine_ranking[:5]
    selector = FacilityLocationSelection(100, 'cosine', optimizer='naive',
                                         initial_subset=warm_start)
    selector.fit(X_digits)
    # The last five slots are unused because five picks were pre-seeded.
    assert_array_equal(selector.ranking[:-5], digits_cosine_ranking[5:])
    assert_array_almost_equal(selector.gains[:-5], digits_cosine_gains[5:], 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
# NOTE(review): the next bare line looks like the truncated remains of a
# '@pytest.mark.parametrize' decorator -- confirm against the original file.
.parametrize('device', list_devices())
def test_to_linear_transform(device):
    """Check Image.to dtype conversion and linear_transform (scale/offset),
    on both single- and three-channel inputs, on every available device."""
    TOL = {'rtol': 1e-07, 'atol': 1e-07}
    input_data = np.array((10, 25, 0, 13, 5, 40), dtype=np.uint8).reshape((2, 3, 1))
    # uint8 -> float32 conversion normalizes into [0, 1].
    output_ref = (input_data / 255.0)
    negative_image_ref = (1.0 - (input_data / 255.0))
    # Expected result of scale=20, offset=-20 with saturation at 255.
    saturate_ref = np.array((180, 255, 0, 240, 80, 255), dtype=np.uint8).reshape((2, 3, 1))
    t_input = o3c.Tensor(input_data, dtype=o3c.uint8, device=device)
    t_input3 = o3c.Tensor(np.broadcast_to(input_data, shape=(2, 3, 3)), dtype=o3c.uint8, device=device)
    input1 = Image(t_input)
    output1 = input1.to(o3c.float32)
    assert (output1.dtype == o3c.float32)
    np.testing.assert_allclose(output1.as_tensor().cpu().numpy(), output_ref, **TOL)
    input3 = Image(t_input3)
    output3 = input3.to(o3c.float32)
    np.testing.assert_allclose(output3.as_tensor().cpu().numpy(), np.broadcast_to(output_ref, (2, 3, 3)), **TOL)
    # In-place negation via linear_transform: out = 1 - in.
    output1.linear_transform(scale=(- 1), offset=1)
    np.testing.assert_allclose(output1.as_tensor().cpu().numpy(), negative_image_ref)
    output3.linear_transform(scale=(- 1), offset=1)
    np.testing.assert_allclose(output3.as_tensor().cpu().numpy(), np.broadcast_to(negative_image_ref, (2, 3, 3)), **TOL)
    # uint8 -> uint16 keeps the raw values unchanged.
    output1 = input1.to(o3c.uint16)
    assert (output1.dtype == o3c.uint16)
    np.testing.assert_allclose(output1.as_tensor().cpu().numpy(), input_data, **TOL)
    output3 = input3.to(o3c.uint16)
    np.testing.assert_allclose(output3.as_tensor().cpu().numpy(), np.broadcast_to(input_data, (2, 3, 3)), **TOL)
    # Affine transform on integer images must saturate rather than wrap.
    output1 = input1.linear_transform(scale=20, offset=(- 20))
    np.testing.assert_allclose(output1.as_tensor().cpu().numpy(), saturate_ref, **TOL)
    output3 = input3.linear_transform(scale=20, offset=(- 20))
    np.testing.assert_allclose(output3.as_tensor().cpu().numpy(), np.broadcast_to(saturate_ref, (2, 3, 3)), **TOL)
class Composite(Null):
    """Feature extractor that delegates to an ordered list of child
    extractors and concatenates their per-child results.

    Caches are parallel lists with one slot per child, so ``cache[i]``
    always addresses the i-th child's private cache.
    """

    def __init__(self, children=None, *args, **kwargs):
        super(Composite, self).__init__()
        # Bug fix: the original used a mutable default (children=[]) that is
        # shared by every instance built without an explicit list; each
        # instance now gets its own fresh list.
        self.children = [] if children is None else children

    def write_fm(self, json_fm=None):
        """Merge every child's feature-map description into json_fm.

        Bug fix: the original used a mutable default (json_fm={}) that
        accumulated entries across calls; replaced with a None sentinel.
        """
        if json_fm is None:
            json_fm = {}
        for child in self.children:
            json_fm.update(child.write_fm(json_fm))
        return json_fm

    def create_node_cache(self, *args, **kwargs):
        """One cache entry per child, in child order."""
        return [c.create_node_cache(*args, **kwargs) for c in self.children]

    def create_edge_cache(self, *args, **kwargs):
        """One cache entry per child, in child order."""
        return [c.create_edge_cache(*args, **kwargs) for c in self.children]

    def update_node_cache(self, g, n1, n2, dst, src):
        """Fan the per-child cache slots out to each child's updater."""
        for (i, child) in enumerate(self.children):
            child.update_node_cache(g, n1, n2, dst[i], src[i])

    def update_edge_cache(self, g, e1, e2, dst, src):
        """Fan the per-child cache slots out to each child's updater."""
        for (i, child) in enumerate(self.children):
            child.update_edge_cache(g, e1, e2, dst[i], src[i])

    def compute_node_features(self, g, n, cache=None):
        """Concatenate every child's node features for node n."""
        if cache is None:
            # self.default_cache presumably comes from Null -- confirm.
            cache = g.nodes[n][self.default_cache]
        features = []
        for (i, child) in enumerate(self.children):
            features.append(child.compute_node_features(g, n, cache[i]))
        return np.concatenate(features)

    def compute_edge_features(self, g, n1, n2, cache=None):
        """Concatenate every child's edge features for edge (n1, n2)."""
        if cache is None:
            cache = g.edges[(n1, n2)][self.default_cache]
        features = []
        for (i, child) in enumerate(self.children):
            features.append(child.compute_edge_features(g, n1, n2, cache[i]))
        return np.concatenate(features)

    def compute_difference_features(self, g, n1, n2, cache1=None, cache2=None):
        """Concatenate every child's node-difference features for n1 vs n2."""
        if cache1 is None:
            cache1 = g.nodes[n1][self.default_cache]
        if cache2 is None:
            cache2 = g.nodes[n2][self.default_cache]
        features = []
        for (i, child) in enumerate(self.children):
            features.append(child.compute_difference_features(g, n1, n2, cache1[i], cache2[i]))
        return np.concatenate(features)
def get_word2vec(args, word_counter):
    """Load GloVe vectors for every vocabulary word present (in any casing)
    in word_counter; returns a word -> vector dict keyed by the casing
    variant that matched."""
    glove_path = os.path.join(
        args.glove_dir,
        'glove.{}.{}d.txt'.format(args.glove_corpus, args.glove_vec_size))
    # Line counts per corpus, used only for the tqdm progress bar.
    sizes = {'6B': int(400000.0), '42B': int(1900000.0), '840B': int(2200000.0), '2B': int(1200000.0)}
    word2vec_dict = {}
    with open(glove_path, 'r', encoding='utf-8') as fh:
        for line in tqdm(fh, total=sizes[args.glove_corpus]):
            fields = line.lstrip().rstrip().split(' ')
            token = fields[0]
            vector = list(map(float, fields[1:]))
            # Try the raw token first, then its casing variants, keeping the
            # same precedence as the original elif chain.
            for candidate in (token, token.capitalize(), token.lower(), token.upper()):
                if candidate in word_counter:
                    word2vec_dict[candidate] = vector
                    break
    print('{}/{} of word vocab have corresponding vectors in {}'.format(len(word2vec_dict), len(word_counter), glove_path))
    return word2vec_dict
def att_loss(pred, mask, p4, p5):
    """Weighted BCE-with-logits loss: pixels where the two (detached) side
    predictions disagree with the target get up-weighted."""
    target = flat(mask)
    # Side outputs are detached so the weighting does not backpropagate.
    side4 = flat(torch.sigmoid(p4.detach()))
    side5 = flat(torch.sigmoid(p5.detach()))
    disagreement4 = torch.abs(target - side4)
    disagreement5 = torch.abs(target - side5)
    # Base weight of 1 plus the mean absolute disagreement.
    weight = (disagreement4 + disagreement5) * 0.5 + 1
    attbce = F.binary_cross_entropy_with_logits(
        pred, target, weight=(weight * 1.0), reduction='mean')
    return attbce
def test_data_processing_pipeline(processed_data: Dict[(str, Dict[(str, Any)])]) -> None:
    # Fixture-driven smoke test: the pipeline output (injected as the
    # `processed_data` fixture) must equal the module-level expectation.
    assert (processed_data == expected_processed_data)
class FakeQuantize(FakeQuantizeBase):
    """TF fake-quantization node: tracks min/max ranges (per-tensor or
    per-channel) during training and inserts quantize/dequantize ops."""

    def __init__(self, per_channel=False, num_bits=8, channel_axis=(- 1), symmetric=True, narrow_range=True):
        # NOTE(review): no super().__init__() call here -- confirm whether
        # FakeQuantizeBase requires one.
        self.num_bits = num_bits
        self.per_channel = per_channel
        self.symmetric = symmetric
        self.narrow_range = narrow_range
        self.channel_axis = channel_axis
        self.name_prefix = 'FakeQuantize'

    def __call__(self, inputs, ranges, training, **kwargs):
        """Quantize-dequantize `inputs`; during training, first update the
        min/max range variables from the current batch statistics."""
        with tf.name_scope(self.name_prefix):
            input_shape = inputs.get_shape()
            input_dim = len(input_shape)
            # NOTE(review): this resolves -1 to a concrete axis by MUTATING
            # instance state, so a later call with a different input rank
            # would reuse the stale axis -- confirm intended.
            if (self.channel_axis == (- 1)):
                self.channel_axis += input_dim
            if (not training):
                # Inference: use the stored ranges as-is.
                return self._insert_qdq(inputs, ranges['min_var'], ranges['max_var'])
            if self.per_channel:
                # NOTE(review): reduce_dims is only assigned for rank 2 or 4;
                # a rank-3 per-channel input would hit an unbound local below.
                if (input_dim == 2):
                    reduce_dims = [0]
                elif (input_dim == 4):
                    reduce_dims = [i for i in range(input_dim) if (i != self.channel_axis)]
            # Batch min: reduced over all non-channel axes (per-channel) or
            # over the whole tensor (per-tensor).
            if self.per_channel:
                if (input_dim >= 2):
                    batch_min = tf.math.reduce_min(inputs, axis=reduce_dims, name='BatchMin')
                else:
                    batch_min = inputs
            else:
                batch_min = tf.math.reduce_min(inputs, name='BatchMin')
            # Batch max: same reduction structure as batch min.
            if self.per_channel:
                if (input_dim >= 2):
                    batch_max = tf.math.reduce_max(inputs, axis=reduce_dims, name='BatchMax')
                else:
                    batch_max = inputs
            else:
                batch_max = tf.math.reduce_max(inputs, name='BatchMax')
            if self.symmetric:
                # Symmetric mode: force the range to be centered; the ratio
                # accounts for the asymmetric integer grid when narrow_range
                # is off.
                if self.narrow_range:
                    min_max_ratio = (- 1)
                else:
                    min_max_ratio = ((- ((1 << self.num_bits) - 2)) / (1 << self.num_bits))
                range_min = tf.math.minimum(batch_min, (batch_max / min_max_ratio))
                range_max = tf.math.maximum(batch_max, (batch_min * min_max_ratio))
            else:
                # Asymmetric mode: the range must always include zero.
                range_min = tf.math.minimum(batch_min, 0.0)
                range_max = tf.math.maximum(batch_max, 0.0)
            # Persist the updated ranges, then quantize with them.
            assign_min = ranges['min_var'].assign(range_min, name='AssignMinLast')
            assign_max = ranges['max_var'].assign(range_max, name='AssignMaxLast')
            return self._insert_qdq(inputs, assign_min, assign_max)

    def _insert_qdq(self, inputs, min_var, max_var):
        """Insert the actual quantize/dequantize op using the given ranges."""
        if self.per_channel:
            return tf.quantization.quantize_and_dequantize_v2(inputs, min_var, max_var, num_bits=self.num_bits, narrow_range=self.narrow_range, axis=self.channel_axis, range_given=True)
        else:
            # Per-tensor mode requires scalar range variables.
            assert (min_var.get_shape() == [])
            assert (max_var.get_shape() == [])
            return tf.quantization.quantize_and_dequantize_v2(inputs, min_var, max_var, num_bits=self.num_bits, narrow_range=self.narrow_range, range_given=True)

    def get_config(self):
        # channel_axis and name_prefix are intentionally(?) not serialized --
        # confirm against how configs are consumed.
        return {'num_bits': self.num_bits, 'per_channel': self.per_channel, 'symmetric': self.symmetric, 'narrow_range': self.narrow_range}

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; channel_axis is also excluded from equality -- confirm.
        if (not isinstance(other, FakeQuantize)):
            return False
        return ((self.num_bits == other.num_bits) and (self.per_channel == other.per_channel) and (self.symmetric == other.symmetric) and (self.narrow_range == other.narrow_range))

    def __ne__(self, other):
        return (not self.__eq__(other))
class NetConstructor():
    """Deferred, picklable constructor: stores a factory's module path and
    name plus its arguments, and builds the object on demand."""

    def __init__(self, fun_name, fun_module, args, kwds):
        # Record everything needed to look the factory up again later.
        self.fun_name = fun_name
        self.fun_module = fun_module
        self.args = args
        self.kwds = kwds

    def get(self):
        """Import the module, resolve the factory by name, and invoke it."""
        module = importlib.import_module(self.fun_module)
        factory = getattr(module, self.fun_name)
        return factory(*self.args, **self.kwds)
def add_distributed_training_args(parser, default_world_size=None):
    """Register the 'Distributed training' argument group on `parser`.

    Args:
        parser: an argparse.ArgumentParser (or compatible).
        default_world_size: default GPU count; falls back to all visible
            CUDA devices (at least 1).

    Returns:
        The created argument group.
    """
    group = parser.add_argument_group('Distributed training')
    if (default_world_size is None):
        default_world_size = max(1, torch.cuda.device_count())
    # Core topology / rendezvous options.
    group.add_argument('--distributed-world-size', type=int, metavar='N', default=default_world_size, help='total number of GPUs across all nodes (default: all visible GPUs)')
    group.add_argument('--distributed-rank', default=0, type=int, help='rank of the current worker')
    group.add_argument('--distributed-backend', default='nccl', type=str, help='distributed backend')
    group.add_argument('--distributed-init-method', default=None, type=str, help='typically tcp://hostname:port that will be used to establish initial connetion')
    group.add_argument('--distributed-port', default=(- 1), type=int, help='port number (not required if using --distributed-init-method)')
    group.add_argument('--device-id', '--local_rank', default=0, type=int, help='which GPU to use (usually configured automatically)')
    group.add_argument('--distributed-no-spawn', action='store_true', help='do not spawn multiple processes even if multiple GPUs are visible')
    # DDP behavior knobs.
    group.add_argument('--ddp-backend', default='c10d', type=str, choices=['c10d', 'no_c10d'], help='DistributedDataParallel backend')
    group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB', help='bucket size for reduction')
    group.add_argument('--fix-batches-to-gpus', action='store_true', help="don't shuffle batches between GPUs; this reduces overall randomness and may affect precision but avoids the cost of re-reading the data")
    group.add_argument('--find-unused-parameters', default=False, action='store_true', help='disable unused parameter detection (not applicable to no_c10d ddp-backend')
    group.add_argument('--fast-stat-sync', default=False, action='store_true', help='[deprecated] this is now defined per Criterion')
    group.add_argument('--broadcast-buffers', default=False, action='store_true', help='Copy non-trainable parameters between GPUs, such as batchnorm population statistics')
    # SlowMo / LocalSGD variants.
    group.add_argument('--distributed-wrapper', default='DDP', type=str, choices=['DDP', 'SlowMo'], help='DistributedDataParallel backend')
    group.add_argument('--slowmo-momentum', default=None, type=float, help='SlowMo momentum term; by default use 0.0 for 16 GPUs, 0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs')
    group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'], help='whether to use LocalSGD or SGP')
    group.add_argument('--localsgd-frequency', default=3, type=int, help='Local SGD allreduce frequency')
    group.add_argument('--nprocs-per-node', type=int, metavar='N', default=max(1, torch.cuda.device_count()), help='number of GPUs in each node. An allreduce operation across GPUs in a node is very fast. Hence, we do allreduce across GPUs in a node, and gossip across different nodes')
    return group
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0, collate_fn=None):
    """Build a batched dataloader for detection training.

    NOTE(review): the original began with the bare (and syntactically
    invalid) line ``(from_config=_train_loader_from_config)`` -- the
    stripped remains of the ``@configurable`` decorator; restored here.

    Args:
        dataset: a list of dataset dicts, a map-style dataset, or an
            IterableDataset.
        mapper: callable applied to each dataset item (or None to skip).
        sampler: index sampler for map-style datasets; must be None for
            iterable datasets; defaults to TrainingSampler.
        total_batch_size: total batch size across all workers.
        aspect_ratio_grouping, num_workers, collate_fn: forwarded to
            build_batch_data_loader.

    Returns:
        The batched data loader.
    """
    if isinstance(dataset, list):
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        dataset = MapDataset(dataset, mapper)
    if isinstance(dataset, torchdata.IterableDataset):
        # Iterable datasets produce their own ordering; an external sampler
        # is meaningless (and would be silently ignored), so forbid it.
        assert sampler is None, 'sampler must be None if dataset is IterableDataset'
    else:
        if sampler is None:
            sampler = TrainingSampler(len(dataset))
        assert isinstance(sampler, torchdata.Sampler), f'Expect a Sampler but got {type(sampler)}'
    return build_batch_data_loader(dataset, sampler, total_batch_size, aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, collate_fn=collate_fn)
def seq_accuracy(preds, labels):
    """Mean per-sequence token accuracy.

    For each prediction array, computes the fraction of positions equal to
    the corresponding label array, then averages over all sequences.

    Args:
        preds: iterable of prediction arrays.
        labels: sequence of label arrays, aligned with ``preds``.

    Returns:
        float: mean of the per-sequence accuracies.
    """
    # BUG FIX: the original accumulated into a plain list and then called
    # list.mean(), which raises AttributeError; use np.mean over the list.
    acc = [(pred == labels[idx]).mean() for (idx, pred) in enumerate(preds)]
    return np.mean(acc)
class GraphDataModule(pl.LightningDataModule):
    """Lightning data module serving synthetic random-graph datasets.

    ``graph_family`` chooses the generator backing the datasets;
    ``graph_kwargs`` are forwarded to it, except for the 'geometric' and
    'all' families, which take no extra arguments.
    """

    def __init__(self, graph_family, graph_kwargs=None, samples_per_epoch=100000, batch_size=32, distributed_sampler=True, num_workers=1):
        super().__init__()
        self.graph_family = graph_family
        self.graph_kwargs = {} if graph_kwargs is None else graph_kwargs
        self.samples_per_epoch = samples_per_epoch
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.distributed_sampler = distributed_sampler
        # Populated lazily by the dataloader hooks below.
        self.train_dataset = None
        self.eval_dataset = None
        self.train_sampler = None
        self.eval_sampler = None

    def make_dataset(self, samples_per_epoch):
        """Instantiate the dataset matching ``self.graph_family``.

        Raises:
            NotImplementedError: for an unknown family name.
        """
        family = self.graph_family
        if family == 'binomial':
            return BinomialGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        if family == 'barabasi_albert':
            return BarabasiAlbertGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        if family == 'regular':
            return RegularGraphDataset(samples_per_epoch=samples_per_epoch, **self.graph_kwargs)
        if family == 'geometric':
            # graph_kwargs are deliberately not forwarded for this family.
            return GeometricGraphDataset(samples_per_epoch=samples_per_epoch)
        if family == 'all':
            return RandomGraphDataset(samples_per_epoch=samples_per_epoch)
        raise NotImplementedError

    def _make_loader(self, dataset):
        """Wrap *dataset* in a DenseGraphDataLoader, with a non-shuffling
        DistributedSampler when distributed sampling is enabled."""
        sampler = DistributedSampler(dataset=dataset, shuffle=False) if self.distributed_sampler else None
        return DenseGraphDataLoader(dataset=dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, sampler=sampler)

    def train_dataloader(self):
        """Fresh training dataset + loader (samples_per_epoch items)."""
        self.train_dataset = self.make_dataset(samples_per_epoch=self.samples_per_epoch)
        return self._make_loader(self.train_dataset)

    def val_dataloader(self):
        """Fresh fixed-size (4096 sample) validation dataset + loader."""
        self.eval_dataset = self.make_dataset(samples_per_epoch=4096)
        return self._make_loader(self.eval_dataset)
def level_2_pass_manager(transpile_config):
    """Assemble the level-2 (medium optimization) transpiler pass manager.

    Layout is dense by default (noise-adaptive when backend properties are
    available), routing uses the legacy swap mapper, and optimization runs
    1q-gate collection plus commutative cancellation until the circuit depth
    reaches a fixed point.

    Args:
        transpile_config: configuration carrying basis gates, coupling map,
            initial layout, transpiler seed and backend properties.

    Returns:
        PassManager: the assembled level-2 pass manager.
    """
    basis_gates = transpile_config.basis_gates
    coupling_map = transpile_config.coupling_map
    initial_layout = transpile_config.initial_layout
    seed_transpiler = transpile_config.seed_transpiler
    backend_properties = transpile_config.backend_properties
    # 1. Use the user-given layout if any; otherwise choose one.
    _given_layout = SetLayout(initial_layout)
    def _choose_layout_condition(property_set):
        # Only pick a layout when none has been set yet.
        return (not property_set['layout'])
    _choose_layout = DenseLayout(coupling_map)
    if backend_properties:
        # Prefer a noise-aware layout when calibration data is available.
        _choose_layout = NoiseAdaptiveLayout(backend_properties)
    # 2. Extend the layout with ancillas for the full device.
    _embed = [FullAncillaAllocation(coupling_map), EnlargeWithAncilla()]
    # 3. Route via swaps only if the circuit doesn't already fit the map.
    _swap_check = CheckMap(coupling_map)
    def _swap_condition(property_set):
        return (not property_set['is_swap_mapped'])
    _swap = [BarrierBeforeFinalMeasurements(), Unroll3qOrMore(), LegacySwap(coupling_map, trials=20, seed=seed_transpiler), Decompose(SwapGate)]
    # 4. Unroll to the target basis.
    _unroll = Unroller(basis_gates)
    # 5. Fix CX directions on asymmetric coupling maps.
    def _direction_condition(property_set):
        return ((not coupling_map.is_symmetric) and (not property_set['is_direction_mapped']))
    _direction = [CXDirection(coupling_map)]
    # 6. Remove resets acting on already-zero states.
    _reset = RemoveResetInZeroState()
    # 7. Optimize repeatedly until depth stops improving.
    _depth_check = [Depth(), FixedPoint('depth')]
    def _opt_control(property_set):
        return (not property_set['depth_fixed_point'])
    _opt = [Optimize1qGates(), CommutativeCancellation()]
    pm2 = PassManager()
    if coupling_map:
        pm2.append(_given_layout)
        pm2.append(_choose_layout, condition=_choose_layout_condition)
        pm2.append(_embed)
    pm2.append(_unroll)
    if coupling_map:
        pm2.append(_swap_check)
        pm2.append(_swap, condition=_swap_condition)
        pm2.append(_direction, condition=_direction_condition)
    pm2.append(_reset)
    pm2.append((_depth_check + _opt), do_while=_opt_control)
    return pm2
def stride(init: float, step: float, times: int) -> Iterable[float]:
    """Yield ``times`` values starting at ``init``, each ``step`` apart.

    Equivalent to an arithmetic progression of length ``times`` built by
    repeated addition (not multiplication), matching float accumulation.
    """
    value = init
    for _ in range(times):
        yield value
        value += step
class Runner():
    """Knowledge-graph-embedding training driver.

    Loads the triple store, builds the KGE model and optimizer, trains for
    ``params.max_steps`` steps and pickles the learned embeddings.
    """
    def __init__(self, params):
        self.params = params
        data = Triples()
        (self.entity2id, self.relation2id) = (data.entity2id, data.relation2id)
        self.train_triples = data.triples
        # Inverse maps, used when exporting the embeddings in save().
        self.id2entity = {idx: ent for (ent, idx) in self.entity2id.items()}
        self.id2relation = {idx: ent for (ent, idx) in self.relation2id.items()}
        self.params.nentity = len(self.entity2id)
        self.params.nrelation = len(self.relation2id)
        print(f'{self.params.nentity} entities, {self.params.nrelation} relations')
        self.kge_model = KGEModel(model_name=self.params.model, nentity=self.params.nentity, nrelation=self.params.nrelation, hidden_dim=self.params.hidden_dim, gamma=self.params.gamma, double_entity_embedding=self.params.double_entity_embedding, double_relation_embedding=self.params.double_relation_embedding)
        if self.params.cuda:
            self.kge_model = self.kge_model.cuda()
        # Only optimize parameters that require gradients.
        self.optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), self.kge_model.parameters()), lr=self.params.learning_rate)
        self.train_iterator = self.get_train_iter()
    def run(self):
        """Run the training loop, then persist the embeddings."""
        best_result_dict = dict()  # NOTE(review): currently unused
        for step in range(self.params.max_steps):
            training_logs = []  # NOTE(review): currently unused
            log = self.kge_model.train_step(self.kge_model, self.optimizer, self.train_iterator, self.params)
            print(f"[{step}] Loss={log['loss']:.5f}")
        self.save()
    def get_train_iter(self):
        """Build a bidirectional iterator alternating head- and tail-batch
        negative sampling dataloaders."""
        train_dataloader_head = DataLoader(TrainDataset(self.train_triples, self.params.nentity, self.params.nrelation, self.params.negative_sample_size, 'head-batch'), batch_size=self.params.batch_size, shuffle=True, num_workers=max(1, (self.params.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_dataloader_tail = DataLoader(TrainDataset(self.train_triples, self.params.nentity, self.params.nrelation, self.params.negative_sample_size, 'tail-batch'), batch_size=self.params.batch_size, shuffle=True, num_workers=max(1, (self.params.cpu_num // 2)), collate_fn=TrainDataset.collate_fn)
        train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
        return train_iterator
    def save(self):
        """Pickle id maps and raw embedding tensors to a model-named file."""
        with open(f'{self.params.model}_{self.kge_model.entity_dim}_{self.kge_model.relation_dim}.pkl', 'wb') as f:
            dict_save = {'id2entity': self.id2entity, 'id2relation': self.id2relation, 'entity': self.kge_model.entity_embedding.data, 'relation': self.kge_model.relation_embedding.data}
            pickle.dump(dict_save, f)
class OCNLI(CLSProcessor):
    """Processor for the OCNLI natural-language-inference dataset.

    Each data file contains one JSON object per line with ``sentence1``
    (premise), ``sentence2`` (hypothesis) and a ``label`` drawn from
    ``labels_origin``.
    """
    def __init__(self):
        super().__init__(labels_origin=['entailment', 'contradiction', 'neutral'], labels_mapped=['', '', ''])
    def get_examples(self, data_dir, split):
        """Read ``{split}.json`` under ``data_dir`` and return InputExamples.

        Lines missing a label, or carrying a label outside ``labels_origin``,
        are skipped.

        Returns:
            list[InputExample]: the parsed examples.
        """
        path = os.path.join(data_dir, f'{split}.json')
        # BUG FIX: `examples` was appended to without ever being initialized
        # (NameError) and the method returned nothing; collect and return.
        examples = []
        with open(path, encoding='utf8') as f:
            for line in f:
                example_json = json.loads(line)
                if (('label' not in example_json) or (example_json['label'] not in self.labels_origin)):
                    continue
                example = InputExample(meta={'premise': example_json['sentence1'], 'hypothesis': example_json['sentence2'], 'options': self.labels_mapped}, tgt_text=self.get_label(example_json['label']))
                examples.append(example)
        return examples
    def get_templates(self):
        """Prompt template(s) combining premise, hypothesis and options."""
        return [':{premise} : {hypothesis} :? {options}']
def test_config_build_detector():
    """Smoke-test that each listed mmdetection config can be imported and
    that `build_detector` can construct its model.

    Pretrained weights are disabled so the test never downloads checkpoints.
    """
    from xdoctest.utils import import_module_from_path
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))
    config_names = ['dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py', 'htc/htc_without_semantic_r50_fpn_1x.py', 'cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py', 'grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py', 'double_heads/dh_faster_rcnn_r50_fpn_1x.py', 'empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py', 'guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py', 'foveabox/fovea_r50_fpn_4gpu_1x.py', 'foveabox/fovea_align_gn_ms_r50_fpn_4gpu_2x.py', 'hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py', 'gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py', 'pascal_voc/ssd300_voc.py', 'pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py', 'pascal_voc/ssd512_voc.py', 'gcnet/mask_rcnn_r50_fpn_sbn_1x.py', 'gn/mask_rcnn_r50_fpn_gn_contrib_2x.py', 'reppoints/reppoints_moment_r50_fpn_2x.py', 'reppoints/reppoints_partial_minmax_r50_fpn_1x.py', 'reppoints/bbox_r50_grid_center_fpn_1x.py', 'reppoints/reppoints_minmax_r50_fpn_1x.py', 'reppoints/bbox_r50_grid_fpn_1x.py', 'fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py', 'albu_example/mask_rcnn_r50_fpn_1x.py', 'libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py', 'fp16/mask_rcnn_r50_fpn_fp16_1x.py', 'fp16/faster_rcnn_r50_fpn_fp16_1x.py']
    print('Using {} config files'.format(len(config_names)))
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = import_module_from_path(config_fpath)
        # Bare attribute accesses double as existence checks on the config.
        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
        print('Building detector, config_fpath = {!r}'.format(config_fpath))
        # Disable pretrained backbones to avoid network downloads in CI.
        if ('pretrained' in config_mod.model):
            config_mod.model['pretrained'] = None
        detector = build_detector(config_mod.model, train_cfg=config_mod.train_cfg, test_cfg=config_mod.test_cfg)
        assert (detector is not None)
def import_file(path, name: str=None, add_to_sys=True, disable_warning=False):
    """Import a Python source file, preferring a regular package import when
    the file is reachable from PYTHONPATH.

    Falls back to a spec-based direct file import and (optionally) registers
    the resulting module in ``sys.modules``.

    Args:
        path: file path of the module to import.
        name: optional override for the module name (defaults to file stem).
        add_to_sys: register the module in ``sys.modules``, refusing to
            clobber a module that this helper did not itself load earlier.
        disable_warning: silence the direct-import warning.

    Returns:
        The imported module object.

    Raises:
        ValueError: if ``add_to_sys`` and a foreign module with the same
            name is already registered.
    """
    global CUSTOM_LOADED_MODULES
    path = Path(path)
    module_name = path.stem
    try:
        user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
    except KeyError:
        user_paths = []
    possible_paths = _get_possible_module_path(user_paths)
    model_import_name = _get_regular_import_name(path, possible_paths)
    if (model_import_name is not None):
        # Reachable via PYTHONPATH: do a normal import so reflection works.
        return import_name(model_import_name)
    if (name is not None):
        module_name = name
    # Direct file import: build a module from its location and execute it.
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    if (not disable_warning):
        logger.warning(f"Failed to perform regular import for file {path}. this means this file isn't in any folder in PYTHONPATH or don't have __init__.py in that project. directly file import may fail and some reflecting features are disabled even if import succeed. please add your project to PYTHONPATH or add __init__.py to ensure this file can be regularly imported. ")
    if add_to_sys:
        # Refuse only when the existing module wasn't loaded by us before.
        if ((module_name in sys.modules) and (module_name not in CUSTOM_LOADED_MODULES)):
            raise ValueError(f'{module_name} exists in system.')
        CUSTOM_LOADED_MODULES[module_name] = module
        sys.modules[module_name] = module
    return module
def normlize_image(img):
    """Min-max normalize an image array to the range [0, 1].

    Args:
        img: numeric numpy array (any shape).

    Returns:
        Array of the same shape scaled so min -> 0 and max -> 1. A constant
        image is returned as all zeros instead of dividing by zero.
    """
    t_min = np.min(img)
    t_max = np.max(img)
    # BUG FIX: a constant image made (t_max - t_min) zero, producing
    # NaN/inf via division by zero; return a zero image instead.
    if t_max == t_min:
        return np.zeros_like(img, dtype=float)
    return (img - t_min) / (t_max - t_min)
class ToyModel2(BaseModel):
    """Toy model holding a teacher and a student ToyModel1.

    ``forward`` delegates to the student only; the teacher is presumably
    consumed elsewhere (e.g. by a distillation hook) — confirm with callers.
    """
    def __init__(self):
        super().__init__()
        self.teacher = ToyModel1()
        self.student = ToyModel1()
    def forward(self, *args, **kwargs):
        # Only the student participates in the forward pass.
        return self.student(*args, **kwargs)
class nnUNetTrainerV2_ReLU_biasInSegOutput(nnUNetTrainerV2):
    """nnU-Net V2 trainer variant using ReLU nonlinearities and a bias term
    in the segmentation output convolutions (``seg_output_use_bias=True``)."""
    def initialize_network(self):
        """Build the Generic_UNet and move it to GPU when available.

        2D vs 3D conv/dropout/instance-norm operators are selected from
        ``self.threeD``; inference nonlinearity is set to softmax.
        """
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-05, 'affine': True}
        # Dropout kept but effectively disabled (p=0).
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        # This variant: ReLU instead of nnU-Net's default nonlinearity.
        net_nonlin = nn.ReLU
        net_nonlin_kwargs = {'inplace': True}
        # Positional arguments follow the Generic_UNet signature; the final
        # keyword enables bias in the segmentation output layer.
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, (lambda x: x), InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, seg_output_use_bias=True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
def main():
    """Entry point: parse CLI args, load the YAML config, prepare the save
    directory and launch (optionally distributed) training."""
    torch.set_printoptions(profile='full')
    parser = argparse.ArgumentParser(description='mgp')
    parser.add_argument('--config_path', type=str, help='path of dataset', required=True)
    parser.add_argument('--seed', type=int, default=2021, help='overwrite config seed')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    with open(args.config_path, 'r') as f:
        config = yaml.safe_load(f)
    # EasyDict gives attribute-style access to the nested config.
    config = EasyDict(config)
    # The CLI seed overrides the config only when changed from the default.
    if (args.seed != 2021):
        config.train.seed = args.seed
    if (config.train.save and (config.train.save_path is not None)):
        config.train.save_path = os.path.join(config.train.save_path, config.model.name, '3DInfomax')
        if (not os.path.exists(config.train.save_path)):
            os.makedirs(config.train.save_path, exist_ok=True)
    print(config)
    world_size = torch.cuda.device_count()
    print("Let's use", world_size, 'GPUs!')
    # Multi-GPU: bring up the NCCL process group before training starts.
    if (world_size > 1):
        dist.init_process_group('nccl', rank=args.local_rank, world_size=world_size)
    train(args.local_rank, config, world_size)
def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True):
    """Build an NfCfg for the DeepMind NFNet family.

    Args:
        depths: per-stage block counts.
        channels: per-stage channel widths.
        act_layer: activation name.
        skipinit: enable skip-init residual scaling.

    Returns:
        NfCfg: configuration with SE attention and a deep quad stem.
    """
    se_kwargs = dict(reduction_ratio=0.5, divisor=8)
    # Final feature width is twice the last stage's channel count.
    final_features = int(channels[-1] * 2.0)
    return NfCfg(
        depths=depths,
        channels=channels,
        stem_type='deep_quad',
        stem_chs=128,
        group_size=128,
        bottle_ratio=0.5,
        extra_conv=True,
        gamma_in_act=True,
        same_padding=True,
        skipinit=skipinit,
        num_features=final_features,
        act_layer=act_layer,
        attn_layer='se',
        attn_kwargs=se_kwargs,
    )
def gradient_penalty_loss(discriminator, real_data, fake_data, mask=None):
    """WGAN-GP gradient penalty.

    Penalizes deviation of the discriminator's gradient norm from 1 on
    random interpolations between real and fake samples.

    Args:
        discriminator: network whose input-gradient norm is penalized.
        real_data (Tensor): real samples, shape (N, C, H, W).
        fake_data (Tensor): generated samples, same shape as ``real_data``.
        mask (Tensor, optional): elementwise mask applied to the gradients
            (e.g. for inpainting); the penalty is renormalized by its mean.

    Returns:
        Tensor: scalar gradient-penalty loss.
    """
    batch_size = real_data.size(0)
    # One interpolation coefficient per sample, broadcast over C/H/W.
    alpha = torch.rand(batch_size, 1, 1, 1).to(real_data)
    interpolates = ((alpha * real_data) + ((1.0 - alpha) * fake_data))
    # MODERNIZED: autograd.Variable is deprecated; detach() + requires_grad_()
    # creates the same fresh leaf tensor for autograd.grad to differentiate.
    interpolates = interpolates.detach().requires_grad_(True)
    disc_interpolates = discriminator(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones_like(disc_interpolates), create_graph=True, retain_graph=True, only_inputs=True)[0]
    if (mask is not None):
        gradients = (gradients * mask)
    gradients_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    if (mask is not None):
        # Renormalize so sparsely-masked regions are not under-penalized.
        gradients_penalty /= torch.mean(mask)
    return gradients_penalty
def get_chatgpt_completion_response(prompt_text, max_tokens):
    """Continue ``prompt_text`` using the OpenAI chat API (gpt-3.5-turbo).

    Performs a network call; assumes ``openai.api_key`` is already configured.

    Args:
        prompt_text (str): passage to continue.
        max_tokens (int): generation budget for the completion.

    Returns:
        str: the assistant's continuation text.
    """
    messages = [{'role': 'system', 'content': 'You are a helpful assistant that continues the passage from the sentences provided.'}, {'role': 'user', 'content': prompt_text}]
    response = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=messages, temperature=0.7, max_tokens=max_tokens)
    return response['choices'][0]['message']['content']
class EdgeResidual(nn.Module):
    """Edge residual block: expansion conv -> (optional squeeze-excite) ->
    pointwise-linear conv, with an identity shortcut when shapes permit.

    ``fake_in_chs`` overrides the channel count used to size the expansion
    (a quirk of some published configs) without changing the real input.
    """
    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0, stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.0):
        super(EdgeResidual, self).__init__()
        norm_kwargs = (norm_kwargs or {})
        # Size the expansion from fake_in_chs when given, otherwise in_chs.
        if (fake_in_chs > 0):
            mid_chs = make_divisible((fake_in_chs * exp_ratio))
        else:
            mid_chs = make_divisible((in_chs * exp_ratio))
        has_se = ((se_ratio is not None) and (se_ratio > 0.0))
        # Identity shortcut only when channels and spatial size are preserved.
        self.has_residual = (((in_chs == out_chs) and (stride == 1)) and (not noskip))
        self.drop_path_rate = drop_path_rate
        self.conv_exp = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Pointwise-linear projection (no activation follows it).
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
    def feature_info(self, location):
        """Describe the feature tap at ``location`` ('expansion' taps the
        pre-projection activations; anything else taps the block output)."""
        if (location == 'expansion'):
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info
    def forward(self, x):
        residual = x
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        if (self.se is not None):
            x = self.se(x)
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            # Stochastic depth on the residual branch during training.
            if (self.drop_path_rate > 0.0):
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class RegionLayer(nn.Module):
    """YOLOv2-style region (detection) loss layer.

    ``forward`` reshapes a (nB, nA*(5+nC), nH, nW) prediction map into
    per-anchor box/objectness/class components, builds targets from the
    ground-truth boxes and combines coordinate, confidence and class losses.
    Loss weights follow the darknet region layer (object_scale=5, others 1).
    """
    def __init__(self, num_classes=0, anchors=[], num_anchors=1, use_cuda=None):
        super(RegionLayer, self).__init__()
        # Auto-detect CUDA unless the caller forces a choice.
        # NOTE(review): mutable default `anchors=[]` — harmless here because
        # the list is only read, never mutated.
        use_cuda = (torch.cuda.is_available() and (True if (use_cuda is None) else use_cuda))
        self.device = torch.device(('cuda' if use_cuda else 'cpu'))
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        # Anchors arrive flattened; anchor_step values per anchor.
        self.anchor_step = (len(anchors) // num_anchors)
        self.anchors = torch.FloatTensor(anchors).view(self.num_anchors, self.anchor_step).to(self.device)
        self.rescore = 1           # target confidence = IoU rather than 1.0
        self.coord_scale = 1
        self.noobject_scale = 1
        self.object_scale = 5
        self.class_scale = 1
        self.thresh = 0.6          # IoU above which a box is not penalized as background
        self.seen = 0              # images seen so far (drives coord warm-up)
    def build_targets(self, pred_boxes, target, nH, nW):
        """Build per-cell training targets and loss masks.

        Args:
            pred_boxes: (nB*nA*nH*nW, 4) decoded predictions on CPU.
            target: (nB, 50*5) ground-truth rows [cls, x, y, w, h], zero-padded.
            nH, nW: feature-map height and width.

        Returns:
            (nGT, nRecall, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls)
        """
        nB = target.size(0)
        nA = self.num_anchors
        # Default: every cell is penalized as background (no-object).
        conf_mask = (torch.ones(nB, nA, nH, nW) * self.noobject_scale)
        coord_mask = torch.zeros(nB, nA, nH, nW)
        cls_mask = torch.zeros(nB, nA, nH, nW)
        tcoord = torch.zeros(4, nB, nA, nH, nW)
        tconf = torch.zeros(nB, nA, nH, nW)
        tcls = torch.zeros(nB, nA, nH, nW)
        nAnchors = ((nA * nH) * nW)
        nPixels = (nH * nW)
        nGT = 0
        nRecall = 0
        anchors = self.anchors.to('cpu')
        if (self.seen < 12800):
            # Early-training warm-up: pull every predicted box toward its
            # cell center (0.5, 0.5), per the original darknet recipe.
            tcoord[0].fill_(0.5)
            tcoord[1].fill_(0.5)
            coord_mask.fill_(1)
        for b in range(nB):
            cur_pred_boxes = pred_boxes[(b * nAnchors):((b + 1) * nAnchors)].t()
            cur_ious = torch.zeros(nAnchors)
            tbox = target[b].view((- 1), 5).to('cpu')
            # Pass 1: best IoU of each prediction against any ground truth;
            # rows with x == 0 mark the end of the padded GT list.
            for t in range(50):
                if (tbox[t][1] == 0):
                    break
                (gx, gw) = [(i * nW) for i in (tbox[t][1], tbox[t][3])]
                (gy, gh) = [(i * nH) for i in (tbox[t][2], tbox[t][4])]
                cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t()
                cur_ious = torch.max(cur_ious, multi_bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
            # Predictions overlapping a GT well enough are exempt from the
            # no-object penalty.
            ignore_ix = (cur_ious > self.thresh)
            conf_mask[b][ignore_ix.view(nA, nH, nW)] = 0
            # Pass 2: assign each ground truth to its best-matching anchor.
            for t in range(50):
                if (tbox[t][1] == 0):
                    break
                nGT += 1
                (gx, gw) = [(i * nW) for i in (tbox[t][1], tbox[t][3])]
                (gy, gh) = [(i * nH) for i in (tbox[t][2], tbox[t][4])]
                (gw, gh) = (gw.float(), gh.float())
                (gi, gj) = (int(gx), int(gy))
                # Compare shapes only (centered at origin) to choose anchor.
                tmp_gt_boxes = torch.FloatTensor([0, 0, gw, gh]).repeat(nA, 1).t()
                anchor_boxes = torch.cat((torch.zeros(nA, 2), anchors), 1).t()
                tmp_ious = multi_bbox_ious(tmp_gt_boxes, anchor_boxes, x1y1x2y2=False)
                (best_iou, best_n) = torch.max(tmp_ious, 0)
                if (self.anchor_step == 4):
                    # 4-value anchors carry position offsets: break IoU ties
                    # by distance between anchor offset and GT center.
                    tmp_ious_mask = (tmp_ious == best_iou)
                    if (tmp_ious_mask.sum() > 0):
                        gt_pos = torch.FloatTensor([gi, gj, gx, gy]).repeat(nA, 1).t()
                        an_pos = anchor_boxes[4:6]
                        dist = (pow(((gt_pos[0] + an_pos[0]) - gt_pos[2]), 2) + pow(((gt_pos[1] + an_pos[1]) - gt_pos[3]), 2))
                        # NOTE(review): `1 - <bool mask>` indexing is rejected
                        # by modern PyTorch; should be `~tmp_ious_mask`.
                        dist[(1 - tmp_ious_mask)] = 10000
                        (_, best_n) = torch.min(dist, 0)
                gt_box = torch.FloatTensor([gx, gy, gw, gh])
                pred_box = pred_boxes[((((b * nAnchors) + (best_n * nPixels)) + (gj * nW)) + gi)]
                iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)
                # Mark the responsible cell/anchor and write its targets.
                coord_mask[b][best_n][gj][gi] = 1
                cls_mask[b][best_n][gj][gi] = 1
                conf_mask[b][best_n][gj][gi] = self.object_scale
                tcoord[0][b][best_n][gj][gi] = (gx - gi)
                tcoord[1][b][best_n][gj][gi] = (gy - gj)
                tcoord[2][b][best_n][gj][gi] = math.log((gw / anchors[best_n][0]))
                tcoord[3][b][best_n][gj][gi] = math.log((gh / anchors[best_n][1]))
                tcls[b][best_n][gj][gi] = tbox[t][0]
                tconf[b][best_n][gj][gi] = (iou if self.rescore else 1.0)
                if (iou > 0.5):
                    nRecall += 1
        return (nGT, nRecall, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls)
    def get_mask_boxes(self, output):
        """Package raw output with flattened anchors for downstream decoding."""
        if (not isinstance(self.anchors, torch.Tensor)):
            self.anchors = torch.FloatTensor(self.anchors).view(self.num_anchors, self.anchor_step).to(self.device)
        masked_anchors = self.anchors.view((- 1))
        num_anchors = torch.IntTensor([self.num_anchors]).to(self.device)
        return {'x': output, 'a': masked_anchors, 'n': num_anchors}
    def forward(self, output, target):
        """Compute the total region loss for a batch of predictions."""
        t0 = time.time()
        nB = output.data.size(0)
        nA = self.num_anchors
        nC = self.num_classes
        nH = output.data.size(2)
        nW = output.data.size(3)
        cls_anchor_dim = (((nB * nA) * nH) * nW)
        if (not isinstance(self.anchors, torch.Tensor)):
            self.anchors = torch.FloatTensor(self.anchors).view(self.num_anchors, self.anchor_step).to(self.device)
        # Split channels into [x, y, w, h, conf, classes...] per anchor.
        output = output.view(nB, nA, (5 + nC), nH, nW)
        cls_grid = torch.linspace(5, ((5 + nC) - 1), nC).long().to(self.device)
        ix = torch.LongTensor(range(0, 5)).to(self.device)
        pred_boxes = torch.FloatTensor(4, cls_anchor_dim).to(self.device)
        coord = output.index_select(2, ix[0:4]).view((nB * nA), (- 1), (nH * nW)).transpose(0, 1).contiguous().view((- 1), cls_anchor_dim)
        # x/y offsets are sigmoided into (0, 1) within their cell.
        coord[0:2] = coord[0:2].sigmoid()
        conf = output.index_select(2, ix[4]).view(nB, nA, nH, nW).sigmoid()
        cls = output.index_select(2, cls_grid)
        cls = cls.view((nB * nA), nC, (nH * nW)).transpose(1, 2).contiguous().view(cls_anchor_dim, nC)
        t1 = time.time()
        # Decode predictions to feature-map coordinates (cell + offset,
        # anchor * exp(w/h)).
        grid_x = torch.linspace(0, (nW - 1), nW).repeat((nB * nA), nH, 1).view(cls_anchor_dim).to(self.device)
        grid_y = torch.linspace(0, (nH - 1), nH).repeat(nW, 1).t().repeat((nB * nA), 1, 1).view(cls_anchor_dim).to(self.device)
        anchor_w = self.anchors.index_select(1, ix[0]).repeat(1, ((nB * nH) * nW)).view(cls_anchor_dim)
        anchor_h = self.anchors.index_select(1, ix[1]).repeat(1, ((nB * nH) * nW)).view(cls_anchor_dim)
        pred_boxes[0] = (coord[0] + grid_x)
        pred_boxes[1] = (coord[1] + grid_y)
        pred_boxes[2] = (coord[2].exp() * anchor_w)
        pred_boxes[3] = (coord[3].exp() * anchor_h)
        pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view((- 1), 4)).detach()
        t2 = time.time()
        (nGT, nRecall, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls) = self.build_targets(pred_boxes, target.detach(), nH, nW)
        # Keep class loss only for cells responsible for a ground truth.
        cls_mask = (cls_mask == 1)
        tcls = tcls[cls_mask].long().view((- 1))
        cls_mask = cls_mask.view((- 1), 1).repeat(1, nC).to(self.device)
        cls = cls[cls_mask].view((- 1), nC)
        nProposals = int((conf > 0.25).sum())
        tcoord = tcoord.view(4, cls_anchor_dim).to(self.device)
        (tconf, tcls) = (tconf.to(self.device), tcls.to(self.device))
        (coord_mask, conf_mask) = (coord_mask.view(cls_anchor_dim).to(self.device), conf_mask.sqrt().to(self.device))
        t3 = time.time()
        # NOTE(review): `size_average=False` is deprecated in modern PyTorch;
        # the equivalent is `reduction='sum'`.
        loss_coord = ((self.coord_scale * nn.MSELoss(size_average=False)((coord * coord_mask), (tcoord * coord_mask))) / 2)
        loss_conf = (nn.MSELoss(size_average=False)((conf * conf_mask), (tconf * conf_mask)) / 2)
        loss_cls = ((self.class_scale * nn.CrossEntropyLoss(size_average=False)(cls, tcls)) if (cls.size(0) > 0) else 0)
        loss = ((loss_coord + loss_conf) + loss_cls)
        t4 = time.time()
        # Debug timing block, disabled by default.
        if False:
            print(('-' * 30))
            print((' activation : %f' % (t1 - t0)))
            print((' create pred_boxes : %f' % (t2 - t1)))
            print((' build targets : %f' % (t3 - t2)))
            print((' create loss : %f' % (t4 - t3)))
            print((' total : %f' % (t4 - t0)))
        print(('%d: nGT %3d, nRC %3d, nPP %3d, loss: box %6.3f, conf %6.3f, class %6.3f, total %7.3f' % (self.seen, nGT, nRecall, nProposals, loss_coord, loss_conf, loss_cls, loss)))
        if math.isnan(loss.item()):
            # Abort on divergence so the bad batch can be inspected.
            print(conf, tconf)
            sys.exit(0)
        return loss
class Segmentation(object):
    """Builds speech segments from a per-frame alignment and post-processes
    them (short-segment filtering, padding, merging), tracking statistics.

    A segment is stored as ``[start_sec, end_sec, label]``; label 2 denotes
    speech (see ``initialize_segments``).
    """
    def __init__(self):
        self.segments = None          # list of [start, end, label]
        self.stats = SegmenterStats()
    def initialize_segments(self, alignment, frame_shift=0.01):
        """Convert a frame-level label sequence into speech segments.

        Args:
            alignment: non-empty iterable of per-frame labels (int-parsable).
            frame_shift: seconds per frame.
        """
        self.segments = []
        assert (len(alignment) > 0)
        prev_label = None
        prev_length = 0
        for (i, text_label) in enumerate(alignment):
            if ((prev_label is not None) and (int(text_label) != prev_label)):
                # Label changed: close the previous run; keep it only if
                # it was speech (label 2).
                if (prev_label == 2):
                    self.segments.append([(float((i - prev_length)) * frame_shift), (float(i) * frame_shift), prev_label])
                    self.stats.initial_duration += (prev_length * frame_shift)
                prev_label = process_label(text_label)
                prev_length = 0
            elif (prev_label is None):
                prev_label = process_label(text_label)
            prev_length += 1
        # Flush the trailing run at end of alignment.
        if ((prev_length > 0) and (prev_label == 2)):
            self.segments.append([(float((len(alignment) - prev_length)) * frame_shift), (float(len(alignment)) * frame_shift), prev_label])
            self.stats.initial_duration += (prev_length * frame_shift)
        self.stats.num_segments_initial = len(self.segments)
        self.stats.num_segments_final = len(self.segments)
        self.stats.final_duration = self.stats.initial_duration
    def filter_short_segments(self, min_dur):
        """Drop segments shorter than ``min_dur`` seconds (no-op if <= 0)."""
        if (min_dur <= 0):
            return
        segments_kept = []
        for segment in self.segments:
            assert (segment[2] == 2), segment
            dur = (segment[1] - segment[0])
            if (dur < min_dur):
                self.stats.filter_short_duration += dur
                self.stats.num_short_segments_filtered += 1
            else:
                segments_kept.append(segment)
        self.segments = segments_kept
        self.stats.num_segments_final = len(self.segments)
        self.stats.final_duration -= self.stats.filter_short_duration
    def pad_speech_segments(self, segment_padding, max_duration=float('inf')):
        """Extend each segment by ``segment_padding`` on both sides, clipped
        so segments never go below 0, exceed ``max_duration``, or overlap
        their neighbours; padding_duration tracks the net time added."""
        if (max_duration == None):
            max_duration = float('inf')
        for (i, segment) in enumerate(self.segments):
            assert (segment[2] == 2), segment
            segment[0] -= segment_padding
            self.stats.padding_duration += segment_padding
            if (segment[0] < 0.0):
                # Clip at the start of the recording.
                self.stats.padding_duration += segment[0]
                segment[0] = 0.0
            if ((i >= 1) and (self.segments[(i - 1)][1] > segment[0])):
                # Do not overlap the (already padded) previous segment.
                self.stats.padding_duration -= (self.segments[(i - 1)][1] - segment[0])
                segment[0] = self.segments[(i - 1)][1]
            segment[1] += segment_padding
            self.stats.padding_duration += segment_padding
            if (segment[1] >= max_duration):
                # Clip at the end of the recording.
                self.stats.padding_duration -= (segment[1] - max_duration)
                segment[1] = max_duration
            if (((i + 1) < len(self.segments)) and (segment[1] > self.segments[(i + 1)][0])):
                # Do not overlap the (not yet padded) next segment.
                self.stats.padding_duration -= (segment[1] - self.segments[(i + 1)][0])
                segment[1] = self.segments[(i + 1)][0]
        self.stats.final_duration += self.stats.padding_duration
    def merge_consecutive_segments(self, max_dur):
        """Merge abutting segments while the merged length stays <= max_dur."""
        if ((max_dur <= 0) or (not self.segments)):
            return
        merged_segments = [self.segments[0]]
        for segment in self.segments[1:]:
            assert (segment[2] == 2), segment
            if ((segment[0] == merged_segments[(- 1)][1]) and ((segment[1] - merged_segments[(- 1)][0]) <= max_dur)):
                merged_segments[(- 1)][1] = segment[1]
                self.stats.num_merges += 1
            else:
                merged_segments.append(segment)
        self.segments = merged_segments
        self.stats.num_segments_final = len(self.segments)
    def write(self, key, file_handle):
        """Write segments in Kaldi-style 'segments' format; segment ids embed
        start/end times in centiseconds."""
        if (global_verbose >= 2):
            logger.info('For key {key}, got stats {stats}'.format(key=key, stats=self.stats))
        for segment in self.segments:
            seg_id = '{key}-{st:07d}-{end:07d}'.format(key=key, st=int((segment[0] * 100)), end=int((segment[1] * 100)))
            print('{seg_id} {key} {st:.2f} {end:.2f}'.format(seg_id=seg_id, key=key, st=segment[0], end=segment[1]), file=file_handle)
class ShrinkRatio():
    """Polynomial decay schedule: ``(1 + w_iter * n_iter) ** -decay_rate``.

    Starts at 1.0 for n_iter == 0 and shrinks monotonically as the
    iteration count grows.
    """

    def __init__(self, w_iter, decay_rate):
        self.w_iter = w_iter
        self.decay_rate = decay_rate

    def __call__(self, n_iter):
        """Return the shrink ratio at iteration ``n_iter``."""
        base = 1 + self.w_iter * n_iter
        return base ** -self.decay_rate
def baytune_get_setting(self):
    """Propose the next hyperparameter setting via the BTB tuners/selector.

    Returns:
        dict: ``{'method': chosen method name, 'params': hyperparameters
        proposed by that method's tuner}``.
    """
    import warnings
    with warnings.catch_warnings():
        # sklearn (used internally by the tuners) is noisy; silence locally.
        warnings.filterwarnings('ignore', module='sklearn')
        if (len(self._methods) == 1):
            # Single candidate: no selection step needed.
            (method,) = self._methods
        else:
            # Let the selector pick among methods using each tuner's
            # observed scores so far (empty tuple when none recorded yet).
            possible_methods = {m: getattr(self._tuners[m], 'scores', ()) for m in self._methods}
            method = self._selector.select(possible_methods)
        params = self._tuners[method].propose()
        return {'method': method, 'params': params}
def load_car_model(path='models/templates/car.pth'):
    """Load a pretrained car TemplateUV model from ``path``.

    Args:
        path (str): checkpoint file holding the template's state_dict.

    Returns:
        TemplateUV: template (L=10, 3 layers, hidden 256) with weights
        restored.
    """
    template = TemplateUV(L=10, num_layers=3, hidden_size=256)
    template.load_state_dict(torch.load(path))
    return template
class TMScoreHead(nn.Module):
    """Head projecting a pairwise embedding into per-bin TM-score logits."""

    def __init__(self, c_z, no_bins, **kwargs):
        """
        Args:
            c_z: channel dimension of the incoming pair embedding.
            no_bins: number of output score bins.
        """
        super().__init__()
        self.c_z = c_z
        self.no_bins = no_bins
        self.linear = Linear(self.c_z, self.no_bins, init='final')

    def forward(self, z):
        """Project the pair activations ``z`` through the linear layer."""
        return self.linear(z)
class DotProduct(Function):
    """Custom autograd Function wrapping the ``cuda_sparse_ops`` sparse
    dot-product kernels (query x positional-encoding scores gathered via a
    key-query index map).

    ``forward`` fills ``out_F`` in place; ``backward`` returns gradients for
    ``query`` and ``pos_enc`` only.
    """
    def forward(ctx, query, pos_enc, out_F, kq_map):
        # The raw CUDA kernels require contiguous inputs.
        assert (query.is_contiguous() and pos_enc.is_contiguous() and out_F.is_contiguous())
        ctx.m = kq_map.shape[1]            # number of key-query pairs
        # (_, h, c): presumably heads and per-head channels — confirm
        # against the kernel's signature.
        (_, ctx.h, ctx.c) = query.shape
        ctx.kkk = pos_enc.shape[0]         # size of the positional table
        ctx.save_for_backward(query, pos_enc, kq_map)
        cuda_sparse_ops.dot_product_forward(ctx.m, ctx.h, ctx.kkk, ctx.c, query, pos_enc, out_F, kq_map)
        return out_F
    def backward(ctx, grad_out_F):
        (query, pos_enc, kq_map) = ctx.saved_tensors
        grad_query = torch.zeros_like(query)
        grad_pos = torch.zeros_like(pos_enc)
        cuda_sparse_ops.dot_product_backward(ctx.m, ctx.h, ctx.kkk, ctx.c, query, pos_enc, kq_map, grad_query, grad_pos, grad_out_F)
        # No gradients for out_F (written in place) or kq_map (indices).
        return (grad_query, grad_pos, None, None)
class SqueezeBertModel():
    """Placeholder for SqueezeBertModel when PyTorch is unavailable.

    Any attempt to construct or load the model raises via
    ``requires_pytorch``, pointing the user at the missing dependency.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined as an instance method; upstream dummy objects
        # usually make this a classmethod — confirm against the real API.
        requires_pytorch(self)
class Tile():
    """One fabric tile of a device grid: its location, shared tile-type data
    (wires, PIPs, timing) and the site instances it contains."""
    def __init__(self, x, y, name, data, interconn_xy, site_insts):
        self.x = x
        self.y = y
        self.name = name
        self.data = data                   # shared per-tile-type data
        self.interconn_xy = interconn_xy
        self.site_insts = site_insts
        self.wire_to_node = {}
        self.node_autoidx = 0
        self.used_wires = None             # lazy cache; see used_wire_indices()
    def get_pip_data(self, i):
        # Raw PIP record at index i from the tile-type data.
        return self.data.pips[i]
    def get_wire_data(self, i):
        # Raw wire record at index i from the tile-type data.
        return self.data.wires[i]
    def tile_type(self):
        return self.data.tile_type
    def wires(self):
        """Iterate Wire handles for every wire in this tile."""
        return (Wire(self, i) for i in range(len(self.data.wires)))
    def wire(self, name):
        """Look up a Wire handle by wire name."""
        return Wire(self, self.data.wires_by_name[name].index)
    def pips(self):
        """Iterate PIP handles for every PIP in this tile."""
        return (PIP(self, i) for i in range(len(self.data.pips)))
    def sites(self):
        return self.site_insts
    def site_pin_wire(self, sitetype, rel_xy, pin):
        """Tile wire attached to a site pin, or None if unconnected."""
        wire_idx = self.data.sitepin_data[(sitetype, rel_xy, pin)].wire_idx
        return (Wire(self, wire_idx) if (wire_idx is not None) else None)
    def site_pin_timing(self, sitetype, rel_xy, pin):
        """Timing record for a site pin (same table as site_pin_wire)."""
        return self.data.sitepin_data[(sitetype, rel_xy, pin)]
    def cell_timing(self):
        return self.data.cell_timing
    def used_wire_indices(self):
        """Indices of wires touched by any PIP or any site-variant pin.

        Computed once and cached in self.used_wires.
        """
        if (self.used_wires is None):
            self.used_wires = set()
            for pip in self.pips():
                self.used_wires.add(pip.src_wire().index)
                self.used_wires.add(pip.dst_wire().index)
            for site in self.sites():
                for v in site.available_variants():
                    variant = site.variant(v)
                    for pin in variant.pins():
                        if (pin.tile_wire() is not None):
                            self.used_wires.add(pin.tile_wire().index)
        return self.used_wires
    def split_name(self):
        """Split 'PREFIX_X<i>Y<j>' into (prefix, i, j)."""
        (prefix, xy) = self.name.rsplit('_', 1)
        xy_m = re.match('X(\\d+)Y(\\d+)', xy)
        return (prefix, int(xy_m.group(1)), int(xy_m.group(2)))
class CIFAR10Mix(torchvision.datasets.CIFAR10):
    """CIFAR-10 mixed with out-of-distribution images listed under out_path.

    Indices below ``len(self.data)`` return CIFAR images with their labels;
    later indices return OOD images (resized to 32px) with target -1. A
    fixed seed (3) carves a deterministic split: ``val=True`` keeps the
    first 1000 items of the permutation, otherwise the remainder is kept.
    """
    def __init__(self, root, out_path, train=False, val=False, transform=None, target_transform=None, download=False):
        super(CIFAR10Mix, self).__init__(root, train=train, transform=transform, target_transform=target_transform, download=download)
        self.outpath = make_dataset(out_path)
        if val:
            # Deterministic 1000-sample validation split of both sources.
            np.random.seed(3)
            p1 = np.random.permutation(len(self.data))
            self.data = self.data[p1[:1000]]
            self.targets = [self.targets[i] for i in p1.tolist()[:1000]]
            np.random.seed(3)
            p2 = np.random.permutation(len(self.outpath))
            self.outpath = [self.outpath[i] for i in p2.tolist()[:1000]]
        else:
            np.random.seed(3)
            p1 = np.random.permutation(len(self.data))
            self.data = self.data[p1[1000:]]
            self.targets = [self.targets[i] for i in p1.tolist()[1000:]]
            np.random.seed(3)
            p2 = np.random.permutation(len(self.outpath))
            # NOTE(review): the OOD portion is additionally capped at len(p1)
            # (the CIFAR set size) — presumably to keep the mix balanced;
            # confirm with callers.
            self.outpath = [self.outpath[i] for i in p2.tolist()[1000:len(p1)]]
    def __getitem__(self, index):
        """Return (img, target); target is -1 for OOD images."""
        if (index < len(self.data)):
            (img, target) = (self.data[index], self.targets[index])
            img = Image.fromarray(img)
        else:
            # OOD image: load from disk and resize to CIFAR resolution.
            (img_path, target) = (self.outpath[(index - len(self.data))], (- 1))
            img = pil_loader(img_path)
            img = transforms.Resize(32)(img)
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)
    def __len__(self):
        return (len(self.data) + len(self.outpath))
# BUG FIX: the decorator line was truncated to a bare `_model` expression
# (the same artifact appears before ssl_resnet50 earlier in this file);
# restore the standard @register_model decorator.
@register_model
def resnetrs152(pretrained=False, **kwargs):
    """Construct a ResNet-RS-152 model.

    Deep stem (width 32), replaced stem pooling, average-pool downsampling
    and SE attention with rd_ratio=0.25, per the ResNet-RS recipe.

    Args:
        pretrained (bool): load pretrained weights when available.
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        The instantiated model.
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs152', pretrained, **model_args)
def fine_validation(epoch, training_loss):
    """Validate the fine depth model for one epoch and report metrics.

    Uses module-level globals: ``fine_model``, ``coarse_model``,
    ``val_loader``, ``dtype``, ``logger`` and the metric helper functions.
    Logs the averaged validation loss to tensorboard and prints all metrics.

    NOTE(review): there is no ``torch.no_grad()`` guard here, so autograd
    graphs are built during validation; also every metric except
    ``fine_validation_loss`` accumulates as a tensor (no ``.item()``) —
    confirm whether that is intended.
    """
    fine_model.eval()
    # Per-epoch accumulators, averaged over the number of batches at the end.
    fine_validation_loss = 0
    scale_invariant_loss = 0
    delta1_accuracy = 0
    delta2_accuracy = 0
    delta3_accuracy = 0
    rmse_linear_loss = 0
    rmse_log_loss = 0
    abs_relative_difference_loss = 0
    squared_relative_difference_loss = 0
    for (batch_idx, data) in enumerate(val_loader):
        (rgb, depth) = (torch.tensor(data['image'].cuda(), requires_grad=False), torch.tensor(data['depth'].cuda(), requires_grad=False))
        # The fine network refines the coarse network's prediction.
        coarse_output = coarse_model(rgb.type(dtype))
        fine_output = fine_model(rgb.type(dtype), coarse_output.type(dtype))
        fine_validation_loss += custom_loss_function(fine_output, depth).item()
        scale_invariant_loss += scale_invariant(fine_output, depth)
        # Delta accuracies: fraction of pixels within 1.25**k ratio thresholds.
        delta1_accuracy += threeshold_percentage(fine_output, depth, 1.25)
        delta2_accuracy += threeshold_percentage(fine_output, depth, (1.25 * 1.25))
        delta3_accuracy += threeshold_percentage(fine_output, depth, ((1.25 * 1.25) * 1.25))
        rmse_linear_loss += rmse_linear(fine_output, depth)
        rmse_log_loss += rmse_log(fine_output, depth)
        abs_relative_difference_loss += abs_relative_difference(fine_output, depth)
        squared_relative_difference_loss += squared_relative_difference(fine_output, depth)
    # batch_idx holds the last loop index, so batch_idx + 1 is the batch count.
    fine_validation_loss /= (batch_idx + 1)
    scale_invariant_loss /= (batch_idx + 1)
    delta1_accuracy /= (batch_idx + 1)
    delta2_accuracy /= (batch_idx + 1)
    delta3_accuracy /= (batch_idx + 1)
    rmse_linear_loss /= (batch_idx + 1)
    rmse_log_loss /= (batch_idx + 1)
    abs_relative_difference_loss /= (batch_idx + 1)
    squared_relative_difference_loss /= (batch_idx + 1)
    logger.scalar_summary('fine validation loss', fine_validation_loss, epoch)
    print('Epoch: {} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(epoch, training_loss, fine_validation_loss, delta1_accuracy, delta2_accuracy, delta3_accuracy, rmse_linear_loss, rmse_log_loss, abs_relative_difference_loss, squared_relative_difference_loss))
def rouge_single_pair(cand: str, ref: str, metric='rouge1'):
    """Return the F-measure of ``metric`` for one candidate/reference pair.

    Scoring is delegated to the module-level ``full_rouge_scorer``.
    """
    scores = full_rouge_scorer.score(cand, ref)
    return scores[metric].fmeasure
class TestMatcher(unittest.TestCase):
    """Checks that detectron2's anchor Matcher is TorchScript-compatible."""

    def test_scriptability(self):
        cfg = get_cfg()
        # A small IoU matrix with a known matching solution.
        iou = torch.tensor([[0.15, 0.45, 0.2, 0.6], [0.3, 0.65, 0.05, 0.1], [0.05, 0.4, 0.25, 0.4]])
        want_matches = torch.tensor([1, 1, 2, 0])
        want_labels = torch.tensor([-1, 1, 0, 1], dtype=torch.int8)

        # Eager-mode matcher.
        matcher = Matcher(cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True)
        (got_matches, got_labels) = matcher(iou)
        self.assertTrue(torch.allclose(got_matches, want_matches))
        self.assertTrue(torch.allclose(got_labels, want_labels))

        from detectron2.layers import nonzero_tuple  # noqa: F401

        def f(thresholds: List[float], labels: List[int]):
            return Matcher(thresholds, labels, allow_low_quality_matches=True)

        # The scripted matcher must produce identical results.
        scripted = torch.jit.script(f)(cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS)
        (got_matches, got_labels) = scripted(iou)
        self.assertTrue(torch.allclose(got_matches, want_matches))
        self.assertTrue(torch.allclose(got_labels, want_labels))
def main():
    """End-to-end GOLEM run: set up logging, load data, train, post-process."""
    args = get_args()

    # Output directory & logging.
    output_dir = 'output/{}'.format(get_datetime_str())
    create_dir(output_dir)
    LogHelper.setup(log_path='{}/training.log'.format(output_dir), level='INFO')
    log = logging.getLogger(__name__)
    log.info('Finished setting up the logger.')

    save_yaml_config(vars(args), path='{}/config.yaml'.format(output_dir))
    set_seed(args.seed)

    dataset = SyntheticDataset(args.n, args.d, args.graph_type, args.degree, args.noise_type, args.B_scale, args.seed)
    log.info('Finished loading the dataset.')

    # Optional warm-start matrix.
    B_init = None
    if args.init:
        if args.init_path is None:
            args.init_path = get_init_path('output/')
        B_init = np.load('{}'.format(args.init_path))
        log.info('Finished loading B_init from {}.'.format(args.init_path))

    B_est = golem(dataset.X, args.lambda_1, args.lambda_2, args.equal_variances, args.num_iter, args.learning_rate, args.seed, args.checkpoint_iter, output_dir, B_init)
    log.info('Finished training the model.')

    B_processed = postprocess(B_est, args.graph_thres)
    log.info('Finished post-processing the estimated graph.')

    checkpoint_after_training(output_dir, dataset.X, dataset.B, B_init, B_est, B_processed, log.info)
class DelayStartHook(TrainingHook, tf.train.GlobalStepWaiterHook):
    """Training hook that staggers worker start-up.

    Worker ``task_id`` waits until global step ``delay_k * task_id`` before
    starting, avoiding a thundering herd when many workers initialize.
    """

    def __init__(self, params, model_dir, run_config):
        TrainingHook.__init__(self, params, model_dir, run_config)
        self._task_id = self._run_config.task_id
        self._delay_k = self.params['delay_k']
        # Each worker waits proportionally to its index.
        self._wait_until_step = int(self._delay_k * self._task_id)
        tf.train.GlobalStepWaiterHook.__init__(self, self._wait_until_step)

    @staticmethod
    def default_params():
        """Default hyper-parameters for this hook.

        BUG FIX: this was declared as a plain method with no ``self``
        parameter, so any instance call (``self.default_params()``) raised
        TypeError; it is now a proper staticmethod.
        """
        return {'delay_k': 500}
class PolynomialLR(_LRScheduler):
    """Polynomial learning-rate decay.

    Every ``step_size`` epochs (up to ``iter_max``) the learning rate is set
    to ``base_lr * (1 - epoch / iter_max) ** power``; on all other epochs the
    current group learning rates are kept unchanged.
    """

    def __init__(self, optimizer, step_size, iter_max, power, last_epoch=(- 1)):
        self.step_size = step_size
        self.iter_max = iter_max
        self.power = power
        super(PolynomialLR, self).__init__(optimizer, last_epoch)

    def polynomial_decay(self, lr):
        """Decay ``lr`` by the polynomial schedule at the current epoch."""
        remaining = 1 - float(self.last_epoch) / self.iter_max
        return lr * remaining ** self.power

    def get_lr(self):
        epoch = self.last_epoch
        # Decay only on non-zero multiples of step_size within the horizon.
        decay_now = epoch != 0 and epoch % self.step_size == 0 and epoch <= self.iter_max
        if not decay_now:
            return [group['lr'] for group in self.optimizer.param_groups]
        return [self.polynomial_decay(base_lr) for base_lr in self.base_lrs]
def register_meta_overrides(orig_target, meta_target):
    """Register ``meta_target`` as the manual meta override for ``orig_target``.

    Stores the mapping in the module-level ``_MANUAL_META_OVERRIDES``
    registry; later lookups dispatch ``orig_target`` to ``meta_target``.
    """
    _MANUAL_META_OVERRIDES[orig_target] = meta_target
class TestFoldPadConv(unittest.TestCase):
    """Checks that quantization folds a ``Pad`` op into the following Conv2D.

    NOTE(review): ``setUpClass``/``tearDownClass`` are missing the
    ``@classmethod`` decorator, and the bare ``_random()`` statements look
    like mangled decorators (possibly ``@disable_random()``) — confirm
    against the original source.
    """
    def setUpClass(self):
        # Writes the fake_yaml.yaml config consumed by Quantization below.
        build_fake_yaml()
    def tearDownClass(self):
        os.remove('fake_yaml.yaml')
    _random()
    def test_fold_pad_conv(self):
        """A Pad with constant paddings must disappear from the quantized graph."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(x, paddings, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        normed = tf.compat.v1.layers.batch_normalization(conv)
        relu = tf.nn.relu(normed, name='op_to_store')
        out_name = relu.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            # Freeze variables so the quantizer sees a constant graph.
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.model = output_graph_def
            output_graph = quantizer.fit()
            # The Pad node must have been folded into the conv.
            found_pad = False
            for i in output_graph.graph_def.node:
                if (i.op == 'Pad'):
                    found_pad = True
                    break
            self.assertEqual(found_pad, False)
    _random()
    def test_fold_non_const_pad_conv(self):
        """Pad fed by DataFormatVecPermute (non-const input) must also fold."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        # Identity permutation: the paddings pass through a non-const op.
        vec = tf.raw_ops.DataFormatVecPermute(x=paddings, src_format='NHWC', dst_format='NHWC')
        x_pad = tf.pad(x, vec, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        normed = tf.compat.v1.layers.batch_normalization(conv)
        relu = tf.nn.relu(normed, name='op_to_store')
        out_name = relu.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.model = output_graph_def
            output_graph = quantizer.fit()
            found_pad = False
            for i in output_graph.graph_def.node:
                if (i.op == 'Pad'):
                    found_pad = True
                    break
            self.assertEqual(found_pad, False)
def get_auto_estimator(backend='torch'):
    """Create an AutoLSTM hyper-parameter-search estimator for *backend*."""
    # Keras backends take the loss by name; torch needs a loss module.
    if backend.startswith('keras'):
        loss = 'mse'
    else:
        loss = torch.nn.MSELoss()
    return AutoLSTM(
        input_feature_num=input_feature_dim,
        output_target_num=output_feature_dim,
        past_seq_len=5,
        optimizer='Adam',
        loss=loss,
        metric='mse',
        hidden_dim=hp.grid_search([32, 64]),
        layer_num=hp.randint(1, 3),
        lr=hp.choice([0.001, 0.003, 0.01]),
        dropout=hp.uniform(0.1, 0.2),
        logs_dir='/tmp/auto_lstm',
        cpus_per_trial=2,
        backend=backend,
        name='auto_lstm',
    )
class Wav2Vec2ForCTC(metaclass=DummyObject):
    """Import-time placeholder for the real Wav2Vec2ForCTC.

    Instantiating it raises an informative error (via ``requires_backends``)
    when the 'torch' backend is not installed.
    """
    # Backends that must be available for the real class to be usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class GANTensorboardWriter(LearnerTensorboardWriter):
    """Tensorboard callback for a GANLearner.

    On top of the base writer it logs generator/critic weight histograms,
    per-network gradient/weight stats, and periodically generated images.
    """
    def __init__(self, learn: GANLearner, base_dir: Path, name: str, loss_iters: int=25, hist_iters: int=500, stats_iters: int=100, visual_iters: int=100):
        super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters, stats_iters=stats_iters)
        # Period (in iterations) for writing generated images.
        self.visual_iters = visual_iters
        self.img_gen_vis = ImageTBWriter()
        # Start True so no stats are written before the first stats_iters tick.
        self.gen_stats_updated = True
        self.crit_stats_updated = True
    def _write_weight_histograms(self, iteration: int) -> None:
        """Write weight histograms for both networks (overrides single-model base)."""
        (generator, critic) = (self.learn.gan_trainer.generator, self.learn.gan_trainer.critic)
        self.hist_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='generator')
        self.hist_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='critic')
    def _write_gen_model_stats(self, iteration: int) -> None:
        """Write generator stats and mark them as fresh."""
        generator = self.learn.gan_trainer.generator
        self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats')
        self.gen_stats_updated = True
    def _write_critic_model_stats(self, iteration: int) -> None:
        """Write critic stats and mark them as fresh."""
        critic = self.learn.gan_trainer.critic
        self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats')
        self.crit_stats_updated = True
    def _write_model_stats(self, iteration: int) -> None:
        """Write stats only for the network currently being trained, at most
        once per stats window (flags are reset in on_backward_end)."""
        gen_mode = self.learn.gan_trainer.gen_mode
        if (gen_mode and (not self.gen_stats_updated)):
            self._write_gen_model_stats(iteration=iteration)
        if ((not gen_mode) and (not self.crit_stats_updated)):
            self._write_critic_model_stats(iteration=iteration)
    def _write_training_loss(self, iteration: int, last_loss: Tensor) -> None:
        """Log the most recent recorded GAN loss (``last_loss`` is unused here)."""
        recorder = self.learn.gan_trainer.recorder
        if (len(recorder.losses) == 0):
            return
        scalar_value = to_np(recorder.losses[(- 1):][0])
        tag = (self.metrics_root + 'train_loss')
        self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
    def _write_images(self, iteration: int) -> None:
        """Render sample images with the generator, restoring the trainer's
        original gen/critic mode even if writing fails."""
        trainer = self.learn.gan_trainer
        gen_mode = trainer.gen_mode
        try:
            trainer.switch(gen_mode=True)
            self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration, tbwriter=self.tbwriter)
        finally:
            trainer.switch(gen_mode=gen_mode)
    def on_batch_end(self, iteration: int, **kwargs) -> None:
        """Base logging plus periodic image dumps (skips iteration 0)."""
        super().on_batch_end(iteration=iteration, **kwargs)
        if (iteration == 0):
            return
        if ((iteration % self.visual_iters) == 0):
            self._write_images(iteration=iteration)
    def on_backward_end(self, iteration: int, **kwargs) -> None:
        """Every stats_iters iterations, mark both networks' stats stale so
        the next call writes whichever network is currently active."""
        if (iteration == 0):
            return
        self._update_batches_if_needed()
        if ((iteration % self.stats_iters) == 0):
            (self.gen_stats_updated, self.crit_stats_updated) = (False, False)
        if (not (self.gen_stats_updated and self.crit_stats_updated)):
            self._write_model_stats(iteration=iteration)
_pytest_unraisable_warning
def test_python_alreadyset_in_destructor(monkeypatch, capsys):
    """An exception already set when a destructor runs must be reported via
    ``sys.unraisablehook`` (when available) and land on stderr.

    NOTE(review): the bare ``_pytest_unraisable_warning`` statement above
    looks like a mangled pytest marker decorator — confirm against the
    original test source.
    """
    hooked = False
    triggered = [False]
    if hasattr(sys, 'unraisablehook'):
        hooked = True
        default_hook = sys.__unraisablehook__
        def hook(unraisable_hook_args):
            (exc_type, exc_value, exc_tb, err_msg, obj) = unraisable_hook_args
            # Only flag the unraisable event raised by this test's object.
            if (obj == 'already_set demo'):
                triggered[0] = True
            default_hook(unraisable_hook_args)
            return
        monkeypatch.setattr(sys, 'unraisablehook', hook)
    assert (m.python_alreadyset_in_destructor('already_set demo') is True)
    if hooked:
        assert (triggered[0] is True)
    # The default hook prints an "Exception ignored in ..." message to stderr.
    (_, captured_stderr) = capsys.readouterr()
    assert (('ignored' in captured_stderr) and ('already_set demo' in captured_stderr))
class R_MSFM6(nn.Module):
    """Recurrent multi-scale feature modulation depth decoder (6 iterations).

    Consumes a 3-level feature pyramid and iteratively refines a 1-channel
    disparity field with a conv-GRU update block, upsampling x8 at the end
    via a learned softmax-weighted (convex) combination mask.
    """
    def __init__(self, x):
        # ``x`` selects the input channel widths of the pyramid features:
        # truthy -> 256/512-channel mid/deep features, falsy -> 64/128.
        super(R_MSFM6, self).__init__()
        self.convX11 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.LeakyReLU(inplace=True), nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
        self.convX12 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        if x:
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True), torch.nn.Tanh())
        else:
            self.convX21 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True), torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(nn.ReflectionPad2d(1), torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=0, dilation=1, bias=True), torch.nn.Tanh())
        self.convX22 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        self.convX32 = torch.nn.Sequential(nn.Conv2d(128, 128, (1, 3), padding=(0, 1)), torch.nn.Tanh(), nn.Conv2d(128, 128, (3, 1), padding=(1, 0)), torch.nn.Tanh())
        self.sigmoid = nn.Sigmoid()
        self.gruc = SepConvGRU()
        self.update_block = BasicUpdateBlock()
    def upsample_depth(self, flow, mask):
        """Upsample a (N,1,H,W) disparity map x8 using a per-pixel softmax
        mask over each 3x3 neighborhood (convex combination)."""
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        up_flow = F.unfold(flow, [3, 3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, (8 * H), (8 * W))
    def forward(self, features, image_size=None, iters=6):
        """Iteratively refine disparity from a (x1, x2, x3) feature pyramid.

        Returns a dict of upsampled disparity maps: all iterations while
        training, only the final one at inference.  ``image_size`` is
        accepted but unused here.
        """
        (x1, x2, x3) = features
        disp_predictions = {}
        (b, c, h, w) = x3.shape
        # Disparity features and GRU hidden state start at zero, at the
        # resolution of the deepest feature map.
        dispFea = torch.zeros([b, 1, h, w], requires_grad=True).to(x1.device)
        net = torch.zeros([b, 256, h, w], requires_grad=True).to(x1.device)
        for itr in range(iters):
            # Fixed per-iteration schedule: each iteration injects a
            # different pyramid level / refinement conv, fused via the GRU.
            if (itr in [0]):
                corr = self.convX31(x3)
            elif (itr in [1]):
                corrh = corr
                corr = self.convX32(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [2]):
                corrh = corr
                corr = self.convX21(x2)
                corr = self.gruc(corrh, corr)
            elif (itr in [3]):
                corrh = corr
                corr = self.convX22(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [4]):
                corrh = corr
                corr = self.convX11(x1)
                corr = self.gruc(corrh, corr)
            elif (itr in [5]):
                corrh = corr
                corr = self.convX12(corr)
                corr = self.gruc(corrh, corr)
            (net, up_mask, delta_disp) = self.update_block(net, corr, dispFea)
            dispFea = (dispFea + delta_disp)
            # Disparity is kept in (0, 1) via sigmoid.
            disp = self.sigmoid(dispFea)
            if self.training:
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[((iters - itr) - 1)] = disp_up
            elif ((iters - 1) == itr):
                # Inference: only the final iteration is upsampled.
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[((iters - itr) - 1)] = disp_up
        return disp_predictions
class struct_c__SA_state_battery_out_t(ctypes.Structure):
    """ctypes mirror of the C ``state_battery_out_t`` output struct."""

    # Packed layout: no compiler-inserted padding between fields.
    _pack_ = True
    _fields_ = [
        ('stateOfCharge', ctypes.c_double),
        ('current', ctypes.c_double),
    ]
(config_name='real', config_path='../configs/bc')
def train(cfg: omegaconf.DictConfig):
    """Entry point for behavior-cloning training.

    NOTE(review): the bare tuple line above looks like a mangled
    ``@hydra.main(config_name='real', config_path='../configs/bc')``
    decorator — confirm against the original source.
    """
    # Single-GPU runs only.
    assert (cfg.num_gpus == 1)
    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)
    if (not cfg.test):
        os.makedirs(cfg.logdir, exist_ok=True)
        dump_cfg(cfg, cfg.logdir)
    set_np_formatting()
    set_seed(cfg.seed, cfg.torch_deterministic)
    # 'sim' data builds a simulated vec-env; otherwise train without one.
    if (cfg.data.type == 'sim'):
        sim_params = parse_sim_params(cfg, cfg_dict)
        vec_env = parse_task(cfg, cfg_dict, sim_params)
    else:
        vec_env = None
    bc.train(cfg, vec_env)
class Resnet50_NL(nn.Module):
    """Video ResNet-50 backbone with non-local blocks, initialized from
    torchvision's ImageNet weights (the fc layer is skipped)."""
    def __init__(self, non_layers=[0, 1, 1, 1], stripes=[16, 16, 16, 16], non_type='normal', temporal=None):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only as long as they are never mutated.
        super(Resnet50_NL, self).__init__()
        original = models.resnet50(pretrained=True).state_dict()
        # Select the non-local variant of the video backbone.
        if (non_type == 'normal'):
            self.backbone = res.ResNet_Video_nonlocal(last_stride=1, non_layers=non_layers)
        elif (non_type == 'stripe'):
            self.backbone = res.ResNet_Video_nonlocal_stripe(last_stride=1, non_layers=non_layers, stripes=stripes)
        elif (non_type == 'hr'):
            self.backbone = res.ResNet_Video_nonlocal_hr(last_stride=1, non_layers=non_layers, stripes=stripes)
        elif (non_type == 'stripe_hr'):
            self.backbone = res.ResNet_Video_nonlocal_stripe_hr(last_stride=1, non_layers=non_layers, stripes=stripes)
        # Copy pretrained weights into the backbone, skipping classifier keys.
        for key in original:
            if (key.find('fc') != (- 1)):
                continue
            self.backbone.state_dict()[key].copy_(original[key])
        del original
        self.temporal = temporal
        if (self.temporal == 'Done'):
            self.avgpool = nn.AdaptiveAvgPool3d(1)
    def forward(self, x):
        # NOTE(review): when ``temporal != 'Done'`` this method does nothing
        # and implicitly returns None — looks like a missing branch; confirm.
        if (self.temporal == 'Done'):
            x = self.backbone(x)
            x = self.avgpool(x)
            # Flatten pooled features to (batch, features).
            x = x.reshape(x.shape[0], (- 1))
            return x
def nms(dets, thresh, force_cpu=False):
    """Non-maximum suppression over detection boxes.

    Args:
        dets: detections array; an empty input returns [].
        thresh: overlap threshold passed through to the NMS kernel.
        force_cpu: accepted but currently ignored — the GPU implementation
            is always used. NOTE(review): confirm whether a CPU fallback was
            intended here.
    """
    if (dets.shape[0] == 0):
        return []
    return nms_gpu(dets, thresh)
class CrossEntropyLoss(torch.nn.Module):
    """Cross-entropy with uniform label smoothing.

    The per-sample loss is ``(1 - epsilon) * NLL + epsilon * smooth`` where
    ``smooth`` is the mean negative log-probability over all classes (the
    loss against a uniform target distribution); the batch mean is returned.
    """

    def __init__(self, epsilon=0.1):
        super().__init__()
        self.epsilon = epsilon
        self.softmax = torch.nn.LogSoftmax(dim=(- 1))

    def forward(self, x, target):
        log_probs = self.softmax(x)
        # Smoothing term: uniform-distribution cross-entropy.
        uniform_loss = -log_probs.mean(dim=(- 1))
        # Standard negative log-likelihood of the true class.
        picked = log_probs.gather(dim=(- 1), index=target.unsqueeze(1)).squeeze(1)
        nll = -picked
        combined = (1.0 - self.epsilon) * nll + self.epsilon * uniform_loss
        return combined.mean()
def set_schema_simulation_period(schema: dict, count: int, seed: int) -> Tuple[(dict, int, int)]:
    """Pick a seeded random ``count``-day simulation window and write it into *schema*.

    The window start is drawn from day-aligned offsets into the carbon
    intensity series of the module-level ``building_name``. Returns
    ``(schema, start_step, end_step)`` with an inclusive end step.
    """
    assert (1 <= count <= 365), 'count must be between 1 and 365.'
    np.random.seed(seed)
    # Series length defines how many day-aligned window starts exist.
    csv_path = os.path.join(root_directory, schema['buildings'][building_name]['carbon_intensity'])
    n_steps = pd.read_csv(csv_path).shape[0]
    candidate_starts = np.arange(0, n_steps, 24 * count)
    start = np.random.choice(candidate_starts, size=1)[0]
    end = start + 24 * count - 1
    schema['simulation_start_time_step'] = start
    schema['simulation_end_time_step'] = end
    return (schema, start, end)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 conv stem, four stages, global pool, linear head.

    ``forward(x, latent_output=True)`` returns the pooled features instead of
    the classifier logits.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = conv3x3(3, 64)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage applies the (possibly) striding.
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x, latent_output=False):
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = self.layer4(self.layer3(self.layer2(self.layer1(feat))))
        feat = self.avgpool(feat)
        feat = feat.view(feat.size(0), -1)
        logits = self.linear(feat)
        if latent_output == False:  # noqa: E712 — preserve original comparison semantics
            return logits
        return feat
class SideCamBlock(nn.Sequential):
    """Two stacked 1x1 Conv2dReLU layers: in_channels -> out_channels -> out_channels."""

    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        first = md.Conv2dReLU(in_channels, out_channels, kernel_size=1, padding=0, use_batchnorm=use_batchnorm)
        second = md.Conv2dReLU(out_channels, out_channels, kernel_size=1, padding=0, use_batchnorm=use_batchnorm)
        super().__init__(first, second)
class StatsBatchNorm(_BaseNormalization):
    """Batch normalization that always normalizes with the stored moving
    statistics; when ``update_stats`` is True it additionally refreshes those
    statistics from each batch (normalization still uses the old values)."""
    def __init__(self, momentum=0.99, epsilon=0.001, update_stats=False, **kwargs):
        super(StatsBatchNorm, self).__init__(**kwargs)
        self.momentum = momentum
        self.epsilon = epsilon
        self.update_stats = update_stats
    def build(self, input_shape):
        # Normalization is over the last axis, which must be statically known.
        dim = input_shape[(- 1)]
        if (dim is None):
            raise ValueError('The normalization axis should have a defined dimension')
        self.dim = dim
        # Learnable scale/offset plus non-trainable moving statistics.
        self.gamma = self.add_weight(shape=(dim,), name='gamma', initializer=initializers.get('ones'))
        self.beta = self.add_weight(shape=(dim,), name='beta', initializer=initializers.get('zeros'))
        self.moving_mean = self.add_weight(shape=(dim,), name='moving_mean', initializer=initializers.get('zeros'), trainable=False)
        self.moving_variance = self.add_weight(shape=(dim,), name='moving_variance', initializer=initializers.get('ones'), trainable=False)
        self.built = True
    def call(self, inputs, training=None):
        x = inputs
        assert (not isinstance(x, list))
        # Always normalize with the stored moving statistics, not batch stats.
        xnorm = K.batch_normalization(x, self.moving_mean, self.moving_variance, self.beta, self.gamma, epsilon=self.epsilon)
        if self.update_stats:
            # Batch moments over every axis except the channel axis.
            # NOTE(review): ``axes`` is a range object; some backends may
            # require a list — confirm with the Keras backend in use.
            (mean, var) = self._moments(x, axes=range((len(K.int_shape(x)) - 1)))
            self.add_update([K.moving_average_update(self.moving_mean, mean, self.momentum), K.moving_average_update(self.moving_variance, var, self.momentum)], x)
        return xnorm
class ANN_seq_class(models.Sequential):
    """Two-layer MLP classifier (ReLU hidden layer, softmax output),
    compiled with categorical cross-entropy and the Adam optimizer."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        hidden = layers.Dense(Nh, activation='relu', input_shape=(Nin,))
        output = layers.Dense(Nout, activation='softmax')
        for layer in (hidden, output):
            self.add(layer)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
def _dressup_style(text: str, bold: bool=False, italics: bool=False) -> str:
if (not (bold or italics)):
return text
unicode_type = 'math sans'
if bold:
unicode_type += ' bold'
if italics:
unicode_type += ' italic'
try:
text = dressuplite.convert(text, unicode_type=unicode_type)
except Exception as exc:
log.exception(f'Error using dressuplite with unicode_type={unicode_type!r} to format {text!r}: {exc}')
return text |
def write_utts(tgt_dir, pair_list, wav_dict, text_dict):
    """Concatenate utterance pairs into new wavs and write Kaldi text/wav.scp.

    For each ``(k1, k2)`` in *pair_list*, the two waveforms are concatenated
    in time and saved under ``<tgt_dir>/wavs/``, and matching entries are
    appended to ``<tgt_dir>/text`` and ``<tgt_dir>/wav.scp``.

    Args:
        tgt_dir: output directory (must already contain a ``wavs`` subdir).
        pair_list: iterable of (key1, key2) utterance-id pairs.
        wav_dict: utterance id -> source wav path.
        text_dict: utterance id -> transcript text.
    """
    def write_utt(path1, path2, path):
        # Concatenate the two waveforms along time; sample rates must match.
        (wave1, sr1) = torchaudio.load(path1)
        (wave2, sr2) = torchaudio.load(path2)
        assert (sr1 == sr2)
        wave = torch.cat([wave1, wave2], dim=(- 1))
        torchaudio.save(path, wave, sample_rate=sr1)
    # FIX: use context managers so both output files are closed even if an
    # exception is raised mid-way (the original leaked the handles on error).
    with open((tgt_dir + '/text'), 'w', encoding='utf-8') as text_writer, \
            open((tgt_dir + '/wav.scp'), 'w', encoding='utf-8') as scp_writer:
        for (i, (k1, k2)) in enumerate(pair_list):
            uttid = ((k1 + '_and_') + k2)
            wave_path = (((tgt_dir + '/wavs/') + uttid) + '.wav')
            write_utt(wav_dict[k1], wav_dict[k2], wave_path)
            text = (((((uttid + ' ') + text_dict[k1]) + ' ') + text_dict[k2]) + '\n')
            text_writer.write(text)
            text_writer.flush()
            scp_info = (((uttid + ' ') + wave_path) + '\n')
            scp_writer.write(scp_info)
            scp_writer.flush()
            if ((i % 10000) == 0):
                print(f'have generate {i} utts')
class Segment(object):
    def __init__(self, split_lines_of_utt, start_index, end_index, debug_str=None):
        """A [start_index, end_index) span of ctm-edits lines of one utterance.

        Args:
            split_lines_of_utt: list of already-split ctm-edits lines.
            start_index: index of the first line in the segment core.
            end_index: one past the last line of the segment core.
            debug_str: optional provenance string for debugging output.
        """
        self.split_lines_of_utt = split_lines_of_utt
        self.start_index = start_index
        self.end_index = end_index
        # Seconds of <unk> padding added at each end (see PossiblyAddUnkPadding).
        self.start_unk_padding = 0.0
        self.end_unk_padding = 0.0
        if (debug_str == None):
            debug_str = 'core-start={0},core-end={1}'.format(start_index, end_index)
        self.debug_str = debug_str
        # Fraction of the first/last line's duration kept after truncation.
        self.start_keep_proportion = 1.0
        self.end_keep_proportion = 1.0
def PossiblyAddTaintedLines(self):
global non_scored_words
split_lines_of_utt = self.split_lines_of_utt
for b in [False, True]:
if b:
boundary_index = (self.end_index - 1)
adjacent_index = self.end_index
else:
boundary_index = self.start_index
adjacent_index = (self.start_index - 1)
if ((adjacent_index >= 0) and (adjacent_index < len(split_lines_of_utt))):
adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
if adjacent_line_is_tainted:
boundary_edit_type = split_lines_of_utt[boundary_index][7]
boundary_hyp_word = split_lines_of_utt[boundary_index][7]
if ((boundary_edit_type == 'cor') and (not (boundary_hyp_word in non_scored_words))):
if b:
self.end_index += 1
else:
self.start_index -= 1
    def PossiblySplitSegment(self):
        """Split this segment at long internal silences / non-scored words.

        Returns a list of Segment objects (possibly just ``[self]``).  Must
        be called before any padding or truncation has been applied.  Each
        split keeps half of the splitting word's duration on each side
        (keep-proportion 0.5).
        """
        global non_scored_words, args
        assert ((self.start_unk_padding == 0.0) and (self.end_unk_padding == 0.0) and (self.start_keep_proportion == 1.0) and (self.end_keep_proportion == 1.0))
        segments = []
        cur_start_index = self.start_index
        cur_start_is_split = False
        # Candidate split points exclude the first and last core lines.
        for index_to_split_at in range((cur_start_index + 1), (self.end_index - 1)):
            this_split_line = self.split_lines_of_utt[index_to_split_at]
            this_duration = float(this_split_line[3])
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            if (((this_edit_type == 'sil') and (this_duration > args.max_internal_silence_length)) or ((this_ref_word in non_scored_words) and (this_duration > args.max_internal_non_scored_length))):
                new_segment = Segment(self.split_lines_of_utt, cur_start_index, (index_to_split_at + 1), self.debug_str)
                if cur_start_is_split:
                    new_segment.start_keep_proportion = 0.5
                new_segment.end_keep_proportion = 0.5
                cur_start_is_split = True
                cur_start_index = index_to_split_at
                segments.append(new_segment)
        if (len(segments) == 0):
            # No split point found: the segment stays whole.
            segments.append(self)
        else:
            # The trailing piece from the last split point to the end.
            new_segment = Segment(self.split_lines_of_utt, cur_start_index, self.end_index, self.debug_str)
            assert cur_start_is_split
            new_segment.start_keep_proportion = 0.5
            segments.append(new_segment)
        return segments
    def PossiblyTruncateBoundaries(self):
        """Truncate over-long silences / non-scored words at the segment edges
        to the configured maxima by reducing the edge keep-proportions."""
        # b == True handles the segment start; b == False the segment end.
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = (self.end_index - 1)
            this_split_line = self.split_lines_of_utt[this_index]
            truncated_duration = None
            this_duration = float(this_split_line[3])
            this_edit = this_split_line[7]
            this_ref_word = this_split_line[6]
            if ((this_edit == 'sil') and (this_duration > args.max_edge_silence_length)):
                truncated_duration = args.max_edge_silence_length
            elif ((this_ref_word in non_scored_words) and (this_duration > args.max_edge_non_scored_length)):
                truncated_duration = args.max_edge_non_scored_length
            if (truncated_duration != None):
                # Keep only the proportion that fits within the allowed maximum.
                keep_proportion = (truncated_duration / this_duration)
                if b:
                    self.start_keep_proportion = keep_proportion
                else:
                    self.end_keep_proportion = keep_proportion
    def RelaxBoundaryTruncation(self):
        """Partially undo boundary truncation when it made the segment shorter
        than the length cutoff; interpolates the keep-proportions so the
        segment length lands (approximately) on the cutoff."""
        assert (self.start_unk_padding == self.end_unk_padding == 0.0)
        if (self.start_keep_proportion == self.end_keep_proportion == 1.0):
            # No truncation was applied; nothing to relax.
            return
        length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
        length_with_truncation = self.Length()
        if (length_with_truncation >= length_cutoff):
            # Already long enough; keep the truncation as-is.
            return
        orig_start_keep_proportion = self.start_keep_proportion
        orig_end_keep_proportion = self.end_keep_proportion
        # Fully relax only the boundaries that are not tainted.
        if (not IsTainted(self.split_lines_of_utt[self.start_index])):
            self.start_keep_proportion = 1.0
        if (not IsTainted(self.split_lines_of_utt[(self.end_index - 1)])):
            self.end_keep_proportion = 1.0
        length_with_relaxed_boundaries = self.Length()
        if (length_with_relaxed_boundaries <= length_cutoff):
            # Even fully relaxed we are under the cutoff: keep full relaxation.
            return
        # Interpolate between truncated and relaxed so Length() == cutoff.
        a = ((length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries))
        if ((a < 0.0) or (a > 1.0)):
            print("segment_ctm_edits.py: bad 'a' value = {0}".format(a), file=sys.stderr)
            return
        self.start_keep_proportion = ((a * orig_start_keep_proportion) + ((1 - a) * self.start_keep_proportion))
        self.end_keep_proportion = ((a * orig_end_keep_proportion) + ((1 - a) * self.end_keep_proportion))
        if (not (abs((self.Length() - length_cutoff)) < 0.01)):
            print('segment_ctm_edits.py: possible problem relaxing boundary truncation, length is {0} vs {1}'.format(self.Length(), length_cutoff), file=sys.stderr)
    def PossiblyAddUnkPadding(self):
        """Add up to ``args.unk_padding`` seconds of <unk> padding at each end
        whose boundary word is a correct, scored word; padding is clipped to
        the available audio and dropped entirely if less than half the
        requested amount fits."""
        # b == True handles the segment start; b == False the segment end.
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = (self.end_index - 1)
            this_split_line = self.split_lines_of_utt[this_index]
            this_start_time = float(this_split_line[2])
            this_ref_word = this_split_line[6]
            this_edit = this_split_line[7]
            if ((this_edit == 'cor') and (not (this_ref_word in non_scored_words))):
                if b:
                    # Pad before the segment, but never before time 0.
                    unk_padding = args.unk_padding
                    if (unk_padding > this_start_time):
                        unk_padding = this_start_time
                    if (unk_padding < (0.5 * args.unk_padding)):
                        unk_padding = 0.0
                    self.start_unk_padding = unk_padding
                else:
                    # Pad after the segment, but never past the utterance end.
                    this_end_time = (this_start_time + float(this_split_line[3]))
                    last_line = self.split_lines_of_utt[(- 1)]
                    utterance_end_time = (float(last_line[2]) + float(last_line[3]))
                    max_allowable_padding = (utterance_end_time - this_end_time)
                    assert (max_allowable_padding > (- 0.01))
                    unk_padding = args.unk_padding
                    if (unk_padding > max_allowable_padding):
                        unk_padding = max_allowable_padding
                    if (unk_padding < (0.5 * args.unk_padding)):
                        unk_padding = 0.0
                    self.end_unk_padding = unk_padding
    def MergeWithSegment(self, other):
        """Absorb ``other`` (an overlapping/adjacent later segment of the same
        utterance) into this segment; marks deleted words in the overlap for
        exclusion from the text when there are too many of them."""
        assert ((self.EndTime() >= other.StartTime()) and (self.StartTime() < other.EndTime()) and (self.split_lines_of_utt is other.split_lines_of_utt))
        orig_self_end_index = self.end_index
        self.debug_str = '({0}/merged-with/{1})'.format(self.debug_str, other.debug_str)
        # Take over the other segment's end boundary state.
        self.end_index = other.end_index
        self.end_unk_padding = other.end_unk_padding
        self.end_keep_proportion = other.end_keep_proportion
        # Lines lying between the two segment cores (the junk we merged over).
        first_index_of_overlap = min((orig_self_end_index - 1), other.start_index)
        last_index_of_overlap = max((orig_self_end_index - 1), other.start_index)
        num_deleted_words = 0
        for i in range(first_index_of_overlap, (last_index_of_overlap + 1)):
            edit_type = self.split_lines_of_utt[i][7]
            if (edit_type == 'del'):
                num_deleted_words += 1
        if (num_deleted_words > args.max_deleted_words_kept_when_merging):
            # Too many deletions: drop those words from the segment transcript.
            for i in range(first_index_of_overlap, (last_index_of_overlap + 1)):
                if (self.split_lines_of_utt[i][7] == 'del'):
                    self.split_lines_of_utt[i].append('do-not-include-in-text')
def StartTime(self):
first_line = self.split_lines_of_utt[self.start_index]
first_line_start = float(first_line[2])
first_line_duration = float(first_line[3])
first_line_end = (first_line_start + first_line_duration)
return ((first_line_end - self.start_unk_padding) - (first_line_duration * self.start_keep_proportion))
    def DebugInfo(self):
        """Human-readable summary of the segment's state plus its provenance string."""
        return (('start=%d,end=%d,unk-padding=%.2f,%.2f,keep-proportion=%.2f,%.2f,' % (self.start_index, self.end_index, self.start_unk_padding, self.end_unk_padding, self.start_keep_proportion, self.end_keep_proportion)) + self.debug_str)
def EndTime(self):
last_line = self.split_lines_of_utt[(self.end_index - 1)]
last_line_start = float(last_line[2])
last_line_duration = float(last_line[3])
return ((last_line_start + (last_line_duration * self.end_keep_proportion)) + self.end_unk_padding)
    def Length(self):
        """Segment duration in seconds (end time minus start time)."""
        return (self.EndTime() - self.StartTime())
def IsWholeUtterance(self):
last_line_of_utt = self.split_lines_of_utt[(- 1)]
last_line_end_time = (float(last_line_of_utt[2]) + float(last_line_of_utt[3]))
return ((abs((self.StartTime() - 0.0)) < 0.001) and (abs((self.EndTime() - last_line_end_time)) < 0.001))
def JunkProportion(self):
junk_duration = (self.start_unk_padding + self.end_unk_padding)
first_split_line = self.split_lines_of_utt[self.start_index]
if IsTainted(first_split_line):
first_duration = float(first_split_line[3])
junk_duration += (first_duration * self.start_keep_proportion)
last_split_line = self.split_lines_of_utt[(self.end_index - 1)]
if IsTainted(last_split_line):
last_duration = float(last_split_line[3])
junk_duration += (last_duration * self.end_keep_proportion)
return (junk_duration / self.Length())
    def PossiblyTruncateStartForJunkProportion(self):
        """Move the segment start to the first internal silence/non-scored
        split point when the leading junk (padding plus tainted first word)
        dominates the piece that would be removed."""
        # Duration of junk at the segment start.
        begin_junk_duration = self.start_unk_padding
        first_split_line = self.split_lines_of_utt[self.start_index]
        if IsTainted(first_split_line):
            first_duration = float(first_split_line[3])
            begin_junk_duration += (first_duration * self.start_keep_proportion)
        if (begin_junk_duration == 0.0):
            return
        # Find the first internal line we could restart the segment at.
        candidate_start_index = None
        for i in range((self.start_index + 1), (self.end_index - 1)):
            this_split_line = self.split_lines_of_utt[i]
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            if (((this_edit_type == 'sil') or ((this_edit_type == 'cor') and (this_ref_word in non_scored_words))) and (float(this_split_line[3]) > args.min_split_point_duration)):
                candidate_start_index = i
                candidate_start_time = float(this_split_line[2])
                break
        if (candidate_start_index is None):
            return
        # Only truncate when junk makes up enough of the removed piece.
        candidate_removed_piece_duration = (candidate_start_time - self.StartTime())
        if ((float(begin_junk_duration) / candidate_removed_piece_duration) < args.max_junk_proportion):
            return
        self.start_index = candidate_start_index
        self.start_unk_padding = 0.0
        self.start_keep_proportion = 1.0
        self.debug_str += ',truncated-start-for-junk'
def PossiblyTruncateEndForJunkProportion(self):
    """Possibly move the segment end backward to shed junk at the end.

    Mirror image of PossiblyTruncateStartForJunkProportion: if the end of the
    segment is junk (unk-padding and/or the kept part of a tainted last line),
    search backwards for an acceptable split point and truncate to it when
    junk dominates the removed piece (ratio >= args.max_junk_proportion).
    """
    # Junk at the end: unk padding plus the kept fraction of a tainted last line.
    end_junk_duration = self.end_unk_padding
    last_split_line = self.split_lines_of_utt[(self.end_index - 1)]
    if IsTainted(last_split_line):
        last_duration = float(last_split_line[3])
        end_junk_duration += (last_duration * self.end_keep_proportion)
    if (end_junk_duration == 0.0):
        # Nothing junky at the end; no truncation needed.
        return
    candidate_end_index = None
    # Scan backwards (excluding the very first and last positions) for the
    # latest acceptable split point.
    for i in reversed(range((self.start_index + 1), (self.end_index - 1))):
        this_split_line = self.split_lines_of_utt[i]
        this_edit_type = this_split_line[7]
        this_ref_word = this_split_line[6]
        if (((this_edit_type == 'sil') or ((this_edit_type == 'cor') and (this_ref_word in non_scored_words))) and (float(this_split_line[3]) > args.min_split_point_duration)):
            # end_index is exclusive, hence i + 1.
            candidate_end_index = (i + 1)
            candidate_end_time = (float(this_split_line[2]) + float(this_split_line[3]))
            break
    if (candidate_end_index is None):
        # No suitable split point found; keep the segment as-is.
        return
    candidate_removed_piece_duration = (self.EndTime() - candidate_end_time)
    # Only truncate when the junk makes up a large enough share of what
    # would be removed.
    if ((float(end_junk_duration) / candidate_removed_piece_duration) < args.max_junk_proportion):
        return
    # Commit the truncation: new end, and no more padding/partial keep.
    self.end_index = candidate_end_index
    self.end_unk_padding = 0.0
    self.end_keep_proportion = 1.0
    self.debug_str += ',truncated-end-for-junk'
def ContainsAtLeastOneScoredNonOovWord(self):
    """Return True if any line in the segment is a correct ('cor') match on a
    scored word, i.e. hyp == ref and ref is not a non-scored word."""
    global non_scored_words
    for line in self.split_lines_of_utt[self.start_index:self.end_index]:
        hyp_word = line[4]
        ref_word = line[6]
        edit = line[7]
        if edit == 'cor' and ref_word not in non_scored_words and ref_word == hyp_word:
            return True
    return False
def Text(self):
    """Return the segment's reference text as a space-joined string.

    Any unk-padding at either end is rendered as the OOV symbol; epsilon
    tokens and lines flagged 'do-not-include-in-text' are skipped.
    """
    global oov_symbol
    words = []
    if self.start_unk_padding != 0.0:
        words.append(oov_symbol)
    for line in self.split_lines_of_utt[self.start_index:self.end_index]:
        ref_word = line[6]
        if ref_word != '<eps>' and line[-1] != 'do-not-include-in-text':
            words.append(ref_word)
    if self.end_unk_padding != 0.0:
        words.append(oov_symbol)
    return ' '.join(words)
class BoxCoder(object):
    """Abstract base class for box coders.

    Subclasses implement _encode/_decode to translate between absolute boxes
    and anchor-relative codes; encode/decode wrap those in TF name scopes.
    """
    # Python-2 style abstract-class marker (has no effect on Python 3's
    # instantiation checks; subclasses are expected to override the hooks).
    __metaclass__ = ABCMeta
    def code_size(self):
        # Subclasses report the dimensionality of an encoded box here.
        pass
    def encode(self, boxes, anchors):
        """Encode `boxes` relative to `anchors` inside an 'Encode' name scope."""
        with tf.name_scope('Encode'):
            return self._encode(boxes, anchors)
    def decode(self, rel_codes, anchors):
        """Decode `rel_codes` back into boxes inside a 'Decode' name scope."""
        with tf.name_scope('Decode'):
            return self._decode(rel_codes, anchors)
    def _encode(self, boxes, anchors):
        # Hook: subclasses implement the actual encoding.
        pass
    def _decode(self, rel_codes, anchors):
        # Hook: subclasses implement the actual decoding.
        pass
class SMPL(nn.Module):
    """PyTorch SMPL body model.

    Loads the pickled SMPL model data (template mesh, blend shapes, joint
    regressor, kinematic tree, skinning weights) and exposes a forward pass
    that runs linear blend skinning (`lbs`) to produce vertices and joints.
    """
    NUM_JOINTS = 23
    NUM_BODY_JOINTS = 23
    NUM_BETAS = 10

    def __init__(self, model_path, data_struct=None, create_betas=True, betas=None, create_global_orient=True, global_orient=None, create_body_pose=True, body_pose=None, create_transl=True, transl=None, dtype=torch.float32, batch_size=1, joint_mapper=None, gender='neutral', vertex_ids=None, **kwargs):
        """Build the model.

        Args:
            model_path: directory containing SMPL_<GENDER>.pkl, or a direct
                path to the pickle file.
            data_struct: pre-loaded model data; when None it is read from disk.
            create_*: whether to register the corresponding nn.Parameter.
            betas/global_orient/body_pose/transl: optional initial values.
            dtype, batch_size, joint_mapper, gender, vertex_ids: see usage below.
        """
        self.gender = gender
        # Load the pickled model data if the caller did not supply it.
        if (data_struct is None):
            if osp.isdir(model_path):
                model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')
                smpl_path = os.path.join(model_path, model_fn)
            else:
                smpl_path = model_path
            assert osp.exists(smpl_path), 'Path {} does not exist!'.format(smpl_path)
            with open(smpl_path, 'rb') as smpl_file:
                # latin1 is needed to unpickle the Python-2-era model files.
                data_struct = Struct(**pickle.load(smpl_file, encoding='latin1'))
        super(SMPL, self).__init__()
        self.batch_size = batch_size
        if (vertex_ids is None):
            # SMPL and SMPL-H share the same topology.
            vertex_ids = VERTEX_IDS['smplh']
        self.dtype = dtype
        self.joint_mapper = joint_mapper
        self.vertex_joint_selector = VertexJointSelector(vertex_ids=vertex_ids, **kwargs)
        self.faces = data_struct.f
        self.register_buffer('faces_tensor', to_tensor(to_np(self.faces, dtype=np.int64), dtype=torch.long))
        # Optional trainable parameters; each falls back to zeros when no
        # initial value is given.
        if create_betas:
            if (betas is None):
                default_betas = torch.zeros([batch_size, self.NUM_BETAS], dtype=dtype)
            elif ('torch.Tensor' in str(type(betas))):
                default_betas = betas.clone().detach()
            else:
                default_betas = torch.tensor(betas, dtype=dtype)
            self.register_parameter('betas', nn.Parameter(default_betas, requires_grad=True))
        if create_global_orient:
            if (global_orient is None):
                default_global_orient = torch.zeros([batch_size, 3], dtype=dtype)
            elif ('torch.Tensor' in str(type(global_orient))):
                default_global_orient = global_orient.clone().detach()
            else:
                default_global_orient = torch.tensor(global_orient, dtype=dtype)
            global_orient = nn.Parameter(default_global_orient, requires_grad=True)
            self.register_parameter('global_orient', global_orient)
        if create_body_pose:
            if (body_pose is None):
                default_body_pose = torch.zeros([batch_size, (self.NUM_BODY_JOINTS * 3)], dtype=dtype)
            elif ('torch.Tensor' in str(type(body_pose))):
                default_body_pose = body_pose.clone().detach()
            else:
                default_body_pose = torch.tensor(body_pose, dtype=dtype)
            self.register_parameter('body_pose', nn.Parameter(default_body_pose, requires_grad=True))
        if create_transl:
            if (transl is None):
                default_transl = torch.zeros([batch_size, 3], dtype=dtype, requires_grad=True)
            else:
                default_transl = torch.tensor(transl, dtype=dtype)
            self.register_parameter('transl', nn.Parameter(default_transl, requires_grad=True))
        # Non-trainable model data registered as buffers.
        self.register_buffer('v_template', to_tensor(to_np(data_struct.v_template), dtype=dtype))
        shapedirs = data_struct.shapedirs
        self.register_buffer('shapedirs', to_tensor(to_np(shapedirs), dtype=dtype))
        j_regressor = to_tensor(to_np(data_struct.J_regressor), dtype=dtype)
        self.register_buffer('J_regressor', j_regressor)
        # Pose blend shapes, flattened to (num_pose_basis, V*3).
        num_pose_basis = data_struct.posedirs.shape[(- 1)]
        posedirs = np.reshape(data_struct.posedirs, [(- 1), num_pose_basis]).T
        self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=dtype))
        # Kinematic tree; the root has no parent (-1).
        parents = to_tensor(to_np(data_struct.kintree_table[0])).long()
        parents[0] = (- 1)
        self.register_buffer('parents', parents)
        self.register_buffer('lbs_weights', to_tensor(to_np(data_struct.weights), dtype=dtype))

    def create_mean_pose(self, data_struct):
        # Hook for subclasses; base SMPL has no mean pose.
        pass

    # Bug fix: this decorator had been mangled into a stray `_grad()` call
    # statement. The in-place parameter edits below must run without autograd.
    @torch.no_grad()
    def reset_params(self, **params_dict):
        """Reset every registered parameter in-place: to the value supplied in
        `params_dict` when present, otherwise to zero."""
        for (param_name, param) in self.named_parameters():
            if (param_name in params_dict):
                param[:] = torch.tensor(params_dict[param_name])
            else:
                param.fill_(0)

    def get_num_verts(self):
        """Return the number of template vertices."""
        return self.v_template.shape[0]

    def get_num_faces(self):
        """Return the number of mesh faces."""
        return self.faces.shape[0]

    def extra_repr(self):
        return 'Number of betas: {}'.format(self.NUM_BETAS)

    def forward(self, betas=None, body_pose=None, global_orient=None, transl=None, return_verts=True, return_full_pose=False, pose2rot=True, **kwargs):
        """Run LBS and return a ModelOutput with vertices, joints and pose.

        Any argument left as None falls back to the module's own parameter.
        """
        global_orient = (global_orient if (global_orient is not None) else self.global_orient)
        body_pose = (body_pose if (body_pose is not None) else self.body_pose)
        betas = (betas if (betas is not None) else self.betas)
        apply_trans = ((transl is not None) or hasattr(self, 'transl'))
        if ((transl is None) and hasattr(self, 'transl')):
            transl = self.transl
        full_pose = torch.cat([global_orient, body_pose], dim=1)
        batch_size = max(betas.shape[0], global_orient.shape[0], body_pose.shape[0])
        if (betas.shape[0] != batch_size):
            # Broadcast betas across the batch when a single set is shared.
            num_repeats = int((batch_size / betas.shape[0]))
            betas = betas.expand(num_repeats, (- 1))
        (vertices, joints) = lbs(betas, full_pose, self.v_template, self.shapedirs, self.posedirs, self.J_regressor, self.parents, self.lbs_weights, pose2rot=pose2rot, dtype=self.dtype)
        # Add extra (vertex-based) joints, then optionally remap joint order.
        joints = self.vertex_joint_selector(vertices, joints)
        if (self.joint_mapper is not None):
            joints = self.joint_mapper(joints)
        if apply_trans:
            joints += transl.unsqueeze(dim=1)
            vertices += transl.unsqueeze(dim=1)
        output = ModelOutput(vertices=(vertices if return_verts else None), global_orient=global_orient, body_pose=body_pose, joints=joints, betas=betas, full_pose=(full_pose if return_full_pose else None))
        return output
def plot_scores(*scores: pd.DataFrame, width: int=800, height: int=600, ci: float=0.95) -> pn.layout.Panel:
    """Build an interactive Panel dashboard for browsing julearn scores.

    Layout: a header row (logo + title), a widget row (metric selector,
    statistics toggle, repeat-aggregation choice), optional set/model filter
    widgets, and the score/statistics plots underneath.
    """
    viewer = _JulearnScoresViewer(scores=[*scores], width=width, height=height, ci=ci)
    pn.extension(template='fast')
    title_pane = pn.panel('## Scores Viewer')
    logo_path = ((Path(__file__).parent / 'res') / 'julearn_logo_generalization.png')
    logo_pane = pn.panel(logo_path, width=200)
    header_row = pn.Row(logo_pane, pn.Spacer(width=50), title_pane)
    controls = pn.Row(pn.Param(viewer.param.metric, name='Metric', show_name=True, widgets={'metric': {'type': pn.widgets.Select, 'button_type': 'primary', 'name': ''}}))
    controls.append(pn.Param(viewer.param.show_stats, name='Statistics', show_name=True, widgets={'show_stats': {'type': pn.widgets.Toggle, 'button_type': 'primary', 'name': 'Show'}}))
    controls.append(pn.Param(viewer.param.group_repeats, name='Aggregate Repeats', show_name=True, widgets={'group_repeats': {'type': pn.widgets.RadioButtonGroup, 'button_type': 'primary', 'options': ['no', 'median', 'mean']}}))
    filters = pn.Column()
    if (len(viewer.sets) > 1):
        # Only offer a set filter when there is actually a choice to make.
        filters.append(pn.Param(viewer.param.sets, name='Sets', show_name=True, widgets={'sets': {'type': pn.widgets.CheckButtonGroup, 'button_type': 'primary', 'orientation': 'vertical'}}))
    filters.append(pn.Param(viewer.param.models, name='Models', show_name=True, widgets={'models': {'type': pn.widgets.CheckButtonGroup, 'button_type': 'primary', 'orientation': 'vertical'}}))
    return pn.Column(header_row, controls, pn.Row(viewer.plot_scores, filters), viewer.plot_stats)
def convert_network(network, dtype):
    """Convert every module of `network` to `dtype` via convert_module,
    leaving affine batch-norm layers untouched, and return the network."""
    for layer in network.modules():
        # Affine batch-norm statistics/weights are kept in their original dtype.
        skip = isinstance(layer, torch.nn.modules.batchnorm._BatchNorm) and layer.affine is True
        if not skip:
            convert_module(layer, dtype)
    return network
def small_scale(run_name='small_scale'):
    """Write a small Open3D TensorBoard demo: a box and a cylinder mesh,
    re-colored red/green/blue over three summary steps."""
    logdir = os.path.join(BASE_LOGDIR, run_name)
    writer = tf.summary.create_file_writer(logdir)
    box_mesh = o3d.geometry.TriangleMesh.create_box(1, 2, 4, create_uv_map=True)
    box_mesh.compute_vertex_normals()
    cyl_mesh = o3d.geometry.TriangleMesh.create_cylinder(radius=1.0, height=2.0, resolution=20, split=4, create_uv_map=True)
    cyl_mesh.compute_vertex_normals()
    color_cycle = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    with writer.as_default():
        for step in range(3):
            box_mesh.paint_uniform_color(color_cycle[step])
            summary.add_3d('cube', to_dict_batch([box_mesh]), step=step, logdir=logdir)
            cyl_mesh.paint_uniform_color(color_cycle[step])
            summary.add_3d('cylinder', to_dict_batch([cyl_mesh]), step=step, logdir=logdir)
class Mse_Loss():
    """Thin stateless wrapper exposing mean-squared-error as compute_loss."""

    def __init__(self):
        # No state to initialize; the class only namespaces compute_loss.
        pass

    def compute_loss(self, y_input, y_target):
        """Return the MSE between prediction `y_input` and target `y_target`."""
        return F.mse_loss(y_input, y_target)
class TransformerEncoderLayer(nn.Module):
    """Transformer encoder layer (fairseq-style).

    A self-attention sub-block followed by a position-wise feed-forward
    sub-block, each wrapped with a residual connection and layer norm.
    `args.encoder_normalize_before` selects pre-norm vs post-norm.
    """

    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if (self.activation_dropout == 0):
            # Older configs exposed this option under the name relu_dropout.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy `{name}.layer_norms.{0,1}.*` checkpoint keys in-place
        to the current self_attn_layer_norm / final_layer_norm names."""
        layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
        for (old, new) in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if (k in state_dict):
                    state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
                    del state_dict[k]

    def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor]=None):
        """Run one encoder layer over `x` (seq_len, batch, embed_dim).

        `encoder_padding_mask` marks padded positions; `attn_mask`, when
        given, marks attention connections to suppress.
        """
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        if (attn_mask is not None):
            # Bug fix: masked positions need a large negative additive bias so
            # softmax assigns them ~zero weight. The original filled them with
            # -0.0, which is a no-op. -1e8 (rather than -inf) avoids NaNs when
            # an entire row is masked.
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
        (x, _) = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.self_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.final_layer_norm(x)
        return x
class SearchAllLibCall(SearchAllCall):
    """Find API calls belonging to a given library prefix in ASTs or code,
    filtering out a fixed set of trivial calls."""

    # Trivial calls that should never be reported.
    # Bug fix: the original list contained the fused entry 'tf.onestf.shape'
    # (a missing comma between 'tf.ones' and 'tf.shape'), so neither of those
    # calls was actually ignored.
    ignore_calls = ['tf.print', 'tf.constant', 'tf.zeros', 'tf.ones', 'tf.shape']

    def __init__(self, lib_prefix: str):
        super().__init__()
        self.lib_prefix = lib_prefix

    def check_if_ignore(self, api_call) -> bool:
        """Return True if `api_call` is trivially ignored or skipped by util."""
        return ((api_call in self.ignore_calls) or util.if_skip_api(api_call, self.lib_prefix))

    def search_from_ast(self, o_ast) -> list:
        """Return (node, name) pairs from the AST whose call name starts with
        the library prefix and is not ignored."""
        nodes = super().search_from_ast(o_ast)
        lib_calls = [(node, name) for (node, name) in nodes if (name.startswith(self.lib_prefix) and (not self.check_if_ignore(name)))]
        return lib_calls

    def search_from_code(self, snippet) -> list:
        """Return (node, name) pairs found in a raw code snippet.

        NOTE(review): unlike search_from_ast, this variant does not apply the
        ignore filter — presumably intentional, but worth confirming.
        """
        nodes = super().search_from_code(snippet)
        lib_calls = [(node, name) for (node, name) in nodes if name.startswith(self.lib_prefix)]
        return lib_calls
def put_local_dir_tree_to_remote(local_dir: str, remote_dir: str, over_write: Optional[bool]=False):
    """Copy a local directory tree to a remote destination.

    Dispatch by scheme: 'hdfs*' delegates to file_utils, 's3*' uploads each
    file with boto3 (credentials from the environment), anything else is a
    plain filesystem copy (a leading 'file://' is stripped). Returns 0 on
    success, -1 on failure (the hdfs branch returns the helper's result).
    """
    if remote_dir.startswith('hdfs'):
        return file_utils.put_local_dir_tree_to_remote(local_dir=local_dir, remote_dir=remote_dir, over_write=over_write)
    if remote_dir.startswith('s3'):
        key_id = os.environ['AWS_ACCESS_KEY_ID']
        secret = os.environ['AWS_SECRET_ACCESS_KEY']
        import boto3
        client = boto3.Session(aws_access_key_id=key_id, aws_secret_access_key=secret).client('s3')
        # Split "s3://bucket/some/prefix" into bucket and key prefix.
        parts = remote_dir.split('://')[1].split('/')
        bucket = parts.pop(0)
        prefix = '/'.join(parts)
        to_upload = [os.path.join(dirpath, fname) for (dirpath, dirnames, filenames) in os.walk(local_dir) for fname in filenames]
        for local_path in to_upload:
            try:
                with open(local_path, 'rb') as fh:
                    # Key = prefix + path of the file relative to local_dir.
                    client.upload_fileobj(fh, Bucket=bucket, Key=((prefix + '/') + local_path[(len(local_dir) + 1):]))
            except Exception as e:
                logger.error('cannot upload file to s3: {}'.format(str(e)))
                return (- 1)
        return 0
    # Plain filesystem destination.
    if remote_dir.startswith('file://'):
        remote_dir = remote_dir[len('file://'):]
    try:
        copy_tree(local_dir, remote_dir)
    except Exception as e:
        logger.warning(str(e))
        return (- 1)
    return 0
class TestTorchAlgoUtils(TfGraphTestCase):
    """Tests for torch algo utilities: compute_advantages and pad_to_last."""

    # Bug fix: these decorators had been mangled into bare `.parametrize(...)`
    # statements (a syntax error); restore the `@pytest.mark.` prefix.
    @pytest.mark.parametrize('discount', [1, 0.95])
    @pytest.mark.parametrize('num_trajs', [1, 5])
    @pytest.mark.parametrize('gae_lambda', [0, 0.5, 1])
    @pytest.mark.parametrize('rewards_traj, baselines_traj', [(ONES, ZEROS), (PI_DIGITS, ARRANGE), (ONES, FIBS)])
    def test_compute_advantages(self, num_trajs, discount, gae_lambda, rewards_traj, baselines_traj):
        """compute_advantages matches a straightforward reference GAE."""
        def get_advantage(discount, gae_lambda, rewards, baselines):
            # Reference implementation: accumulate GAE right-to-left.
            adv = torch.zeros(rewards.shape)
            for i in range(rewards.shape[0]):
                acc = 0
                for j in range(rewards.shape[1]):
                    acc = ((acc * discount) * gae_lambda)
                    acc += (rewards[i][((- j) - 1)] - baselines[i][((- j) - 1)])
                    acc += ((discount * baselines[i][(- j)]) if j else 0)
                    adv[i][((- j) - 1)] = acc
            return adv
        length = len(rewards_traj)
        rewards = torch.Tensor(stack(num_trajs, rewards_traj))
        baselines = torch.Tensor(stack(num_trajs, baselines_traj))
        expected_adv = get_advantage(discount, gae_lambda, rewards, baselines)
        computed_adv = compute_advantages(discount, gae_lambda, length, baselines, rewards)
        assert torch.allclose(expected_adv, computed_adv)

    def test_add_padding_last_1d(self):
        """pad_to_last pads a 1-D tensor on its only axis."""
        max_length = 10
        expected = F.pad(torch.Tensor(nums_1d), (0, (max_length - nums_1d.shape[(- 1)])))
        tensor_padding = pad_to_last(nums_1d, total_length=max_length)
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_1d, total_length=10, axis=0)
        assert expected.eq(tensor_padding).all()

    def test_add_padding_last_2d(self):
        """pad_to_last pads a 2-D tensor on the default and each explicit axis."""
        max_length = 10
        tensor_padding = pad_to_last(nums_2d, total_length=10)
        expected = F.pad(torch.Tensor(nums_2d), (0, (max_length - nums_2d.shape[(- 1)])))
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_2d, total_length=10, axis=0)
        expected = F.pad(torch.Tensor(nums_2d), (0, 0, 0, (max_length - nums_2d.shape[0])))
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_2d, total_length=10, axis=1)
        expected = F.pad(torch.Tensor(nums_2d), (0, (max_length - nums_2d.shape[(- 1)]), 0, 0))
        assert expected.eq(tensor_padding).all()

    def test_add_padding_last_3d(self):
        """pad_to_last pads a 3-D tensor on the default and each explicit axis."""
        max_length = 10
        tensor_padding = pad_to_last(nums_3d, total_length=10)
        expected = F.pad(torch.Tensor(nums_3d), (0, (max_length - nums_3d.shape[(- 1)]), 0, 0, 0, 0))
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=0)
        expected = F.pad(torch.Tensor(nums_3d), (0, 0, 0, 0, 0, (max_length - nums_3d.shape[0])))
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=1)
        expected = F.pad(torch.Tensor(nums_3d), (0, 0, 0, (max_length - nums_3d.shape[(- 1)]), 0, 0))
        assert expected.eq(tensor_padding).all()
        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=2)
        expected = F.pad(torch.Tensor(nums_3d), (0, (max_length - nums_3d.shape[(- 1)]), 0, 0, 0, 0))
        assert expected.eq(tensor_padding).all()

    @pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])
    def test_out_of_index_error(self, nums):
        """pad_to_last raises IndexError for an axis beyond the tensor rank."""
        with pytest.raises(IndexError):
            pad_to_last(nums, total_length=10, axis=len(nums.shape))
def main():
    """Fine-tune (or train from scratch) a causal language model with
    TensorFlow via HuggingFace `transformers`: parse arguments, load and
    tokenize data, group it into fixed-size blocks, train with Keras, and log
    and save final train/validation loss and perplexity.
    """
    # Argument parsing: a single .json argv is treated as a config file,
    # otherwise regular CLI flags are parsed into the three dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Report example-usage telemetry to HuggingFace.
    send_example_telemetry('run_clm', model_args, data_args, framework='tensorflow')
    if ((data_args.dataset_name is None) and (data_args.train_file is None) and (data_args.validation_file is None)):
        raise ValueError('Need either a dataset name or a training/validation file.')
    else:
        # Local files must be csv/json/txt so load_dataset can infer a loader.
        if (data_args.train_file is not None):
            extension = data_args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json', 'txt']), '`train_file` should be a csv, json or txt file.'
        if (data_args.validation_file is not None):
            extension = data_args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json', 'txt']), '`validation_file` should be a csv, json or txt file.'
    if (training_args.output_dir is not None):
        training_args.output_dir = Path(training_args.output_dir)
        os.makedirs(training_args.output_dir, exist_ok=True)
    # Checkpoint detection: a non-empty output dir with both a config and TF2
    # weights means we resume; non-empty without both is an error unless
    # --overwrite_output_dir was passed.
    checkpoint = None
    if ((len(os.listdir(training_args.output_dir)) > 0) and (not training_args.overwrite_output_dir)):
        config_path = (training_args.output_dir / CONFIG_NAME)
        weights_path = (training_args.output_dir / TF2_WEIGHTS_NAME)
        if (config_path.is_file() and weights_path.is_file()):
            checkpoint = training_args.output_dir
            logger.info(f'Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
        else:
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to continue regardless.')
    # Logging verbosity for this script and the datasets/transformers libs.
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
    if (training_args.seed is not None):
        set_seed(training_args.seed)
    # Data loading: either a hub dataset (creating a validation split from the
    # train split when none exists) or local csv/json/txt files.
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
        if ('validation' not in raw_datasets.keys()):
            # Carve the first N% of train off as validation, keep the rest.
            raw_datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
            raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        data_files = {}
        dataset_args = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
        extension = (data_args.train_file.split('.')[(- 1)] if (data_args.train_file is not None) else data_args.validation_file.split('.')[(- 1)])
        if (extension == 'txt'):
            # The loader for .txt files is named 'text'.
            extension = 'text'
            dataset_args['keep_linebreaks'] = data_args.keep_linebreaks
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), **dataset_args)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), **dataset_args)
            raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None), **dataset_args)
    # Model config: explicit config name > model checkpoint > fresh config.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    # Tokenizer: training one from scratch is not supported here.
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    # Tokenize on the 'text' column (or the first column as a fallback).
    column_names = raw_datasets['train'].column_names
    text_column_name = ('text' if ('text' in column_names) else column_names[0])
    def tokenize_function(examples):
        return tokenizer(examples[text_column_name])
    tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
    # Pick the chunk length, capped by the tokenizer's model_max_length.
    if (data_args.block_size is None):
        block_size = tokenizer.model_max_length
        if (block_size > 1024):
            logger.warning(f'The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). Picking 1024 instead. You can change that default value by passing --block_size xxx.')
            block_size = 1024
    else:
        if (data_args.block_size > tokenizer.model_max_length):
            logger.warning(f'The block_size passed ({data_args.block_size}) is larger than the maximum length for the model({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.')
        block_size = min(data_args.block_size, tokenizer.model_max_length)
    def group_texts(examples):
        # Concatenate all token sequences, then slice into block_size chunks;
        # the remainder shorter than block_size is dropped.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if (total_length >= block_size):
            total_length = ((total_length // block_size) * block_size)
        result = {k: [t[i:(i + block_size)] for i in range(0, total_length, block_size)] for (k, t) in concatenated_examples.items()}
        # For causal LM the labels are the inputs themselves.
        result['labels'] = result['input_ids'].copy()
        return result
    lm_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc=f'Grouping texts in chunks of {block_size}')
    train_dataset = lm_datasets['train']
    if (data_args.validation_file is not None):
        eval_dataset = lm_datasets['validation']
    else:
        # No explicit validation file: split the train set by index.
        logger.info(f'Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation as provided in data_args')
        (train_indices, val_indices) = train_test_split(list(range(len(train_dataset))), test_size=(data_args.validation_split_percentage / 100))
        eval_dataset = train_dataset.select(val_indices)
        train_dataset = train_dataset.select(train_indices)
    # Optional down-sampling for quick runs.
    if (data_args.max_train_samples is not None):
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if (data_args.max_eval_samples is not None):
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))
    # Log a few random samples for sanity checking.
    for index in random.sample(range(len(train_dataset)), min(3, len(train_dataset))):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    # Everything model-related happens under the distribution strategy scope.
    with training_args.strategy.scope():
        # Model: resume checkpoint > pretrained weights > fresh from config.
        if (checkpoint is not None):
            model = TFAutoModelForCausalLM.from_pretrained(checkpoint, config=config)
        elif model_args.model_name_or_path:
            model = TFAutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config)
        else:
            logger.info('Training new model from scratch')
            model = TFAutoModelForCausalLM.from_config(config)
        # Resize the embedding matrix if the tokenizer vocab outgrew it.
        embeddings = model.get_input_embeddings()
        if hasattr(embeddings, 'embeddings'):
            embedding_size = embeddings.embeddings.shape[0]
        else:
            embedding_size = embeddings.weight.shape[0]
        if (len(tokenizer) > embedding_size):
            model.resize_token_embeddings(len(tokenizer))
        # Build tf.data pipelines; sharding is disabled because the datasets
        # are already materialized locally.
        num_replicas = training_args.strategy.num_replicas_in_sync
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        tf_train_dataset = model.prepare_tf_dataset(train_dataset, shuffle=True, batch_size=(num_replicas * training_args.per_device_train_batch_size)).with_options(options)
        tf_eval_dataset = model.prepare_tf_dataset(eval_dataset, shuffle=False, batch_size=(num_replicas * training_args.per_device_eval_batch_size), drop_remainder=True).with_options(options)
        # Optimizer with linear warmup (explicit step count takes precedence
        # over a warmup ratio).
        num_train_steps = (len(tf_train_dataset) * int(training_args.num_train_epochs))
        if (training_args.warmup_steps > 0):
            num_warmup_steps = training_args.warmup_steps
        elif (training_args.warmup_ratio > 0):
            num_warmup_steps = int((num_train_steps * training_args.warmup_ratio))
        else:
            num_warmup_steps = 0
        (optimizer, lr_schedule) = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm)
        # No explicit loss: transformers models compute their own LM loss.
        model.compile(optimizer=optimizer, jit_compile=training_args.xla)
        # Derive a default hub model id and model-card metadata.
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split('/')[(- 1)]
        if (not push_to_hub_model_id):
            if (data_args.dataset_name is not None):
                push_to_hub_model_id = f'{model_name}-finetuned-{data_args.dataset_name}'
            else:
                push_to_hub_model_id = f'{model_name}-finetuned-clm'
        model_card_kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-generation'}
        if (data_args.dataset_name is not None):
            model_card_kwargs['dataset_tags'] = data_args.dataset_name
            if (data_args.dataset_config_name is not None):
                model_card_kwargs['dataset_args'] = data_args.dataset_config_name
                model_card_kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
            else:
                model_card_kwargs['dataset'] = data_args.dataset_name
        if training_args.push_to_hub:
            callbacks = [PushToHubCallback(output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs)]
        else:
            callbacks = []
        # Train.
        logger.info('***** Running training *****')
        logger.info(f' Num examples = {len(train_dataset)}')
        logger.info(f' Num Epochs = {training_args.num_train_epochs}')
        logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}')
        logger.info(f' Total train batch size = {(training_args.per_device_train_batch_size * num_replicas)}')
        history = model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
        # Report perplexity = exp(loss); an overflow means effectively inf.
        train_loss = history.history['loss'][(- 1)]
        try:
            train_perplexity = math.exp(train_loss)
        except OverflowError:
            train_perplexity = math.inf
        logger.info(f' Final train loss: {train_loss:.3f}')
        logger.info(f' Final train perplexity: {train_perplexity:.3f}')
        validation_loss = history.history['val_loss'][(- 1)]
        try:
            validation_perplexity = math.exp(validation_loss)
        except OverflowError:
            validation_perplexity = math.inf
        logger.info(f' Final validation loss: {validation_loss:.3f}')
        logger.info(f' Final validation perplexity: {validation_perplexity:.3f}')
        # Persist the metrics and (unless pushing to hub handled it) the model.
        if (training_args.output_dir is not None):
            output_eval_file = os.path.join(training_args.output_dir, 'all_results.json')
            results_dict = {}
            results_dict['train_loss'] = train_loss
            results_dict['train_perplexity'] = train_perplexity
            results_dict['eval_loss'] = validation_loss
            results_dict['eval_perplexity'] = validation_perplexity
            with open(output_eval_file, 'w') as writer:
                writer.write(json.dumps(results_dict))
        if ((training_args.output_dir is not None) and (not training_args.push_to_hub)):
            model.save_pretrained(training_args.output_dir)
def blocks(files, size=65536):
    """Yield successive chunks of at most `size` read from the file-like
    object `files`, stopping at end-of-file."""
    chunk = files.read(size)
    while chunk:
        (yield chunk)
        chunk = files.read(size)
class PSRoIAlign(nn.Module):
    """Module wrapper around the position-sensitive RoI Align op.

    Stores output_size / spatial_scale / sampling_ratio and forwards them,
    together with the input feature map and RoIs, to ps_roi_align.
    """

    def __init__(self, output_size: int, spatial_scale: float, sampling_ratio: int):
        super(PSRoIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input: Tensor, rois: Tensor) -> Tensor:
        """Apply PS RoI Align to `input` at the given `rois`."""
        return ps_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}('
                f'output_size={self.output_size}'
                f', spatial_scale={self.spatial_scale}'
                f', sampling_ratio={self.sampling_ratio})')
def combine_beam(int_order, true_ref, out_path):
    """Reorder `true_ref` by the indices in `int_order` and write the result,
    one entry per line, to the file `out_path + '_ref'`."""
    reordered = [true_ref[position] for position in int_order]
    with open(out_path + '_ref', 'w') as sink:
        for entry in reordered:
            print(entry, file=sink)
    return
def is_anonym_type(index: int, amr: AMR, text_map: Dict, types: List) -> bool:
    """Return True if the lemma at `index` appears in `text_map` and its
    recorded 'ner' tag is one of `types`."""
    token_lemma = amr.lemmas[index]
    if token_lemma not in text_map:
        return False
    return text_map[token_lemma]['ner'] in types
class Vocab():
    """Token vocabulary loaded from a word-per-line file.

    Indices 0-4 are reserved for <pad>/<unk>/<eos>/<sos>/<mask>; subsequent
    indices follow the file order (first whitespace-separated token per line).
    """

    def __init__(self, voc_path, max_size=None, min_freq=1):
        """Load the vocabulary from `voc_path`.

        NOTE(review): `max_size` is adjusted below but never applied, and
        `min_freq` is never used — the whole file is loaded. Kept as-is to
        preserve behavior; confirm whether truncation was intended.
        """
        # Reserved special-token indices.
        self.pad_index = 0
        self.unk_index = 1
        self.eos_index = 2
        self.sos_index = 3
        self.mask_index = 4
        print('Building Vocab')
        self.itos = list(['<pad>', '<unk>', '<eos>', '<sos>', '<mask>'])
        max_size = (None if (max_size is None) else (max_size + len(self.itos)))
        # Bug fix: open the vocab file in a context manager so the handle is
        # closed deterministically (the original never closed it).
        with open(voc_path) as voc_file:
            for line in voc_file.readlines():
                if (line.strip() == ''):
                    continue
                # Only the first field of each line is the token.
                self.itos.append(line.strip().split()[0])
        self.stoi = {tok: i for (i, tok) in enumerate(self.itos)}

    def to_seq(self, sentence: str, seq_len: int=None, with_eos=False, with_sos=False, with_len=False, mid_pad=False) -> list:
        """Convert a sentence into a list of token indices.

        Unknown tokens map to unk_index. Optionally appends <eos>, prepends
        <sos>, pads (at the end, or centered when mid_pad) or truncates to
        `seq_len`, and returns (seq, original_length) when with_len is True.
        """
        tokens = self.tokenizer(sentence)
        seq = [self.stoi.get(c, self.unk_index) for c in tokens]
        if with_eos:
            seq += [self.eos_index]
        if with_sos:
            seq = ([self.sos_index] + seq)
        origin_seq_len = len(seq)
        if (seq_len is None):
            pass
        elif (len(seq) <= seq_len):
            if (not mid_pad):
                seq += [self.pad_index for _ in range((seq_len - len(seq)))]
            else:
                # Center the sequence between front and end padding.
                front_pad = [self.pad_index for _ in range(int(((seq_len - len(seq)) / 2)))]
                end_path = [self.pad_index for _ in range(((seq_len - len(seq)) - len(front_pad)))]
                seq = ((front_pad + seq) + end_path)
        else:
            seq = seq[:seq_len]
        return ((seq, origin_seq_len) if with_len else seq)

    def from_seq(self, seq, join=False, with_pad=False):
        """Convert indices back to tokens (out-of-range indices render as
        '<idx>'); padding is skipped unless with_pad, and the result is a
        joined string when join is True."""
        tokens = [(self.itos[idx] if (idx < len(self.itos)) else ('<%d>' % idx)) for idx in seq if (with_pad or (idx != self.pad_index))]
        return (self.joiner(tokens) if join else tokens)

    def tokenizer(self, sentence: str) -> list:
        """Whitespace tokenization."""
        return sentence.strip().split()

    def joiner(self, tokens: list) -> str:
        """Join tokens with single spaces."""
        return ' '.join(tokens)

    def __len__(self):
        return len(self.itos)
class CompositeMutation(Mutation[Solution]):
    """Mutation for CompositeSolution: applies the i-th operator of the
    supplied list to the i-th variable of the solution."""

    def __init__(self, mutation_operator_list: [Mutation]):
        super(CompositeMutation, self).__init__(probability=1.0)
        Check.is_not_none(mutation_operator_list)
        Check.collection_is_not_empty(mutation_operator_list)
        self.mutation_operators_list = []
        # Validate each operator before accepting it.
        for candidate in mutation_operator_list:
            Check.that(issubclass(candidate.__class__, Mutation), 'Object is not a subclass of Mutation')
            self.mutation_operators_list.append(candidate)

    def execute(self, solution: CompositeSolution) -> CompositeSolution:
        """Mutate each sub-solution with its paired operator and wrap the
        results in a new CompositeSolution."""
        Check.is_not_none(solution)
        mutated_parts = []
        for position in range(solution.number_of_variables):
            operator = self.mutation_operators_list[position]
            mutated_parts.append(operator.execute(solution.variables[position]))
        return CompositeSolution(mutated_parts)

    def get_name(self) -> str:
        return 'Composite mutation operator'
class UNet(nn.Module):
    """Two-level U-shaped 3D network: two conv stages, a bridge, then two
    upsample-and-concat steps followed by a BatchNorm3d+ReLU head.

    NOTE(review): relies on module-level helpers `conv_block`,
    `bridge_block` and `upsample` defined elsewhere in this file.
    """

    def __init__(self, nPlanes, reps):
        super(UNet, self).__init__()
        # This implementation only supports one rep and exactly 3 plane sizes.
        assert reps == 1
        assert len(nPlanes) == 3
        planes0, planes1, planes2 = nPlanes
        self.res1 = conv_block(planes0, planes1)
        self.res2 = conv_block(planes1, planes2)
        self.bridge = bridge_block(planes2, planes2)
        # Norm/activation over the channel-concatenated feature maps.
        self.bn_relu = nn.Sequential(
            nn.BatchNorm3d(planes0 + planes1 + planes2),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        skip0 = x
        skip1 = self.res1(skip0)
        out = self.bridge(self.res2(skip1))
        out = torch.cat((upsample(out, 2), skip1), dim=1)
        out = torch.cat((upsample(out, 2), skip0), dim=1)
        return self.bn_relu(out)
def parse_nullable_value(value):
    """Return `value`, or None when it is falsy or the placeholder '_'
    (the CoNLL-style "empty field" marker)."""
    if value and value != '_':
        return value
    return None
def create_logger(filepath):
    """Configure and return the root logger.

    Installs a DEBUG-level file handler when `filepath` is given, plus an
    INFO-level console handler; both use the project LogFormatter.  Also
    attaches a `reset_time` callable that restarts the formatter clock.
    """
    log_formatter = LogFormatter()
    logger = logging.getLogger()
    logger.handlers = []  # drop any previously-installed handlers
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if filepath is not None:
        file_handler = logging.FileHandler(filepath, 'a')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(log_formatter)
        logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)

    def reset_time():
        # Restart the elapsed-time reference used by LogFormatter.
        log_formatter.start_time = time.time()

    logger.reset_time = reset_time
    return logger
def learn_halut_multi_core_dict(dict_to_learn: dict[(str, list)], data_path: str, store_path: str, kmeans_options: "dict | None" = None, codebook: int = (-1)) -> None:
    """Learn Halut lookup tables for every layer listed in `dict_to_learn`.

    Args:
        dict_to_learn: maps layer name -> config list; indices
            hm.HalutModuleConfig.C / .K select C and K, and any entries from
            index 2 on override the conv2d options in declaration order
            (loop_order, kernel_size, stride, padding).
        data_path: directory holding the recorded layer inputs.
        store_path: directory where the learned tables are written.
        kmeans_options: optional overrides for the k-means defaults below.
            Fix: was a mutable default argument `{}`, which is shared across
            calls in Python; replaced with None sentinel (same semantics).
        codebook: codebook index to learn; -1 means all.  As in the original
            contract, setting it while `kmeans_options` is empty raises.
    """
    if kmeans_options is None:
        kmeans_options = {}
    for (k, v) in dict_to_learn.items():
        print('learning', k, v)
        conv2d_options = {'loop_order': 'im2col', 'kernel_size': (3, 3), 'stride': (1, 1), 'padding': (1, 1)}
        kmeans_options_here = {'niter': 25, 'nredo': 1, 'min_points_per_centroid': 1, 'max_points_per_centroid': 20000}
        # Positional extras in v (index 2 onwards) override conv2d options in order.
        conv2d_keys = list(conv2d_options.keys())
        for i in range(2, len(v)):
            conv2d_options[conv2d_keys[i - 2]] = v[i]
        # Caller-supplied k-means options take precedence over the defaults.
        kmeans_options_here.update(kmeans_options)
        if (len(kmeans_options) == 0) and (codebook > (-1)):
            raise Exception('codebook is set but kmeans_options is empty')
        learn_halut(l=k, C=v[hm.HalutModuleConfig.C], data_path=data_path, store_path=store_path, K=v[hm.HalutModuleConfig.K], loop_order=conv2d_options['loop_order'], kernel_size=conv2d_options['kernel_size'], stride=conv2d_options['stride'], padding=conv2d_options['padding'], niter=kmeans_options_here['niter'], nredo=kmeans_options_here['nredo'], min_points_per_centroid=kmeans_options_here['min_points_per_centroid'], max_points_per_centroid=kmeans_options_here['max_points_per_centroid'], codebook=codebook)
    print('==== FINISHED LEARNING (exited all tasks) =======')
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 TSV data (binary sentiment labels '0'/'1')."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a TF tensor dict with keys idx/sentence/label."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence'].numpy().decode('utf-8'),
            None,
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Examples from train.tsv under `data_dir`."""
        path = os.path.join(data_dir, 'train.tsv')
        return self._create_examples(self._read_tsv(path), 'train')

    def get_dev_examples(self, data_dir):
        """Examples from dev.tsv under `data_dir`."""
        path = os.path.join(data_dir, 'dev.tsv')
        return self._create_examples(self._read_tsv(path), 'dev')

    def get_test_examples(self, data_dir):
        """Examples from test.tsv under `data_dir` (unlabeled)."""
        path = os.path.join(data_dir, 'test.tsv')
        return self._create_examples(self._read_tsv(path), 'test')

    def get_labels(self):
        """The label set for SST-2."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples; the first row is a header.

        Test files put the sentence in column 1 (column 0 is an index) and
        carry no label; train/dev put the sentence in column 0 and the label
        in column 1.
        """
        is_test = set_type == 'test'
        text_index = 1 if is_test else 0
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue  # skip header row
            examples.append(
                InputExample(
                    guid='%s-%s' % (set_type, i),
                    text_a=line[text_index],
                    text_b=None,
                    label=None if is_test else line[1],
                )
            )
        return examples
class Scenario(BaseScenario):
    """Predator-prey particle scenario with communication, food and forests.

    Four slower adversaries (agent 0 is a speaking "leader") chase two
    faster good agents; food items reward the prey and forests hide agents
    from most observers.  NOTE(review): the layout matches the MPE
    `simple_world_comm` scenario — confirm against the upstream
    multiagent-particle-envs source.
    """

    def make_world(self):
        """Build the world: agents, one landmark, food and forest entities."""
        world = World()
        world.dim_c = 4
        num_good_agents = 2
        num_adversaries = 4
        num_agents = (num_adversaries + num_good_agents)
        num_landmarks = 1
        num_food = 2
        num_forests = 2
        world.agents = [Agent() for i in range(num_agents)]
        for (i, agent) in enumerate(world.agents):
            agent.name = ('agent %d' % i)
            agent.collide = True
            # Agent 0 is the leader; all others are silent (no comm channel).
            agent.leader = (True if (i == 0) else False)
            agent.silent = (True if (i > 0) else False)
            # The first num_adversaries agents are the predators.
            agent.adversary = (True if (i < num_adversaries) else False)
            # Adversaries are larger but slower than the good agents.
            agent.size = (0.075 if agent.adversary else 0.045)
            agent.accel = (3.0 if agent.adversary else 4.0)
            agent.max_speed = (1.0 if agent.adversary else 1.3)
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for (i, landmark) in enumerate(world.landmarks):
            landmark.name = ('landmark %d' % i)
            landmark.collide = True
            landmark.movable = False
            landmark.size = 0.2
            landmark.boundary = False
        world.food = [Landmark() for i in range(num_food)]
        for (i, landmark) in enumerate(world.food):
            landmark.name = ('food %d' % i)
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.03
            landmark.boundary = False
        world.forests = [Landmark() for i in range(num_forests)]
        for (i, landmark) in enumerate(world.forests):
            landmark.name = ('forest %d' % i)
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.3
            landmark.boundary = False
        # Food and forests are appended so generic landmark code sees them too.
        world.landmarks += world.food
        world.landmarks += world.forests
        self.reset_world(world)
        return world

    def set_boundaries(self, world):
        """Return a list of large immovable boundary landmarks ringing the arena.

        NOTE(review): not called from within this class in the visible code.
        """
        boundary_list = []
        landmark_size = 1
        edge = (1 + landmark_size)
        num_landmarks = int(((edge * 2) / landmark_size))
        for x_pos in [(- edge), edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([x_pos, ((- 1) + (i * landmark_size))])
                boundary_list.append(l)
        for y_pos in [(- edge), edge]:
            for i in range(num_landmarks):
                l = Landmark()
                l.state.p_pos = np.array([((- 1) + (i * landmark_size)), y_pos])
                boundary_list.append(l)
        for (i, l) in enumerate(boundary_list):
            l.name = ('boundary %d' % i)
            l.collide = True
            l.movable = False
            l.boundary = True
            l.color = np.array([0.75, 0.75, 0.75])
            l.size = landmark_size
            l.state.p_vel = np.zeros(world.dim_p)
        return boundary_list

    def reset_world(self, world):
        """Re-color all entities and randomize positions; zero velocities/comms."""
        for (i, agent) in enumerate(world.agents):
            # Good agents are green, adversaries red; the leader is darkened.
            agent.color = (np.array([0.45, 0.95, 0.45]) if (not agent.adversary) else np.array([0.95, 0.45, 0.45]))
            agent.color -= (np.array([0.3, 0.3, 0.3]) if agent.leader else np.array([0, 0, 0]))
        for (i, landmark) in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        for (i, landmark) in enumerate(world.food):
            landmark.color = np.array([0.15, 0.15, 0.65])
        for (i, landmark) in enumerate(world.forests):
            landmark.color = np.array([0.6, 0.9, 0.6])
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform((- 1), (+ 1), world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for (i, landmark) in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform((- 0.9), (+ 0.9), world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        for (i, landmark) in enumerate(world.food):
            landmark.state.p_pos = np.random.uniform((- 0.9), (+ 0.9), world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        for (i, landmark) in enumerate(world.forests):
            landmark.state.p_pos = np.random.uniform((- 0.9), (+ 0.9), world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        """For adversaries: number of collisions with good agents; 0 otherwise."""
        if agent.adversary:
            collisions = 0
            for a in self.good_agents(world):
                if self.is_collision(a, agent):
                    collisions += 1
            return collisions
        else:
            return 0

    def is_collision(self, agent1, agent2):
        """True when the two entities' circles overlap (center distance < radii sum)."""
        delta_pos = (agent1.state.p_pos - agent2.state.p_pos)
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = (agent1.size + agent2.size)
        return (True if (dist < dist_min) else False)

    def good_agents(self, world):
        """All non-adversary agents."""
        return [agent for agent in world.agents if (not agent.adversary)]

    def adversaries(self, world):
        """All adversary agents."""
        return [agent for agent in world.agents if agent.adversary]

    def reward(self, agent, world):
        """Dispatch to the adversary or good-agent reward."""
        main_reward = (self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world))
        return main_reward

    def outside_boundary(self, agent):
        """True when the agent has left the [-1, 1] x [-1, 1] arena."""
        if ((agent.state.p_pos[0] > 1) or (agent.state.p_pos[0] < (- 1)) or (agent.state.p_pos[1] > 1) or (agent.state.p_pos[1] < (- 1))):
            return True
        else:
            return False

    def agent_reward(self, agent, world):
        """Reward for a good (prey) agent: penalized for adversary collisions
        and for approaching the arena edge; rewarded for touching food."""
        rew = 0
        shape = False  # distance-based shaping disabled
        adversaries = self.adversaries(world)
        if shape:
            # Dead code while shape=False: rewards distance from adversaries.
            for adv in adversaries:
                rew += (0.1 * np.sqrt(np.sum(np.square((agent.state.p_pos - adv.state.p_pos)))))
        if agent.collide:
            for a in adversaries:
                if self.is_collision(a, agent):
                    rew -= 5

        def bound(x):
            # Soft boundary penalty: free while |coord| < 0.9, linear ramp to
            # 1.0, then exponential growth capped at 10.
            if (x < 0.9):
                return 0
            if (x < 1.0):
                return ((x - 0.9) * 10)
            return min(np.exp(((2 * x) - 2)), 10)
        for p in range(world.dim_p):
            x = abs(agent.state.p_pos[p])
            rew -= (2 * bound(x))
        for food in world.food:
            if self.is_collision(agent, food):
                rew += 2
        # NOTE(review): this ADDS reward proportional to the distance to the
        # nearest food (larger when farther away) — the sign looks suspicious;
        # confirm against the upstream scenario before changing.
        rew += (0.05 * min([np.sqrt(np.sum(np.square((food.state.p_pos - agent.state.p_pos)))) for food in world.food]))
        return rew

    def adversary_reward(self, agent, world):
        """Reward for a predator: shaped by proximity to the nearest prey,
        plus a team bonus whenever any adversary touches any prey."""
        rew = 0
        shape = True  # distance-based shaping enabled for adversaries
        agents = self.good_agents(world)
        adversaries = self.adversaries(world)
        if shape:
            rew -= (0.1 * min([np.sqrt(np.sum(np.square((a.state.p_pos - agent.state.p_pos)))) for a in agents]))
        if agent.collide:
            # Team reward: every adversary/prey collision pays this agent.
            for ag in agents:
                for adv in adversaries:
                    if self.is_collision(ag, adv):
                        rew += 5
        return rew

    def observation2(self, agent, world):
        """Simpler observation without forest occlusion or comm.

        NOTE(review): `food_pos` and `comm` are computed but never included
        in the returned vector — mirrors the code as found.
        """
        entity_pos = []
        for entity in world.landmarks:
            if (not entity.boundary):
                entity_pos.append((entity.state.p_pos - agent.state.p_pos))
        food_pos = []
        for entity in world.food:
            if (not entity.boundary):
                food_pos.append((entity.state.p_pos - agent.state.p_pos))
        comm = []
        other_pos = []
        other_vel = []
        for other in world.agents:
            if (other is agent):
                continue
            comm.append(other.state.c)
            other_pos.append((other.state.p_pos - agent.state.p_pos))
            if (not other.adversary):
                other_vel.append(other.state.p_vel)
        return np.concatenate((((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos) + other_pos) + other_vel))

    def observation(self, agent, world):
        """Full observation with forest occlusion and the leader's comm signal.

        Other agents are visible only when observer and observed share the
        same forest membership (both in forest 0, both in forest 1, or both
        outside) — except the leader, who always sees everyone.
        """
        entity_pos = []
        for entity in world.landmarks:
            if (not entity.boundary):
                entity_pos.append((entity.state.p_pos - agent.state.p_pos))
        # Forest membership flags for this agent: +1 inside, -1 outside.
        in_forest = [np.array([(- 1)]), np.array([(- 1)])]
        inf1 = False
        inf2 = False
        if self.is_collision(agent, world.forests[0]):
            in_forest[0] = np.array([1])
            inf1 = True
        if self.is_collision(agent, world.forests[1]):
            in_forest[1] = np.array([1])
            inf2 = True
        # NOTE(review): food_pos is built but not concatenated into any return.
        food_pos = []
        for entity in world.food:
            if (not entity.boundary):
                food_pos.append((entity.state.p_pos - agent.state.p_pos))
        comm = []
        other_pos = []
        other_vel = []
        for other in world.agents:
            if (other is agent):
                continue
            comm.append(other.state.c)
            oth_f1 = self.is_collision(other, world.forests[0])
            oth_f2 = self.is_collision(other, world.forests[1])
            # Visible if same forest state (or observer is the leader);
            # otherwise the slot is zeroed out (hidden).
            if ((inf1 and oth_f1) or (inf2 and oth_f2) or ((not inf1) and (not oth_f1) and (not inf2) and (not oth_f2)) or agent.leader):
                other_pos.append((other.state.p_pos - agent.state.p_pos))
                if (not other.adversary):
                    other_vel.append(other.state.p_vel)
            else:
                other_pos.append([0, 0])
                if (not other.adversary):
                    other_vel.append([0, 0])
        # NOTE(review): prey_forest / prey_forest_lead are computed but never
        # used in the returned vectors — mirrors the code as found.
        prey_forest = []
        ga = self.good_agents(world)
        for a in ga:
            if any([self.is_collision(a, f) for f in world.forests]):
                prey_forest.append(np.array([1]))
            else:
                prey_forest.append(np.array([(- 1)]))
        prey_forest_lead = []
        for f in world.forests:
            if any([self.is_collision(a, f) for a in ga]):
                prey_forest_lead.append(np.array([1]))
            else:
                prey_forest_lead.append(np.array([(- 1)]))
        # Only the leader's communication state is broadcast.
        comm = [world.agents[0].state.c]
        if (agent.adversary and (not agent.leader)):
            return np.concatenate((((((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos) + other_pos) + other_vel) + in_forest) + comm))
        if agent.leader:
            return np.concatenate((((((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos) + other_pos) + other_vel) + in_forest) + comm))
        else:
            # Good agents: no comm channel, and in_forest precedes other_vel.
            return np.concatenate(((((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos) + other_pos) + in_forest) + other_vel))
def generate_labels(dataset, model, batch_size):
    """Run `model` over `dataset` batch-by-batch and return the concatenated
    predictions on CPU.

    The model is evaluated on its own device when it is an nn.Module (taken
    from its first parameter); otherwise inputs are kept on CPU.  Gradients
    are disabled throughout.  The dataset is expected to yield 1-tuples.
    """
    if isinstance(model, torch.nn.Module):
        target_device = next(model.parameters()).device
    else:
        target_device = torch.device('cpu')
    outputs = []
    with torch.no_grad():
        for (batch,) in DataLoader(dataset, batch_size=batch_size):
            outputs.append(model(batch.to(target_device)).cpu())
    return torch.cat(outputs)
class GaussianSampler(ZooKerasLayer):
    """Thin ZooKerasLayer wrapper; the actual sampling logic lives in the
    backend layer this constructor binds to (see ZooKerasLayer)."""

    def __init__(self, input_shape=None, **kwargs):
        # Normalize the optional shape to a list for the backend call.
        if input_shape:
            shape = list(input_shape)
        else:
            shape = None
        super(GaussianSampler, self).__init__(None, shape, **kwargs)
class CheckpointEngine(metaclass=ABCMeta):
    """Base engine that saves/loads checkpoint shards via shared memory.

    Each training process writes its shard of the state dict into a shared
    memory segment; a separate saver agent process (requested through the
    'factory' queue) asynchronously persists the segment to storage.  A
    shared lock and event queue coordinate the two sides.  Subclasses must
    provide the saver class and the local/global shard counts.
    """

    def __init__(self, checkpoint_dir: str):
        """Set up rank info, shared queue/lock/memory handles, and notify the
        agent to create the saver.

        Args:
            checkpoint_dir: directory where checkpoints are persisted.
        """
        self.checkpoint_dir = checkpoint_dir
        if dist.is_initialized():
            self._rank = dist.get_rank()
            # gloo backend so loader-group collectives work on CPU objects.
            self._loader_group = dist.new_group(backend='gloo')
        else:
            self._rank = 0
            self._loader_group = None
        self._local_rank = int(os.getenv('LOCAL_RANK', 0))
        self._saver_group = None
        self._cached_step = 0
        self._restart_count = env_utils.get_torch_restart_count()
        # Only local rank 0 talks to the saver agent through the event queue.
        if (self._local_rank == 0):
            self._event_queue = SharedQueue(name=(CheckpointSharedObjPrefix.SAVE_STEP_QNAME + str(0)), create=False)
        else:
            self._event_queue = None
        # NOTE(review): get_local_shard_num() is a subclass hook; the base
        # implementation returns None, so this class is unusable unoverridden.
        local_shard_num = self.get_local_shard_num()
        self.local_shard_id = (self._local_rank % local_shard_num)
        lock_name = (CheckpointSharedObjPrefix.SHM_LOCK_NAME + str(self.local_shard_id))
        self._shm_lock = SharedLock(name=lock_name, create=False)
        self._shm_handler = SharedMemoryHandler(self.local_shard_id, host=False)
        self._notify_agent_to_create_saver()
        self._update_saver_config()

    def __del__(self):
        # Best-effort cleanup of the shared-memory handle.
        self.close()

    def close(self):
        """Release the shared-memory handler."""
        self._shm_handler.close()

    def _notify_agent_to_create_saver(self):
        """Ask the agent (via the 'factory' queue) to instantiate the saver.

        Only local rank 0 sends the request, and only on a fresh start; after
        a restart the saver already exists, so just release the shard lock.
        """
        if (self._local_rank != 0):
            return
        if (self._restart_count > 0):
            # The saver process may still hold the lock from before restart.
            self._shm_lock.release()
            return
        queue = SharedQueue(name='factory')
        local_shard_num = self.get_local_shard_num()
        global_shard_num = self.get_global_shard_num()
        clazz = self.get_saver_class()
        class_meta = SaverClassMeta(module_path=clazz.__module__, class_name=clazz.__name__, init_args={'checkpoint_dir': self.checkpoint_dir, 'local_shard_num': local_shard_num, 'global_shard_num': global_shard_num})
        queue.put(class_meta)
        queue.unlink()

    def _update_saver_config(self):
        """Send the current global shard count to the saver (local rank 0 only)."""
        if (self._local_rank == 0):
            global_shard_num = self.get_global_shard_num()
            event: CheckpointEvent = CheckpointEvent(type=CheckpointEventType.UPDATE_SHARD, global_shard_num=global_shard_num)
            if (self._event_queue is None):
                raise ValueError('The event queue cannot be None on local rank 0.')
            self._event_queue.put(event)

    def save_to_memory(self, step, state_dict, path=''):
        """Convenience wrapper: save one state dict at `step` into shared memory."""
        conf = SingleFileCheckpointConfig(step=step, path=path)
        self.save_state_dict_to_memory(state_dict, conf)

    def save_state_dict_to_memory(self, state_dict, conf: CheckpointShardConfig):
        """Write `state_dict` into shared memory under the shard lock.

        Skips the write (on all ranks, via a collective readiness check) when
        any rank fails to acquire the lock, i.e. when the saver is still
        flushing the previous checkpoint to storage.
        """
        if (self._local_rank != self.local_shard_id):
            return
        if (DLROVER_CKPT_CONFIG_KEY in state_dict):
            raise ValueError(f'The state_dict can not have the key {DLROVER_CKPT_CONFIG_KEY}.')
        acquired = self._shm_lock.acquire(blocking=False)
        all_rank_ready = check_all_rank_ready(self._saver_group, acquired)
        if (not all_rank_ready):
            logger.info(f'Rank {self._rank} skips the save the checkpoint in CPU memory since it is saving the latest checkpoint from the CPU memory into the storage.')
            if acquired:
                self._shm_lock.release()
            return
        self._shm_handler.save_state_dict(state_dict, conf)
        if acquired:
            self._shm_lock.release()
        self._cached_step = conf.step
        if dist.is_initialized():
            # Keep ranks in lockstep so no rank races ahead to the next save.
            dist.barrier(group=self._saver_group)

    def get_state_dict_from_memory(self):
        """Load the cached state dict from shared memory.

        Returns an empty dict unless every rank agrees on the cached step and
        that step is positive.
        """
        state_dict = {}
        default_config = CheckpointShardConfig()
        config = self._shm_handler.get_checkpoint_config(default_config)
        passed = verify_all_rank_step_consistent(self._loader_group, config.step)
        if (passed and (config.step > 0)):
            state_dict = self._shm_handler.load_state_dict()
            logger.info(f'Load step {config.step} checkpoint from the shared memory.')
        return state_dict

    def get_saver_class(self):
        """Subclass hook: the saver class the agent should instantiate."""
        pass

    def get_local_shard_num(self):
        """Subclass hook: number of checkpoint shards on this node."""
        pass

    def get_global_shard_num(self):
        """Subclass hook: total number of checkpoint shards across nodes."""
        pass

    def save_to_storage(self, step, state_dict, path):
        """Subclass hook: persist a checkpoint to durable storage."""
        pass

    def load(self, resume_path=''):
        """Subclass hook: load a checkpoint, optionally from `resume_path`."""
        pass
class TestTranslationGPU(unittest.TestCase):
    """GPU integration tests for fp16 / NAT translation training.

    Fix: in the extracted source the `@unittest.skipIf` decorators had been
    unparsed into bare tuple expressions
    `((not torch.cuda.is_available()), 'test requires a GPU')`, which are
    no-ops — the GPU guard was silently dropped.  They are restored here.
    """

    def setUp(self):
        # Silence all log output while a test runs.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_transformer_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--fp16'], run_validation=True)
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'levenshtein_transformer', ['--apply-bert-init', '--early-exit', '6,6,6', '--criterion', 'nat_loss'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def galton_rvs(theta, n_runs=100, n_rows=n_rows, n_nails=n_nails, random_state=None):
    """Sample `n_runs` Galton-board trajectories at parameter `theta`.

    Returns a 4-tuple:
        all_x: list of final positions, one per run.
        all_log_p_xz: array of joint log-probabilities from `trace`.
        all_t_xz: array of joint score values from `d_trace`.
        trajectories: list of [begin, *z, x] paths.

    NOTE(review): `n_nails` is accepted for API symmetry but not used in the
    visible body; the defaults bind module-level globals at definition time.
    """
    rng = check_random_state(random_state)
    all_x = []
    all_log_p_xz = []
    all_t_xz = []
    trajectories = []
    for _ in range(n_runs):
        u = rng.rand(n_rows)
        log_p_xz, (begin, z, x) = trace(theta, u)
        t_xz, _unused = d_trace(theta, u)
        all_x.append(x)
        all_log_p_xz.append(log_p_xz)
        all_t_xz.append(t_xz)
        trajectories.append([begin] + z + [x])
    return all_x, np.array(all_log_p_xz), np.array(all_t_xz), trajectories
class Evaluator(nn.Module):
    """Bundle of image-quality metrics: PSNR, SSIM and LPIPS (AlexNet).

    Fix: the original class docstring was the truncated, unterminated
    literal `'adapted from` (a syntax error); the source URL was lost
    during extraction.
    """

    def __init__(self):
        super().__init__()
        self.lpips = LearnedPerceptualImagePatchSimilarity(net_type='alex')
        # data_range=1: metrics expect inputs spanning a unit value range.
        self.psnr = PeakSignalNoiseRatio(data_range=1)
        self.ssim = StructuralSimilarityIndexMeasure(data_range=1)

    # Fix: the extracted source had the bare expression
    # `_fwd(cast_inputs=torch.float32)` here — a NameError at class creation
    # and almost certainly an unparsed decorator.  Restored as torch's AMP
    # custom_fwd (casts inputs to float32 under autocast).
    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
    def forward(self, rgb, rgb_gt):
        """Return {'psnr', 'ssim', 'lpips'} comparing `rgb` to `rgb_gt`."""
        return {'psnr': self.psnr(rgb, rgb_gt), 'ssim': self.ssim(rgb, rgb_gt), 'lpips': self.lpips(rgb, rgb_gt)}
def report_to_dana(dana_util, item_name, metric_name, device, soc, abi, value, trend):
    """Push one benchmark value to Dana.

    Builds the series name from metric/device/soc/abi/item, resolves its id
    via `dana_util.create_serie_id_lite` (under the module-level TABLE_NAME),
    and reports `value` with the given `trend`.
    """
    series_name = '%s_%s_%s_%s_%s' % (metric_name, device, soc, abi, item_name)
    serie_id = dana_util.create_serie_id_lite(TABLE_NAME, series_name)
    dana_util.report_benchmark(serie_id=serie_id, value=value, trend=trend)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.